query_id             stringlengths   32 to 32
query                stringlengths   9 to 4.01k
positive_passages    listlengths     1 to 1
negative_passages    listlengths     88 to 101
e3bcf07f29dd0a2ef68aa2ee5d2f3ad4
Test that matchers get registered on an object instance, not just on the class
[ { "docid": "4a39c5a1523715880d07644b7dfcff65", "score": "0.7619825", "text": "def test_matcher_on_instance(self):\n\n skill = _TestSkill(None, None)\n self.assertTrue(hasattr(skill.hello_skill, \"matchers\"))", "title": "" } ]
[ { "docid": "3d7a2ce6ee5ae6ba010c1db150b1f754", "score": "0.65179825", "text": "def test_constructors(self, name, obj):\n assert getattr(forge, name) == obj", "title": "" }, { "docid": "3d7a2ce6ee5ae6ba010c1db150b1f754", "score": "0.65179825", "text": "def test_constructors(self, name, obj):\n assert getattr(forge, name) == obj", "title": "" }, { "docid": "057abe5ff00d73ca335a327c20f4a2fe", "score": "0.6283899", "text": "def test_matcher(self):\n\n if self.config.xml_generator == \"gccxml\":\n return\n\n decls = parser.parse([self.header], self.config)\n global_ns = declarations.get_global_namespace(decls)\n criteria = declarations.declaration_matcher(name=\"myClass\")\n _ = declarations.matcher.find(criteria, global_ns)", "title": "" }, { "docid": "d50a49b9659205b7b7e7050a0bbb9079", "score": "0.62650055", "text": "def test(self, obj):\n pass", "title": "" }, { "docid": "6e2d38040acc17eb50d2596747ed5987", "score": "0.6227033", "text": "def __init__(self, *args):\n self.args = args\n self.matchers = []\n for a in args:\n if a is _:\n a = lambda k: True\n elif isinstance(a, basestring):\n a = a.__eq__\n elif isinstance(a, (list, tuple, set)):\n a = (lambda ary: (lambda k: k in ary))(a)\n elif hasattr(a, 'search'):\n a = a.search\n else:\n a = str(a).__eq__\n self.matchers.append(a)", "title": "" }, { "docid": "76cd5f36bdb9258b7f447a85e1b962b0", "score": "0.6125274", "text": "def test_instance():\n AgentCheck()\n # rely on default\n check = AgentCheck()\n assert check.init_config == {}\n assert check.instances == []\n\n # pass dict for 'init_config', a list for 'instances'\n init_config = {'foo': 'bar'}\n instances = [{'bar': 'baz'}]\n check = AgentCheck(init_config=init_config, instances=instances)\n assert check.init_config == {'foo': 'bar'}\n assert check.instances == [{'bar': 'baz'}]", "title": "" }, { "docid": "3b69200b30903ca93221c5991385e525", "score": "0.6120083", "text": "def test_decorator():\n\n @register_instance\n class Manequin:\n \"\"\"Our test object\"\"\"\n\n def __init__(self):\n self.value = 1\n\n global Register\n\n korper = Manequin()\n\n assert Register.Manequin # is object in Register?\n assert Register.Manequin.value == 1 # is it's value correct?\n\n del korper", "title": "" }, { "docid": "d9d7dbf35b44684fa5075365c57b23be", "score": "0.61159885", "text": "def test_instance(self):\n self.assertEqual(True, type(self.Test.defined_associations['something']) is pyperry.association.BelongsTo)", "title": "" }, { "docid": "ed53d13f4ae8d448af1385f2c5847d4b", "score": "0.6099952", "text": "def test_actor_matches_activity(self):", "title": "" }, { "docid": "d2c7c47c35717848048be199c13d2ef3", "score": "0.60987437", "text": "def test_class_started(self, cls):", "title": "" }, { "docid": "50cb09dcf09f13c05f226e3e7f8bc227", "score": "0.60797256", "text": "def test_instance_method(self):\n self.assertEqual(self.Test.update_attributes.im_class, self.Test)", "title": "" }, { "docid": "df4f8fc5c20afa5c32183a43a87e357a", "score": "0.60273933", "text": "def test_class_methods(self):\n\n x = BaseTransformer()\n\n h.test_object_method(obj=x, expected_method=\"fit\", msg=\"fit\")\n\n h.test_object_method(obj=x, expected_method=\"transform\", msg=\"transform\")\n\n h.test_object_method(\n obj=x, expected_method=\"columns_set_or_check\", msg=\"columns_set_or_check\"\n )\n\n h.test_object_method(\n obj=x, expected_method=\"columns_check\", msg=\"columns_check\"\n )", "title": "" }, { "docid": "5dfe110a8d6b8e82ae6c4a3f6e1cfabc", "score": "0.60034794", "text": "def test_is_instance(self):\n 
self.assertIsInstance(self.obj, Rectangle, \"created obj is not an \" +\n \"instance of Rectangle class.\")", "title": "" }, { "docid": "0ed4f705eda428906be71d6411b51e6b", "score": "0.60003585", "text": "def test_new_instance_every_time(self):\n registry = ClassRegistry(attr_name='element')\n registry.register(Wartortle)\n\n self.assertIsNot(registry['water'], registry['water'])", "title": "" }, { "docid": "66abe6ac199973b76533c2c2b4231084", "score": "0.59865934", "text": "def test_create_obj_by_type(self):\n test_obj = mock.MagicMock()\n returned_obj = self.tested_class._create_obj_by_type(test_obj)\n self.assertIs(returned_obj, test_obj)", "title": "" }, { "docid": "31d0e606b0cb1f1417649b082a3c6244", "score": "0.59721553", "text": "def test_instance(self):\n self.assertIsInstance(self.user_1, User)", "title": "" }, { "docid": "5f2f0a8a893fa64c565216762bbfc1bd", "score": "0.59708726", "text": "def mock_instance(self, name=\"\", patches=None, **kwargs_patches):\n return self.mock_class(name)(patches, **kwargs_patches)", "title": "" }, { "docid": "339afedc5e3d30f03d9d670dee7be253", "score": "0.5970366", "text": "def test_user_instances(self):\n obj = User()\n self.assertIsInstance(obj, User)", "title": "" }, { "docid": "8dcd64dad7e151ff3452238ad1a19950", "score": "0.5955174", "text": "def test_instantiation(self):\n rule = Rule()\n self.assertTrue(rule)", "title": "" }, { "docid": "3dd9c88fb75e86dc8fd62f89cd9fc2e1", "score": "0.59517026", "text": "def test_class_ended(self, cls):", "title": "" }, { "docid": "6711b846967125bd7170a7e6294fa185", "score": "0.59413725", "text": "def test_multiple_objects(self):\n\n sio = StringIO()\n m = Mirror(sio, mode='record')\n\n # constructor arguments should have no effect on SomeService.hello()\n i1 = m(SomeService(\"\"), id=\"name1\")\n i2 = m(SomeService(\"\"), id=\"name2\")\n\n r1 = i1.hello(\"other1\")\n r2 = i2.hello(\"other2\")\n\n m.save()\n sio.seek(0)\n\n m2 = Mirror(sio, mode='replay', strict=True)\n i1_ = m2(SomeService(\"name1\"), id=\"name1\")\n i2_ = m2(SomeService(\"name2\"), id=\"name2\")\n\n self.assertEqual(r1, i1_.hello(\"other1\"))\n self.assertEqual(r2, i2_.hello(\"other2\"))\n self.assertEqual(i1_.count, 0)\n self.assertEqual(i2_.count, 0)\n\n self.assertRaises(KeyError, i1_.hello, \"other2\")\n self.assertRaises(KeyError, i2_.hello, \"other1\")", "title": "" }, { "docid": "85344c3b1082346b0a827d8d8b8f592b", "score": "0.59233445", "text": "def test_instantiation(self):\n occurrence = Occurrence()\n self.assertTrue(occurrence)", "title": "" }, { "docid": "75b8b646b832d836a0b5f9577a77357c", "score": "0.5918288", "text": "def test_contains_when_class_init_requires_arguments(self):\n registry = ClassRegistry(attr_name='element')\n\n @registry.register\n class Butterfree(Pokemon):\n element = 'bug'\n\n def __init__(self, name):\n super(Butterfree, self).__init__(name)\n\n self.assertTrue('bug' in registry)", "title": "" }, { "docid": "b0109b4da3b6ef9c05052c544e20c74b", "score": "0.5899984", "text": "def __init__(self, matcher, generate):\n self.matcher = matcher\n self._generate = generate", "title": "" }, { "docid": "1ca8e8afabc6cc888b1ec545246f0426", "score": "0.58798015", "text": "def test_mock_a_class_func():\n print()\n\n myclass = mymodule.MyClass()\n myclass.class_mock_one = Mock()\n myclass.class_mock_one.return_value = 2\n xx = myclass.class_mock_two()\n print(xx)\n myclass.class_mock_one.assert_called_with()", "title": "" }, { "docid": "8ba5b61fc7270a5ee9e43592012d3544", "score": "0.58760124", "text": "def 
test_instantiation(self):\n self.assertIsInstance(self.amenity, Amenity)", "title": "" }, { "docid": "2049635cd54dc49bc113e2df9796823b", "score": "0.587368", "text": "def test_instances(self):\n kb = logic.PropKB()\n kb.tell(logic.expr('ISA(mammal, animal)'))\n kb.tell(logic.expr('ISA(cat, mammal)'))\n kb.tell(logic.expr('INSTANCEOF(petunia, cat)'))\n self.assertAllBindingsEqual(\n kb.ask_all(logic.expr('ISA(petunia, ?x)')),\n [{'?x': 'petunia'}, {'?x': 'cat'}, {'?x': 'mammal'}, {'?x': 'animal'}])\n self.assertAllBindingsEqual(\n kb.ask_all(logic.expr('ISINSTANCE(petunia)')), [{}])\n self.assertAllBindingsEqual(\n kb.ask_all(logic.expr('ISINSTANCE(?x)')), [{'?x': 'petunia'}])\n\n self.assertAllBindingsEqual(\n kb.ask_all(logic.expr('INSTANCEOF(petunia, ?x)')),\n [{'?x': 'cat'}])\n self.assertAllBindingsEqual(\n kb.ask_all(logic.expr('ISINSTANCE(?x)')), [{'?x': 'petunia'}])\n self.assertAllBindingsEqual(kb.ask_all(logic.expr('INSTANCEOF(?x, ?y)')),\n [{'?x': 'petunia', '?y': 'cat'}])\n self.assertAllBindingsEqual(\n kb.ask_all(logic.expr('ISINSTANCE(petunia)')), [{}])", "title": "" }, { "docid": "866aea7010565fc2a03d540af497f008", "score": "0.5858816", "text": "def test_instance(self):\n self.assertEqual(True, type(self.Test.defined_associations['thing']) is pyperry.association.HasOne)", "title": "" }, { "docid": "d9ef0acf7c6995e2c33ff703c4f4c4c3", "score": "0.58586234", "text": "def test_class_eq_method(self, test_instances):\n a, b, _ = test_instances\n\n assert a == b", "title": "" }, { "docid": "fbe8a6fc53c8bffd9f349b3db6fda7df", "score": "0.58145773", "text": "def testInstance(self):\n self.assertTrue(isinstance(self, AppswellUnitTest))", "title": "" }, { "docid": "a69af638e509ea49f2fa7b66e5df8190", "score": "0.5777688", "text": "def test_register_existing_attr(self):\n pass", "title": "" }, { "docid": "93048bcdbc1b968d44bf991dff7e08bc", "score": "0.5759574", "text": "def test_check_instance_explainer_functionality():\n type_error = 'The suppress_warning parameter should be a boolean.'\n inheritance_warning = (\n 'Every explainer object should inherit from fatf.utils.transparency.'\n 'explainers.Explainer abstract class.')\n\n class ClassPlain(object):\n pass\n\n class_plain = ClassPlain()\n\n class ClassInit(fute.Explainer):\n def __init__(self):\n pass\n\n class_init = ClassInit()\n\n class ClassExplainer1(object):\n def explain_instance(self):\n pass # pragma: no cover\n\n class_explainer_1 = ClassExplainer1()\n\n class ClassExplainer2(fute.Explainer):\n def explain_instance(self, x, y):\n pass # pragma: no cover\n\n class_explainer_2 = ClassExplainer2()\n\n class ClassExplainer3(object):\n def explain_instance(self, x):\n pass # pragma: no cover\n\n class_explainer_3 = ClassExplainer3()\n\n class ClassExplainer4(fute.Explainer):\n def explain_instance(self, x, y=3):\n pass # pragma: no cover\n\n class_explainer_4 = ClassExplainer4()\n\n class ClassExplainer5(object):\n def explain_instance(self, x, y=3, z=3):\n pass # pragma: no cover\n\n class_explainer_5 = ClassExplainer5()\n\n with pytest.raises(TypeError) as exinf:\n fute.check_instance_explainer_functionality(class_plain, 'False')\n assert str(exinf.value) == type_error\n with pytest.raises(TypeError) as exinf:\n fute.check_instance_explainer_functionality(ClassPlain, 'True')\n assert str(exinf.value) == type_error\n\n msg = \"The *{}* (explainer) class is missing 'explain_instance' method.\"\n\n with pytest.warns(UserWarning) as warning:\n assert fute.check_instance_explainer_functionality(class_plain,\n False) is False\n 
assert len(warning) == 2\n assert msg.format('ClassPlain') == str(warning[0].message)\n assert inheritance_warning == str(warning[1].message)\n\n with pytest.warns(UserWarning) as warning:\n assert fute.check_instance_explainer_functionality(ClassPlain) is False\n assert len(warning) == 2\n assert msg.format('ClassPlain') == str(warning[0].message)\n assert inheritance_warning == str(warning[1].message)\n\n with pytest.warns(UserWarning) as warning:\n assert fute.check_instance_explainer_functionality(class_plain,\n True) is False\n assert len(warning) == 1\n assert inheritance_warning == str(warning[0].message)\n\n msg = (\"The 'explain_instance' method of the *{}* (explainer) class has \"\n 'incorrect number ({}) of the required parameters. It needs to '\n 'have exactly 1 required parameter(s). Try using optional '\n 'parameters if you require more functionality.')\n\n with pytest.warns(UserWarning) as warning:\n assert fute.check_instance_explainer_functionality(class_init,\n False) is False\n assert len(warning) == 1\n assert msg.format('ClassInit', 0) == str(warning[0].message)\n\n with pytest.warns(UserWarning) as warning:\n assert fute.check_instance_explainer_functionality(ClassInit) is False\n assert len(warning) == 1\n assert msg.format('ClassInit', 0) == str(warning[0].message)\n\n assert fute.check_instance_explainer_functionality(class_init,\n True) is False\n\n #\n\n with pytest.warns(UserWarning) as warning:\n assert fute.check_instance_explainer_functionality(\n class_explainer_1, False) is False\n assert len(warning) == 2\n assert msg.format('ClassExplainer1', 0) == str(warning[0].message)\n assert inheritance_warning == str(warning[1].message)\n\n with pytest.warns(UserWarning) as warning:\n assert fute.check_instance_explainer_functionality(\n ClassExplainer1) is False\n assert len(warning) == 2\n assert msg.format('ClassExplainer1', 0) == str(warning[0].message)\n assert inheritance_warning == str(warning[1].message)\n\n with pytest.warns(UserWarning) as warning:\n assert fute.check_instance_explainer_functionality(\n class_explainer_1, True) is False\n assert len(warning) == 1\n assert inheritance_warning == str(warning[0].message)\n\n #\n\n with pytest.warns(UserWarning) as warning:\n assert fute.check_instance_explainer_functionality(\n class_explainer_2, False) is False\n assert len(warning) == 1\n assert msg.format('ClassExplainer2', 2) == str(warning[0].message)\n\n with pytest.warns(UserWarning) as warning:\n assert fute.check_instance_explainer_functionality(\n class_explainer_2) is False\n assert len(warning) == 1\n assert msg.format('ClassExplainer2', 2) == str(warning[0].message)\n\n assert fute.check_instance_explainer_functionality(class_explainer_2,\n True) is False\n\n #\n #\n\n with pytest.warns(UserWarning) as warning:\n assert fute.check_instance_explainer_functionality(\n class_explainer_3, False) is True\n assert len(warning) == 1\n assert inheritance_warning == str(warning[0].message)\n\n with pytest.warns(UserWarning) as warning:\n assert fute.check_instance_explainer_functionality(\n ClassExplainer3, True) is True\n assert len(warning) == 1\n assert inheritance_warning == str(warning[0].message)\n\n #\n\n assert fute.check_instance_explainer_functionality(class_explainer_4,\n False) is True\n assert fute.check_instance_explainer_functionality(ClassExplainer4,\n True) is True\n\n #\n\n with pytest.warns(UserWarning) as warning:\n assert fute.check_instance_explainer_functionality(\n class_explainer_5, False) is True\n assert len(warning) == 1\n 
assert inheritance_warning == str(warning[0].message)\n\n with pytest.warns(UserWarning) as warning:\n assert fute.check_instance_explainer_functionality(\n ClassExplainer5, True) is True\n assert len(warning) == 1\n assert inheritance_warning == str(warning[0].message)", "title": "" }, { "docid": "6d8cb5161b02830ebd4530805cf4a3df", "score": "0.57537407", "text": "def test_instance(self):\n self.assertIsInstance(self.test1, BaseModel)", "title": "" }, { "docid": "880cebddac9afdb06e7e211e088a7245", "score": "0.5744803", "text": "def test_objects(obj):\n\n @type_checked\n def _run_test(something:obj):\n return something\n\n orig = obj()\n tested = _run_test(orig)\n assert orig == tested\n # py.test will alse ensure the ID doesn't change... but, being explict\n assert id(orig) == id(tested)", "title": "" }, { "docid": "4011d6f9ab2ca89e6bfa55cbe7f6a545", "score": "0.5738597", "text": "def test_require_in_instance_silently_succeeds_for_available_tests(self, test_generator):\n # pylint: disable=function-redefined\n\n with self.subTest(\"direct decorator\"):\n feature = test_generator()\n with mock_availability_test(feature) as check:\n check.assert_not_called()\n\n @feature.require_in_instance\n class Dummy:\n \"\"\"Dummy class.\"\"\"\n\n check.assert_not_called()\n Dummy()\n check.assert_called_once()\n\n with self.subTest(\"named decorator\"):\n feature = test_generator()\n with mock_availability_test(feature) as check:\n check.assert_not_called()\n\n @feature.require_in_instance(\"sentinel name\")\n class Dummy:\n \"\"\"Dummy class.\"\"\"\n\n check.assert_not_called()\n Dummy()\n check.assert_called_once()", "title": "" }, { "docid": "368536579dc9404289b875a75135baf2", "score": "0.5723847", "text": "def test_init(attributes):\n instance = Participant(**attributes)\n for attr, value in attributes.items():\n assert getattr(instance, attr) == value", "title": "" }, { "docid": "3f36d4982b0adb08de7653a3e0b31b93", "score": "0.57007056", "text": "def test__call__(self):\n mock = Mock()\n factory = Factory(mock)\n factory()\n mock.assert_called_once_with()", "title": "" }, { "docid": "7b383505c5afdec2bd294bf85ee07492", "score": "0.5691681", "text": "def test_instance():\n assert isinstance(lex.lex().build(), lex._lexer)", "title": "" }, { "docid": "60900d678bf5201ea63fd02aa2462af1", "score": "0.5670287", "text": "def test_init(self):\n t = Thing(store=self.store)\n u = User(store=self.store)\n a = UserActor(t, u)\n self.assertEqual(a.thing, t)\n self.assertEqual(a.user, u)\n self.assertEqual(a.store, self.store)\n self.assertEqual(IActor(t), a)", "title": "" }, { "docid": "0ab38bcccbe9ed599ba9438244ee950b", "score": "0.56581503", "text": "def verify_is_instance(self, obj, cls, msg=\"\"):\r\n try:\r\n self.assert_is_instance(obj, cls, msg)\r\n except AssertionError, e:\r\n if msg:\r\n m = \"%s:\\n%s\" % (msg, str(e))\r\n else:\r\n m = str(e)\r\n self.verification_erorrs.append(m)", "title": "" }, { "docid": "19e4a06c62b7339b8d26475644e80582", "score": "0.56577927", "text": "def test_instance_method():\n assert hasattr(ResRNNBlock, '__init__')\n assert inspect.signature(ResRNNBlock.__init__) == Signature(\n parameters=[\n Parameter(\n name='self',\n kind=Parameter.POSITIONAL_OR_KEYWORD,\n default=Parameter.empty,\n ),\n Parameter(\n name='d_hid',\n kind=Parameter.KEYWORD_ONLY,\n annotation=int,\n default=Parameter.empty,\n ),\n Parameter(\n name='n_hid_lyr',\n kind=Parameter.KEYWORD_ONLY,\n annotation=int,\n default=Parameter.empty,\n ),\n Parameter(\n name='p_hid',\n kind=Parameter.KEYWORD_ONLY,\n 
annotation=float,\n default=Parameter.empty,\n ),\n Parameter(\n name='kwargs',\n kind=Parameter.VAR_KEYWORD,\n annotation=Optional[Dict],\n ),\n ],\n return_annotation=Signature.empty,\n )\n\n assert hasattr(ResRNNBlock, 'forward')\n assert inspect.signature(ResRNNBlock.forward) == Signature(\n parameters=[\n Parameter(\n name='self',\n kind=Parameter.POSITIONAL_OR_KEYWORD,\n default=Parameter.empty,\n ),\n Parameter(\n name='batch_tk_reps',\n kind=Parameter.POSITIONAL_OR_KEYWORD,\n annotation=torch.Tensor,\n default=Parameter.empty,\n ),\n ],\n return_annotation=torch.Tensor,\n )\n\n assert hasattr(ResRNNModel, '__init__')\n assert inspect.signature(ResRNNModel.__init__) == Signature(\n parameters=[\n Parameter(\n name='self',\n kind=Parameter.POSITIONAL_OR_KEYWORD,\n default=Parameter.empty,\n ),\n Parameter(\n name='d_emb',\n kind=Parameter.KEYWORD_ONLY,\n annotation=int,\n default=Parameter.empty,\n ),\n Parameter(\n name='d_hid',\n kind=Parameter.KEYWORD_ONLY,\n annotation=int,\n default=Parameter.empty,\n ),\n Parameter(\n name='n_hid_lyr',\n kind=Parameter.KEYWORD_ONLY,\n annotation=int,\n default=Parameter.empty,\n ),\n Parameter(\n name='n_post_hid_lyr',\n kind=Parameter.KEYWORD_ONLY,\n annotation=int,\n default=Parameter.empty,\n ),\n Parameter(\n name='n_pre_hid_lyr',\n kind=Parameter.KEYWORD_ONLY,\n annotation=int,\n default=Parameter.empty,\n ),\n Parameter(\n name='p_emb',\n kind=Parameter.KEYWORD_ONLY,\n annotation=float,\n default=Parameter.empty,\n ),\n Parameter(\n name='p_hid',\n kind=Parameter.KEYWORD_ONLY,\n annotation=float,\n default=Parameter.empty,\n ),\n Parameter(\n name='tknzr',\n kind=Parameter.KEYWORD_ONLY,\n annotation=BaseTknzr,\n default=Parameter.empty,\n ),\n Parameter(\n name='kwargs',\n kind=Parameter.VAR_KEYWORD,\n annotation=Optional[Dict],\n ),\n ],\n return_annotation=Signature.empty,\n )", "title": "" }, { "docid": "f9c82fcce3672cf836716181212cb178", "score": "0.56524", "text": "def _test(self):", "title": "" }, { "docid": "f9c82fcce3672cf836716181212cb178", "score": "0.56524", "text": "def _test(self):", "title": "" }, { "docid": "f9c82fcce3672cf836716181212cb178", "score": "0.56524", "text": "def _test(self):", "title": "" }, { "docid": "f9c82fcce3672cf836716181212cb178", "score": "0.56524", "text": "def _test(self):", "title": "" }, { "docid": "f9c82fcce3672cf836716181212cb178", "score": "0.56524", "text": "def _test(self):", "title": "" }, { "docid": "680e9157c05f2804ce3bf2f308761d1f", "score": "0.5649794", "text": "def test_instance(self):\n self.assertIsInstance(self.new_review, Review)", "title": "" }, { "docid": "e346b101000f22262d00f206f5440afe", "score": "0.564759", "text": "def test_constructor_params(self):\n registry = ClassRegistry(attr_name='element')\n registry.register(Bulbasaur)\n\n # Goofus uses positional arguments, which are magical and\n # make his code more difficult to read.\n goofus = registry.get('grass', 'goofus')\n\n # Gallant uses keyword arguments, producing self-documenting\n # code and being courteous to his fellow developers.\n # He still names his pokémon after himself, though. 
Narcissist.\n gallant = registry.get('grass', name='gallant')\n\n self.assertIsInstance(goofus, Bulbasaur)\n self.assertEqual(goofus.name, 'goofus')\n\n self.assertIsInstance(gallant, Bulbasaur)\n self.assertEqual(gallant.name, 'gallant')", "title": "" }, { "docid": "1f377d09289e5e6d4ef4347d466d6aa5", "score": "0.5641551", "text": "def test_custom_functions_and_attributes(self):\n book_data = {\n 'title': 'Python for Dummies',\n 'author': 'http://localhost/api/author/1'\n }\n response_book = StringResponse(\n {'Status': 200},\n json.dumps(book_data)\n )\n self.client.responses.append(response_book)\n\n class Book(RestObject):\n some_class_attribute = 'foobar'\n \n def __init__(self, *args, **kwargs):\n self.some_instance_attribute_before_init = 'foobar'\n super(Book, self).__init__(*args, **kwargs)\n self.some_instance_attribute_after_init = 'foobar'\n \n def get_title(self):\n return self['title'].title()\n \n class Meta:\n list = (r'^book/$', 'book_set')\n item = r'^book/(?P<id>\\d)$'\n \n self.assertTrue(hasattr(Book, 'get_title'))\n self.assertFalse(hasattr(RestObject, 'get_title'))\n\n self.assertTrue(hasattr(Book, 'some_class_attribute'))\n self.assertEqual(Book.some_class_attribute, 'foobar')\n self.assertFalse(hasattr(Book, 'some_instance_attribute_before_init'))\n self.assertFalse(hasattr(Book, 'some_instance_attribute_after_init'))\n \n book = Book.objects.get(client=self.client, id=1)\n self.assertEqual(book['title'], book_data['title'])\n self.assertTrue(hasattr(book, 'get_title'))\n self.assertEqual(book.get_title(), book_data['title'].title())\n\n self.assertTrue(hasattr(book, 'some_class_attribute'))\n self.assertEqual(book.some_class_attribute, 'foobar')\n self.assertTrue(hasattr(book, 'some_instance_attribute_before_init'))\n self.assertEqual(book.some_instance_attribute_before_init, 'foobar')\n self.assertTrue(hasattr(book, 'some_instance_attribute_after_init'))\n self.assertEqual(book.some_instance_attribute_after_init, 'foobar')", "title": "" }, { "docid": "14696be92ce242bd17a6ec49317842ed", "score": "0.5641384", "text": "def test_init(self):\n some_key_value1 = mock.MagicMock()\n some_key_value2 = \"some_str\"\n some_nested_value1 = mock.MagicMock()\n some_nested_value2 = [100, \"test_strt\", mock.MagicMock()]\n test_data = {\n \"some_key1\": some_key_value1,\n \"some_key2\": some_key_value2,\n \"some_key3\": {\n \"some_nested_key1\": some_nested_value1,\n \"some_nested_key2\": some_nested_value2\n }\n }\n data_holder = self.tested_class(test_data)\n\n self.assertIsInstance(data_holder, self.tested_class)\n self.assertEqual(data_holder.some_key1, some_key_value1)\n self.assertEqual(data_holder.some_key2, some_key_value2)\n self.assertIsInstance(data_holder.some_key3, self.tested_class)\n self.assertEqual(data_holder.some_key3.some_nested_key1, some_nested_value1)\n self.assertEqual(data_holder.some_key3.some_nested_key2, some_nested_value2)", "title": "" }, { "docid": "e2655bd474cda19bcab730aae50c9fe8", "score": "0.5638702", "text": "def test_instance_method(self):\n self.assertEqual(self.Test.save.im_class, self.Test)", "title": "" }, { "docid": "95714439d81d26dd0c69f96845a17cd3", "score": "0.5634926", "text": "def mockup(cls):\n pass", "title": "" }, { "docid": "6e3367a90bacdb2690801ec9c2f7d67d", "score": "0.56262636", "text": "def test_already_registered_002(self):\n\n class MyChecker(object):\n \"\"\"Do nothing.\"\"\"\n\n @staticmethod\n def get_long_code():\n \"\"\"Do nothing.\"\"\"\n return \"something\"\n\n @staticmethod\n def get_order():\n \"\"\"Do 
nothing.\"\"\"\n return 0\n\n @staticmethod\n def run(_, __):\n \"\"\"Do nothing.\"\"\"\n return []\n\n class MyContext(object):\n \"\"\"Do nothing.\"\"\"\n\n @staticmethod\n def get_order():\n \"\"\"Do nothing.\"\"\"\n return 0\n\n @staticmethod\n def run(_, __):\n \"\"\"Do nothing.\"\"\"\n return\n\n registry.register_checker(MyChecker)\n\n with self.assertRaises(EnvironmentError):\n registry.register_checker(MyChecker)\n\n registry.register_context(MyContext)\n\n with self.assertRaises(EnvironmentError):\n registry.register_context(MyContext)", "title": "" }, { "docid": "f8f5d87fe2d8241ac7b82dc0bad7aaf7", "score": "0.5619638", "text": "def test_register_function(self):\n registry = ClassRegistry()\n\n @registry.register('fire')\n def pokemon_factory(name=None):\n return Charmeleon(name=name)\n\n poke = registry.get('fire', name='trogdor')\n\n self.assertIsInstance(poke, Charmeleon)\n self.assertEqual(poke.name, 'trogdor')", "title": "" }, { "docid": "437c97d442c7b0856d38747992b2c2cc", "score": "0.5613283", "text": "def test_required_methods(self):", "title": "" }, { "docid": "93fbf0d7aecc556f17819771b3736bee", "score": "0.5610397", "text": "def test_instantiate_valid_target(self):\n # create test configs\n test_configs = [\n {\"_target_\": \"collections.deque\"},\n {\"_target_\": \"collections.UserString\", \"seq\": \"test string\"}\n ]\n\n # create truth objects\n truth_objs = [deque(), UserString(\"test string\")]\n\n # check that instantiate returns truth object for each config\n for truth_obj, test_config in zip(truth_objs, test_configs):\n self.assertEqual(truth_obj, instantiate(test_config))", "title": "" }, { "docid": "4c6c27db97c186411b2569aa712b2789", "score": "0.55960464", "text": "def test_constructor(self):\n pass", "title": "" }, { "docid": "a2fde745b06231d8ddeab29b6e2a85e4", "score": "0.5590084", "text": "def test_require_in_instance_raises_for_unavailable_tests(self, test_generator):\n # pylint: disable=function-redefined\n\n with self.subTest(\"direct decorator\"):\n feature = test_generator()\n with mock_availability_test(feature) as check:\n check.assert_not_called()\n\n @feature.require_in_instance\n class Dummy:\n \"\"\"Dummy class.\"\"\"\n\n check.assert_not_called()\n with self.assertRaisesRegex(MissingOptionalLibraryError, \"Dummy\"):\n Dummy()\n check.assert_called_once()\n with self.assertRaisesRegex(MissingOptionalLibraryError, \"Dummy\"):\n Dummy()\n check.assert_called_once()\n\n with self.subTest(\"named decorator\"):\n feature = test_generator()\n with mock_availability_test(feature) as check:\n check.assert_not_called()\n\n @feature.require_in_instance(\"sentinel message\")\n class Dummy:\n \"\"\"Dummy class.\"\"\"\n\n check.assert_not_called()\n with self.assertRaisesRegex(MissingOptionalLibraryError, \"sentinel message\"):\n Dummy()\n check.assert_called_once()\n with self.assertRaisesRegex(MissingOptionalLibraryError, \"sentinel message\"):\n Dummy()\n check.assert_called_once()", "title": "" }, { "docid": "e0441c3149bbde380eea47190e40a8c3", "score": "0.5589737", "text": "def testServiceMapping_ByClass(self):\n self.DoMappingTest({'/my-service': MyService})", "title": "" }, { "docid": "2d072a5633c8a95ea511dfd745aa4901", "score": "0.556841", "text": "def test_instance(self):\n self.assertIsInstance(self.my_city, City)", "title": "" }, { "docid": "e23f2c389dec812c5f5511b98c05fc23", "score": "0.5568234", "text": "def testMatch(self):\n\n self.inv._literals_filter['fruit'] = ['pear', 'apple']\n self.inv._literals_filter['xfruit'] = None\n 
self.inv._compiled_filter['shape'] = None\n self.inv._compiled_filter['xshape'] = None\n self.assertTrue(self.inv._Match('fruit', 'apple'))\n\n self.inv._literals_filter['fruit'] = None\n self.inv._compiled_filter['fruit'] = [re.compile('^apple$')]\n self.assertTrue(self.inv._Match('fruit', 'apple'))", "title": "" }, { "docid": "98725f5674637f2c96cd03d0bd99f64f", "score": "0.55676484", "text": "def test_instance(self):\n self.assertEqual(True, type(self.Test.defined_associations['things']) is pyperry.association.HasMany)", "title": "" }, { "docid": "7559bfce48a5a79a2528adff1e9e539f", "score": "0.5557296", "text": "def test_instanceBuilder(self):\n instance = igwt.IInstanceFactory(ChangeType()).buildInstance()\n self.assertTrue(instance is not None)\n self.assertTrue(isinstance(instance, Change))", "title": "" }, { "docid": "bb6f3ae87dc01772eef3d4804713962c", "score": "0.5557265", "text": "def test_car(self):\n try:\n self.test = oop1.Car()\n self.assertIsInstance(self.test, oop1.Car)\n print(\"\\nPASS : Car Class Exists\\n\")\n except NameError as e:\n print(e)", "title": "" }, { "docid": "2319d5a30711d4bcb9bb034d49c93372", "score": "0.5556577", "text": "def test_verify(self):\n self.testObject.verify()", "title": "" }, { "docid": "9fa2ff73df9933a6c14ca9ba3e87ede9", "score": "0.55559796", "text": "def test_attr():\n o = SampleProxy()\n for k, _ in inspect.getmembers(SampleClass()):\n assert(hasattr(o, k))", "title": "" }, { "docid": "e317328e5055886d40f82f0f4aad1ce5", "score": "0.5550482", "text": "def test_object_equal(self):\n test = self.Test({ 'id': 1, 'name': 'foo' })\n self.assertEqual(test, test)", "title": "" }, { "docid": "0ea63a93ef8e4705ae646a04054542aa", "score": "0.554605", "text": "def test_register_detect_keys(self):\n registry = ClassRegistry(attr_name='element')\n\n @registry.register\n class Charizard(Pokemon):\n element = 'fire'\n\n @registry.register\n class Blastoise(Pokemon):\n element = 'water'\n\n # You can still override the registry key if you want.\n @registry.register('poison')\n class Venusaur(Pokemon):\n element = 'grass'\n\n self.assertIsInstance(registry['fire'], Charizard)\n self.assertIsInstance(registry['water'], Blastoise)\n self.assertIsInstance(registry['poison'], Venusaur)\n\n # We overrode the registry key for this class.\n with self.assertRaises(RegistryKeyError):\n # noinspection PyStatementEffect\n registry['grass']", "title": "" }, { "docid": "1198d277128143f41d1220b0b9ebe8db", "score": "0.55414677", "text": "def test_multiple_instances(self):\n class Node:\n my_metric = Metric(Int64)\n self.node0 = Node()\n self.node1 = Node()\n\n self.node0.my_metric = 5\n self.node1.my_metric = 7\n\n self.assertEqual(self.node0.my_metric, 5)\n self.assertEqual(self.node1.my_metric, 7)", "title": "" }, { "docid": "bfcc9b6590de8cdaf3b2015ec0559461", "score": "0.5535653", "text": "def test_instances(self):\n a = CommonMixin()\n a.__dict__.update(**self.data)\n\n b = CommonMixin()\n b.__dict__.update(**self.data)\n\n c = ExampleClass()\n c.__dict__.update(**self.data)\n\n return a, b, c", "title": "" }, { "docid": "608d71f7af0fd504cb7a527bdb16f50e", "score": "0.55308115", "text": "def test_object_register():\n dummy = \"Stub\"\n value = \" \"\n\n o_reg = ObjectRegister()\n\n o_reg[dummy] = value\n\n assert dummy in o_reg # is dummy in register?\n\n assert o_reg.Stub == value # is value correct?\n\n o_reg.Stub = 1 # can I change the value?\n assert o_reg.Stub == 1\n\n # does the setter and getter send exceptions on bad key?\n import pytest\n\n with 
pytest.raises(KeyError):\n o_reg.Exception\n\n register = make_register(o_reg)\n\n @register\n def number(i):\n return i\n\n assert o_reg.number(2) == 2", "title": "" }, { "docid": "529ada71706394c1773e0627434a45cd", "score": "0.55243117", "text": "def test_init(self):\n user = object()\n used = object()\n a = Use(user, used)\n self.assertEqual(a.user, user)\n self.assertEqual(a.used, used)", "title": "" }, { "docid": "bf700f01538741355eb7d5f4bc318246", "score": "0.55142367", "text": "def test_instance_method(self):\n self.assertEqual(self.Test.delete.im_class, self.Test)", "title": "" }, { "docid": "7eed36af65d44d60a5fc67bf8afba739", "score": "0.55065536", "text": "def test_all_no_class(self):", "title": "" }, { "docid": "7eed36af65d44d60a5fc67bf8afba739", "score": "0.55065536", "text": "def test_all_no_class(self):", "title": "" }, { "docid": "ff7710678aa38df2a153faf780dd5d5e", "score": "0.5501672", "text": "def test_class_method(self):\n self.assertEqual(self.Test.has_one.im_self.__name__, 'Test')", "title": "" }, { "docid": "eab3e779f0e47c7f978d1d8c65ec0ef5", "score": "0.5493624", "text": "def test_instantiates_badge(self):\n\n attrs = self.get_sample_attrs()\n self.assertIsInstance(Badge(attrs), Badge)", "title": "" }, { "docid": "e8f5df601b88bc184d2fb8a9859d3df5", "score": "0.54928", "text": "def assert_is_instance(self, obj, cls, msg=\"\"):\r\n assert isinstance(obj, cls)", "title": "" }, { "docid": "160eec4a66e025f3081547fec2230880", "score": "0.5490804", "text": "def test_instantiation(self):\n self.assertIsInstance(self.testing, Place)\n self.assertIsInstance(self.testing2, Place)\n self.assertTrue(hasattr(self.testing, \"name\"))\n self.assertTrue(hasattr(self.testing2, \"user_id\"))\n self.assertTrue(hasattr(self.testing, \"city_id\"))\n self.assertTrue(hasattr(self.testing2, \"latitude\"))\n self.assertTrue(hasattr(self.testing, \"latitude\"))\n self.assertTrue(self.testing.id != self.testing2.id)", "title": "" }, { "docid": "e5a4821dfa61d1d9397a7bfda8623a8a", "score": "0.5485584", "text": "def test_route_subclass(self):\n with mock.patch.object(RouteSubclass, 'handler_class') as handler:\n errors = RouteSubclass.check()\n\n # Ensure the handler's check method is called...\n handler.check.assert_called_once_with(RouteSubclass)\n # ... 
and that the return value is passed back through.\n self.assertEqual(errors, handler.check(RouteSubclass))", "title": "" }, { "docid": "efbb678fe9115a3f4285edef9e25e505", "score": "0.548504", "text": "def test__init__(self):\n mocked_reconstructor = Mock()\n mocked_reconstructor.reconstruct.return_value = 'mocked'\n db_response = {'Item': {'test': True}}\n resp = GetResponse(db_response, mocked_reconstructor)\n assert resp.item == 'mocked'\n mocked_reconstructor.reconstruct.assert_called_with(db_response['Item'])", "title": "" }, { "docid": "8a5772d64347ea90ae459b1c4b9db52e", "score": "0.5477324", "text": "def __call__(self, *args, **kwargs):\r\n return Assert(self.obj(*args, **kwargs))", "title": "" }, { "docid": "501194f6f23beb8248e13399a553df00", "score": "0.5475063", "text": "def test_create(self):\n filter = Bleach()\n self.assertIsInstance(filter, Bleach)", "title": "" }, { "docid": "cd20f2e7bfa5f977ff36884117471272", "score": "0.54702485", "text": "def test_init(self, config: HandlerConfig) -> None:\n # Arrange and act done by fixture.\n # Assert.\n # It should have registered the cast.\n config.mock_from_unit.register_cast.assert_called_once_with(\n config.mock_to_unit.__class__, mock.ANY\n )", "title": "" }, { "docid": "f9e061acce59ee7607f3938acb2457a8", "score": "0.54675776", "text": "def test_example(self):\n self.assertEqual(self.example.get_example(), True)", "title": "" }, { "docid": "2ccec63b0d98851934bd6c9de4b4e043", "score": "0.54642093", "text": "def test_check_estimator_passed(estimator_class):\n estimator_instance = estimator_class.create_test_instance()\n\n result_class = check_estimator(estimator_class, verbose=False)\n assert all(x == \"PASSED\" for x in result_class.values())\n\n result_instance = check_estimator(estimator_instance, verbose=False)\n assert all(x == \"PASSED\" for x in result_instance.values())", "title": "" }, { "docid": "c75b40fafea169d2f97160d34fddf3ae", "score": "0.5463649", "text": "def test_init(self):\n\n class TestResource(BaseResource):\n\n name = 'test_resource'\n\n def process(self, message):\n pass\n\n api = Mock()\n api.endpoint = 'http://an_endpoint'\n route = '/a_route'\n TestResource.init(api, route)\n\n # validate the attribute values of the class\n self.assertEqual(api, TestResource.api)\n self.assertEqual(route, TestResource.route)\n self.assertEqual(api.mongodb, TestResource.mongodb)\n self.assertEqual(api.conf, TestResource.conf)\n self.assertEqual('http://an_endpoint/a_route', TestResource.endpoint)\n self.assertEqual('test_resource', TestResource.logger.name)", "title": "" }, { "docid": "529422797d133765d57effc83035df3b", "score": "0.54555005", "text": "def match(self, data_instance: Dict[str, Any]) -> bool:", "title": "" }, { "docid": "6b024b672d76304e528e991bae7ca9d6", "score": "0.5450914", "text": "def test_instance_Review(self):\n self.assertIsInstance(self.review, Review)", "title": "" }, { "docid": "a9b71318057f48924724985f7c452f6f", "score": "0.54460293", "text": "def test_lookup_parameter_handler_object(self, force_field):\n bonds = force_field[\"Bonds\"]\n with pytest.raises(NotImplementedError):\n force_field[bonds]\n with pytest.raises(NotImplementedError):\n force_field[type(bonds)]", "title": "" }, { "docid": "7f90dd06a0f1d5b7e26c55fa6b33fc28", "score": "0.5445616", "text": "def testMatch2(self):\n self.inv._literals_filter['fruit'] = ['pear', 'apple']\n self.assertFalse(self.inv._Match('fruit', []))\n self.assertFalse(self.inv._Match('fruit', ['grape', 'orange']))\n self.assertTrue(self.inv._Match('fruit', 
['grape', 'apple']))\n self.assertTrue(self.inv._Match('fruit', [['grape'], ['orange', 'apple']]))", "title": "" }, { "docid": "27e3f898592eafe794ffbd41c9553d7c", "score": "0.54430187", "text": "def test_add_model_init(model_class):\n pdst = patch_deduce_swagger_type\n pr = patch_registry\n ppd = patch_parse_doc\n pgas = patch_getargspec\n pha = patch_hasattr\n\n with pdst() as mock_deduce_swagger_type:\n with patch_dir([\"__init__\"]), pr(), ppd(), pgas() as mock_getargspec:\n with pha() as mock_hasattr:\n swagger.add_model(model_class)\n mock_getargspec.assert_called_once_with(model_class.__init__)\n mock_hasattr.assert_not_called()\n mock_deduce_swagger_type.assert_not_called()", "title": "" }, { "docid": "f9db2654c2835f1bcf38f8050878f4aa", "score": "0.54404014", "text": "def matches(self, target):\n raise NotImplementedError()", "title": "" }, { "docid": "e5c7ef56200de036f6c539d029b181b9", "score": "0.5440047", "text": "def test_object(self):\n\n configurable = Configurable(\n conf=configuration(category('', Parameter('test', value=True)))\n )\n\n class Test(object):\n pass\n\n test = Test()\n\n configurable(test)\n configurable.applyconfiguration(targets=[test])\n\n self.assertTrue(test.test)\n\n test.test = False\n\n applyconfiguration(targets=[test])\n\n self.assertTrue(test.test)", "title": "" }, { "docid": "0286dfc44b540296582e51f7f94fad65", "score": "0.54238844", "text": "def __contains__(self, *args, **kwargs):\n ...", "title": "" }, { "docid": "72a1905847cde0cd2f5d2fb715a82777", "score": "0.5421039", "text": "def test_is_instance(self):\n self.assertTrue(type(self.new) is User)", "title": "" }, { "docid": "7c131f878d0b3d9ff0c8a843a3bb8c71", "score": "0.5419781", "text": "def test_instance(self):\n self.assertIsInstance(self.new_project, Project)", "title": "" }, { "docid": "51816e7b7367f9525102ad1b25b81d18", "score": "0.5416517", "text": "def match(self, other):", "title": "" }, { "docid": "bf5bf02af2d236dc371fea6b3eeb06ea", "score": "0.5406934", "text": "def test_class_method(self):\n self.assertEqual(pyperry.Base.add_processor.im_self.__name__, 'Base')", "title": "" }, { "docid": "7f22aaf4714b675cfdb98b17cba2ef73", "score": "0.54036665", "text": "def test_raises_when_called_from_instance(self):\n test = self.Test()\n self.assertRaises(Exception, getattr, test, 'foo')", "title": "" } ]
09455bd871518c4a53af2f9f48a37509
Create a dataset from a set of Pandas dataframes.
[ { "docid": "5f505662b44bbcd1d99bc426c0522263", "score": "0.7133669", "text": "def from_pandas(dfs: List[ObjectRef[\"pandas.DataFrame\"]],\n parallelism: int = 200) -> Dataset[ArrowRow]:\n import pyarrow as pa\n\n @ray.remote(num_returns=2)\n def df_to_block(df: \"pandas.DataFrame\"):\n block = ArrowBlock(pa.table(df))\n return block, block.get_metadata(input_files=None)\n\n res = [df_to_block.remote(df) for df in dfs]\n blocks, metadata = zip(*res)\n return Dataset(BlockList(blocks, ray.get(list(metadata))))", "title": "" } ]
[ { "docid": "e40768fbaba2d023630cbcdf0a2fbf9e", "score": "0.6521647", "text": "def create_datasets():\n df1 = apy.find_datasets(\"ENSEMBL_MART_ENSEMBL\")\n df1.to_pickle(os.path.join(DATADIR, \"datasets_ensembl.pkl\"))\n df2 = apy.find_datasets(\"ENSEMBL_MART_MOUSE\")\n df2.to_pickle(os.path.join(DATADIR, \"datasets_mouse.pkl\"))\n df3 = apy.find_datasets(\"ENSEMBL_MART_SEQUENCE\")\n df3.to_pickle(os.path.join(DATADIR, \"datasets_sequence.pkl\"))\n df4 = apy.find_datasets(\"ENSEMBL_MART_ONTOLOGY\")\n df4.to_pickle(os.path.join(DATADIR, \"datasets_ontology.pkl\"))\n df5 = apy.find_datasets(\"ENSEMBL_MART_GENOMIC\")\n df5.to_pickle(os.path.join(DATADIR, \"datasets_genomic.pkl\"))\n df6 = apy.find_datasets(\"ENSEMBL_MART_SNP\")\n df6.to_pickle(os.path.join(DATADIR, \"datasets_snp.pkl\"))\n df7 = apy.find_datasets(\"ENSEMBL_MART_FUNCGEN\")\n df7.to_pickle(os.path.join(DATADIR, \"datasets_funcgen.pkl\"))", "title": "" }, { "docid": "19b81883f001cf36aa9c53bc95253350", "score": "0.64829236", "text": "def from_dataframe(df: pd.DataFrame) -> xr.Dataset:\n return xr.Dataset.from_dataframe(df)", "title": "" }, { "docid": "e60fc3dc192e063a95d2352363a4058e", "score": "0.64523005", "text": "def load(ds_dicts, session=Session()):\n data_sets = []\n for data_set in ds_dicts:\n currency_pair = session.query(CurrencyPair).filter_by(\n instrument=data_set[\"instrument\"]).first()\n\n feature_set = session.query(FeatureSet).filter_by(\n name=data_set[\"feature_set\"]).first()\n\n existing = session.query(DataSet).filter_by(\n currency_pair=currency_pair, feature_set=feature_set).first()\n\n if existing is not None:\n data_sets.append(existing)\n else:\n data_set = DataSet(\n currency_pair=currency_pair, feature_set=feature_set)\n data_sets.append(data_set)\n session.add(data_set)\n\n session.commit()\n return data_sets", "title": "" }, { "docid": "708e03f953fb5934c1f613d59dafa73b", "score": "0.64013773", "text": "def dataset_load(set_name):\n data_file = 'mldata/%s.data' % set_name\n\n if set_name == 'laozone':\n # This dataset is already in good form for importing\n df = pd.read_csv(data_file)\n elif set_name == 'wpbc':\n field_names = ['id', 'outcome', 'time', \n 'radiusA', 'textureA', 'perimeterA', 'areaA', 'smoothnessA',\n 'compactnessA', 'concavityA', 'concaveptsA', 'symmetryA', 'fdimA',\n 'radiusB', 'textureB', 'perimeterB', 'areaB', 'smoothnessB',\n 'compactnessB', 'concavityB', 'concaveptsB', 'symmetryB', 'fdimB',\n 'radiusC', 'textureC', 'perimeterC', 'areaC', 'smoothnessC',\n 'compactnessC', 'concavityC', 'concaveptsC', 'symmetryC', 'fdimC',\n 'tumorsize', 'lymphstat']\n df = pd.read_csv(data_file, header=-1, names=field_names)\n elif set_name == 'saheart':\n df = pd.read_csv(data_file)\n del df['row.names']\n elif set_name == 'prostate':\n df = pd.read_csv(data_file, sep='\\t')\n del df['row']\n elif set_name == 'bone':\n df = pd.read_csv(data_file, sep='\\t')\n else:\n df = pd.DataFrame()\n\n return df", "title": "" }, { "docid": "06f8b912bf47dcf08a682bc702c24bac", "score": "0.62626445", "text": "def load_dataframes():\n movies_df = pd.read_csv(RAW_MOVIES_PATH, sep='::', header=None, encoding=\"ISO-8859-1\", engine='python', skiprows=[0])\n movies_df.columns = [\"movieId\", \"movie title\", \"genres\"]\n movies_df = movies_df.join(movies_df[\"genres\"].str.get_dummies('|')).drop('genres', axis=1)\n movies_df = movies_df.astype({'movieId': int})\n movies_df = movies_df.set_index('movieId')\n\n ratings_df = pd.read_csv(RAW_RATINGS_PATH, sep='::', header=None, engine='python', skiprows=[0])\n 
ratings_df.columns = ['userId', 'movieId', 'rating', 'timestamp']\n ratings_df = ratings_df.astype({'userId': int, 'movieId': int})\n ratings_df.rating = ratings_df.rating / ratings_df.rating.max()\n ratings_df = ratings_df.set_index(['userId', 'movieId'])\n\n return movies_df, ratings_df", "title": "" }, { "docid": "ff9d08f678bc2aba96b7a2b8a4feb835", "score": "0.62054026", "text": "def load_dataframes(self, **pandas_args) -> Union[DataFrame, List[DataFrame]]:\n if self.source == DATASOURCE.ONLINE:\n self.path = self._download()\n tmpdir = tempfile.mkdtemp()\n\n if self.dtype == DATATYPES.CSV:\n return read_csv(self.path, **pandas_args)\n elif self.dtype == DATATYPES.ZIP:\n ZipFile(self.path).extractall(tmpdir)\n elif self.dtype == DATATYPES.TAR:\n TarFile.open(self.path).extractall(tmpdir)\n\n data_files = (\n os.path.join(root, filename)\n for root, directories, filenames in os.walk(tmpdir)\n for filename in filenames\n )\n\n return [read_csv(data_file, **pandas_args) for data_file in data_files]", "title": "" }, { "docid": "630679f2fb8a44caff7bde00cc874a46", "score": "0.61866754", "text": "def read_set_pd(states: List[str] = list(states.keys()), loadpositions: List[str] = loadpositions, motorspeeds: List[str] = motorspeeds, testruns: List[str] = testruns, rootpath: Path = rootpath, nrows: int = None):\n from pandas import DataFrame\n dfs = [df for df in iterate_set(states, loadpositions, motorspeeds, testruns, rootpath, nrows)]\n return DataFrame().append(dfs, ignore_index=True, sort=False)", "title": "" }, { "docid": "d1f6d47dc29dda989fbaeca3e4a53d44", "score": "0.6068663", "text": "def load_dataframes(self, dfs):\n max_dbh_col = max([max(getDBHcol(df)) for df in dfs] )\n self.max_stems = max_dbh_col + 1\n for df in dfs:\n df.drop_duplicates(inplace=True)\n df.reset_index(drop=True,inplace=True)\n addDBH_NA( df, max_dbh_col )\n assert_not_null(df)\n make_locator_col(df,name=self.loc_col,plot_x=self.plot_x)\n self.dfs_j = pandas.concat( dfs, ignore_index=True)", "title": "" }, { "docid": "10586304726ea07ba685ee26f9035973", "score": "0.6041146", "text": "def from_dfs(cls, train_df: pd.DataFrame, \n val_df: pd.DataFrame=None, test_df: pd.DataFrame=None,\n fields: Dict[ColumnName, FieldOrFields]=None) -> Iterable['TabularDataset']:\n train = cls.from_df(train_df, fields, train=True)\n yield train\n if val_df is not None:\n yield cls.from_df(val_df, fields, train=False)\n if test_df is not None:\n # remove all target fields\n non_target_fields = {}\n \n for k, fld in fields.items():\n if fld is None: continue\n if isinstance(fld, (tuple, list)):\n non_target_fields[k] = []\n for f in fld:\n if not f.is_target: non_target_fields[k].append(f)\n if len(non_target_fields[k]) == 0: non_target_fields[k] = None\n else:\n if not fld.is_target:\n non_target_fields[k] = fld\n else:\n non_target_fields[k] = None\n yield cls.from_df(test_df, non_target_fields, train=False)", "title": "" }, { "docid": "1b61cbf49639517b9c320db1ec48a5d4", "score": "0.6026803", "text": "def from_mars(df: \"mars.DataFrame\",\n parallelism: int = 200) -> Dataset[ArrowRow]:\n raise NotImplementedError # P1", "title": "" }, { "docid": "a2b891355f3c24152618bc71d7a7a6fa", "score": "0.6025968", "text": "def sample_dfs():\n return [pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]}),\n pd.DataFrame({'a': [0, np.Infinity, np.NaN], 'b': [-1.0, -np.Infinity, math.pi]}),\n pd.DataFrame({'col.1': ['a', 'b'], 'col.2': ['c', 'd']}),\n pd.DataFrame({'a': [datetime(2000, 1, 1), datetime(2001, 1, 1)]}),\n pd.DataFrame({'a': [timedelta(days=2), 
timedelta(seconds=50)]}),\n pd.DataFrame({'obj': [{'a': 1}, {'b': 2, 'c': 3}]}),\n pd.DataFrame(np.arange(16).reshape((4, 4)),\n columns=pd.MultiIndex.from_product((['A', 'B'], [1, 2])),\n index=pd.MultiIndex.from_product((['C', 'D'], [3, 4])))\n ]", "title": "" }, { "docid": "9c769bf40b82437a5af23166bb36dfc9", "score": "0.6014361", "text": "def prepare_dataset_sets(dt):\n # create train & test set\n X, y, X_train, X_test, y_train, y_test, X_submit, id_submit, i_train, i_test = __create_train_test(dt)\n\n # prepare y values\n y, y_train, y_test = __prepare_y(dt, y, y_train, y_test)\n\n # create cv folds\n cv_folds = __create_cv(dt, X_train, y_train)\n\n # prepare and store eval set\n y_eval_list, y_eval, idx_eval, idx_eval0 = __store_eval_set(dt, y_train, y_test, cv_folds)\n\n # then store all these results in a pickle store\n ds = XySet(X, y, X_train, y_train, X_test, y_test, X_submit, id_submit, cv_folds, y_eval_list, y_eval, idx_eval, idx_eval0)\n pickle.dump(ds, open(get_dataset_folder(dt.dataset_id) + '/data/eval_set.pkl', 'wb'))\n\n # and keep index of split for future use (eg match predictions with initial file)\n pickle.dump([i_train, i_test], open(get_dataset_folder(dt.dataset_id) + '/data/i_train_test.pkl', 'wb'))", "title": "" }, { "docid": "6df6e724e2bfd1b538651383fc36f3c5", "score": "0.59826064", "text": "def get_dataset(name, df, image_ids, transform=None):\n return(dataset(name, df, image_ids, transform))", "title": "" }, { "docid": "51dc0758c917b4505863e1cc0265ca20", "score": "0.5935394", "text": "def create_pandas(reqs):\n\treturn dataframe", "title": "" }, { "docid": "89973f7302df5028809426a2b38c4969", "score": "0.5930283", "text": "def load_df(contexts):\n ret = pd.DataFrame(contexts)\n from numbers import Number\n\n def make_hashable(elem):\n if isinstance(elem, Number):\n return elem\n elif isinstance(elem, np.ndarray) and elem.ndim == 0:\n return elem.item()\n else:\n return tuple(elem)\n\n # Make it possible to hash (and therefore group) certain columns\n ret['obs'] = ret['obs'].apply(make_hashable)\n ret['obs_p'] = ret['obs_p'].apply(make_hashable)\n\n return ret", "title": "" }, { "docid": "a92e50336acb8aa76abaf6547ce8af56", "score": "0.5930242", "text": "def load_dataset(tags_categories='__all__', load_from_pickle=True):\n\n def load_dataset_and_preprocess():\n dataset_path = os.path.join(DATA_DIR, DATASET_FILENAME)\n dataset_df = pd.read_csv(dataset_path)\n return preprocess_full_dataset(dataset_df)\n\n assert tags_categories == '__all__' or isinstance(tags_categories, list) or isinstance(tags_categories, tuple), \\\n (\"Argument <tags_categories> should be a type of 'list' or 'tuple' or a string with explicit value '__all__'.\"\n \"Instead it got the value {}\".format(tags_categories))\n\n pickle_path = os.path.join(DATA_DIR, DATASET_PICKLE_FILENAME)\n\n if load_from_pickle:\n try:\n dataset_df = pd.read_pickle(pickle_path)\n except Exception as e:\n logger.warning(e)\n dataset_df = load_dataset_and_preprocess()\n dataset_df.to_pickle(pickle_path)\n else:\n dataset_df = load_dataset_and_preprocess()\n dataset_df.to_pickle(pickle_path)\n\n return dataset_df if tags_categories == '__all__' else dataset_df.loc[dataset_df['tags'].isin(tags_categories)]", "title": "" }, { "docid": "11a4ff70f9559f9277f84073d8eec886", "score": "0.59263384", "text": "def _setDatasets(self, metadata):\n # Pull the lot into a list\n cols = list(self.getCSVReader(metadata))\n\n # sort the cols by view name and ordinal (they come sorted, but lets be sure)\n cols.sort(key=lambda x: 
[x['viewname'], int(x['ordinal'])])\n\n # Now group them by their viewname\n for key, grp in groupby(cols, key=lambda x: x[\"viewname\"]):\n self.datasets[key] = list(grp)", "title": "" }, { "docid": "d11f28355a4cd1442c8335155295cf89", "score": "0.58780146", "text": "def construct_initial_dfs():\n meta_df = pd.DataFrame(columns=['int_paper_id','paper_id','abstract','authors','title'], index=['int_paper_id'])\n categories_df = pd.DataFrame(columns=['int_paper_id','num_categories','categories'], index=['int_paper_id'], dtype='object')\n text_df = pd.DataFrame(columns=['int_paper_id','text'], index=['int_paper_id'])\n expanded_categories_df = pd.DataFrame(columns=['int_paper_id'], index = ['int_paper_id'])\n reduced_categories_df = pd.DataFrame(columns=['int_paper_id'], index = ['int_paper_id'])\n \n return meta_df, categories_df, reduced_categories_df, expanded_categories_df, text_df", "title": "" }, { "docid": "9d6d4586ad9b1e88fbacaf4aa1f134ea", "score": "0.58726823", "text": "def create_dataset(self, filenames, filter_fn):\n logging.info('Creating dataset: %s', filenames)\n assert filenames, 'No input data found'\n if FLAGS.input_type == 'tfrecords':\n dataset = tf.data.TFRecordDataset(filenames, num_parallel_reads=10)\n example_parser = parse_example\n elif FLAGS.input_type == 'sstable':\n dataset = tf.data.SSTableDataset(filenames)\n example_parser = lambda key, value: parse_example(value)\n elif FLAGS.input_type == 'raw_tfrecords':\n dataset = tf.data.Dataset.from_tensor_slices(filenames).interleave(\n lambda filename: tf.data.TFRecordDataset(filename).repeat(),\n # Consume data from all input files in parallel.\n cycle_length=len(filenames),\n # We want to get the same number of elements from each input file in\n # the shuffle buffer.\n block_length=1)\n example_parser = parse_example\n else:\n assert False, 'Unknown input type {}'.format(FLAGS.input_type)\n\n # When the input data is already randomized by shuffle_examples, this\n # randomization is not critical. Still good hygiene to do it here, e.g. so\n # that we get examples in different order when we do multiple passes over\n # the dataset, or to ensure that we don't rely on / overfit to a specific\n # order, e.g. 
if we would tune hyperparameters.\n # When the input data is not randomized (input_type=raw_sstable), this is\n # critical.\n ds = dataset.map(\n example_parser, num_parallel_calls=5).filter(filter_fn).map(\n strip_additional_example_info).shuffle(\n buffer_size=FLAGS.dataset_buffer_size).batch(\n Const.BATCH_SIZE).prefetch(100).repeat()\n return ds", "title": "" }, { "docid": "59a3d774d9b1b8f4d4d0f54781359b96", "score": "0.5872182", "text": "def construct_dataset(self, main_stock):\n\n first = True\n\n base_dataframes = self.dataframes.copy()\n\n main_df = self.main_dataframes[main_stock].copy()\n\n if main_df.empty:\n return pd.DataFrame()\n dataframes = self.order_dataframes(main_df, base_dataframes)\n\n for df in dataframes:\n if first:\n ds = df\n first = False\n else:\n ds = pd.merge(ds, df, left_index=True, right_index=True)\n\n cols = ds.columns\n\n scaler = MinMaxScaler()\n\n ds[cols] = scaler.fit_transform(ds[cols])\n\n ds['Date'] = ds.index\n ds['Month'] = pd.DatetimeIndex(ds['Date']).month\n ds['Weekday'] = pd.DatetimeIndex(ds['Date']).weekday\n ds = ds.sort_index(axis=0)\n\n ds = ds.drop(columns=['Date'])\n\n return ds", "title": "" }, { "docid": "e9e099cf95cb490297ba4dd7d0ea093f", "score": "0.5854457", "text": "def load_pandas():\n data = _get_data()\n return du.process_pandas(data, endog_idx=0)", "title": "" }, { "docid": "f2443cd0288eecc01775c171aa21019b", "score": "0.58506554", "text": "def load_dataset(dataset_name, include_meta=False, sets=[\"train\", \"test\", \"val\"], shuffle=False, batch_size=1024, filter=None, **kwargs):\n return_sets = []\n print(\"loading dataset:\", dataset_name)\n for set in sets:\n print(\"loading sample set:\", set)\n\n data = tf.data.experimental.make_csv_dataset(\n \"../data/{}/{}.csv\".format(dataset_name, set), batch_size=batch_size, shuffle=False, **kwargs)\n if filter is not None:\n f=get_filter(filter)\n data=data.unbatch().filter(f).batch(batch_size)\n if include_meta:\n meta = tf.data.experimental.make_csv_dataset(\n \"../data/{}/stats/{}_meta.csv\".format(dataset_name, set), batch_size=batch_size, shuffle=False)\n return_sets.append([data,meta])\n else:\n return_sets.append(data)\n print(\"finished loading dataset\")\n return return_sets", "title": "" }, { "docid": "b80c4034472090fdb6723ebeb0990e44", "score": "0.5840473", "text": "def prep_datasets(df):\n all_ds = prep_single_dataset(df)\n\n result = [all_ds]\n fns = [has_code_block, has_url, has_log]\n print(f\"Shape before filtering: {df.shape}\")\n for fn in fns:\n df_ = df[df[\"X\"].apply(fn)]\n print(f\"Shape after {fn}: {df_.shape}\")\n assert df_.size <= df.size, \"filtered df should not be of bigger size than original one\"\n ds = prep_single_dataset(df_)\n result.append(ds)\n\n return result", "title": "" }, { "docid": "c22c584a3653ff5f6bd4ee5f392c43e7", "score": "0.5835776", "text": "def create_Dataset(data_files, cache_file,batch_size,\n shuffle=True, repeat=True):\n\n # Create Tensorflow Dataset using the list of filenames\n ds = tf.data.Dataset.from_tensor_slices((data_files))\n\n #=============================================================================\n # Define Tensorflow ParallelMapDataset object that reads seismic data from\n # disk as needed rather than from memory. Done with py_function, since\n # pure Python does not work on tensorflow objects (which is slower to\n # execute but we do it once and cache the results). 
Tune the number of\n # parallel calls automatically at runtime\n #=============================================================================\n ds = ds.map(lambda x: tf.py_function(func=get_seismic,\n inp=[x],\n Tout=(tf.float32)),\n num_parallel_calls=tf.data.experimental.AUTOTUNE)\n\n # =============================================================================\n # Define PrefetchDataset object. Cache file is created in directory \n # \"./cache/\". Manually remove the cache file if you make any changes to the \n # preprocessing or batch_size. Do not shuffle and repeat the test_ds \n # object. Its sorting needs to match that of the test_split DataFrame \n # since both are used for QC plots after training\n # =============================================================================\n ds = prepare_for_training(ds,cache=cache_file,batch_size=batch_size,\n shuffle=shuffle, repeat=repeat)\n return ds", "title": "" }, { "docid": "b661553bd10da74cfc48c514dca9d331", "score": "0.58277464", "text": "def load_dataset(file_names_dict: dict, survey_name: str = 'DES',\n initial_training: Union[str, int] = 'original',\n ia_training_fraction: float = 0.5, is_queryable: bool = False,\n is_separate_files: bool = False, samples_list: list = [None],\n is_load_build_samples: bool = True,\n number_of_classes: int = 2,\n feature_extraction_method: str = 'Bazin',\n is_save_samples: bool = False) -> DataBase:\n\n # initiate object\n database_class = DataBase()\n for sample in samples_list:\n database_class.load_features(\n file_names_dict[sample], survey=survey_name, sample=sample,\n method=feature_extraction_method)\n if is_load_build_samples:\n database_class.build_samples(\n initial_training=initial_training, nclass=number_of_classes,\n Ia_frac=ia_training_fraction, queryable=is_queryable,\n save_samples=is_save_samples, sep_files=is_separate_files,\n survey=survey_name)\n return database_class", "title": "" }, { "docid": "77e6eba78f636983cb992ed3c2e6c92d", "score": "0.580638", "text": "def CreateDataframe(benchmark, tags, limit):\n items = []\n for tag_set in tags:\n items.extend(FetchItemIds(tag_set, limit))\n\n dfs = []\n try:\n temp_dir = tempfile.mkdtemp('perf_csvs')\n for idx, item in enumerate(items):\n dfs.append(FetchItemData(item['task_id'], benchmark, idx, temp_dir))\n idx += 1\n finally:\n shutil.rmtree(temp_dir)\n return pandas.concat(dfs, ignore_index=True)", "title": "" }, { "docid": "fdbb5d00f9ff8e19365cb510df1a685d", "score": "0.58045626", "text": "def get_data_frame(self, path_names, target_subset, array_bool, use_small=True):\n path_names = path_names\n data_frame = []\n\n # If gs read from google storage bucket\n paths = []\n if self.location == 'gs':\n # Different image sizes: Dataset (101,101,3), DataSet2 (224,224,3) used for vgg16\n if '/DataSet/' in self.root_directory:\n file_stream = file_io.FileIO(\"gs://data-daisy/full_gs_paths_subset3.pickle\", mode='rb')\n else:\n file_stream = file_io.FileIO(\"gs://data-daisy/full_gs_paths_large_size.pickle\", mode='rb')\n paths = pickle.load(file_stream)\n\n data_frame = self.read_from_full_paths(paths, target_subset)\n\n else: # Read from local machine\n for class_label in path_names.keys():\n for dir_path in path_names[class_label]:\n paths.append([self.collect_image_paths(dir_path),\n class_label]) # hard coded to fit file structure of dataset\n corrected_path_df = []\n for path in paths: # Different structure (i.e., [([filenames], class)....]\n class_ = path[1]\n for file_name in path[0]:\n corrected_path_df.append([file_name, 
class_])\n data_frame = self.read_from_full_paths(corrected_path_df, target_subset)\n return data_frame", "title": "" }, { "docid": "311865be60c2e06b9c736d390d9377c1", "score": "0.57931757", "text": "def concat(cls, batches):\n batches = list(filter(lambda x: len(x) > 0, batches))\n zips = {k: [dic[k] for dic in batches] for k in batches[0].dfs.keys()}\n dic = {}\n for k, z in zips.items():\n non_null = next((b for b in z if b is not None), None)\n if non_null is not None:\n dic[k] = pd.concat(z, ignore_index=True, sort=False)\n for c in non_null.columns:\n if hasattr(non_null[c], 'cat'):\n dic[k][c] = dic[k][c].astype('category')\n else:\n dic[k] = None\n return Dataset(**dic)", "title": "" }, { "docid": "045408fc7d1624b0c148b77e7594cf5e", "score": "0.57905895", "text": "def prepare_data(dfList):\n for df in dfList:\n set_dataframe_index(df)\n set_prices_to_floats(df)\n drop_bad_columns(df)", "title": "" }, { "docid": "8073ef0eeffeed789e15ab653221a076", "score": "0.57809526", "text": "def get_dataset_list():\r\n\r\n try:\r\n\r\n INSEE_sdmx_link_dataflow = \"https://bdm.insee.fr/series/sdmx/dataflow\"\r\n INSEE_api_link_dataflow = \"https://api.insee.fr/series/BDM/V1/dataflow/FR1/all\"\r\n\r\n results = _request_insee(\r\n api_url=INSEE_api_link_dataflow, sdmx_url=INSEE_sdmx_link_dataflow\r\n )\r\n\r\n # create temporary directory\r\n dirpath = _get_temp_dir()\r\n\r\n dataflow_file = dirpath + \"\\\\dataflow_file\"\r\n\r\n with open(dataflow_file, \"wb\") as f:\r\n f.write(results.content)\r\n\r\n root = ET.parse(dataflow_file).getroot()\r\n\r\n if os.path.exists(dataflow_file):\r\n os.remove(dataflow_file)\r\n\r\n data = root[1][0]\r\n\r\n n_dataflow = len(data)\r\n\r\n list_df = []\r\n\r\n for i in trange(n_dataflow, desc=\"Getting datasets list\"):\r\n\r\n dataset = {\r\n \"id\": [next(iter(data[i].attrib.values()))],\r\n \"Name.fr\": [data[i][1].text],\r\n \"Name.en\": [data[i][2].text],\r\n \"url\": [data[i][0][0][0].text],\r\n \"n_series\": [data[i][0][1][0].text],\r\n }\r\n\r\n dt = pd.DataFrame(\r\n dataset, columns=[\"id\", \"Name.fr\", \"Name.en\", \"url\", \"n_series\"]\r\n )\r\n list_df.append(dt)\r\n\r\n # concatenate list of dataframes\r\n df = pd.concat(list_df)\r\n\r\n # clean up columns\r\n df = df.astype(str)\r\n\r\n df[\"n_series\"] = df[\"n_series\"].str.replace(\"\\\\D\", \"\", regex=True)\r\n df[\"n_series\"] = df[\"n_series\"].astype(\"int\")\r\n\r\n df = df[df[\"id\"] != \"SERIES_BDM\"]\r\n\r\n df[\"Name.en\"] = df[\"Name.en\"].str.replace(\"^\\\\n\\\\s{0,}\", \"\", regex=True)\r\n df[\"Name.fr\"] = df[\"Name.fr\"].str.replace(\"^\\\\n\\\\s{0,}\", \"\", regex=True)\r\n df = df[df[\"Name.en\"] != \"\"]\r\n df = df[df[\"Name.fr\"] != \"\"]\r\n\r\n except:\r\n df = _get_dataset_list_internal()\r\n\r\n logger.error(\r\n \"Package's internal data has been used !\\n\"\r\n \"Dataset list download failed !\"\r\n \"Please contact the package maintainer if this error persists !\"\r\n )\r\n\r\n df = df.reset_index(drop=True)\r\n\r\n return df", "title": "" }, { "docid": "9aa5b651ebc3bbe1ff39c9cbb5f0146b", "score": "0.57773376", "text": "def get_dataset_pandas():\n axle_1, axle_2, axle_3, axle_4, axle_5 = load_data()\n\n visualise_datasets(axle_1, axle_2, axle_3, axle_4, axle_5)\n\n print(axle_1.dtypes)\n\n return axle_1, axle_2, axle_3, axle_4, axle_5", "title": "" }, { "docid": "9d3ab96e67762b4387ade9652c98cdee", "score": "0.57700276", "text": "def util_1(dataset):\n dummy_ = []\n for player in dataset:\n dummy_.append(\n pd.DataFrame(\n player[1][:,1:],\n index = 
(player[1][:,0]).astype(np.int),\n columns = [player[0]+ '_x',\n player[0] + '_y']))\n return pd.concat(dummy_, axis=1)", "title": "" }, { "docid": "435d1cb532f78f68cb71becc5bffc958", "score": "0.5769793", "text": "def make_data_frame(datasets, source,\n reduced_dir=None, unmask_dir=None):\n X = []\n keys = []\n for dataset in datasets:\n if source == 'unmasked':\n this_X = pd.read_pickle(join(unmask_dir, dataset, 'imgs.pkl'))\n else:\n this_X = pd.read_pickle(join(reduced_dir, source, dataset, 'Xt.pkl'))\n\n # Curation\n this_X = this_X.reset_index(level=['direction'], drop=True)\n if dataset == 'brainomics':\n this_X = this_X.drop(['effects_of_interest'], level='contrast')\n if dataset == 'brainpedia':\n contrasts = this_X.index.get_level_values('contrast').values\n indices = []\n for i, contrast in enumerate(contrasts):\n if contrast.endswith('baseline'):\n indices.append(i)\n this_X = this_X.iloc[indices]\n for i, (sub_dataset, this_sub_X) in \\\n enumerate(this_X.groupby(level='dataset')):\n if sub_dataset == 'ds102':\n continue\n this_sub_X = this_sub_X.loc[sub_dataset]\n X.append(this_sub_X.astype(np.float32))\n keys.append(sub_dataset)\n else:\n X.append(this_X)\n keys.append(dataset)\n X = pd.concat(X, keys=keys, names=['dataset'])\n X.sort_index(inplace=True)\n return X", "title": "" }, { "docid": "1a833a9b7fef109633b51a6140b996f4", "score": "0.573154", "text": "def _merge_datasets(datasets):\n dframes = []\n\n for dataset in datasets:\n dframes.append(dataset.dframe().add_parent_column(dataset.dataset_id))\n\n return concat(dframes, ignore_index=True)", "title": "" }, { "docid": "f9efd450095dcad5cc2f9317ce07d6ae", "score": "0.5723912", "text": "def dataframe(self, hash_list):\n spark = SparkSession.builder.getOrCreate()\n assert len(hash_list) == 1, \"Multi-file DFs not supported yet.\"\n filehash = hash_list[0]\n df = spark.read.parquet(self._object_path(filehash))\n return df", "title": "" }, { "docid": "9fb04c952827abb4eeee3d66187bafee", "score": "0.5716038", "text": "def hdf2ds(fnames):\r\n from mvpa2.base.hdf5 import h5load\r\n dss = []\r\n for fname in fnames:\r\n content = h5load(fname)\r\n if is_datasetlike(content):\r\n dss.append(content)\r\n else:\r\n for c in content:\r\n if is_datasetlike(c):\r\n dss.append(c)\r\n return dss", "title": "" }, { "docid": "55c65df2daa3ca3fac9a9e19c992b9d8", "score": "0.57145816", "text": "def load_dataset():\n dataframe_list = []\n \n for i in range(1, NO_FILES):\n dataframe = load_file(\"/Users/vahagn/Projets d\\'informatique/Visusearch project/searchs/\"+str(i)+\".json\")\n dataframe_list.append(dataframe)\n \n dataframe = pd.concat(dataframe_list)\n dataframe = dataframe.sort_index()\n \n return dataframe", "title": "" }, { "docid": "ec2c57b89ba51f8c12ad45e13320bf60", "score": "0.5661983", "text": "def _make_dataset(self):\n dataset = []\n\n for df, video_dirpath in self.gen_data():\n sample_init = {\n 'video': video_dirpath\n }\n\n labels = [self.class2idx[label] for label in df.phase]\n\n sample = copy.deepcopy(sample_init)\n\n sample['frame_indices'] = list(df.frame)\n\n sample['label'] = labels\n\n dataset.append(sample)\n\n return dataset", "title": "" }, { "docid": "bbe1e0b2bbdbf768daa0c3c136323dd5", "score": "0.5653369", "text": "def create_dataset(file_paths,\n batch_size,\n params,\n is_training=True,\n input_pipeline_context=None):\n dataset = tf.data.Dataset.list_files(file_paths, shuffle=is_training)\n\n if input_pipeline_context and input_pipeline_context.num_input_pipelines > 1:\n if not is_training or 
params.input_sharding:\n dataset = dataset.shard(input_pipeline_context.num_input_pipelines,\n input_pipeline_context.input_pipeline_id)\n\n if is_training:\n dataset = dataset.repeat()\n # We set shuffle buffer to exactly match total number of\n # training files to ensure that training data is well shuffled.\n dataset = dataset.shuffle(len(file_paths))\n\n # In parallel, create tf record dataset for each train files.\n # cycle_length = 8 means that up to 8 files will be read and deserialized in\n # parallel. You may want to increase this number if you have a large number of\n # CPU cores.\n dataset = dataset.interleave(\n tf.data.TFRecordDataset,\n cycle_length=8,\n num_parallel_calls=tf.data.experimental.AUTOTUNE)\n\n if is_training:\n dataset = dataset.shuffle(100)\n\n if params.get(\"multi_channel_cross_attention\", value=False):\n dataset = process_multidoc_dataset(dataset, batch_size, params)\n else:\n if not params.input_data_not_padded:\n dataset = process_singledoc_dataset(dataset, batch_size, params)\n else:\n dataset = process_singledoc_transformer_dataset(dataset, batch_size,\n params)\n dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)\n return dataset", "title": "" }, { "docid": "4f018e40e31321c012025fe3f4fa9fd5", "score": "0.5634404", "text": "def create_Dataset3(data_files,model_files, cache_file,batch_size,\n shuffle=True, repeat=True):\n\n # Create Tensorflow Dataset using the list of filenames\n ds = tf.data.Dataset.from_tensor_slices((data_files, model_files))\n\n #=============================================================================\n # Define Tensorflow ParallelMapDataset object that reads seismic data from\n # disk as needed rather than from memory. Done with py_function, since\n # pure Python does not work on tensorflow objects (which is slower to\n # execute but we do it once and cache the results). Tune the number of\n # parallel calls automatically at runtime\n #=============================================================================\n ds = ds.map(lambda x,y: new_py_function(\n get_seismic_and_vpvs,\n inp=[x,y],\n Tout=({\"input_1\" : tf.float32},\n {\"vp_output\": tf.float32,\n \"vs_output\": tf.float32})\n ),\n num_parallel_calls=tf.data.experimental.AUTOTUNE)\n \n # =============================================================================\n # Define PrefetchDataset object. Cache file is created in directory \"./cache/\"\n # Remove the cache file if you make any changes to the preprocessing or\n # batch_size. Do not shuffle and repeat the test_ds object. 
Its sorting needs\n # to match that of the test_split DataFrame sinceboth are used for QC plots\n # after training\n # =============================================================================\n ds = prepare_for_training(ds,cache=cache_file,batch_size=batch_size,\n shuffle=shuffle, repeat=repeat)\n return ds", "title": "" }, { "docid": "746e39a812272856f4e93b07ad2c63de", "score": "0.5632585", "text": "def as_tf_dataset(\n self, folds=2, subset=None, keep_individual=False, cache_dir=None\n ):\n samples = self._images\n if subset == 'train':\n samples = self._train_set\n elif subset == 'validation':\n samples = self._validation_set\n elif subset == 'test':\n samples = self._test_set\n\n def ds_gen(sample_set, folds):\n augment = create_augment_fn(random_state=self._random_state)\n for _ in range(folds):\n for x, y, z in (self._get_from_path(p) for p in sample_set):\n yield self.preprocess(x, y, z, augment, keep_individual)\n\n ds = tf.data.Dataset.from_generator(\n partial(ds_gen, samples, folds),\n (\n tf.dtypes.float32,\n tf.dtypes.uint8,\n tf.dtypes.float32\n ),\n (\n tf.TensorShape([512, 512, 3]),\n tf.TensorShape([512, 512, self._NUM_CLASSES]),\n tf.TensorShape([512, 512, 1])\n )\n )\n if cache_dir:\n cache = Path(cache_dir) / (\n f'{subset if subset else \"all\"}'\n f'_folds{folds}'\n f'_{\"squashed\" if not keep_individual else \"\"}'\n )\n ds = ds.cache(str(cache))\n return ds", "title": "" }, { "docid": "f0af4f887ac404b291b8d983b76aa4b1", "score": "0.5631819", "text": "def load_dataset(files):\n data = []\n for file in files:\n data.extend(read_file(file))\n return data", "title": "" }, { "docid": "684ab4236e32c1951dcaed53f318ccce", "score": "0.5621287", "text": "def load_dataset(df_path):\n df = pd.read_parquet(df_path)\n df.rename(columns={s: s.lower() for s in df.columns}, inplace=True)\n df.stat_cause_code = df.stat_cause_code.astype(int)\n df = df.filter(items=INTERESTING_COLS)\n # Drop codes 9 and 13, corresponding to miscellaneous and unknown causes,\n # which are pretty useless.\n df = df[(df.stat_cause_code != 9) & (df.stat_cause_code != 13)]\n labels = pd.get_dummies(df.stat_cause_descr)\n df['burn_time'] = df.cont_date - df.discovery_date\n df['burn_time_notna'] = df.burn_time.notna().astype(int)\n data = df.filter(items=FEATURE_COLS)\n data = data.join(pd.get_dummies(df.state))\n data = data.rename(columns={s: s.lower() for s in data.columns})\n data = normalize(data)\n Xtrain, Ytrain = data.iloc[:SPLIT], labels.iloc[:SPLIT]\n Xtest, Ytest = data.iloc[SPLIT:], labels.iloc[SPLIT:]\n return (Xtrain, Ytrain), (Xtest, Ytest)", "title": "" }, { "docid": "21405177dc8a123dd38aa4bc1d435915", "score": "0.56209415", "text": "def load_all_data():\n\n dataframes = []\n for data_file in os.listdir(\"data_conserv/\"):\n if '.csv' in data_file:\n data = pd.read_csv(\"data_conserv/\" + data_file)\n dataframes.append(data)\n dataframe = pd.concat(dataframes)\n dataframe.apply(lambda k: _process_row(k), axis=1)\n return dataframe", "title": "" }, { "docid": "31ae4c5468cafb10289f4cade55cab89", "score": "0.5613406", "text": "def get_df_files(self, file_paths, dtypes):\n \n prefix = str(file_paths[0].parent)\n suffix = file_paths[0].suffix\n \n n_files = len(file_paths)\n print(f'Creating dataframe from {n_files} file{\"s\" if n_files > 1 else \"\"} for table {prefix}...')\n \n dfs = []\n if suffix == '.csv':\n for f in tqdm(file_paths, desc='Files Read: '):\n dfs.append(pd.read_csv(f, dtype=dtypes, low_memory=False))\n else:\n for f in tqdm(file_paths, desc='Files Read: '):\n 
dfs.append(pd.read_json(f, dtype=dtypes, lines=True))\n \n df_train = pd.concat(dfs)\n \n print(f'Dataframe finished for train table at {prefix} with'\n f' {len(df_train.columns):,d} columns and '\n f'{len(df_train):,d} rows.')\n \n return df_train", "title": "" }, { "docid": "95eb4f7b5b7aa721d54d508d8f9fd53a", "score": "0.5575595", "text": "def test_frame_upload_pandas(self):\n import pandas as pd\n from pandas.util.testing import assert_frame_equal\n import numpy as np\n data = [[1, 'one', [1.0, 1.1]], [2, 'two', [2.0, 2.2]], [3, 'three', [3.0, 3.3]]]\n schema = [('n', ta.int64), ('s', str), ('v', ta.vector(2))] # 'n' is int64, pandas default\n source = dict(zip(zip(*schema)[0], zip(*data)))\n df0 = pd.DataFrame(source)\n self.assertEqual(np.int64, df0['n'].dtype)\n self.assertEqual(np.object, df0['s'].dtype)\n self.assertEqual(np.object, df0['v'].dtype)\n p = ta.Pandas(df0, schema)\n frame = ta.Frame(p)\n df1 = frame.download()\n # print repr(df0)\n # print repr(df1)\n assert_frame_equal(df0, df1)", "title": "" }, { "docid": "e05b7435388a862f6eb4f2ba112032c1", "score": "0.5574462", "text": "def generate_dataframes(self):\n pp = pprint.PrettyPrinter(indent=4, compact=True)\n\n # get dataframes\n dataframe, maps, num_classes = self.generate_dataframe()\n\n # save metadata about the data for processing later\n actual_data=dataframe.filter(items=self.columns)\n metadata = {}\n metadata[\"num_classes\"] = num_classes\n metadata[\"col_max\"] = actual_data.max(axis=0).tolist()\n metadata[\"col_min\"] = actual_data.min(axis=0).tolist()\n metadata[\"col_mean\"] = actual_data.mean(axis=0).tolist()\n metadata[\"col_std\"] = actual_data.std(axis=0).tolist()\n metadata[\"field_names\"] = actual_data.columns.tolist()\n metadata[\"corr_mat\"]=actual_data.corr(method=\"spearman\").to_numpy().tolist()\n # dtype object not serializable so turn into string first\n dtypes = [str(x) for x in actual_data.dtypes]\n metadata[\"dtypes\"] = dtypes\n\n # create dataset folder if it doesnt exist\n if not os.path.exists(\"../data/{}\".format(self.dataset_name)):\n os.mkdir(\"../data/{}\".format(self.dataset_name))\n os.makedirs(\"../data/{}/maps\".format(self.dataset_name))\n os.makedirs(\"../data/{}/stats\".format(self.dataset_name))\n\n print(\"splitting dataframe...\")\n # split data into train, val, test and save\n self.train_data, test_data = split_dataframe(\n dataframe, self.train_test_split)\n self.test_data, self.val_data = split_dataframe(\n test_data, self.test_val_split)\n\n num_train = len(self.train_data.index)\n num_val = len(self.val_data.index)\n num_test = len(self.test_data.index)\n\n print(\"data splitted into train:{}, test:{}, val:{}\".format(\n num_train, num_test, num_val))\n\n metadata[\"num_train\"] = num_train\n metadata[\"num_val\"] = num_val\n metadata[\"num_test\"] = num_test\n metadata[\"meta_col\"]=self.meta_col\n\n self.dataframe = dataframe\n # save the maps\n for key, val in maps.items():\n save_map(\"../data/{}/maps/{}.csv\".format(self.dataset_name,\n key), val)\n\n with open('../data/{}/metadata.txt'.format(self.dataset_name), 'w') as outfile:\n json.dump(metadata, outfile, indent=True)", "title": "" }, { "docid": "c974ea53219d1f349bfcd16e5f6db007", "score": "0.5574333", "text": "def generate_dataframe(self):\n\n datasets = []\n # get all files under data_directory\n for file in os.listdir(self.data_directory):\n # ignore .gitignore\n is_in_files=False\n for i in self.files:\n if i in file:\n is_in_files=True\n if file.endswith(\".csv\") and (is_in_files == self.ignore):\n 
print(\"processing file\", file)\n df_chunk = pd.read_csv(os.path.join(\n self.data_directory, file), header=0, usecols=self.columns+self.meta_col, chunksize=100000, encoding=\"utf-8\")\n\n for chunk in df_chunk:\n\n if self.type == \"KU\":\n if self.use_filename_as_label:\n if self.replace_label:\n chunk[self.label_col] = file.split(\".\")[0]\n else:\n chunk[self.label_col] = chunk[self.label_col]+file.split(\".\")[0]\n\n if len(self.protocols) > 0:\n chunk = chunk[chunk[\"protocol_type\"].isin(\n self.protocols)]\n chunk[\"same_sip_src_bytes\"]/=1000\n chunk[\"same_dip_dst_bytes\"]/=1000\n datasets.append(chunk)\n\n\n print(\"finished loading datasets\")\n #processing actual data\n all_data = pd.concat(datasets)\n # some headers have spaces in front\n all_data = all_data.rename(columns=lambda x: x.lstrip())\n\n # # drop duplicate since duplicate columns ends with .n\n # for colname in all_data.columns:\n # if colname[-1].isdigit():\n # all_data = all_data.drop([colname], axis=1)\n\n # filter attacks\n if self.attack_type is not None:\n all_data = all_data[all_data[self.label_col].isin(self.attack_type)]\n if all_data.empty:\n raise ValueError(\"Specified attack type results in empty dataframe\")\n\n # convert label to categorical\n maps = {}\n # errors=ignore since some meta col is not of type object\n cat_col = all_data.select_dtypes(['object']).columns.drop(self.meta_col, errors='ignore')\n\n all_data[cat_col] = all_data[cat_col].astype(\"category\")\n\n for i in cat_col:\n maps[i] = list(all_data[i].cat.categories)\n\n all_data[cat_col] = all_data[cat_col].apply(lambda x: x.cat.codes)\n\n num_classes = all_data[self.label_col].nunique()\n\n self.num_classes=num_classes\n # # remove negative and nan values\n # all_data[all_data < 0] = np.nan\n # all_data = all_data.fillna(0)\n\n return all_data, maps, num_classes", "title": "" }, { "docid": "8ac43bf2175c5e701f9b543407391c00", "score": "0.5569221", "text": "def load_dataset(self, files: List[str]):\n\n m_data_reader = SemevalReader(self.cM)\n for file in files:\n m_data_reader.read_data(file)\n\n return m_data_reader", "title": "" }, { "docid": "ffa48cdd44c556298a498f7bf40d89a7", "score": "0.5566568", "text": "def get_all_dataframe():\n i = 0\n old_df = None\n ne_df = None\n while True:\n # if the file exists\n df_name = \"transaction_dataframe_\"+str(i)+\".tsv\"\n # df_name = \"old_dataset/transaction_dataframe_\"+str(i)+\".tsv\"\n if (os.path.isfile(df_name)):\n df = pd.DataFrame.from_csv(df_name, sep='\\t')\n new_df = pd.concat([old_df, df])\n old_df = new_df\n else:\n break\n i += 1\n return new_df", "title": "" }, { "docid": "6eb238f278417e7f3fbbd80da55261f7", "score": "0.5564529", "text": "def _load_data(ds, variables, extent, index_names):\n if type(variables) is str: variables = [variables] # convert to list of string\n try:\n df = ds[index_names + variables].to_dataframe()\n except KeyError as e:\n raise Exception(\n 'The given variabes ' + ', '.join(variables) + ' do not match the names in the input data.' 
+ str(e))\n df.dropna(axis='index', subset=variables, inplace=True)\n if extent: # === geographical subset ===\n lat, lon = globals.index_names\n df = df[(df[lon] >= extent[0]) & (df[lon] <= extent[1]) & (df[lat] >= extent[2]) & (df[lat] <= extent[3])]\n df.reset_index(drop=True, inplace=True)\n return df", "title": "" }, { "docid": "5e5999247f9d6daab4cbc5aedfb24487", "score": "0.55641973", "text": "def create_cameras_dataset(\n self,\n cameras: Union[Iterable[tfcam.TFCamera], Iterable[gpath.GPath]],\n flatten=False,\n shuffle=False):\n if isinstance(cameras[0], gpath.GPath) or isinstance(cameras[0], str):\n cameras = utils.parallel_map(self.load_camera, cameras)\n\n def _generator():\n for camera in cameras:\n yield {'camera_params': camera.get_parameters()}\n\n dataset = tf.data.Dataset.from_generator(\n _generator,\n output_signature={'camera_params': _TF_CAMERA_PARAMS_SIGNATURE})\n dataset = dataset.map(\n functools.partial(_camera_to_rays_fn, use_tf_camera=True), _TF_AUTOTUNE)\n\n if flatten:\n # Unbatch images to rows.\n dataset = dataset.unbatch()\n if shuffle:\n dataset = dataset.shuffle(20000)\n # Unbatch rows to rays.\n dataset = dataset.unbatch()\n if shuffle:\n dataset = dataset.shuffle(20000)\n\n return dataset", "title": "" }, { "docid": "4a8d9becec3189d6957ba33dfe5903f2", "score": "0.555017", "text": "def from_pandas(cls, df, return_dims=False):\n num_splits = GpuCount.get()\n put_func = cls._partition_class.put\n # For now, we default to row partitioning\n pandas_dfs = split_result_of_axis_func_pandas(0, num_splits, df)\n keys = [\n put_func(cls._get_gpu_managers()[i], pandas_dfs[i])\n for i in range(num_splits)\n ]\n keys = RayWrapper.materialize(keys)\n parts = cls._create_partitions(keys, cls._get_gpu_managers()).reshape(\n (num_splits, 1)\n )\n if not return_dims:\n return parts\n else:\n row_lengths = [len(df.index) for df in pandas_dfs]\n col_widths = [\n len(df.columns)\n ] # single value since we only have row partitions\n return parts, row_lengths, col_widths", "title": "" }, { "docid": "95ead32c2c4f9a314f1b6616cb7416c8", "score": "0.5537943", "text": "def get_datasets(configs: ConfigNode):\n dataset_name = configs.DATA.DATASET.DATASET_NAME\n data_root = configs.DATA.DATA_ROOT\n\n preprocessing = get_preprocessing_pipeline(configs)\n augmentation = get_augmentation_pipeline(configs)\n\n if dataset_name == 'aria':\n valid_ids = configs.DATA.DATASET.VALID_IDS\n test_ids = configs.DATA.DATASET.TEST_IDS\n data_type = configs.DATA.DATASET.DATA_TYPE\n\n train_set = ARIADataset(data_root=data_root,\n split_mode='train',\n valid_ids=valid_ids,\n test_ids=test_ids,\n preprocessing=preprocessing,\n augmentation=augmentation,\n data_type=data_type,\n download=False,\n extract=False)\n\n valid_set = ARIADataset(data_root=data_root,\n split_mode='valid',\n valid_ids=valid_ids,\n test_ids=test_ids,\n preprocessing=preprocessing,\n augmentation=None,\n data_type=data_type,\n download=False,\n extract=False)\n\n test_set = ARIADataset(data_root=data_root,\n split_mode='test',\n valid_ids=valid_ids,\n test_ids=test_ids,\n preprocessing=preprocessing,\n augmentation=None,\n data_type=data_type,\n download=False,\n extract=False)\n\n elif dataset_name == 'chase':\n valid_ids = configs.DATA.DATASET.VALID_IDS\n test_ids = configs.DATA.DATASET.TEST_IDS\n\n train_set = CHASEDB1Dataset(data_root=data_root,\n split_mode='train',\n valid_ids=valid_ids,\n test_ids=test_ids,\n preprocessing=preprocessing,\n augmentation=augmentation,\n download=False,\n extract=False)\n\n valid_set = 
CHASEDB1Dataset(data_root=data_root,\n split_mode='valid',\n valid_ids=valid_ids,\n test_ids=test_ids,\n preprocessing=preprocessing,\n augmentation=None,\n download=False,\n extract=False)\n test_set = CHASEDB1Dataset(data_root=data_root,\n split_mode='test',\n valid_ids=valid_ids,\n test_ids=test_ids,\n preprocessing=preprocessing,\n augmentation=None,\n download=False,\n extract=False)\n\n elif dataset_name == 'drive':\n train_set = DRIVEDataset(data_root=data_root,\n split_mode='train',\n preprocessing=preprocessing,\n augmentation=augmentation,\n download_code=None,\n download=False,\n extract=False)\n\n valid_set = DRIVEDataset(data_root=data_root,\n split_mode='valid',\n preprocessing=preprocessing,\n augmentation=None,\n download_code=None,\n download=False,\n extract=False)\n\n test_set = DRIVEDataset(data_root=data_root,\n split_mode='test',\n preprocessing=preprocessing,\n augmentation=None,\n download_code=None,\n download=False,\n extract=False)\n\n elif dataset_name == 'stare':\n valid_ids = configs.DATA.DATASET.VALID_IDS\n test_ids = configs.DATA.DATASET.TEST_IDS\n\n train_set = STAREDataset(data_root=data_root,\n split_mode='train',\n valid_ids=valid_ids,\n test_ids=test_ids,\n preprocessing=preprocessing,\n augmentation=augmentation,\n download=False,\n extract=False)\n\n valid_set = STAREDataset(data_root=data_root,\n split_mode='valid',\n valid_ids=valid_ids,\n test_ids=test_ids,\n preprocessing=preprocessing,\n augmentation=None,\n download=False,\n extract=False)\n\n test_set = STAREDataset(data_root=data_root,\n split_mode='test',\n valid_ids=valid_ids,\n test_ids=test_ids,\n preprocessing=preprocessing,\n augmentation=None,\n download=False,\n extract=False)\n\n elif dataset_name == 'hrf':\n valid_ids = configs.DATA.DATASET.VALID_IDS\n test_ids = configs.DATA.DATASET.TEST_IDS\n data_type = configs.DATA.DATASET.DATA_TYPE\n\n train_set = HRFDataset(data_root=data_root,\n split_mode='train',\n valid_ids=valid_ids,\n test_ids=test_ids,\n preprocessing=preprocessing,\n augmentation=augmentation,\n data_type=data_type,\n download=False,\n extract=False)\n\n valid_set = HRFDataset(data_root=data_root,\n split_mode='valid',\n valid_ids=valid_ids,\n test_ids=test_ids,\n preprocessing=preprocessing,\n augmentation=None,\n data_type=data_type,\n download=False,\n extract=False)\n\n test_set = HRFDataset(data_root=data_root,\n split_mode='test',\n valid_ids=valid_ids,\n test_ids=test_ids,\n preprocessing=preprocessing,\n augmentation=None,\n data_type=data_type,\n download=False,\n extract=False)\n\n else:\n LOGGER.error('Invalid dataset_name: %s', dataset_name)\n raise NotImplementedError(dataset_name)\n\n return train_set, valid_set, test_set", "title": "" }, { "docid": "105160404e3cda64d728c4d8ab296ce7", "score": "0.5526558", "text": "def dataframes(draw, min_rows=0, max_rows=2 ** 8, min_cols=0, max_cols=2 ** 8):\n pd = pytest.importorskip(\"pandas\")\n\n num_rows = draw(st.integers(min_value=min_rows, max_value=max_rows))\n col_types = draw(\n st.lists(\n st.sampled_from([\"continuous\", \"discrete\"]),\n min_size=min_cols,\n max_size=max_cols,\n )\n )\n col_names = draw(\n st.lists(\n st.text(),\n min_size=len(col_types),\n max_size=len(col_types),\n # the following line is required otherwise log_training_data_proflie()\n # tests in TestAutoMonitoring fail with columns that get normalized\n # to the same attribute key (VR-12274 to resolve)\n unique_by=RegisteredModelVersion._normalize_attribute_key,\n )\n )\n\n return pd.concat(\n [\n draw(series(num_rows, col_type, 
col_name))\n for col_type, col_name in zip(col_types, col_names)\n ],\n axis=\"columns\",\n )", "title": "" }, { "docid": "4f6f521f4fee1529e1ce227adfb40c3f", "score": "0.5524592", "text": "def open_datasets(filelist):\n ds = [annual_mean(selbox(xr.open_dataset(x, use_cftime=True))) for x in filelist]\n ds = xr.concat(\n ds,\n dim=pd.Index(name=\"ensemble_member\", data=[x.split(\"_\")[-2] for x in filelist]),\n )\n return ds", "title": "" }, { "docid": "d25533f70374cdb6d9a834ac17469e33", "score": "0.55100995", "text": "def populate_dataframe(self, tags_list):\n for tag in tags_list:\n try:\n self.dataframe[tag] = self.dataframe['DS'].apply(lambda x: _get_tag_from_loaded_dicom(x, tag))\n except Exception as error:\n print('Tag not found! Error: ', error)", "title": "" }, { "docid": "5406b179b277300e0a9be459e3012ab6", "score": "0.5509382", "text": "def get_dataset(\n self,\n X_train: Union[List, pd.DataFrame, np.ndarray],\n y_train: Union[List, pd.DataFrame, np.ndarray],\n X_test: Optional[Union[List, pd.DataFrame, np.ndarray]] = None,\n y_test: Optional[Union[List, pd.DataFrame, np.ndarray]] = None,\n resampling_strategy: Optional[ResamplingStrategies] = None,\n resampling_strategy_args: Optional[Dict[str, Any]] = None,\n dataset_name: Optional[str] = None,\n dataset_compression: Optional[DatasetCompressionSpec] = None,\n **kwargs: Any\n ) -> BaseDataset:\n dataset, _ = self._get_dataset_input_validator(\n X_train=X_train,\n y_train=y_train,\n X_test=X_test,\n y_test=y_test,\n resampling_strategy=resampling_strategy,\n resampling_strategy_args=resampling_strategy_args,\n dataset_name=dataset_name,\n dataset_compression=dataset_compression,\n **kwargs)\n\n return dataset", "title": "" }, { "docid": "4e01975757c2cfb2ab859e311eb12ea1", "score": "0.5486407", "text": "def list_to_dataframe(self, list_of_lists: [], ins: ins_man):\n columns = select_data_columns(ins)\n dataframe = pd.DataFrame(list_of_lists)\n dataframe = dataframe.transpose()\n dataframe.columns = columns\n return dataframe", "title": "" }, { "docid": "454d0140d4013ad75b837644aee34f8a", "score": "0.5486372", "text": "def get_dataset(input_dir1, gt_dir1, train_ids1, num_shards=None, shard_id=None, distribute=False):\n input_final_data = []\n gt_final_data = []\n for train_id in train_ids1:\n in_files = glob.glob(input_dir1 + '%05d_00*.hdf5' % train_id)\n\n gt_files = glob.glob(gt_dir1 + '%05d_00*.hdf5' % train_id)\n gt_path = gt_files[0]\n gt_fn = os.path.basename(gt_path)\n gt_exposure = float(gt_fn[9: -6])\n gt = h5py.File(gt_path, 'r')\n gt_rawed = gt.get('gt')[:]\n gt_image = np.expand_dims(np.float32(gt_rawed / 65535.0), axis=0)\n gt_image = gt_image.transpose([0, 3, 1, 2])\n\n for in_path in in_files:\n gt_final_data.append(gt_image[0])\n\n in_fn = os.path.basename(in_path)\n in_exposure = float(in_fn[9: -6])\n ratio = min(gt_exposure / in_exposure, 300)\n im = h5py.File(in_path, 'r')\n in_rawed = im.get('in')[:]\n input_image = np.expand_dims(pack_raw(in_rawed), axis=0) * ratio\n input_image = np.float32(input_image)\n input_image = input_image.transpose([0, 3, 1, 2])\n input_final_data.append(input_image[0])\n data = (input_final_data, gt_final_data)\n if distribute:\n datasets = ds.NumpySlicesDataset(data, ['input', 'label'], shuffle=False,\n num_shards=num_shards, shard_id=shard_id)\n else:\n datasets = ds.NumpySlicesDataset(data, ['input', 'label'], shuffle=False)\n return datasets", "title": "" }, { "docid": "cf3ccab0bd6895512dc9d59c86e3fe67", "score": "0.5483582", "text": "def list_series_df(a_list):\r\n s = 
pd.Series(a_list)\r\n df = pd.DataFrame(s)\r\n print(\"The DataFrame is\")\r\n return df", "title": "" }, { "docid": "b5f207281c4a4586b6bd5825ab03f3a7", "score": "0.5472024", "text": "def importDatasetFromHDF5(filepath, dataset_name):\n\n with h5py.File(filepath, 'r') as hf:\n ds = hf[dataset_name]\n df = np.DataFrame(data=ds[:])\n\n return df", "title": "" }, { "docid": "f2d9177c81bfc40f37ad085d6d1a4b2b", "score": "0.5465121", "text": "def import_data():\n datafiles = glob('data/NEON.*.csv')\n data = pd.DataFrame()\n for datafile in datafiles:\n new_data = pd.read_csv(datafile)\n data = data.append(new_data, ignore_index=True)\n return data", "title": "" }, { "docid": "9a66888ad23e17449224767b22987555", "score": "0.5447914", "text": "def select_frovedis_dataframe(self, targets):\n targets = list(check_string_or_array_like(targets, \"select\"))\n is_ser = len(targets) == 1\n ret = DataFrame(is_series=is_ser)\n ret_types = self.__get_column_types(targets)\n ret_cols = list(targets) #targets is a list\n ret.num_row = len(self) \n ret.index = self.index\n\n if self.has_index():\n targets = [self.index.name] + targets\n\n sz = len(targets) \n ptr_arr = get_string_array_pointer(targets)\n (host, port) = FrovedisServer.getServerInstance()\n proxy = rpclib.select_frovedis_dataframe(host, port, self.get(), \\\n ptr_arr, sz)\n excpt = rpclib.check_server_exception()\n if excpt[\"status\"]:\n raise RuntimeError(excpt[\"info\"])\n ret.load_dummy(proxy, ret_cols, ret_types)\n return ret", "title": "" }, { "docid": "59b91bb7c3e5aeae7dbb57e47bea37ba", "score": "0.54453963", "text": "def concatinate_dataframes(df_list):\n return pd.concat(df_list)", "title": "" }, { "docid": "3663b419a1b16b3b9075cd2da7ef16c9", "score": "0.5440875", "text": "def make_pandas(self):\n pass", "title": "" }, { "docid": "ecdac0c5bd7ac71dd04287c7054dc731", "score": "0.5439865", "text": "def make_dataframe(self):\n\n staging_df = []\n for file in self.file_list:\n staging_df.append(pandas.read_json(file))\n self.tweet_df = pandas.concat(staging_df, ignore_index=True)\n self.tweet_df.sort_values(\"created_at\", inplace=True)", "title": "" }, { "docid": "6a38a2f6a0a82f4fc95f953ddd9f78ca", "score": "0.54380697", "text": "def create_dataset(\n self, item_ids, flatten=False, shuffle=False) -> tf.data.Dataset:\n logging.info('*** Creating a dataset with %d items.', len(item_ids))\n data_dict = self.parallel_get_items(item_ids)\n return dataset_from_dict(data_dict,\n rng=self.rng,\n flatten=flatten,\n shuffle=shuffle)", "title": "" }, { "docid": "d25ced6eeb5caf384aa90fed30c24cad", "score": "0.54376", "text": "def create_dataset(args):\n path = args.path\n\n corpus = pd.read_csv(path + 'all.tsv', sep='\\t')\n corpus = shuffle(corpus)\n\n size_corpus = corpus.shape[0]\n\n split = [int(size_corpus * 0.8), int(size_corpus * 0.1)]\n\n train = corpus.iloc[:split[0]]\n val = corpus.iloc[split[0]:split[0] + split[1]]\n test = corpus.iloc[split[0] + split[1]:]\n\n train.to_csv(path + 'train.tsv', index=False, sep='\\t')\n val.to_csv(path + 'valid.tsv', index=False, sep='\\t')\n test.to_csv(path + 'test.tsv', index=False, sep='\\t')", "title": "" }, { "docid": "c4048fced9533ae31f91a5a8320bafde", "score": "0.5437289", "text": "def load_unprocessed_dataframe(self, file_paths: List[str]) -> pd.DataFrame:\n train_files = {\"train_identity.csv\", \"train_transaction.csv\"}\n test_files = {\"test_identity.csv\", \"test_transaction.csv\"}\n\n train_dfs, test_dfs = {}, {}\n\n for filename in train_files.union(test_files):\n split_name = 
os.path.splitext(filename)[0]\n file_df = self.load_file_to_dataframe(os.path.join(self.raw_dataset_dir, filename))\n if filename in train_files:\n train_dfs[split_name] = file_df\n elif filename in test_files:\n test_dfs[split_name] = file_df\n\n # Merge on TransactionID\n final_train = pd.merge(\n train_dfs[\"train_transaction\"], train_dfs[\"train_identity\"], on=\"TransactionID\", how=\"left\"\n )\n return final_train", "title": "" }, { "docid": "04c8e89037203bbf193893c2aaea3299", "score": "0.5427318", "text": "def load_dataset(self, source_files, source_categories):\n # Set containing list of tuples.\n dataset = []\n\n # Create table used for removing punctuations.\n table = str.maketrans({key: None for key in string.punctuation})\n\n # Process files with categories.\n for data_file, category in zip(source_files, source_categories):\n # Set absolute path to file.\n data_file = os.path.join(self.split_folder, data_file)\n self.logger.info('Loading dataset from {} (category: {})...'.format(data_file, category))\n # Load file content using '|' separator.\n df = pd.read_csv(filepath_or_buffer=data_file, sep='|',header=None,\n names=[self.key_image_ids,self.key_questions,self.key_answers])\n\n # Add tdqm bar.\n t = tqdm.tqdm(total=len(df.index))\n for _, row in df.iterrows():\n # Retrieve question and answer.\n question = row[self.key_questions]\n answer = row[self.key_answers]\n\n # Process question - if required.\n if self.remove_punctuation in [\"questions\",\"all\"]:\n question = question.translate(table)\n\n # Process answer - if required.\n if self.remove_punctuation in [\"answers\",\"all\"]:\n answer = answer.translate(table)\n\n # Add record to dataset.\n dataset.append({\n self.key_image_ids: row[self.key_image_ids],\n self.key_questions: question,\n self.key_answers: answer,\n # Add category.\n self.key_category_ids: category\n })\n\n t.update()\n t.close()\n\n # Return the created list.\n return dataset", "title": "" }, { "docid": "1ddfb625a9aa4c4a19788eb8db8cad03", "score": "0.5426803", "text": "def read_and_parse_data(files):\n ds = tf.data.Dataset.from_tensor_slices(files)\n ds = ds.repeat()\n ds = ds.map(parse_fn)\n\n return ds", "title": "" }, { "docid": "cec3f0dbb2925464fa94f4b58076a858", "score": "0.5414947", "text": "def from_spark(df: \"pyspark.sql.DataFrame\",\n parallelism: int = 200) -> Dataset[ArrowRow]:\n raise NotImplementedError # P2", "title": "" }, { "docid": "502909d5065f5bf0a49b3f925de0acdf", "score": "0.54089534", "text": "def construct_dataset(numbers):\n dataset = []\n for num in numbers:\n pic = Image.open('data/image{i}.jpg'.format(i=num))\n parts = lowres(cleansing.grid(pic, (80, 60)))\n dataset += cleansing.build_dataset(\n cleansing.compress(parts),\n read_labels('data/labels{i}.csv'.format(i=num))\n )\n return dataset", "title": "" }, { "docid": "304c1ff54cf967a3422eb6f90553fdcf", "score": "0.54065233", "text": "def create_dataframe(tuple_data):\n return pd.DataFrame.from_records(tuple_data[1:], columns=tuple_data[0])", "title": "" }, { "docid": "9b6e13517e112eeb8ab4e6ca2d57e0e9", "score": "0.5404978", "text": "def _get_iris_data_set(self) -> pd.DataFrame:\n return pd.read_csv(\n 'https://archive.ics.uci.edu/ml/'\n 'machine-learning-databases/iris/iris.data',\n header=None\n )", "title": "" }, { "docid": "502246d7c5a4e3cd6f94c33fec052124", "score": "0.53958356", "text": "def load_train_dfs():\n try:\n from kaggle.competitions import twosigmanews\n env = twosigmanews.make_env()\n (market_train_df, news_train_df) = env.get_training_data()\n except:\n 
market_train_df = pd.read_csv(TEST_MARKET_DATA, encoding=\"utf-8\", engine=\"python\")\n news_train_df = pd.read_csv(TEST_NEWS_DATA, encoding=\"utf-8\", engine=\"python\")\n env = None\n return env, market_train_df, news_train_df", "title": "" }, { "docid": "a3f3b265c3409dd473fa1dc4a4679a24", "score": "0.53917485", "text": "def importDatasetFromHDF5(filepath, dataset_name):\n\n with h5py.File(filepath, 'r') as hf:\n ds = hf[dataset_name]\n df = np.DataFrame(data=ds[:])\n\n return df", "title": "" }, { "docid": "5fb9c16e2423e1b31cd90bac5aa86147", "score": "0.53875786", "text": "def read_data(filenames=DATA_FILES):\n df, labels = load_dataset(filenames=filenames)\n df = pd.DataFrame(df).rename(columns={0: 'title'})\n return df, np.array(labels)", "title": "" }, { "docid": "5c8653d16b311e7280e9dddcf23e326b", "score": "0.5380016", "text": "def read_in_dataset(dset, verbose=False):\n \n df = pd.read_csv('{0}.csv'.format(dset), encoding = \"ISO-8859-1\")\n \n if verbose:\n print('\\n{0:*^80}'.format(' Reading in the {0} dataset '.format(dset)))\n print(\"\\nit has {0} rows and {1} columns\".format(*df.shape))\n print('\\n{0:*^80}\\n'.format(' It has the following columns '))\n print(df.columns)\n print('\\n{0:*^80}\\n'.format(' The first 5 rows look like this '))\n print(df.head())\n \n return df", "title": "" }, { "docid": "2d333b7fbb3a7c9fcd353e6f31100d61", "score": "0.53781176", "text": "def read_raw_dataset():\n \n # Directory names are saved as F, O, N, etc.\n # And set names are processed as A, B, C, etc.\n mapping_set_to_dir = {\n 'A': (0,'Z'),\n 'B': (1,'O'),\n 'C': (2,'N'),\n 'D': (3,'F'),\n 'E': (4,'S')\n }\n\n file_lists = []\n \n # get the list of files for each set\n # 1 file corresponds to 1 training example\n for s,d in mapping_set_to_dir.items():\n file_lists.insert(d[0], [f for f in listdir(d[1]) if isfile(join(d[1], f))])\n \n raw_dataset = { }\n\n # loop over all sets\n for s,d in mapping_set_to_dir.items():\n\n # loop over every file (training example) in each set\n for f in file_lists[d[0]]:\n \n # read the time series data\n curr_example = np.loadtxt(join(d[1], f))\n\n # create a key in the raw_database dict in case it doesn't exist already\n # otherwise just append the new example in the 2D array\n if (s in raw_dataset):\n raw_dataset[s] = np.append(raw_dataset[s], [curr_example], axis=0)\n else:\n raw_dataset[s] = np.array([curr_example])\n \n return raw_dataset", "title": "" }, { "docid": "04db8b7a6cb73c05135b31339a5f8b2c", "score": "0.53776336", "text": "def gen_dataset(\n self,\n data: Iterable[Dict[str, Any]],\n include_label_fields: bool = True,\n shard_range: Tuple[int, int] = None,\n ) -> textdata.Dataset:\n to_process = {}\n to_process.update(self.features)\n to_process.update(self.extra_fields)\n if include_label_fields:\n to_process.update(self.labels)\n else:\n to_process.pop(Target.TARGET_LABEL_FIELD, None)\n fields = {name: (name, field) for name, field in to_process.items()}\n # generate example from dataframe\n examples = [\n textdata.Example.fromdict(row, fields)\n for idx, row in enumerate(self.preprocess(data))\n if not shard_range or shard_range[0] <= idx <= shard_range[1]\n ]\n return textdata.Dataset(examples, to_process)", "title": "" }, { "docid": "6a0c24beb27b08bcb60c1ddcc69aef43", "score": "0.5374439", "text": "def to_dataframes_source(self,\n processing_instructions: ProcessingInstructions) \\\n -> Iterator['DataframesRecordsSource']:\n pass", "title": "" }, { "docid": "9915917bdb9039e784e36ce19cbe479a", "score": "0.5372318", "text": "def 
concat_df():\n dfs = [pd.DataFrame(np.random.randn(size_per, 4), columns=cols) for _ in range(N)]\n return pd.concat(dfs, ignore_index=True)", "title": "" }, { "docid": "b9f92761e48070ab34618b1660f61e98", "score": "0.5367842", "text": "def build_trainset(df, user_rows=None, colnames=None):\n if colnames is not None:\n train = df[colnames]\n else:\n train = df\n reader = Reader(line_format='user item rating')\n if user_rows is not None:\n trainset_load = Dataset.load_from_df(pd.concat([train, user_rows]), reader) #dataset plus user rows\n else:\n trainset_load = Dataset.load_from_df(train, reader)\n trainset = trainset_load.build_full_trainset() #build trainset\n return trainset", "title": "" }, { "docid": "84a24a88be7e8d9120ec93bd12e2e7a5", "score": "0.53663695", "text": "def create_train_test_set(dataframe,\n train_frac,\n test_frac,\n target,\n random_state = 123):\n train_dataset = dataframe.sample(frac = train_frac, random_state = random_state)\n tmp = dataframe.drop(train_dataset.index)\n test_dataset = tmp.sample(frac = test_frac, random_state = random_state)\n tmp.drop(test_dataset.index)\n train_labels = train_dataset.pop(target)\n train_features = train_dataset\n test_labels = test_dataset.pop(target)\n test_features =test_dataset\n return train_features, train_labels, test_features, test_labels", "title": "" }, { "docid": "5015f8acd66c446093207a7e76e6c9f4", "score": "0.53640664", "text": "def dataframe(self, hash_list):\n assert len(hash_list) == 1, \"Multi-file DFs not supported yet.\"\n filehash = hash_list[0]\n pfile = fastparquet.ParquetFile(self._object_path(filehash))\n return pfile.to_pandas()", "title": "" }, { "docid": "a90146a04a3d49ecfeb78aedcb85eb03", "score": "0.5358037", "text": "def get_dataset(features: List[InputSpan]) -> TensorDataset:\n all_input_ids = torch.tensor(\n [f.input_ids for f in features], dtype=torch.long)\n all_input_mask = torch.tensor(\n [f.input_mask for f in features], dtype=torch.long)\n all_segment_ids = torch.tensor(\n [f.segment_ids for f in features], dtype=torch.long)\n all_label_ids = torch.tensor(\n [f.label_ids for f in features], dtype=torch.long)\n all_prediction_mask = torch.tensor(\n [f.prediction_mask for f in features], dtype=torch.uint8)\n all_example_index = torch.tensor(\n [f.example_index for f in features], dtype=torch.long)\n all_doc_span_index = torch.tensor(\n [f.doc_span_index for f in features], dtype=torch.long)\n\n return TensorDataset(all_input_ids, all_input_mask, all_segment_ids,\n all_label_ids, all_prediction_mask,\n all_example_index, all_doc_span_index)", "title": "" }, { "docid": "f2d59ce0bbf5783e78037f0b42e85f39", "score": "0.5350685", "text": "def load_raw_datasets(path = 'datasets/UCI/', min_idx = None, max_idx = None):\n\n import arff\n # Get .arff file names\n ds_files = sorted([path + f for f in listdir(path) if isfile(join(path, f)) and f[-4:] == 'arff'])\n dss = []\n # The class (label) is sometimes specified with other than 'class'\n class_alias = ['class', 'symboling', 'survival_status', 'num', 'surgical_lesion', 'decision', 'contraceptive_method_used',\n 'class_attribute']\n\n # If no indexes specified, load all datasets\n if min_idx == None: min_idx = 0\n if max_idx == None: max_idx = len(ds_files)\n # Start dataset load\n for i in range(min_idx, max_idx):\n dsf = ds_files[i]\n try:\n print(str(i) + \" \" + dsf)\n # Load datasets\n dataset = arff.load(open(dsf, 'r'))\n # Extract meta-information\n meta_info = [attr for attr in dataset['attributes'] if attr[0].lower() not in class_alias]\n\n # 
Separate data and labels\n data = dataset['data']\n labels = [d[-1] for d in data]\n data = [d[:-1] for d in data]\n\n dss.append((data, labels, meta_info, dsf.split('/')[-1]))\n except Exception as e:\n print('--------------------------------')\n print(str(i) + \" \" + dsf)\n print(str(e))\n print('--------------------------------')\n\n return dss", "title": "" }, { "docid": "f2e0e1675fa37e68ca69a5a8cfb211a3", "score": "0.5348367", "text": "def prepare_dataset(texts: List[str],\n labels: List[int],\n bfe: BoWFeaturesExtractor) -> tf.data.Dataset:\n mat = bfe(texts).toarray()\n mat = np.array(mat, dtype=\"float64\")\n dataset = tf.data.Dataset.from_tensor_slices((mat, labels))\n return dataset", "title": "" }, { "docid": "693dd98fe69bf9391f9283992ae81424", "score": "0.5345807", "text": "def _init_from_pandas(self, s, *args):\n\n kdf = DataFrame(pd.DataFrame(s))\n self._init_from_spark(kdf._sdf[kdf._metadata.column_fields[0]],\n kdf, kdf._metadata.index_info)", "title": "" }, { "docid": "9fd69d4ada1495293876a79817892691", "score": "0.534406", "text": "def load_data_set(self):\n dataset = read_csv(self.url, names=self.names)\n return dataset", "title": "" }, { "docid": "b50c802915f8858c639e9e1ddb16ef2c", "score": "0.5338879", "text": "def load_pandas(self, data_frame: pd.DataFrame):\n self.data = data_frame", "title": "" }, { "docid": "dcd1b92b19e6a1bb5cfbeb5f6d5dbec1", "score": "0.5324376", "text": "def load_data_multilabel(data_type=None):\n if data_type is None:\n raise ValueError('Must provide data_type = train or val')\n filename = os.path.join(set_dir, data_type + \".txt\")\n cat_list = list_image_sets()\n df = pd.read_csv(\n filename,\n delim_whitespace=True,\n header=None,\n names=['filename'])\n # add all the blank rows for the multilabel case\n for cat_name in cat_list:\n df[cat_name] = 0\n for info in df.itertuples():\n index = info[0]\n fname = info[1]\n anno = load_annotation(fname)\n objs = anno.findAll('object')\n for obj in objs:\n obj_names = obj.findChildren('name')\n for name_tag in obj_names:\n tag_name = str(name_tag.contents[0])\n if tag_name in cat_list:\n df.at[index, tag_name] = 1\n return df", "title": "" }, { "docid": "1cee7956ec79e7faba4af9763a7b9284", "score": "0.5322191", "text": "def dataframes(self, *columns, **kwargs):\n drop_prefixes = kwargs.get('drop_stream_names', True)\n\n def cname(stream, path):\n return \"{}:{}\".format(stream['name'], \".\".join(path[1:]))\n\n for df in self.iterator(self.inverted):\n if drop_prefixes:\n # TODO: Figure out what needs to happen if names overlap\n z = df[[cname(**p()) if p != \".ts\" else p for p in columns]].copy() if columns else df.copy()\n z.rename(columns={k: k.split(\":\", 1)[1] for k in z.columns if \":\" in k}, inplace=True)\n yield z\n else:\n yield df[[cname(**p()) for p in columns]] if columns else df", "title": "" }, { "docid": "5bd55bfa4517e19b345974575e382243", "score": "0.53189605", "text": "def _build_dataframe(self, feature_set, filter_features=None, df_features=None):\n # create the data frame with just the features\n # from the feature set, at this point\n if df_features is None:\n df_features = self._build_dataframe_with_features(feature_set,\n filter_features)\n\n # if the id column is already in the data frame,\n # then raise an error; otherwise, just add the ids\n if self.id_col in df_features:\n raise ValueError(f'ID column name \"{self.id_col}\" already used as'\n ' feature name.')\n df_features[self.id_col] = feature_set.ids\n\n # if the the labels should exist but the column is already\n # in 
the data frame, then raise an error; otherwise, just add the labels\n if feature_set.has_labels:\n if self.label_col in df_features:\n raise ValueError(f'Class column name \"{self.label_col}\" '\n 'already used as feature name.')\n df_features[self.label_col] = feature_set.labels\n\n return df_features", "title": "" }, { "docid": "6f0f942966b30abfc6e3d7b9c9924456", "score": "0.5307582", "text": "def test_collections_correctly_joins_dataframes(self):\n mex1 = me.MockExtractor('1', 'col_1', 'foo')\n mex2 = me.MockExtractor('1', 'col_2', 'bar')\n mex3 = me.MockExtractor('2', 'col_3', 'baz')\n collection = fex.Collection()\n for ex in [mex1, mex2, mex3]:\n collection.add_feature_extractor(ex)\n collection.run(self.dataset_file)\n contents = open(self.dataset_file).read().splitlines()\n expected = [\n ',MockExtractor__col_1,MockExtractor__col_2,MockExtractor__col_3',\n '1,foo,bar,',\n '2,,,baz'\n ]\n self.assertEqual(expected, contents)", "title": "" }, { "docid": "bb7d024502a1373110108a91c84fa4df", "score": "0.53071314", "text": "def create_sim_data_sets():\n \n # list of quarters of data available\n data_qtrs = ['1Q14',\n '2Q14', \n '3Q14',\n '4Q14', \n '1Q15',\n '2Q15',\n '3Q15',\n '4Q15',\n '1Q16',\n '2Q16',\n '3Q16',\n '4Q16',\n '1Q17',\n '2Q17',\n '3Q17',\n '4Q17',\n '1Q18',\n '2Q18',\n '3Q18']\n \n # open combined_clean data\n D = pd.read_csv('data/combined_clean.csv', low_memory=False)\n D.drop(columns='Unnamed: 0', inplace=True)\n \n \n # sliding window counter\n start_window = 0\n end_window = 8\n \n # loop through data\n for i in range(1, 12):\n \n # create train sets\n train_qtrs = data_qtrs[start_window:end_window]\n train_temp = D[D['calendar_qtr'].isin(train_qtrs)]\n train_temp.to_csv('data/sim_training_sets/training_'+data_qtrs[end_window]+'.csv')\n \n \n # create test sets\n test_qtr = data_qtrs[end_window]\n test_temp = D[D['calendar_qtr'] == test_qtr]\n test_temp.to_csv('data/sim_test_sets/test_'+data_qtrs[end_window]+'.csv')\n \n # step counters \n start_window += 1\n end_window += 1", "title": "" }, { "docid": "1bb3bf1eb280b9e8f5ced21750cb1e45", "score": "0.5305703", "text": "def RowsetToDataframe(rowset, maxRows, columnList=None):\n # selected column indexes list to create the dataframe\n columnIndexes = []\n if columnList is None:\n # create data frame labels\n labels = [column.Name for column in rowset.Schema]\n else:\n for column in columnList:\n if type(column) == str:\n columnIndex = rowset.Schema.IndexOf(column)\n # columnIndex will be -1 if column does not exist\n if columnIndex >= 0:\n columnIndexes.append(columnIndex)\n else:\n raise RuntimeError(\"column \" + column + \" does not exist in the input rowset schema\")\n else:\n columnIndexes.append(column)\n\n # check column uniqueness in columnIndexes\n uniqueColumnList = set()\n for columnIndex in columnIndexes:\n if columnIndex not in uniqueColumnList:\n uniqueColumnList.add(columnIndex)\n else:\n raise RuntimeError(\"Find duplicate column in provided columnList, column index : \" + str(columnIndex))\n labels = [rowset.Schema[column].Name for column in columnIndexes]\n\n # user asked for all rows to be in single frame\n # set row count as max possible int\n if maxRows < 0:\n maxRows = sys.maxsize\n\n # iteratively adding rows to dataFrame is very inefficient,\n # better solution is to append all rows to a list,\n # then create the dataFrame from the list at once.\n rowList = []\n for row in rowset:\n if len(rowList) < maxRows:\n if columnList is None:\n rowData = [value for value in row]\n else:\n rowData = 
[row[columnIndex] for columnIndex in columnIndexes]\n # add created row to rowList\n rowList.append(rowData)\n if len(rowList) == maxRows:\n break\n\n # create data frame from rowList\n dataFrame = pandas.DataFrame(\n data=rowList, \n index=range(len(rowList)), \n columns=labels )\n return dataFrame", "title": "" }, { "docid": "95c63cc6bb968567d48e225470a763ad", "score": "0.5300165", "text": "def create_aspset510_dataset(data_dir: FSPath, split: str, **kwargs) -> Aspset510Dataset:\n if kwargs.get('temporal_downsample', None) is None:\n kwargs['temporal_downsample'] = 5 if split == 'test' else 1\n aspset = Aspset510(data_dir)\n clips = aspset.clips(split)\n return Aspset510Dataset(clips, **kwargs)", "title": "" } ]
f92705a33ea09aa3a95aff8186a07a47
Test that valid arguments do not result in a crash. Only validates that there are no crashes; does not validate any other behavior!
[ { "docid": "8b03cc9fbef3905149ad3e71edf82a0f", "score": "0.63943064", "text": "def test_no_crash_on_valid_args(\n self, parsed_args_all_subparsers, dummyapi_instance, command_mock\n ):\n _repobee.cli.dispatch.dispatch_command(\n parsed_args_all_subparsers, dummyapi_instance, EMPTY_PATH\n )", "title": "" } ]
[ { "docid": "9f45e9ad983cfb134ea46c64981b0821", "score": "0.72151494", "text": "def test_CleanArgs_err():\n pass", "title": "" }, { "docid": "7d80397c5c922d3fab33ecabb0b21c0c", "score": "0.72042733", "text": "def validate_arguments(args):\n pass", "title": "" }, { "docid": "1e02fc1b787b62c0258650b99f5b2cfd", "score": "0.7041619", "text": "def test_bad_arguments(self):\n self.run_check(1,\n 'Error: no command specified\\n',\n '',\n [ self.TOOL ])\n self.run_check(1,\n 'Error: command must be either add or delete\\n',\n '',\n [ self.TOOL, 'foo' ])\n self.run_check(1,\n 'Error: extraneous arguments\\n',\n '',\n [ self.TOOL, 'add', 'user', 'pass', 'toomuch' ])\n self.run_check(1,\n 'Error: delete only needs username, not a password\\n',\n '',\n [ self.TOOL, 'delete', 'user', 'pass' ])", "title": "" }, { "docid": "eb7c0cd5230d219a8ba4695434cecad6", "score": "0.7022368", "text": "def test_parse_args_empty():\n with pytest.raises(SystemExit):\n parse_args(args=[])", "title": "" }, { "docid": "5823ddfb27810b0c2e450ec37c8870fc", "score": "0.69996107", "text": "def test_no_args():\n with raises(SystemExit) as ex:\n parser.parse_args([])\n assert ex.code != 0", "title": "" }, { "docid": "c2c09ed1d8064a5d1c98cdda0dc45562", "score": "0.69341683", "text": "def check_args():\n return True", "title": "" }, { "docid": "3c7386ab265dac35eab422efaef73e03", "score": "0.68599683", "text": "def test_unrestrained_callable_arguments(self):\n callable = lambda x: x\n self.any_func_args(callable)\n\n with self.assertRaises(RuntimeTypeError):\n self.any_func_args('bad_input')", "title": "" }, { "docid": "5d69113a35b47ed3c30bc262f6ac839a", "score": "0.68350786", "text": "def test_does_not_crash(self):\n my_function(5)", "title": "" }, { "docid": "7d98c3d8b59ee1aa77c5a299f0376338", "score": "0.67714447", "text": "def test_args_exception_exit(self):\n sys.argv=['string','3','r']\n with self.assertRaises(SystemExit):\n _check_cli_args()\n \n sys.argv=['string',]\n with self.assertRaises(SystemExit):\n _check_cli_args()", "title": "" }, { "docid": "8f4fa89e5ff129bb2b700560204577f9", "score": "0.67597204", "text": "def test_string_invalid_arguments():\n #assert generate_prime_factors('string123') == []\n with pytest.raises(ValueError):\n #generate_prime_factors('string123')\n generate_prime_factors(str)", "title": "" }, { "docid": "a3d218d8127fbf960cb4387aee2d2926", "score": "0.6757075", "text": "def verify_arguments(self, args):", "title": "" }, { "docid": "3cdacfeb1aa561c7ea5a19c1dc94d16c", "score": "0.6729706", "text": "def test_error_on_insufficient_arguments(ctx):\n def act(arg0, arg1):\n pytest.fail(test_error_on_insufficient_arguments.__doc__)\n act = ctx.__call__(act)\n with pytest.raises(TypeError):\n ctx.execute('act xyz')", "title": "" }, { "docid": "1cb6dfae1e5e79c23723c4fb736915c8", "score": "0.67289203", "text": "def test_build_invalid_params(self):\n with self.assertRaises(helpers.EarlyExitError):\n fuzzer_stats.build_results('', '', '', '', '')", "title": "" }, { "docid": "2d88d9537fa44957da8e8584a4831cd6", "score": "0.6724238", "text": "def test_bad_func(self):\n with self.assertRaises(RuntimeTypeError):\n self.test(5, 5)", "title": "" }, { "docid": "c9c1bfdd460667bc7fc20f7269a92f05", "score": "0.6719645", "text": "def test_with_empty_args(self):\n with self.assertRaises(SystemExit):\n self.parser.parse_args([])", "title": "" }, { "docid": "c9c1bfdd460667bc7fc20f7269a92f05", "score": "0.6719645", "text": "def test_with_empty_args(self):\n with self.assertRaises(SystemExit):\n self.parser.parse_args([])", 
"title": "" }, { "docid": "3b6d02a391030f6dbc7270f1ea16a8db", "score": "0.6677502", "text": "def test_main_error2():\n with raises(SystemExit):\n main([\"000000000000000000000000000000000000000001d6, d0\"])", "title": "" }, { "docid": "43b35bb0d5613f02101f4f7830c8014f", "score": "0.66308165", "text": "def test_bad_args(self):\n\n # various bad values\n with self.assertRaises(ProcessorException):\n Align(dict())\n\n with self.assertRaises(ProcessorException):\n Rate(dict())\n\n with self.assertRaises(ProcessorException):\n self._simple_ts.align(method='bogus')\n\n with self.assertRaises(ProcessorException):\n self._simple_ts.align(limit='bogus')\n\n # non event types\n ticket_range = dict(\n name=\"outages\",\n columns=[\"timerange\", \"title\", \"esnet_ticket\"],\n points=[\n [[1429673400000, 1429707600000], \"BOOM\", \"ESNET-20080101-001\"],\n [[1429673400000, 1429707600000], \"BAM!\", \"ESNET-20080101-002\"],\n ],\n )\n\n ts = TimeSeries(ticket_range)\n with self.assertRaises(ProcessorException):\n ts.align()\n\n with self.assertRaises(ProcessorException):\n ts.rate()", "title": "" }, { "docid": "e485e9f813134a2964e26a786fffa0d1", "score": "0.6624594", "text": "def test_multiple_arguments_error(self):\n returncode, output = run_cli(main, \"a\", \"b\")\n assert returncode != 0", "title": "" }, { "docid": "a9a8ecf460a332b5f4c3a86ca6923462", "score": "0.66199046", "text": "def validate_args(self):\n pass", "title": "" }, { "docid": "bd31b738b599745ee5085b5abe533e1e", "score": "0.66174436", "text": "def test_invalid_args(self):\n psbt = pointer(wally_psbt())\n\n # init\n cases = [\n (1, 0, 0, 0, 0, psbt), # Invalid version\n (0, 0, 0, 0, 0xff, psbt), # Invalid flags\n (2, 0, 0, 0, 0xff, psbt), # Invalid flags (v2)\n (0, 0, 0, 0, INIT_PSET, psbt), # v0 PSET\n (0, 0, 0, 0, 0, None), # NULL dest\n ]\n for args in cases:\n self.assertEqual(WALLY_EINVAL, wally_psbt_init_alloc(*args))\n\n # psbt_from_base64\n src_base64 = JSON['valid'][0]['psbt']\n for args in [(None, 0, psbt), # NULL base64\n ('', 0, psbt), # Empty base64\n (src_base64, 0xff, psbt), # Invalid flags\n (src_base64, 0, None)]: # NULL dest\n self.assertEqual(WALLY_EINVAL, wally_psbt_from_base64(*args))\n\n self.assertEqual(WALLY_OK, wally_psbt_from_base64(JSON['valid'][0]['psbt'], 0, psbt))\n\n # psbt_clone_alloc\n clone = pointer(wally_psbt())\n for args in [(None, 0x0, clone), # NULL src\n (psbt, 0x1, clone), # Invalid flags\n (psbt, 0x0, None)]: # NULL dest\n self.assertEqual(WALLY_EINVAL, wally_psbt_clone_alloc(*args))\n\n # Populate PSBT with one input and output to test various invalid args for taproot keypaths\n self.assertEqual(WALLY_OK, wally_psbt_init_alloc(2, 1, 1, 0, 0, psbt))\n tx_in = pointer(wally_tx_input())\n self.assertEqual(WALLY_OK, wally_psbt_add_tx_input_at(psbt, 0, 0, tx_in))\n\n tx_output = pointer(wally_tx_output())\n ret = wally_tx_output_init_alloc(1234, b'\\x59\\x59', 2, tx_output)\n self.assertEqual(WALLY_OK, ret)\n ret = wally_psbt_add_tx_output_at(psbt, 0, 0, tx_output)\n self.assertEqual(WALLY_OK, ret)\n\n pk, pk_len = make_cbuffer('339ce7e165e67d93adb3fef88a6d4beed33f01fa876f05a225242b82a631abc0')\n mkl, mkl_len = make_cbuffer('00' * 32)\n fpr, fpr_len = make_cbuffer('00' * 4)\n path, path_len = (c_uint32 * 1)(), 1\n i, flags = 0, 0\n\n invalid_args = [\n (None, i, flags, pk, pk_len, mkl, mkl_len, fpr, fpr_len, path, path_len), # NULL psbt\n (psbt, 1, flags, pk, pk_len, mkl, mkl_len, fpr, fpr_len, path, path_len), # Invalid index\n (psbt, i, 0x01, pk, pk_len, mkl, mkl_len, fpr, fpr_len, path, 
path_len), # Invalid flags\n (psbt, i, flags, pk, pk_len+1, mkl, mkl_len, fpr, fpr_len, path, path_len), # Bad pubkey length\n (psbt, i, flags, pk, pk_len, mkl, mkl_len-1, fpr, fpr_len, path, path_len), # Bad tapleaf_hashes_len\n (psbt, i, flags, pk, pk_len, None, mkl_len, fpr, fpr_len, path, path_len), # Merkle length should be 0\n (psbt, i, flags, None, pk_len, mkl, mkl_len, fpr, fpr_len, path, path_len), # No pubkey given\n (psbt, i, flags, pk, pk_len, mkl, mkl_len, fpr, fpr_len-1, path, path_len), # Bad fpr length\n (psbt, i, flags, pk, pk_len, mkl, mkl_len, fpr, fpr_len, None, path_len), # NULL child path\n (psbt, i, flags, pk, pk_len, mkl, mkl_len, fpr, fpr_len, path, path_len-1), # Bad child path length\n ]\n\n for args in invalid_args:\n self.assertEqual(WALLY_EINVAL, wally_psbt_add_input_taproot_keypath(*args))\n self.assertEqual(WALLY_EINVAL, wally_psbt_add_output_taproot_keypath(*args))\n\n valid_args = (psbt, i, flags, pk, pk_len, mkl, mkl_len, fpr, fpr_len, path, path_len)\n self.assertEqual(WALLY_OK, wally_psbt_add_input_taproot_keypath(*valid_args))\n self.assertEqual(WALLY_OK, wally_psbt_add_output_taproot_keypath(*valid_args))", "title": "" }, { "docid": "81cadd8992dbe0050d364e5408408cc1", "score": "0.6596214", "text": "def test_check_zero_raises():\n with pytest.raises(ValueError):\n check_zero_input(0)", "title": "" }, { "docid": "aec168248f2033a9bb7ad3bca5acc265", "score": "0.6595272", "text": "def test_errorfortoomanyarguments(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Rectangle(1, 2, 3, 4, 5, 7)\n self.assertEqual(\n str(e.exception),\n \"'__init__() takes from 3 to 6\" +\n \"positional arguments but 7 were given'\")", "title": "" }, { "docid": "fb8ffad286a505ed7b0f2d5829c69ffd", "score": "0.65714896", "text": "def _check_arguments(arguments):\n for arg in arguments:\n if arg in UNSUPPORTED_ARGUMENTS:\n raise ValueError(\"Argument {0} is not supported.\".format(arg))", "title": "" }, { "docid": "7b8bc7ed9b301db17b0e313a316f1065", "score": "0.6566124", "text": "def test_bad_argument(self):\n if False:\n cpap_extraction.sys.argv = [ \"cpap_extraction.py\", \"inputfile\", \"extrastuff\"]\n with self.assertRaises(SystemExit):\n cpap_extraction.setup_args()", "title": "" }, { "docid": "689f0c6b4534d81de82fcf6de854c91b", "score": "0.6564911", "text": "def test_error_on_superfluous_arguments(ctx):\n def act():\n pytest.fail(test_error_on_superfluous_arguments.__doc__)\n act = ctx.__call__(act)\n with pytest.raises(TypeError):\n ctx.execute('act xyz')", "title": "" }, { "docid": "ab2b6fefa6baba505744044d24ed60ec", "score": "0.65373904", "text": "def fails(arg2):\n raise RuntimeError", "title": "" }, { "docid": "6d09b610d44aea97187a7c932e624376", "score": "0.6530685", "text": "def test_unknown_args(self):\n\n argv = [get_name(), self.command, \"path\", \"-l\", \"82\"]\n with self.assertRaises(SystemExit):\n execute_from_command_line(argv)", "title": "" }, { "docid": "4b38fa81e4c9940188f88ac0c1f57d61", "score": "0.6527779", "text": "def test_too_few_args(self):\n argv = ['check_diff.py']\n exception = self.assertRaises(SystemExit, check_diff, argv)\n self.assertEqual(exception.code, 1)\n self.assertEqual(sys.stdout.getvalue(), \n 'usage: %(prog)s file1 file2\\n' % {'prog':argv[0]})", "title": "" }, { "docid": "155e933bd67a24a3e3ff8a117eb4e6f8", "score": "0.6479146", "text": "def test_calc_f107a_bad_inputs(self, inargs, vmsg):\n\n with pytest.raises(ValueError) as verr:\n mm_f107.calc_f107a(self.testInst, *inargs)\n\n assert 
str(verr).find(vmsg) >= 0\n return", "title": "" }, { "docid": "1345847e8434fbc6f172caa958b1609f", "score": "0.6463262", "text": "def test_too_many_args(self):\n argv = ['check_diff.py', 'file1', 'file2', 'extra']\n exception = self.assertRaises(SystemExit, check_diff, argv)\n self.assertEqual(exception.code, 1)\n self.assertEqual(sys.stdout.getvalue(), \n 'usage: %(prog)s file1 file2\\n' % {'prog':argv[0]})", "title": "" }, { "docid": "8de5f1c9242d133d3b388ff21d9734ce", "score": "0.6454369", "text": "def test_bad_input_pv_call_nof_arg(self): \n self.assertRaises(TypeError, DerivativePayoff.PlainVanilla, *(10.0,3,\"Uniform\"))", "title": "" }, { "docid": "c6a7374070a55d0c457647a237714b9b", "score": "0.6441079", "text": "def test04(self):\r\n self.assertRaises(TypeError, robustApply, oneArgument, \"this\", blah = \"that\")", "title": "" }, { "docid": "bc081d1706dbac652638e67ad6880b0e", "score": "0.64249605", "text": "def test_zero_arguments(self):\n parser = ArgumentParser()\n self.assertRaises(ArgumentError, parser.parse, ['dotrollcli'])", "title": "" }, { "docid": "1971fab9e904f3242db579cbaa26692a", "score": "0.64137787", "text": "def test_errorfortoomanyarguments(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Rectangle(1, 2, 3, 4, 5, 6)\n self.assertEqual(\n str(e.exception),\n \"__init__() takes from 3 to 6 positional \" +\n \"arguments but 7 were given\")", "title": "" }, { "docid": "62dad68b1a1ed7cb2e6fc0cca5ad8219", "score": "0.6406635", "text": "def check_args(args):\n if args.jobIdRange[0] < 1:\n raise RuntimeError('The first jobIdRange argument must be >= 1.')\n\n if args.jobIdRange[1] < args.jobIdRange[0]:\n raise RuntimeError('The second jobIdRange argument must be >= the first.')", "title": "" }, { "docid": "cee883e8c8eaca90ea57eb1bf39d3e8e", "score": "0.6405955", "text": "def _validate_fun_args(self) -> None:\n pass", "title": "" }, { "docid": "41cc908c0d5cb596736a0c8cd0eac166", "score": "0.63977826", "text": "def test_parser_without_arguments(parser):\n with pytest.raises(SystemExit):\n parser.parse_args([])", "title": "" }, { "docid": "78a2139b13ec9589094660c717f44380", "score": "0.638781", "text": "def invalid_op(line, *args):\n del args\n exit_error(3, line, arg_list[0])", "title": "" }, { "docid": "476d9a4dca75a211f3120dac211f1907", "score": "0.6376125", "text": "def test_check_int_input():\n with pytest.raises(ValueError):\n check_int_input(\"9\")", "title": "" }, { "docid": "f06e7491d2275477ff813629d95557f2", "score": "0.6369082", "text": "def test_invalid_option(self):\n\n sys.argv = ['', '-A', os.path.join(data_dir, 'dummy.pdb')]\n\n self.exec_module()\n\n self.assertEqual(self.retcode, 1)\n self.assertEqual(len(self.stdout), 0)\n self.assertEqual(self.stderr[0][:36],\n \"ERROR!! 
Script takes 1 argument, not\")", "title": "" }, { "docid": "ea5fd711baf416f4140c2fa4a709f447", "score": "0.63611466", "text": "def check_arguments(cls, args):\n pass", "title": "" }, { "docid": "eaad77ab4d56e1eb40390a1aee882105", "score": "0.6359769", "text": "def test_validate_arguments(self, case, sys_exit_called):\n with patch('sys.exit', autospec=True) as m_sys_exit:\n # Call method under test\n node.validate_arguments(case)\n\n # Assert that method exits on bad input\n self.assertEqual(m_sys_exit.called, sys_exit_called)", "title": "" }, { "docid": "e220d0a48e5cd20852ca9569dcea8ddc", "score": "0.6355074", "text": "def test_validate_input_rejection(self):\n with nose.assert_raises(exceptions.RejectionError):\n self.nfa.validate_input('abba')", "title": "" }, { "docid": "b9c6f325eabcbd0681bb66756716ed89", "score": "0.63518095", "text": "def test_error_on_empty_command_line():\n with pytest.raises(SystemExit):\n cli.run_from_line(\"\")", "title": "" }, { "docid": "f82ebc8799b81e9ad07e5ccad83066f9", "score": "0.6341171", "text": "def test_not_numbers():\n\n bad = random_string()\n args = [str(random.choice(range(10))), bad]\n\n for _ in range(2):\n args = list(reversed(args))\n rv, out = getstatusoutput(f'{prg} {\" \".join(args)}')\n assert rv != 0\n assert out.lower().startswith('usage')\n assert re.search(f\"invalid int value: '{bad}'\", out)", "title": "" }, { "docid": "552a51fd5e7f7ebb4e56d742a265631d", "score": "0.63249576", "text": "def _check_args(self, **kwargs):", "title": "" }, { "docid": "5f72b6e3cc5a272b513f7770124d0c28", "score": "0.63157886", "text": "def test_bad_exe_args_2():\n exe_args = [\"list-includes-int\", 5]\n with pytest.raises(TypeError):\n _ = RunSettings(\"python\", exe_args=exe_args)", "title": "" }, { "docid": "3bdf01b85e83241b7c15e3f4323ff256", "score": "0.63111335", "text": "def test_type_check_single_arg_wrong_argument_type(self):\n # The behavior of the following functions are meaningless,\n with self.assertRaises(TypeError):\n f_with_int(2.4)\n\n with self.assertRaises(TypeError):\n f_with_float(\"this is a string\")\n\n with self.assertRaises(TypeError):\n f_with_complex(None)\n\n with self.assertRaises(TypeError):\n f_with_bytes({4, 5, 6})\n\n with self.assertRaises(TypeError):\n f_with_bytearray(lambda x: x)\n\n with self.assertRaises(TypeError):\n f_with_memoryview(9.25)\n\n with self.assertRaises(TypeError):\n f_with_bool(\"this is a string\")\n\n with self.assertRaises(TypeError):\n f_with_str(3)\n\n with self.assertRaises(TypeError):\n f_with_list((3, 5))\n\n with self.assertRaises(TypeError):\n f_with_tuple([3, 4, 5])\n\n with self.assertRaises(TypeError):\n f_with_slice(234)\n\n with self.assertRaises(TypeError):\n f_with_set([3, 4, 5])\n\n with self.assertRaises(TypeError):\n f_with_frozenset((4, 5, 6, 7, 7))\n\n with self.assertRaises(TypeError):\n f_with_enumerate(23)\n\n with self.assertRaises(TypeError):\n f_with_custom(5j)", "title": "" }, { "docid": "e53b6f952cebe53a32b5e8067b095cb3", "score": "0.62843436", "text": "def bad_test():", "title": "" }, { "docid": "7c95737a219c04d7b900c3ed56750e97", "score": "0.62755", "text": "def test_main_wrong_args(self):\n\n wrong_args_list = [\n [\"in_toto_record.py\"],\n [\"in_toto_record.py\", \"--step-name\", \"some\"],\n [\"in_toto_record.py\", \"--key\", self.key_path],\n [\"in_toto_record.py\", \"--step-name\", \"test-step\", \"--key\",\n self.key_path, \"start\", \"--products\"],\n [\"in_toto_record.py\", \"--step-name\", \"test-step\", \"--key\",\n self.key_path, \"stop\", \"--materials\"]\n 
]\n\n for wrong_args in wrong_args_list:\n with patch.object(sys, 'argv',\n wrong_args), self.assertRaises(SystemExit):\n in_toto_record_main()", "title": "" }, { "docid": "d9e9d024da4a9b23d95ede22c560d42f", "score": "0.62745625", "text": "def test_error_on_insufficient_argument_to_short(ctx):\n def act(*, count: ctx.Key):\n pytest.fail(\n test_error_on_insufficient_argument_to_short.__doc__)\n act = ctx.__call__(act)\n with pytest.raises(TypeError):\n ctx.execute('act -c')", "title": "" }, { "docid": "2bb891f2f623182d407a3201cb1799a7", "score": "0.6266708", "text": "def test_bad_func_call(self):\n def bad_callsig(x: str, y: str) -> int:\n return int(x + y)\n\n with self.assertRaises(RuntimeTypeError):\n self.test(bad_callsig, 5)", "title": "" }, { "docid": "1478002ec365688e7b4aa5730bc991a3", "score": "0.6250745", "text": "def validate(args):\n try:\n validate_aliases(args.aliases)\n validate_numbers(args.numbers)\n except ValueError as err:\n print(err, '\\n')\n raise", "title": "" }, { "docid": "8f8a8e28d999d57477abd2dd0b3482db", "score": "0.62459135", "text": "def _crash_func(_x) -> None:\n msg = \"Propagation Crash.\"\n raise ValueError(msg)", "title": "" }, { "docid": "308d1e15359ca09d3343f093ea10b4ce", "score": "0.6239394", "text": "def test_no_valid_args_prints_usage_and_exit(self):\r\n parser = FakeParser()\r\n self.assertRaises(SystemExit, parse_options, parser, (\"bazinga!\",))\r\n self.assertEqual([\"print_usage\"], parser.called)", "title": "" }, { "docid": "5ee79dbd7ccdbe93733dd265215695c4", "score": "0.62329644", "text": "async def test_exception() -> None:\n with pytest.raises(ValueError, match=\"invalid literal for int\"):\n assert await to_process.run_sync(int, \"a\")", "title": "" }, { "docid": "8e83f9262d4001649523d24e41eef784", "score": "0.62318355", "text": "def test_float_invalid_arguments():\n #assert generate_prime_factors(1.11) == []\n with pytest.raises(ValueError):\n #generate_prime_factors(1.11)\n generate_prime_factors(float)", "title": "" }, { "docid": "160e5feb12ecdcf7a7bfd135acd6ba40", "score": "0.6226601", "text": "def test_positional_only_and_arg_invalid_calls(a, b, /, c):", "title": "" }, { "docid": "ab49eec4bb806b6d6a4f2fcefa28121d", "score": "0.6220214", "text": "def my_func_test_fail(*args, **kwargs):\n return False", "title": "" }, { "docid": "46bfe6a4bcdaca9aac6fb8a3c8ae26d6", "score": "0.62190956", "text": "def test_wrong_number_args():\n\n for k in [0, 1, 3]:\n args = ' '.join(map(str, random.sample(range(10), k=k)))\n rv, out = getstatusoutput(f'{prg} {args}')\n assert rv != 0\n assert out.lower().startswith('usage')", "title": "" }, { "docid": "bf72d085812ce409a1ad68e39ce6c102", "score": "0.621531", "text": "def validate_args(args):\n if not os.path.isfile(args.field):\n print \"Error: The field file does not exist: \" + args.field\n sys.exit(-1)\n if not os.path.isfile(args.script):\n print \"Error: The script file does not exist: \" + args.script\n sys.exit(-1)", "title": "" }, { "docid": "5a432fb4a941139f63f0c1e3daf587d2", "score": "0.621297", "text": "async def test_get_arguments_no_arguments():\n with open(os.devnull, \"w\") as file:\n try:\n sys.stderr = file\n with pytest.raises(SystemExit):\n await parse_arguments()\n finally:\n sys.stderr = sys.__stderr__", "title": "" }, { "docid": "623423b40cad690ea8d204ccaf3bb024", "score": "0.62101436", "text": "def test_no_standard_args_usage(a, b, /, *, c):", "title": "" }, { "docid": "9ce07fc208e0fe11b6d718da095d157a", "score": "0.6200295", "text": "def test_raise_argument_errors(self):\n with 
self.assertRaises(TypeError) as exc:\n Rectangle(1)\n self.assertEqual(str(exc.exception),\n \"__init__() missing 1 required positional argument:\" +\n \" 'height'\")\n\n with self.assertRaises(TypeError) as exc:\n Rectangle()\n self.assertEqual(str(exc.exception),\n \"__init__() missing 2 required positional \" +\n \"arguments: 'width' and 'height'\")", "title": "" }, { "docid": "6010139f53226326b1bff25b3041c065", "score": "0.6199665", "text": "def test_bad_user_input(self):\n # self.set_up_game_env()\n\n\n # TODO\n return None", "title": "" }, { "docid": "6986f9ad6865dd122d71560625fe21f2", "score": "0.61775655", "text": "def test_list_my_attack_surfaces_command_command_when_invalid_args_are_provided(args, err_msg, client):\n from PassiveTotal_v2 import list_my_attack_surfaces_command\n with pytest.raises(ValueError) as de:\n list_my_attack_surfaces_command(client, args)\n assert str(de.value) == err_msg", "title": "" }, { "docid": "0a6626e54114c37c8b63e6b2815df9a9", "score": "0.6177333", "text": "def _check_call(self, func, *args):\n if func(*args) is None and self.raise_on_errors is True:\n raise UWSGICacheError(\n \"Call to {name}({args}) failed\".format(\n name=func.__name__,\n args=\", \".join(map(repr, args))\n )\n )", "title": "" }, { "docid": "1a3513315a3bfce9d24c87fd173959fe", "score": "0.61664176", "text": "def _check_bogus_input(self, grid_func):\n\n ## Check form of the density input.\n with self.assertRaises(ValueError):\n grid_func([])\n\n with self.assertRaises(TypeError):\n grid_func(density='fossa')\n\n ## Check the 'num_levels' parameter.\n with self.assertRaises(ValueError):\n grid_func(self.unique_density, num_levels=-1)\n\n with self.assertRaises(ValueError):\n grid_func(self.unique_density, num_levels=1)\n\n with self.assertRaises(TypeError):\n grid_func(self.unique_density, num_levels=2.17)\n\n with self.assertRaises(TypeError):\n grid_func(self.unique_density, num_levels='fossa')", "title": "" }, { "docid": "3ef836e99e24f41be02545f64608ad55", "score": "0.6154866", "text": "def test_too_many_args(self):\n Base._Base__nb_objects = 0\n with self.assertRaises(TypeError) as e:\n r1 = Rectangle(2, 4, 2, 4, 2, 2, 4)\n self.assertEqual(\n str(e.exception),\n \"__init__() takes from 3 to 6 positional \" +\n \"arguments but 8 were given\")", "title": "" }, { "docid": "962bf22901900d100b8266bc950b01c9", "score": "0.61510766", "text": "def validate_input_args(self):\r\n try:\r\n c = int(self.args.count)\r\n except ValueError:\r\n return False\r\n if c >= 0:\r\n return True\r\n return False", "title": "" }, { "docid": "a5608a11a0641db0192cb95e31711d8b", "score": "0.61437947", "text": "def check_args_integrity(self):\n self.logger.debug(\"Checking arguments integrity\")\n args_false_count = [self.kwargs.push].count(False)\n args_none_count = [self.kwargs.push].count(None)\n no_args_count = args_false_count + args_none_count\n if no_args_count in [1]:\n self.logger.critical(\"you must specify a command\")\n return False\n return True", "title": "" }, { "docid": "f8462fdf5279ef07601c1766f927c4a7", "score": "0.6137626", "text": "def test_communicate_failure(self):\n with self.assertRaises(TypeError) as c:\n self.run_python('foo', input=123)\n\n self.assertRegexpMatches(str(c.exception), r\"not '?int'?\")", "title": "" }, { "docid": "a50bef1c18390b4733b97b306e26ea53", "score": "0.61343807", "text": "def check_impl_args(build_script_impl, args):\n\n pipe = subprocess.Popen(\n [build_script_impl, '--check-args-only=1'] + args,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n\n _, 
err = pipe.communicate()\n\n if pipe.returncode != 0:\n raise ValueError(str(err.splitlines()[0].decode()))", "title": "" }, { "docid": "e3bd0ad405ae54080bf02955edadf598", "score": "0.6117975", "text": "def test_invalid_input_constructor():\n\n nose.tools.assert_raises(IOError, Picture, filename='nosuchfile')\n nose.tools.assert_raises(TypeError, Picture)\n nose.tools.assert_raises(ValueError, Picture, image='thisisnotanimage')\n nose.tools.assert_raises(ValueError, Picture, 0, 0)\n nose.tools.assert_raises(ValueError, Picture, 0, 1)\n nose.tools.assert_raises(ValueError, Picture, -1, -1)", "title": "" }, { "docid": "eca6af2bbe716d6925b777d804f03d86", "score": "0.61153895", "text": "def test_ensure_mount_input_invalid() -> None:\n with pytest.raises(\n subject.InvalidPipetteMountError, match=\"must be 'left' or 'right'\"\n ):\n subject.ensure_mount(\"oh no\")\n\n with pytest.raises(\n subject.PipetteMountTypeError,\n match=\"'left', 'right', or an opentrons.types.Mount\",\n ):\n subject.ensure_mount(42) # type: ignore[arg-type]\n\n with pytest.raises(\n subject.InvalidPipetteMountError, match=\"Use the left or right mounts instead\"\n ):\n subject.ensure_mount(Mount.EXTENSION)", "title": "" }, { "docid": "7feda6aa74e1b2e845cad98e77ac2893", "score": "0.6114847", "text": "def test_constructor_invalid():\n\n nose.tools.assert_raises(ValueError, Color, '', '', '')\n nose.tools.assert_raises(ValueError, Color, -1, -1, -1)\n nose.tools.assert_raises(ValueError, Color, 1000, 1000, 1000)", "title": "" }, { "docid": "266c8d90b35122800294c0ede3afabc7", "score": "0.6101743", "text": "def test_get_common_arguments_invalid_value_for_query():\n from PassiveTotal_v2 import get_common_arguments\n\n # Configure\n args = {\n 'query': '',\n 'start': '2016-02-02 02:32:44'\n }\n\n # Execute\n with pytest.raises(ValueError) as e:\n get_common_arguments(args)\n\n # Assert\n assert 'The given value for query is invalid.' 
== str(e.value)", "title": "" }, { "docid": "8a89138b1f2b01fd07d9cdeed1c2cf26", "score": "0.60988635", "text": "def test_pt_list_my_asi_assets_command_when_invalid_args_are_provided(args, err_msg):\n from PassiveTotal_v2 import list_my_asi_assets_command\n with pytest.raises(ValueError) as de:\n list_my_asi_assets_command(client, args)\n assert str(de.value) == err_msg", "title": "" }, { "docid": "3accc3eb2a3e2621dd1cb01831614fb6", "score": "0.6082647", "text": "def negative_test_value(self):\n self.assertRaises(ValueError, MyClass().my_func, None, [], \"a\")\n self.assertRaises(ValueError, MyClass().my_func, 1, None, \"a\")\n self.assertRaises(ValueError, MyClass().my_func, 1, [], None)\n self.assertRaises(ValueError, MyClass().my_func, a=None, b=[], c=\"a\")\n self.assertRaises(ValueError, MyClass().my_func, a=1, b=None, c=\"a\")\n self.assertRaises(ValueError, MyClass().my_func, a=1, b=[], c=None)", "title": "" }, { "docid": "e4b63053eaa854b9be06456723986767", "score": "0.60821223", "text": "def test_error_on_insufficient_argument_to_long(ctx):\n def act(*, opt):\n pytest.fail(\n test_error_on_insufficient_argument_to_long.__doc__)\n act = ctx.__call__(act)\n with pytest.raises(TypeError):\n ctx.execute('act --opt')", "title": "" }, { "docid": "948d585d5c184bed831e9ae88946a0da", "score": "0.60789675", "text": "def test_hints_from_strings_invalid():\n with pytest.raises(ValueError):\n hints_from_strings(\"abcde\")", "title": "" }, { "docid": "e62a159343830759d68f6a8a90854247", "score": "0.60759264", "text": "def validate_args(args):\n #Getting and validating args from command line\n if args[0] not in ['impute','evaluate']:\n print('invalid args: please choose an action in < impute , evaluate >')\n sys.exit(0)", "title": "" }, { "docid": "044e78ba9b011c2638c118f8eff7a8b4", "score": "0.60743177", "text": "def test_version_bad_input():\n v1 = Version_(\"abcdefg\")\n\n # todo: fix behavior to ensure versions are valid.\n assert v1", "title": "" }, { "docid": "6c8bdcb97521ba925a6e78b0ab8eab2c", "score": "0.60675097", "text": "def test_check_positive():\n assert check_positive(\"1\") == 1.0\n assert isinstance(check_positive(\"20\"), float)\n\n with pytest.raises(ValueError):\n check_positive(10) == 10.0 # input must be float\n with pytest.raises(argparse.ArgumentTypeError):\n check_positive(\"-1\")\n with pytest.raises(ValueError):\n check_positive(-1.0)", "title": "" }, { "docid": "6c794d71f0603ed7baf93b9653506eea", "score": "0.6058637", "text": "def checkArguments(argDict):\n\t# email, name and handle are required parameters, code is optional\n\tfor _ in (\"email\", \"name\", \"handle\"):\n\t\tif _ not in argDict:\n\t\t\treturn (False, \"Incorrect arguments\")\n\n\t# Sanitise all our arguments to UTF-8 strings\n\tglobals.sanitise(argDict)\n\n\t# Ensure the email doesn't start with @, contains one @ before\n\t# the dot, contains one dot and doesn't have an @ after the dot\n\tif not re.match(\"[^@]+@[^@]+\\.[^@]+\", argDict[\"email\"]):\n\t\treturn (False, \"Bad email\")\n\n\t# Handles are alphanumeric with underscore and hyphens\n\tif not re.match(\"^[\\-\\w]*$\", argDict[\"handle\"]):\n\t\treturn (False, \"Bad handle\")\n\n\t# Names are alphanumeric with space, underscores, hyphens and quotes\n\tif not re.match(\"^[\\-\\w' ]*$\", argDict[\"name\"]):\n\t\treturn (False, \"Bad name\")\n\n\t# The code check is a global, because we use it elsewhere too\n\tif \"code\" in argDict and globals.checkCode(argDict[\"code\"]) is False:\n\t\treturn (False, \"Bad code\")\n\t\t\n\t# We good\n\treturn 
(True, \"No error\")", "title": "" }, { "docid": "502a9a822237770e8ab3e4d38e098f5b", "score": "0.6056163", "text": "def test_bad_input_pv_call_model(self): \n self.assertRaises(AssertionError, DerivativePayoff.PlainVanilla, *(-5,1,[1,2,4],10.0,3))", "title": "" }, { "docid": "f9d4822b36a33edadbd09dca2bb4d1e8", "score": "0.6048985", "text": "def sanitize_args():\n try:\n numwords = int(arguments['--words'])\n poolsize = int(arguments['--poolsize'])\n minword = int(arguments['--minword'])\n maxword = int(arguments['--maxword'])\n except ValueError:\n print(\"Error: Option arguments must be integers.\")\n return 1\n\n try:\n if (minword < 1) or (maxword < 1) or (numwords < 1):\n raise ArgError(\"word count and length must be positive integers.\")\n if (poolsize > 10000) or (poolsize < 1):\n raise ArgError(\"pool size must be between 1 and 10000.\")\n except ArgError as e:\n print('Could not even: {}'.format(e))\n return 1\n\n return 0", "title": "" }, { "docid": "e07b5dcb3ea2ed0a0ca48124dd59fc40", "score": "0.6044365", "text": "def check_args(function_name, svc_inst):\n\n arguments = inspect.getfullargspec(globals()[function_name]).args\n arguments = [i for i in arguments if i not in\n ['verb', 'token', 'format', 'start_time', 'end_time', 'view']]\n\n valid_args = set(svc_inst._valid_get_args)\n if svc_inst._valid_assert_args:\n valid_args = valid_args.union(svc_inst._valid_assert_args)\n\n for arg in valid_args:\n assert arg in arguments, f\"{arg} missing from {function_name} arguments\"\n\n for arg in arguments:\n assert arg in valid_args, f\"extra argument {arg} in {function_name}\"", "title": "" }, { "docid": "69d52cdd6e2d6edd780ab992a60317d3", "score": "0.6042957", "text": "def test_gaussian_error_checking_sigma(func):\n with pytest.raises(RuntimeError):\n func(1, 0)\n with pytest.raises(RuntimeError):\n func(1, -1)", "title": "" }, { "docid": "9109c94d695e9819443ec9c392499c72", "score": "0.6041581", "text": "def validate_inputs(*args):\n for e in args:\n if type(e) != str:\n raise TypeError(\"you must provide a 0 or 1 string as parameters to this function\") \n if e != '1' and e != '0':\n raise ValueError(\"you must provide a 1 or a 0 string as inputs to this function\")", "title": "" }, { "docid": "0141dcc1fafe05942aaa0416f2811fe0", "score": "0.6036003", "text": "def test_check_minus_raises():\n with pytest.raises(ValueError) as excinfo:\n check_minus_input(-1)\n exception_msg = excinfo.value.args[0]\n assert exception_msg == \"Input number is minus. 
Please input an positive integer!\"", "title": "" }, { "docid": "65a4d7df2ecb6f294320a943084d12d2", "score": "0.6032307", "text": "def test_bad_input_pv_call_strike(self): \n self.assertRaises(AssertionError, DerivativePayoff.PlainVanilla, *(-5,1,\"Uniform\",10.0,3))", "title": "" }, { "docid": "601fe66ca4f571211be3506997fb4886", "score": "0.6030477", "text": "def test_bad_exe_args():\n exe_args = {\"dict\": \"is-wrong-type\"}\n with pytest.raises(TypeError):\n _ = RunSettings(\"python\", exe_args=exe_args)", "title": "" }, { "docid": "0b8c2ee1c3529be39d662d1fd6ccd3d3", "score": "0.6027635", "text": "def test_validate_input_invalid_undefined_transition(self):\n with nose.assert_raises(exceptions.RejectionError):\n self.dpda.validate_input('01')", "title": "" }, { "docid": "697054393d58331db185000ae7fec86a", "score": "0.6023932", "text": "def test_handle_args_empty(self):\n from make_dist import handle_args\n args = handle_args()\n self.assertEqual(args, (False, ) * 4)", "title": "" }, { "docid": "ba5612b06c51d31bc3736ebe6866815e", "score": "0.60143095", "text": "def test_invalid_option(self):\n\n sys.argv = ['', '-X', os.path.join(data_dir, 'dummy.pdb')]\n\n # Execute the script\n self.exec_module()\n\n self.assertEqual(self.retcode, 1)\n self.assertEqual(len(self.stdout), 0)\n self.assertEqual(self.stderr[0][:36],\n \"ERROR!! The following options are no\")", "title": "" }, { "docid": "e095baf96dbb7af928e1481e77ac92ad", "score": "0.6011899", "text": "def test_pt_list_my_asi_insights_command_when_invalid_args_are_provided(args, err_msg, client):\n from PassiveTotal_v2 import list_my_asi_insights_command\n with pytest.raises(ValueError) as de:\n list_my_asi_insights_command(client, args)\n assert str(de.value) == err_msg", "title": "" }, { "docid": "141fbf23b9c6521741fef36e61b9e4b9", "score": "0.60109234", "text": "def test_validate_input_bad_input(self):\n self.assertRaises(ValueError, _validate_input,\n None, None, None, None, None)\n self.assertRaises(ValueError, _validate_input,\n self.dist_matrix_header, self.dist_matrix,\n self.mapping_header, self.mapping, None)\n self.assertRaises(ValueError, _validate_input,\n self.dist_matrix_header, 12,\n self.mapping_header, self.mapping, None)\n self.assertRaises(ValueError, _validate_input,\n self.dist_matrix_header, self.dist_matrix,\n self.mapping_header, self.mapping, 42)\n self.assertRaises(ValueError, _validate_input,\n self.dist_matrix_header, self.dist_matrix,\n self.mapping_header, self.mapping, \"aeiou\")", "title": "" }, { "docid": "4827b092addd834d58aa03941bd93bf7", "score": "0.60083145", "text": "def validate_arguments(args):\n if not args.input_file[-4:] == \".pdb\":\n exit(\"ERROR: Input file should be in PDB format\")\n if args.n_decoys < 0:\n exit(\"ERROR: Number of decoys must be a non-negative value\")\n if args.c_weight < 0 or args.c_weight > 1.0:\n exit(\"ERROR: Constraints weight must be a non-negative value (< 1.0)\")", "title": "" } ]
0d2a15b708c95ece5c8488c626ddefba
Use scipy.optimize.fmin_cg() to find the maximum likelihood estimate for the data in logregression.txt.
[ { "docid": "591e0ae89011601957b664f98385a58a", "score": "0.734154", "text": "def prob4(filename=\"logregression.txt\"):\n def objective(b):\n return (np.log(1+np.exp(x.dot(b)))-y*(x.dot(b))).sum()\n \n data=np.loadtxt(filename)\n y=data[:,0]\n x=np.column_stack((np.ones_like(y), data[:,1:]))\n temp=np.ones(4)\n b=fmin_cg(objective, temp)\n return b", "title": "" } ]
[ { "docid": "d2644a818c39fe93faf5848ca557d225", "score": "0.5914348", "text": "def __call__(self, X, y):\n X = np.hstack((np.ones((len(X),1)), X))\n\n # optimizacija\n theta = fmin_l_bfgs_b(\n cost,\n x0=np.zeros(X.shape[1]),\n args=(X, y, self.lambda_),\n fprime=grad)[0]\n\n return LogRegClassifier(theta)", "title": "" }, { "docid": "47f8cfa93089828f58506031c075817b", "score": "0.5767875", "text": "def posdef_max_likelihood_objective(X, shrinkage):\n\n # replace missing values (nans) with zeros\n X0 = np.nan_to_num(X)\n\n # define an objective function, given the data\n def objective(alpha):\n cov = shrinkage.get_cov(alpha)\n return log_likelihood(cov, X0) if is_posdef(cov) else -np.inf\n\n # return the objective function\n return objective", "title": "" }, { "docid": "a184b84fe10f0699aa157329e5cf5345", "score": "0.56032497", "text": "def optimize(s):\r\n\t\t#pdb.set_trace()\r\n\t\tparsOpt = scipy.optimize.fmin(s.minusLogLike, s.pdf.parsVec(), maxfun = 1e4) \r\n\t\ts.pdf.parsVec(parsOpt)", "title": "" }, { "docid": "f359927b3a09ad8c02007f8369073b28", "score": "0.55586594", "text": "def gcg(a, b, M, reg1, reg2, f, df, G0=None, numItermax=10, numInnerItermax=200,\n stopThr=1e-9, stopThr2=1e-9, verbose=False, log=False, method=\"sinkhorn\"):\n\n loop = 1\n\n if log:\n log = {'loss': []}\n\n if G0 is None:\n G = np.outer(a, b)\n else:\n G = G0\n\n def cost(G):\n return np.sum(M * G) + reg1 * np.sum(G * np.log(G)) + reg2 * f(G)\n\n f_val = cost(G)\n if log:\n log['loss'].append(f_val)\n\n it = 0\n\n if verbose:\n print('{:5s}|{:12s}|{:8s}|{:8s}'.format(\n 'It.', 'Loss', 'Relative loss', 'Absolute loss') + '\\n' + '-' * 48)\n print('{:5d}|{:8e}|{:8e}|{:8e}'.format(it, f_val, 0, 0))\n\n while loop:\n\n it += 1\n old_fval = f_val\n\n # problem linearization\n Mi = M + reg2 * df(G)\n\n # solve linear program with Sinkhorn\n Gc = sinkhorn(a, b, Mi, reg1, numItermax=numInnerItermax)\n\n deltaG = Gc - G\n\n # line search\n dcost = Mi + reg1 * (1 + np.log(G)) # ??\n alpha, _, f_val = line_search_armijo(cost, G, deltaG, dcost, f_val)\n\n G = G + alpha * deltaG\n\n # test convergence\n if it >= numItermax:\n loop = 0\n\n abs_delta_fval = abs(f_val - old_fval)\n relative_delta_fval = abs_delta_fval / abs(f_val)\n\n if relative_delta_fval < stopThr or abs_delta_fval < stopThr2:\n loop = 0\n\n if log:\n log['loss'].append(f_val)\n\n if verbose:\n if it % 20 == 0:\n print('{:5s}|{:12s}|{:8s}|{:8s}'.format(\n 'It.', 'Loss', 'Relative loss', 'Absolute loss') + '\\n' + '-' * 48)\n print('{:5d}|{:8e}|{:8e}|{:8e}'.format(it, f_val, relative_delta_fval, abs_delta_fval))\n\n if log:\n return G, log\n else:\n return G", "title": "" }, { "docid": "b879db9e04a7a66a9ee4fb1f7c26be6c", "score": "0.55577624", "text": "def reg_logistic_regression(y, tx, lambda_, initial_w, max_iters, gamma):\n w, loss,tol = initial_w, [], 0.01\n for n_iter in range(max_iters):\n grad, e = compute_reg_log_grad(y, tx, w)\n w = w - gamma*grad\n loss.append(compute_mse(e))\n if (np.abs(loss[n_iter]-loss[n_iter-1]))<tol and n_iter>0:\n return w, loss[-1]\n return (w, loss[-1])", "title": "" }, { "docid": "ac9abe3b61a56791fb3095d675004d30", "score": "0.5541833", "text": "def logistic_regression(y, tx, initial_w, max_iters, gamma):\n w = initial_w\n losses = []\n threshold = 1e-8\n for i in range(max_iters):\n g = compute_gradient_likelihood(y, tx, w)\n w = w-gamma*g\n loss = calculate_loss(y, tx, w)\n losses.append(loss)\n if len(losses) > 1 and np.abs(losses[-1] - losses[-2] < threshold):\n break\n loss = calculate_loss(y, tx, w)\n return w, 
loss", "title": "" }, { "docid": "fa7a179e06b5ab49b35a7392af33ac49", "score": "0.55248344", "text": "def fit_log(X, y):\n raise NotImplementedError(\"Logrithmic best fit lines are not implemented\")", "title": "" }, { "docid": "bbd30a6ee705363a973a9124a83af331", "score": "0.5461958", "text": "def logistic_regression(y, tx, gamma, max_iters):\n # init parameters\n threshold = 1e-8\n losses = []\n w = np.zeros(tx.shape[1])\n # start the logistic regression\n for iter in range(max_iters):\n gradient = calculate_gradient_log_likelihood(y, tx, w)\n # get loss and updated w\n loss = calculate_loss_log_likelihood(y, tx, w)\n w = w - gamma * gradient \n # log info\n if iter % 50 == 0:\n print(\"Current iteration={i}, the loss={l}\".format(i=iter, l=loss))\n # converge criteria\n losses.append(loss)\n if len(losses) > 1 and np.abs(losses[-1] - losses[-2]) < threshold:\n break\n print(\"The of loss iteration={i} is {l} \".format(calculate_loss_log_likelihood(y, tx, w), i=iter))\n return w", "title": "" }, { "docid": "cc7f606153a115c408d612823d0bf9c6", "score": "0.5451117", "text": "def logistic_regression(y, tx, initial_w, max_iters, gamma):\n \n w = np.copy(initial_w)\n \n loss = calculate_loss_logistic(y, tx, w)\n # compute the cost\n \n grad = calculate_gradient_logistic(y, tx, w)\n # compute the gradient\n \n #hess = calculate_hessian_logistic(y, tx, w)\n # compute the hessian, for generalizations\n \n threshold = 1e-8\n \n \n previous_loss = 0\n \n for iter_ in range(max_iters):\n # get loss and update w.\n loss, w = learning_by_gradient_descent(y, tx, w, gamma)\n if iter_ > 1 and np.abs(loss - previousloss) < threshold:\n break\n previousloss = loss\n return w, mse_loss(y,tx,w)", "title": "" }, { "docid": "2f1b23ed6f36a35d9b7fbae33edb085d", "score": "0.54440814", "text": "def gaussian_log_likelihood(parameters, data, maximize=False):\n\n mean, sigma = parameters # unpack parameters\n N = data.size\n\n L = -0.5 * N * np.log(2 * np.pi * sigma ** 2) - sum([((i - mean) ** 2) / (2 * sigma ** 2) for i in data])\n\n if maximize:\n return -L\n else:\n return L", "title": "" }, { "docid": "65e3edcf8bcd83f7ddd03c07c2fb51cb", "score": "0.54342914", "text": "def negative_log_likelihood(data: np.ndarray):\n X_train = np.array(list(map(operator.itemgetter(0), data))).reshape(-1, 1)\n Y_train = np.array(list(map(operator.itemgetter(1), data))).reshape(-1, 1)\n\n\n def nll(theta):\n \"\"\"\n The actual negative log likelihood function\n :param theta: a list or tuple containing the GP parameters to be optimized.\n :return:\n \"\"\"\n l, sigma_f, sigma_n = theta\n K_f = kernel_se(X_train, X_train, l, sigma_f)\n K_y = K_f + sigma_n ** 2 * np.eye(len(X_train))\n K_y_inv=np.linalg.inv(K_y)\n\n\n \n #raise NotImplementedError('Implement the negative log likelihood of the gaussian process (term 1).')\n term_1 = np.dot(Y_train.transpose(),K_y_inv)\n term_1=np.dot(term_1,Y_train)\n term1=-1*term_1\n\n # You are given term 2 and 3\n L = np.linalg.cholesky(K_y)\n term_2 = np.sum(np.log(np.diag(L)))\n\n term_3 = 0.5 * len(X_train) * np.log(2 * np.pi)\n ans=term_1+term_2+term_3\n #print(ans)\n\n return ans[0]\n \n return nll", "title": "" }, { "docid": "a5a7c0f52694aabc631ac5bd1bf29386", "score": "0.54333556", "text": "def logLikelihood(self):\r\n y = np.mat(self.y_)\r\n X = np.mat(self.X_)\r\n Beta = self.weights \r\n ll = -1*(np.ones(len(self.y_))@np.log(1 + np.exp(X@Beta)) - y@X@Beta)\r\n return np.ravel(ll)[0]", "title": "" }, { "docid": "0dbf19856d5c8ef0ead5f01ebf16e4cf", "score": "0.54088485", "text": "def 
find_minimum(self, log_scale,use_data_only_for_bounds=False):\n # JWH stuff\n h1 = self.data.hist\n tobjarray = self.stack.GetStack()\n min_histo_range = None\n min_histo_range_total = 1.0\n\n if not log_scale:\n min_data_range = h1.GetBinContent(h1.GetMinimumBin()) - h1.GetBinError(h1.GetMinimumBin())\n for h in tobjarray:\n if not isinstance(h, ROOT.TH1): continue\n if min_histo_range is None:\n # min_histo_range = h.GetBinContent(h.GetMinimumBin())\n min_histo_range = h.GetBinContent(h.GetMinimumBin()) - h.GetBinError(h.GetMinimumBin())\n else:\n # min_histo_range = min(min_histo_range, h.GetBinContent(h.GetMinimumBin()))\n min_histo_range = min(min_histo_range, h.GetBinContent(h.GetMinimumBin()) - h.GetBinError(h.GetMinimumBin()))\n min_histo_range = min(min_histo_range, min_data_range)\n else:\n min_data_range = h1.GetBinContent(h1.GetMinimumBin()) - h1.GetBinError(h1.GetMinimumBin())\n if min_data_range <= 0:\n min_data_range = h1.GetBinContent(h1.GetMinimumBin())\n if min_data_range <= 0:\n min_data_range = 0\n for h in tobjarray:\n if not isinstance(h, ROOT.TH1): continue\n temp = h.GetBinContent(h.GetMinimumBin())\n # temp = h.GetBinContent(h.GetMinimumBin()) - h.GetBinError(h.GetMinimumBin())\n if temp <= 0:\n temp = h.GetBinContent(h.GetMinimumBin())\n if temp <= 0:\n temp = 0\n if min_histo_range is None:\n min_histo_range = temp\n elif not temp == 0 and not min_histo_range == 0:\n min_histo_range = min(min_histo_range, temp)\n elif not temp == 0 and min_histo_range == 0:\n min_histo_range = temp\n if min_histo_range == 0:\n min_histo_range = min_data_range\n if min_histo_range == 0:\n min_histo_range = 1e-1\n if not min_histo_range == 0 and not min_data_range == 0:\n min_histo_range = min(min_histo_range, min_data_range)\n\n # use minimum of the total bkg rather than the minumum of each process\n min_histo_range_total = self.stack.GetMinimum()\n if min_histo_range_total <=0:\n min_histo_range_total = 0\n if min_histo_range_total == 0:\n min_histo_range_total = min_data_range\n if min_histo_range_total == 0:\n min_histo_range_total = 1e-1\n if not min_histo_range_total == 0 and not min_data_range == 0:\n min_histo_range_total = min(min_histo_range_total, min_data_range)\n\n logging.debug(\"found content minimum of {}\".format(min_histo_range))\n logging.debug(\"found total content minimum of {}\".format(min_histo_range_total))\n if use_data_only_for_bounds:\n return min_data_range,min_histo_range_total\n return min_histo_range,min_histo_range_total\n\n # h1 = self.data.hist\n # tobjarray = self.stack.GetStack()\n # h2 = tobjarray.Last()\n # min_data = h1.GetBinContent(h1.GetMinimumBin())\n # if min_data != 0:\n # min_data -= h1.GetBinError(h1.GetMinimumBin())\n # min_MC = h2.GetBinContent(h2.GetMinimumBin())\n # mini = min(min_data, min_MC)\n # if mini == 0:\n # print \"found minimum of 0\"\n # newmin = min(h1.GetMinimum(1.e-5), h2.GetMinimum(1.e-5))\n # print \"next to minimum is found at\", newmin\n # return newmin\n # else:\n # print \"found minimum of \", mini\n # return mini", "title": "" }, { "docid": "297f29a322b73a889d7ce876d225faf9", "score": "0.54082376", "text": "def logmel2linear(\n lmspc: np.ndarray,\n fs: int,\n n_fft: int,\n n_mels: int,\n fmin: int = None,\n fmax: int = None,\n) -> np.ndarray:\n assert lmspc.shape[1] == n_mels\n fmin = 0 if fmin is None else fmin\n fmax = fs / 2 if fmax is None else fmax\n mspc = np.power(10.0, lmspc)\n mel_basis = librosa.filters.mel(\n sr=fs, n_fft=n_fft, n_mels=n_mels, fmin=fmin, fmax=fmax\n )\n inv_mel_basis = 
np.linalg.pinv(mel_basis)\n return np.maximum(EPS, np.dot(inv_mel_basis, mspc.T).T)", "title": "" }, { "docid": "2c8e72c0d40d869c3dc1095c46976f13", "score": "0.5402232", "text": "def fit_temp_dens_relation(logoverden, logT):\n \n ind = np.where((0.1 < logoverden) * (logoverden < 1.0) * (0.1 < logT) * (logT < 5.0))\n\n logofor = logoverden[ind]\n logtfor = logT[ind]\n\n def min_func(param):\n \"\"\"Function to minimize: power law fit to temperature density relation.\"\"\"\n logT0 = param[0]\n gammam1 = param[1]\n #print(param)\n return logtfor - (logT0 + gammam1 * logofor)\n res = leastsq(min_func, np.array([np.log10(1e4), 0.5]), full_output=True)\n params = res[0]\n if res[-1] <= 0:\n print(res[3])\n return 10**params[0], params[1] + 1", "title": "" }, { "docid": "8e27381fce91f17bec563420abd3323a", "score": "0.54015136", "text": "def __call__(self, X, y):\n # Horizontally stack a vector of ones with data matrix X.\n X = np.hstack((np.ones((len(X), 1)), X))\n\n # optimization - find values theta using the cost function and the gradient producing function.\n theta = fmin_l_bfgs_b(\n cost,\n x0=np.zeros(X.shape[1]),\n args=(X, y, self.lambda_),\n fprime=grad)[0]\n\n # Return the trained classifier.\n return LogRegClassifier(theta)", "title": "" }, { "docid": "ab91695ef287e5830577df56cccce1c9", "score": "0.53883344", "text": "def logistic_regression(y, tx, initial_w, max_iters, gamma):\n w, loss,tol = initial_w, [], 0.01\n for n_iter in range(max_iters):\n grad, e = compute_log_grad(y, tx, w)\n w = w - gamma*grad\n loss.append(compute_mse(e))\n if (np.abs(loss[n_iter]-loss[n_iter-1]))<tol and n_iter>0:\n return w, loss[-1]\n return (w, loss[-1])", "title": "" }, { "docid": "e4e3deff7283121eae86b6ac1e6a4190", "score": "0.5353924", "text": "def logarithmic_model(x,y):\n A = np.array([[0 for x in range(2)] for x in range(len(x))])\n for i in range(len(x)):\n A[i][0] = 1\n A[i][1] = np.log(x[i])\n l = least_squares(A, y)\n return (np.log(l[0]),l[1])", "title": "" }, { "docid": "a6bda22ee9d48f060dcf8cb7b6042bae", "score": "0.53514683", "text": "def reg_logistic_regression(y, tx, lambda_, initial_w, max_iters, gamma):\n w = initial_w\n losses = []\n threshold = 1e-8\n for i in range(max_iters):\n #loss = ((-1)*(2*y*np.log(sigmoid(tx@w))+(1-y)*np.log(1-sigmoid(tx@w)))).sum()\n #loss, g = penalized_logistic_regression(y, tx, w, lambda_)\n loss = calculate_loss(y, tx, w, lambda_)\n g = compute_gradient_likelihood(y, tx, w, lambda_)\n #loss = calculate_loss(y, tx, w)\n losses.append(loss)\n #g = compute_gradient_likelihood(y, tx, w) - lambda_ * w\n #print(loss)\n w = w - gamma*g\n if len(losses) > 1 and np.abs(losses[-1]- losses[-2] < threshold):\n break\n\n return w, loss", "title": "" }, { "docid": "160710859c875ec72da26e306f9a2756", "score": "0.5341955", "text": "def log_prior(self, params, weights):\n\n\n bounds = []\n\n index = 0\n val = 0.0\n for f in self.list_functions:\n if self.min_values[f] is not None or self.max_values[f] is not None:\n bd = []\n if self.min_values[f] is not None:\n n_bounds = len(self.min_values[f])\n else:\n n_bounds = len(self.max_values[f])\n\n for i in range(n_bounds):\n bd_ = [None, None]\n if self.min_values[f] is not None:\n bd_[0] = self.min_values[f][i]\n if self.max_values[f] is not None:\n bd_[1] = self.max_values[f][i]\n bd += [bd_]\n bounds += bd\n\n else:\n bd = len(self.parameters_functions[f]) * [[None, None]]\n bounds += bd\n\n for i in range(len(params)):\n if bounds[i][0] is not None and params[i] < bounds[i][0]:\n # print \"wrong_bo\"\n val = -np.inf\n 
return val\n if bounds[i][1] is not None and params[i] > bounds[i][1]:\n # print \"wrong_bo\"\n val = -np.inf\n return val\n\n if self.weighted_combination(1, weights, params) >= \\\n self.weighted_combination(self.total_iterations, weights, params):\n # print \"no incease\"\n val = -np.inf\n\n if self.lower is not None and self.weighted_combination(1, weights, params) < self.lower:\n # print \"no lower\"\n\n val = - np.inf\n if self.upper is not None \\\n and self.weighted_combination(self.total_iterations, weights, params) > self.upper:\n # print \"no upper\"\n val = -np.inf\n return val", "title": "" }, { "docid": "019645483236b5b6d48ad77c86ab216f", "score": "0.533316", "text": "def log_likelihood(x,m,Q,sm2):\n # if den gets too small..\n #np.seterr(invalid='raise') # if log negative stop everything\n den = Q[0,0]+sm2\n assert Q[0,0]>=0, \"Q[00]={} must be positivie! The paramters are non\\\n physicals conditions or increase the rescaling\".format(Q[0,0])\n if den<1e-08:\n tmp = -(x-m[0,0])**2/(2*den)-0.5*(np.log(1e10*den)-np.log(1e10)+np.log(2*np.pi))\n else:\n tmp = -(x-m[0,0])**2/(2*den)-0.5*(np.log(den)+np.log(2*np.pi))\n return tmp", "title": "" }, { "docid": "03d5f955cfd7a09232027e62393eff69", "score": "0.5324539", "text": "def reg_logistic_regression(y, tx, lambda_, initial_w, max_iters, gamma):\n \n w = np.copy(initial_w)\n \n loss = calculate_loss_logistic(y, tx, w)\n # compute the cost\n \n grad = calculate_gradient_logistic(y, tx, w)\n # compute the gradient\n \n #hess = calculate_hessian_logistic(y, tx, w)\n # compute the hessian\n \n threshold = 1e-8\n \n previousloss = 0\n \n for iter_ in range(max_iters):\n # get loss and update w.\n loss, w = learning_by_penalized_gradient(y, tx, w, gamma, lambda_)\n if iter_ > 1 and np.abs(loss - previousloss) < threshold:\n break\n previousloss = loss\n \n return w, mse_loss(y,tx,w)", "title": "" }, { "docid": "ed6b6f7de1d83725ce8951eb9a6f9cde", "score": "0.5287439", "text": "def linearFit(b0=5):\n y0 = [1-0.003,0.001,0.001,0.001] \n T = 10\n Nt = 10000 \n t,y,S,E,C,R = solveFlu(T,Nt,1.0,b0,0.2,0.2,0.1,y0) \n i = np.where(C<=0.1)\n C1 = C[i] \n t1 = t[i] \n lC1 = np.log(C1)\n mu,c = np.polyfit(t1,lC1,1)\n lC = np.log(C)\n poly = c + mu*t\n plt.figure()\n plt.plot(t,lC,'b',label='log(C(t))')\n plt.plot(t,poly,'r',label='Fit polynomial')\n plt.legend(loc='best')\n plt.xlabel('t')\n plt.grid()\n plt.show()\n print mu", "title": "" }, { "docid": "ae047a0b1ac5cc3a622610cf0e0bdad5", "score": "0.528667", "text": "def fit_with_bfgs(self):\n def objective(x, k):\n self.weights[k,:] = np.exp(x)\n self.weights[k,:] = np.nan_to_num(self.weights[k,:])\n return np.nan_to_num(-self.log_posterior(ks=np.array([k])))\n\n def gradient(x, k):\n self.weights[k,:] = np.exp(x)\n self.weights[k,:] = np.nan_to_num(self.weights[k,:])\n return np.nan_to_num(-self.compute_gradient(k))\n\n itr = [0]\n def callback(x):\n if itr[0] % 10 == 0:\n print \"Iteration: %03d\\t LP: %.1f\" % (itr[0], self.log_posterior())\n itr[0] = itr[0] + 1\n\n for k in xrange(self.K):\n print \"Optimizing process \", k\n itr[0] = 0\n x0 = np.log(self.weights[k,:])\n res = minimize(objective, x0, args=(k,), jac=gradient, callback=callback)\n self.weights[k,:] = np.exp(res.x)", "title": "" }, { "docid": "733c45f353107a38639c9f6c8c03215e", "score": "0.5283702", "text": "def logistic_regression(y, tx, initial_w, max_iters, gamma):\n # init parameters\n threshold = 1e-8\n ws = [initial_w]\n losses = []\n w = initial_w\n\n # start the logistic regression\n for iter in range(max_iters):\n 
loss, gradient = logistic_loss_gradient(y, tx, w)\n w = w - gamma * gradient\n # if iter % 10 == 0:\n # print(\"Current iteration={}, loss={}\".format(iter, loss))\n losses.append(loss)\n ws.append(w)\n # added any.\n if len(losses) > 1 and np.abs((losses[-1] - losses[-2]).any()) < threshold:\n break\n\n return ws[-1], losses[-1]", "title": "" }, { "docid": "9a24fb6d65d317e9d43c7bb38ec2ab9b", "score": "0.52791977", "text": "def rbf_objective(log10g, X, f, v, N, e):\n # TODO: I can probably make this implementation more efficient, but as of\n # now, I don't need to.\n g = 10**(log10g)\n\n M, m = X.shape\n if e is None:\n ell = g*np.ones((m,1))\n if v is None:\n v = 1e-6*np.ones(f.shape)\n else:\n ell = g*np.sum(e)/e[:m]\n if v is None:\n v = g*np.sum(e[m:])*np.ones(f.shape)\n\n # covariance matrix\n K = exponential_squared(X, X, 1.0, ell)\n K += np.diag(v.reshape((M,)))\n L = np.linalg.cholesky(K)\n\n # polynomial basis\n B = polynomial_bases(X, N)[0]\n A = np.dot(B.T, np.linalg.solve(K, B))\n z = np.dot(B.T, np.linalg.solve(K, f))\n beta = np.linalg.solve(A, z)\n\n # residual\n res = f - np.dot(B, beta)\n\n # variance\n sig2 = np.dot(res.T, np.linalg.solve(K, res))/M\n\n r = np.sum(np.log(np.diag(L))) + M*np.log(sig2)\n return r", "title": "" }, { "docid": "546798e4266a632bb40726f90c96a2df", "score": "0.52735376", "text": "def regularized_logistic_regression(y, tx, lambda_, gamma, max_iters):\n # init parameters\n threshold = 1e-8\n losses = []\n w = np.zeros(tx.shape[1])\n\n # start the logistic regression\n for iter in range(max_iters):\n gradient = calculate_gradient_log_likelihood(y, tx, w) + (lambda_ * 2 * w)\n # get loss and updated w\n loss=calculate_loss_log_likelihood(y, tx, w)+lambda_*np.sum(w**2)\n w = w - gamma * gradient\n \n # log info\n if iter % 500 == 0:\n print(\"Current iteration={i}, the loss={l}\".format(i=iter, l=calculate_loss_log_likelihood(y, tx, w)+lambda_*np.sum(w**2)))\n losses.append(loss)\n if len(losses) > 1 and np.abs(losses[-1] - losses[-2]) < threshold:\n break\n #print(\"The loss of iteration={i} is {l} \".format(l=calculate_loss_log_likelihood(y, tx, w)+lambda_*np.sum(w**2), i=iter))\n return w\n\n\n ########################################", "title": "" }, { "docid": "0d2920a5083b5ce6593bc7ac48908686", "score": "0.52358544", "text": "def logistic_regression(y, tx, initial_w, max_iters, gamma, newton=False):\n\n def step_factor(w, grad):\n \"\"\"Calculate the step-factor depending on whether we are running the Newton method.\"\"\"\n return grad if not newton else np.linalg.solve(gradients.hessian(w, tx), grad)\n\n desc = 'LOG' if not newton else 'LOG-N'\n w = initial_w\n\n for n_iter in range(max_iters):\n grad = gradients.log_likelihood_gradient(y, tx, w) # compute log-likelihood gradient\n w = w - gamma * step_factor(w, grad) # compute new w\n loss = costs.compute_log_likelihood_error(y, tx, w)\n print(\"{desc}({bi}/{ti}): loss={l}\".format(desc=desc, bi=n_iter, ti=max_iters-1, l=loss))\n \n return w, loss", "title": "" }, { "docid": "ad88725ecc33cca1555d27a276e304a5", "score": "0.5230614", "text": "def least_squares_GD(y, tx, gamma, max_iters):\n # init parameters\n threshold = 1e-8\n losses = []\n w = np.zeros(tx.shape[1])\n # start the regression\n for iter in range(max_iters):\n gradient = calculate_gradient_mse(y, tx, w)\n # get loss and updated w\n loss = calculate_loss_mse(y, tx, w)\n w = w - gamma * gradient\n # log info\n if iter % 1000 == 0:\n print(\"Current iteration={i}, the loss={l}\".format(i=iter, l=loss))\n # converge criteria\n 
losses.append(loss)\n if len(losses) > 1 and np.abs(losses[-1] - losses[-2]) < threshold:\n break\n #print(\"The of loss iteration={i} is {l} \".format(l=calculate_loss_mse(y, tx, w), i=iter))\n return w", "title": "" }, { "docid": "760306c77b5c724032bc6e5a8af1e704", "score": "0.5226302", "text": "def calc_int(like, srcName, cl=0.95, verbosity=0,\n skip_global_opt=False, be_very_careful=False, freeze_all=False,\n delta_log_like_limits = 10.0, profile_optimizer = None,\n emin=100, emax=3e5, poi_values = []): \n saved_state = LikelihoodState(like)\n\n ###########################################################################\n #\n # This function has 4 main components:\n #\n # 1) Find the global maximum of the likelihood function using ST\n # 2) Define the integration limits by finding the points at which the\n # log likelihood has fallen by a certain amount\n # 3) Integrate the function using the QUADPACK adaptive integrator\n # 4) Calculate the upper limit by re-integrating the function using\n # the evaluations made by the adaptive integrator. Two schemes are\n # tried, splines to the function points and trapezoidal quadrature.\n #\n ###########################################################################\n\n # Optimizer uses verbosity level one smaller than given here\n optverbosity = max(verbosity-1, 0)\n\n ###########################################################################\n #\n # 1) Find the global maximum of the likelihood function using ST\n #\n ###########################################################################\n\n par = like.normPar(srcName)\n\n fitstat = None\n if not skip_global_opt:\n # Make sure desired parameter is free during global optimization\n par.setFree(True)\n like.syncSrcParams(srcName)\n\n # Perform global optimization\n if verbosity:\n print (\"Finding global maximum\")\n try:\n like.fit(optverbosity)\n fitstat = like.optObject.getRetCode()\n if verbosity and fitstat != 0:\n print (\"Minimizer returned with non-zero code: \",fitstat)\n except RuntimeError:\n print (\"Failed to find global maximum, results may be wrong\")\n pass\n pass\n \n original_optimizer = like.optimizer\n if profile_optimizer != None:\n like.optimizer = profile_optimizer\n\n # Store values of global fit\n maxval = -like()\n fitval = par.getValue()\n fiterr = par.error()\n limlo, limhi = par.getBounds()\n # limlo should not be allowed to go down to 0\n limlo = max(limlo,0.01*fiterr,1e-4)\n if verbosity:\n print (\"Maximum of %g with %s = %g +/- %g\"\\\n %(-maxval,srcName,fitval,fiterr))\n\n # Freeze all other model parameters if requested (much faster!)\n if(freeze_all):\n for i in range(len(like.model.params)):\n like.model[i].setFree(False)\n like.syncSrcParams(like[i].srcName)\n\n # Freeze the parameter of interest\n par.setFree(False)\n like.syncSrcParams(srcName)\n\n # Set up the caches for the optimum values and nuisance parameters\n optvalue_cache = dict()\n nuisance_cache = dict()\n optvalue_cache[fitval] = maxval\n _cache_nuisance(fitval, like, nuisance_cache)\n\n # Test if all parameters are frozen (could be true if we froze\n # them above or if they were frozen in the user's model\n all_frozen = True\n for i in range(len(like.model.params)):\n if like.model[i].isFree():\n all_frozen = False\n break\n\n ###########################################################################\n #\n # 2) Define the integration limits by finding the points at which the\n # log likelihood has fallen by a certain amount\n #\n 
###########################################################################\n\n if verbosity:\n print (\"Finding integration bounds (delta log Like=%g)\"\\\n %(delta_log_like_limits))\n\n [xlo, xhi, ylo, yhi, exact_root_evals, approx_root_evals] = \\\n _find_interval(like, par, srcName, all_frozen,\n maxval, fitval, limlo, limhi,\n delta_log_like_limits, verbosity, like.tol,\n False, 5, optvalue_cache, nuisance_cache)\n\n if poi_values != None and len(poi_values)>0:\n xlo = max(min(xlo, min(poi_values)/2.0), limlo)\n xhi = min(max(xhi, max(poi_values)*2.0), limhi)\n\n if verbosity:\n print (\"Integration bounds: %g to %g (%d full fcn evals and %d approx)\"\\\n %(xlo,xhi,exact_root_evals,approx_root_evals))\n\n profile_dlogL1 = -0.5*scipy.stats.chi2.isf(1-cl, 1)\n profile_dlogL2 = -0.5*scipy.stats.chi2.isf(1-2*(cl-0.5), 1)\n\n if yhi - delta_log_like_limits > profile_dlogL1:\n print (\"calc_int error: parameter max\", xhi, \"is not large enough\")\n print (\"delta logLike =\", yhi - delta_log_like_limits)\n return -1, {}\n\n ###########################################################################\n #\n # 3) Integrate the function using the QUADPACK adaptive integrator\n #\n ###########################################################################\n\n #\n # Do integration using QUADPACK routine from SciPy -- the \"quad\"\n # routine uses adaptive quadrature, which *should* spend more time\n # evaluating the function where it counts the most.\n #\n points = []\n epsrel = (1.0-cl)*1e-3\n if be_very_careful:\n # In \"be very careful\" mode we explicitly tell \"quad\" that it\n # should examine more carefully the point at x=fitval, which\n # is the peak of the likelihood. We also use a tighter\n # tolerance value, but that seems to have a secondary effect.\n points = [ fitval ]\n epsrel = (1.0-cl)*1e-8\n\n if verbosity:\n print (\"Integrating probability distribution\")\n\n nfneval = -len(optvalue_cache)\n f_of_x = dict()\n quad_ival, quad_ierr = \\\n scipy.integrate.quad(_integrand, xlo, xhi,\\\n args = (f_of_x, like, par, srcName, maxval,\\\n verbosity, all_frozen,\n optvalue_cache, nuisance_cache),\\\n points=points, epsrel=epsrel, epsabs=1)\n nfneval += len(optvalue_cache)\n\n if verbosity:\n print (\"Total integral: %g +/- %g (%d fcn evals)\"\\\n %(quad_ival,quad_ierr,nfneval))\n\n ###########################################################################\n #\n # 4) Calculate the upper limit by re-integrating the function using\n # the evaluations made by the adaptive integrator. Two schemes are\n # tried, splines to the function points and trapezoidal quadrature.\n #\n ###########################################################################\n\n # Calculation of the upper limit requires integrating up to\n # various test points, and finding the one that contains the\n # prescribed fraction of the probability. Using the \"quad\"\n # function to do this by evaluating the likelihood function\n # directly would be computationally prohibitive, it is preferable\n # to use the function evaluations that have been saved in the\n # \"f_of_x\" variable.\n\n # We try 2 different integration approaches on this data:\n # trapezoidal quadrature and integration of a fitted spline, with\n # the expectation that the spline will be better, but that perhaps\n # the trapezoidal might be more robust if the spline fit goes\n # crazy. 
The method whose results are closest to those from \"quad\"\n # is picked to do the search.\n \n # Organize values computed into two vectors x & y\n x = list(f_of_x.keys())\n x.sort()\n y=[]\n logy=[]\n for xi in x:\n y.append(f_of_x[xi])\n logy.append(math.log(f_of_x[xi]))\n\n # Evaluate upper limit using trapezoidal rule\n trapz_ival = scipy.integrate.trapz(y,x)\n cint = 0\n Cint = [ 0 ]\n for i in range(len(x)-1):\n cint += 0.5*(f_of_x[x[i+1]]+f_of_x[x[i]])*(x[i+1]-x[i])\n Cint.append(cint)\n int_irep = scipy.interpolate.interp1d(x, Cint)\n xlim_trapz = scipy.optimize.brentq(_int1droot, x[0], x[-1],\n args = (cl*cint, int_irep))\n ylim_trapz = int_irep(xlim_trapz).item()/cint\n\n # Evaluate upper limit using spline\n spl_irep = scipy.interpolate.splrep(x,y,xb=xlo,xe=xhi)\n spl_ival = scipy.interpolate.splint(xlo,xhi,spl_irep)\n xlim_spl = scipy.optimize.brentq(_splintroot, xlo, xhi, \n args = (cl*spl_ival, xlo, spl_irep))\n ylim_spl = scipy.interpolate.splint(xlo,xlim_spl,spl_irep)/spl_ival\n\n # Test which is closest to QUADPACK adaptive method: TRAPZ or SPLINE\n if abs(spl_ival - quad_ival) < abs(trapz_ival - quad_ival):\n # Evaluate upper limit using spline\n if verbosity:\n print (\"Using spline integral: %g (delta=%g)\"\\\n %(spl_ival,abs(spl_ival/quad_ival-1)))\n xlim = xlim_spl\n ylim = ylim_spl\n if verbosity:\n print (\"Spline search: %g (P=%g)\"%(xlim,ylim))\n else:\n # Evaluate upper limit using trapezoidal rule\n if verbosity:\n print (\"Using trapezoidal integral: %g (delta=%g)\"\\\n %(trapz_ival,abs(trapz_ival/quad_ival-1)))\n xlim = xlim_trapz\n ylim = ylim_trapz\n if verbosity:\n print (\"Trapezoidal search: %g (P=%g)\"%(xlim,cl))\n\n like.optimizer = original_optimizer\n\n ###########################################################################\n #\n # Since we have computed the profile likelihood, calculate the\n # right side of the 2-sided confidence region at the CL% and\n # 2*(CL-50)% levels under the assumption that the likelihood is\n # distributed as chi^2 of 1 DOF. 
Again, use the root finder on a\n # spline and linear representation of logL.\n #\n ###########################################################################\n\n # The spline algorithm is prone to noise in the fitted logL,\n # especially in \"be_very_careful\" mode, so fall back to a linear\n # interpolation if necessary\n\n spl_drep = scipy.interpolate.splrep(x,logy,xb=xlo,xe=xhi)\n spl_pflux1 = scipy.optimize.brentq(_splevroot, fitval, xhi, \n args = (profile_dlogL1, spl_drep))\n spl_pflux2 = scipy.optimize.brentq(_splevroot, fitval, xhi, \n args = (profile_dlogL2, spl_drep))\n\n int_drep = scipy.interpolate.interp1d(x,logy)\n int_pflux1 = scipy.optimize.brentq(_int1droot, max(min(x),fitval), max(x), \n args = (profile_dlogL1, int_drep))\n int_pflux2 = scipy.optimize.brentq(_int1droot, max(min(x),fitval), max(x), \n args = (profile_dlogL2, int_drep))\n\n if (2.0*abs(int_pflux1-spl_pflux1)/abs(int_pflux1+spl_pflux1) > 0.05 or \\\n 2.0*abs(int_pflux2-spl_pflux2)/abs(int_pflux2+spl_pflux2) > 0.05):\n if verbosity:\n print (\"Using linear interpolation for profile UL estimate\")\n profile_flux1 = int_pflux1\n profile_flux2 = int_pflux2\n else:\n if verbosity:\n print (\"Using spline interpolation for profile UL estimate\")\n profile_flux1 = spl_pflux1\n profile_flux2 = spl_pflux2\n\n ###########################################################################\n #\n # Evaluate the probabilities of the \"points of interest\" using the integral\n #\n ###########################################################################\n\n poi_probs = [];\n poi_dlogL_interp = [];\n poi_chi2_equiv = [];\n\n for xval in poi_values:\n dLogL = None\n if(xval >= xhi):\n pval = 1.0\n elif(xval <= xlo):\n pval = 0.0\n # Same test as above to decide between TRAPZ and SPLINE\n elif abs(spl_ival - quad_ival) < abs(trapz_ival - quad_ival):\n pval = scipy.interpolate.splint(xlo,xval,spl_irep)/spl_ival\n dlogL = scipy.interpolate.splev(xval, spl_drep)\n else:\n pval = int_irep(xval).item()/cint\n dlogL = int_drep(xval).item() \n poi_probs.append(pval)\n poi_dlogL_interp.append(dlogL)\n poi_chi2_equiv.append(scipy.stats.chi2.isf(1-pval,1))\n\n ###########################################################################\n # \n # Calculate the integral flux at the upper limit parameter value\n #\n ###########################################################################\n \n # Set the parameter value that corresponds to the desired C.L.\n par.setValue(xlim)\n\n # Evaluate the flux corresponding to this upper limit.\n ul_flux = like[srcName].flux(emin, emax)\n\n saved_state.restore()\n\n # Pack up all the results\n results = dict(all_frozen = all_frozen,\n ul_frac = cl,\n ul_flux = ul_flux,\n ul_value = xlim,\n ul_trapz = xlim_trapz,\n ul_spl = xlim_spl,\n int_limits = [xlo, xhi],\n profile_x = x,\n profile_y = y,\n peak_fitstatus = fitstat,\n peak_value = fitval,\n peak_dvalue = fiterr,\n peak_loglike = maxval,\n prof_ul_frac1 = cl,\n prof_ul_dlogL1 = profile_dlogL1,\n prof_ul_value1 = profile_flux1,\n prof_ul_frac2 = 2*(cl-0.5),\n prof_ul_dlogL2 = profile_dlogL2,\n prof_ul_value2 = profile_flux2,\n poi_values = poi_values,\n poi_probs = poi_probs,\n poi_dlogL_interp = poi_dlogL_interp,\n poi_chi2_equiv = poi_chi2_equiv,\n flux_emin = emin,\n flux_emax = emax)\n\n return ul_flux, results", "title": "" }, { "docid": "402033c8421f8ad1dd11176cc6e7dfc2", "score": "0.5225475", "text": "def _lg_coefs(self):\n return self.anticlassifier.get_params()[\"logistic\"].coef_[0]", "title": "" }, { "docid": 
"8b217bfa3231e6c59f592c7a6d27121a", "score": "0.5224634", "text": "def evaMaxGLMLL(y, family, baseline, familyextra, weights):\n method = family\n if method == 'normid':\n ll = 0\n elif method == 'binomlogit':\n y = y/familyextra\n ll = -familyextra* ( np.matmul((weights*y).T,np.log(y+eps)) + np.matmul((weights*(1-y)).T,np.log(1-y+eps) ))\n elif method == 'poissexp':\n ll = -np.matmul((weights*y).T , np.log(y+eps)) + sum(weights*y)\n return ll", "title": "" }, { "docid": "33023950ea3ddb32b20746a1b04cada8", "score": "0.52127737", "text": "def compute_sparsemax_log_likelihood(states, continuous_attention, goal, init_endogenous):\n def cost_function(ac, ss, exp, vm):\n data_states = [s.numpy() for s in states]\n agent_states = []\n for t in range(10):\n if t == 0:\n endogenous = init_endogenous\n else:\n endogenous = states[t-1]\n\n # set up the agent\n macro_agent = MicroworldMacroAgent(A=A, B=B, true_B=true_B, init_endogenous=endogenous,\n subgoal_dimensions=[0, 1, 2, 3, 4],\n init_exogenous=init_exogenous, T=T-t, final_goal=goal, cost=ac,\n step_size=ss, von_mises_parameter=vm, exponential_parameter=exp,\n continuous_attention=continuous_attention, exo_cost=exo_cost,\n step_with_model=False, verbose=False)\n\n # take a step\n _, s_next, _ = macro_agent.step(stop_t=1)\n agent_states.append(s_next.numpy())\n\n return human_and_agent_states_to_log_likelihood(data_states, agent_states, exp, vm)\n\n pbounds = {'ac': (0., 30.), 'ss': step_size_range, 'exp': exp_range, 'vm': vm_range}\n optimizer = BayesianOptimization(\n f=cost_function,\n pbounds=pbounds,\n random_state=2021,\n )\n\n optimizer.maximize(\n init_points=OPT_ITERS,\n n_iter=OPT_ITERS\n )\n\n best_llh = optimizer.max['target']\n\n return best_llh", "title": "" }, { "docid": "2d0b20baebc724376977507364b188fc", "score": "0.5203675", "text": "def log_prior(self, params):\n # log likelihood function, see:\n # https://en.wikipedia.org/wiki/Multivariate_normal_distribution#Likelihood_function\n var = self.std**2\n nitems = params.size\n diff = params - self.mean\n scaled_sq_err_term = -0.5 * jnp.dot(diff, diff) / var\n # log determinant of covariance matrix\n log_det_cov_term = -nitems * jnp.log(self.std)\n norm_term = -0.5 * nitems * jnp.log(2 * jnp.pi)\n return log_det_cov_term + scaled_sq_err_term + norm_term", "title": "" }, { "docid": "c71c472b6743d4132945acd23c0db699", "score": "0.5193155", "text": "def get_logreg(prof, tm, j, prods):\n # Filter relevant data\n dfj = prof.loc[prof[\"unit\"] == j, ].copy()\n dfj[\"tm\"] = [row[\"task\"] + \"-\" + row[\"mode\"] for i, row in dfj.iterrows()]\n dfj[\"tm-1\"] = dfj[\"tm\"].shift(-1)\n dfj.loc[pd.isna(dfj[\"tm-1\"]), \"tm-1\"] = \"None-None\"\n dfj = dfj[dfj[\"tm\"] == tm]\n # Train logistic regression\n if dfj.shape[0] > 0 and len(np.unique(dfj[\"tm-1\"])) > 1:\n X = np.array(dfj[prods])\n Y = np.array(dfj[\"tm-1\"])\n if(len(np.unique(Y)) > 2):\n # Multinomial if more than 2 classes\n logreg = linear_model.LogisticRegression(multi_class=\"multinomial\",\n solver=\"lbfgs\",\n # solver=\"sag\",\n max_iter=10000,\n verbose=2)\n else:\n # Binomial if only two classes\n logreg = linear_model.LogisticRegression(max_iter=10000,\n verbose=2)\n logreg.fit(X, Y)\n return logreg\n elif dfj.shape[0] > 0:\n return np.array(dfj[\"tm-1\"])[0]\n else:\n return \"None-None\"", "title": "" }, { "docid": "ba4816a62826b49c1c1a5e4693311083", "score": "0.51930934", "text": "def lnprior(params, args):\n log10M, c, tau, fmis, Am, B0, Rs = params\n #Prior edges\n if log10M < 11. or log10M > 18 or c <= 0. 
or Am <= 0. or tau <= 0. or fmis <= 0. or fmis >= 1.: return -np.inf\n if Rs <= 0.0 or B0 < 0.0 or Rs > 100.: return -np.inf\n #Gaussian priors on fmis, tau and Am\n Am_prior = args['Am_prior']\n Am_prior_var = args['Am_prior_var']\n LPfmis = (0.25 - fmis)**2/0.08**2 #Y1 \n LPtau = (0.17 - tau)**2/0.04**2 #Y1\n LPA = (Am_prior - Am)**2/Am_prior_var #Y1\n return -0.5*(LPfmis + LPtau + LPA)", "title": "" }, { "docid": "8411bf05626b9a1a4c520a73513aaba5", "score": "0.5192074", "text": "def gp_lc_fit(x_data, y_data, yerr_data=0.0, kernel='matern52'):\n\n # define the objective function (negative log-likelihood in this case)\n def neg_log_like(params):\n \"\"\"Negative log-likelihood.\"\"\"\n gp.set_parameter_vector(params)\n log_like = gp.log_likelihood(y, quiet=True)\n if np.isfinite(log_like):\n return -log_like\n else:\n return np.inf\n\n # and the gradient of the objective function\n def grad_neg_log_like(params):\n \"\"\"Gradient of the negative log-likelihood.\"\"\"\n gp.set_parameter_vector(params)\n return -gp.grad_log_likelihood(y, quiet=True)\n\n x, y, yerr = np.copy(x_data), np.copy(y_data), np.copy(yerr_data)\n\n # normalise the data for better results\n x_norm = 1e4\n x /= x_norm\n x_min, x_max = x.min(), x.max()\n\n y_norm = y.max()\n y /= y_norm\n yerr /= y_norm\n\n var, length_scale = np.var(y), np.diff(x).max()\n bounds_var, bounds_length = [(np.log(1e-6), np.log(10))], [(np.log(1e-8), np.log(1e2))]\n\n k1 = george.kernels.ConstantKernel(np.log(var))\n\n if kernel == 'matern52':\n k2 = george.kernels.Matern52Kernel(length_scale**2)\n elif kernel == 'matern32':\n k2 = george.kernels.Matern32Kernel(length_scale**2)\n elif kernel == 'squaredexp':\n k2 = george.kernels.ExpSquaredKernel(length_scale**2)\n else:\n raise ValueError(f'\"{kernel}\" is not a valid kernel.')\n\n ker = k1*k2\n\n gp = george.GP(kernel=ker, fit_mean=True)\n # initial guess\n gp.compute(x, yerr)\n\n # optimization routine for hyperparameters\n p0 = gp.get_parameter_vector()\n bounds = gp.get_parameter_bounds()\n results = scipy.optimize.minimize(neg_log_like, p0, jac=grad_neg_log_like,\n method=\"L-BFGS-B\", options={'maxiter':30},\n bounds=bounds)\n gp.set_parameter_vector(results.x)\n\n step = 0.01/x_norm\n x_pred = np.arange(x_min, x_max+step, step)\n\n mean, var = gp.predict(y, x_pred, return_var=True)\n std = np.sqrt(var)\n\n x_pred, mean, std = x_pred*x_norm, mean*y_norm, std*y_norm\n\n return x_pred, mean, std", "title": "" }, { "docid": "4457ee822c0e9acd8d3043b7160fd1ab", "score": "0.51916045", "text": "def log_likelihood(theta: np.ndarray) -> float:\n if theta[0] < 0:\n return -np.inf\n model = _straight_line(self._dt[diff_regime:], *theta)\n diff = (model - self._n[diff_regime:])\n logl = -0.5 * (logdet + np.matmul(diff.T, np.matmul(inv, diff)))\n return logl", "title": "" }, { "docid": "8a992b83af716812cc05b5a6428662e8", "score": "0.51777107", "text": "def GLMfit(y, X, qf, opts):\n \n #w0 = opts.w0.copy() #can be optimized here~\n w0 = np.zeros((X.shape[1],1))\n #w0 = ws[:,1].copy()\n #w0 = np.random.randn(X.shape[1],1)\n w = irls(y, X, w0, qf, opts.baseline, opts.family, opts.familyextra, opts.algo, opts.Display, opts.weights)\n \n l0 = evaMaxGLMLL(y, opts.family, opts.baseline, opts.familyextra, opts.weights)\n lleach = evalGLMLL(y, X, w, opts.baseline, opts.family, opts.familyextra, opts.weights)\n ll = lleach.sum()\n ll = ll - l0\n \n lp = 0.5*np.matmul(np.matmul(w.T, qf.toarray()), w) #quadratic penalty\n \n results = DotMap()\n results.w = w\n results.loglikelihood = -ll\n results.logpenalty 
= -lp\n results.loglikelihoodeach = -lleach\n return results", "title": "" }, { "docid": "788c4b4d12f6dacceac48efb8030244d", "score": "0.51377696", "text": "def fcn2min(params, x,data):\n a = params['a']\n b = params['b']\n c = params['c']\n d = params['d']\n e = params['e']\n Q = params['Q']\n sigma = params['sigma']\n h3 = params['h3']\n h4 = params['h4']\n model = np.zeros(1400,dtype=np.float)\n for line in linelist:\n model = model + Q*profile_function(line[0],sigma,line[1],get_lambd(x,a,b,c,d,e),h3,h4)\n #print get_lambd(x,a,b,c)\n \n return mask*(model-data)", "title": "" }, { "docid": "86240c63519cd732c8107e06d6ead3f6", "score": "0.51343495", "text": "def gun_regression(gun):\n\tlong_range = (gun[\"max_range\"] + gun[\"long_min\"]) / 2\n\teffective_range = (gun[\"long_min\"] + gun[\"effective_min\"]) / 2\n\tshort_range = (gun[\"effective_min\"] / 2)\n\tx = [long_range, effective_range, short_range]\n\ty = [gun[\"long_to_hit\"], gun[\"effective_to_hit\"], gun[\"short_to_hit\"]]\n\treturn(np.polyfit(np.log(x), y, 1))", "title": "" }, { "docid": "bc1775c50c24735e9a8e2497fc6e2103", "score": "0.51266634", "text": "def calculate_gradient_log_likelihood(y, tx, w):\n a=tx.dot(w)\n for i in range (len(a)):\n a[i]=sigmoid(a[i])\n gradient=np.transpose(tx).dot(a-y)\n return gradient/len(y)", "title": "" }, { "docid": "b2fc8f67993231717af181f0bf8d8837", "score": "0.51256275", "text": "def lbfgs(f,fgrad,x0,maxiter=100,max_corr=25,grad_norm_tol=1e-9, ihp=None,ls_criteria=\"armijo\"):\n x = x0.copy()\n yield x\n if ihp is None: ihp = InverseHessianPairs(max_corr)\n oldg = fgrad(x)\n if ls_criteria==\"armijo\": fval = f(x)\n p = -oldg/np.linalg.norm(oldg)\n\n log = logging.getLogger(\"lbfgs\")\n iter_count = 0\n while True:\n # TODO compare line searches\n g=None\n if ls_criteria == \"strong_wolfe\":\n alpha_star, _, _, fval, _, g = opt.line_search(f,fgrad,x,p,oldg) \n elif ls_criteria == \"armijo\":\n import scipy.optimize.linesearch\n alpha_star,_,fval=scipy.optimize.linesearch.line_search_armijo(f,x,p,oldg,fval)\n else:\n raise NotImplementedError\n\n if alpha_star is None:\n log.error(\"lbfgs line search failed!\")\n break\n s = alpha_star * p\n x += s\n yield x\n\n iter_count += 1\n \n if iter_count >= maxiter:\n break\n\n if g is None: \n log.debug(\"line search didn't give us a gradient. 
calculating\")\n g = fgrad(x)\n\n if np.linalg.norm(g) < grad_norm_tol:\n break\n\n\n y = g - oldg\n ihp.add( s,y )\n p = ihp.mvp(-g)\n oldg = g\n\n log.info(\"lbfgs iter %i %8.3e\",iter_count, fval)", "title": "" }, { "docid": "76d58bb535c99720b68fa474ee50f3fb", "score": "0.5124968", "text": "def new_logistic_regression(y, tx, lambda_, initial_w, max_iters, gamma):\n losses = []\n w = initial_w\n threshold = 1e-8\n for i in range(max_iters):\n loss = calculate_loss(y, tx, w)\n losses.append(loss)\n g = compute_gradient_likelihood(y, tx, w) + lambda_ * w\n hes = calculate_hessian(y, tx, w)\n w = w-gamma*np.linalg.inv(hes)@g\n if len(losses) > 1 and np.abs(losses[-1]- losses[-2] < threshold):\n break\n loss = calculate_loss(y, tx, w)\n return w,loss", "title": "" }, { "docid": "27b37859588fedc76668ded5cab0e6f9", "score": "0.51240313", "text": "def maxrayleigh_logcdf(x, n, sigma):\n return n * rayleigh_logcdf(x / sigma)", "title": "" }, { "docid": "7bdf4aa8e70127c869ead298bf5b2beb", "score": "0.51220745", "text": "def reg_logistic_regression(y, tx, lambda_, initial_w, max_iters, gamma, newton=False):\n\n def step_factor(w, grad):\n \"\"\"Calculate the step-factor depending on whether we are running the Newton method.\"\"\"\n return grad\n\n def calc_cost(w):\n \"\"\"Calculate log-likelihood error, adding the penalisation term.\"\"\"\n return costs.compute_log_likelihood_error(y, tx, w) + (lambda_ / 2.0) * w.dot(w)\n\n desc = 'RLOG'\n w = initial_w\n\n thres = 1e-8\n previous_loss = 0.0\n\n for n_iter in range(max_iters):\n # compute penalised log likelihood gradient\n grad = gradients.log_likelihood_gradient(y, tx, w) + lambda_ * w\n w = w - gamma * step_factor(w, grad) # compute new w\n\n loss = calc_cost(w)\n if np.abs(loss - previous_loss) < thres:\n break\n\n print(\"{desc}({bi}/{ti}): loss={l}\".format(desc=desc, bi=n_iter, ti=max_iters-1, l=loss))\n previous_loss = loss # update previous loss\n \n return w, loss", "title": "" }, { "docid": "3affc3b305909e78c35bc94b9aaf45b6", "score": "0.5119323", "text": "def log_prior_grad(self, weights):", "title": "" }, { "docid": "7cd2902aba4d97792e5e68974ce2a980", "score": "0.5111942", "text": "def power_regression(xi_list,yi_list):\n\timport math\n\tif (len(xi_list) == len(yi_list)):\n\t\tm,r2 = 0,0\n\t\txsum = sum(map(lambda x: math.log10(x),xi_list))\n\t\tx2sum = sum(map(lambda x: (math.log10(x))**2,xi_list))\n\t\txysum = sum(map(lambda x,y: math.log10(x)*math.log10(y),xi_list,yi_list))\n\t\tysum = sum(map(lambda y: math.log10(y),yi_list))\n\t\ty2sum = sum(map(lambda y: math.log10(y)**2,yi_list))\n\t\tm = float((((len(xi_list) * float(xysum)) - (float(xsum) * float(ysum)))/((len(xi_list) * float(x2sum)) - (float(xsum) * float(xsum)))))\n\t\tb = float((float(ysum) - (float(m) * float(xsum)))/(len(xi_list)))\n\t\tr2 = float((((len(xi_list) * float(xysum)) - (float(xsum) * float(ysum)))/((((len(xi_list) * float(x2sum)) - (float(xsum) * float(xsum)))**.5) * (((len(xi_list) * float(y2sum)) - (float(ysum) * float(ysum)))**.5)))**2)\n\t\tprint \" \"\n\t\tprint \"sum of Xi = \" + str(xsum) + \"\\nsum of Yi = \" + str(ysum) + \"\\nsum of XiYi = \" + str(xysum) + \"\\nsum of X2i = \" + str(x2sum) + \"\\nsum of Y2i = \" + str(y2sum)\n\t\tprint \" m = \" + str(m)\n\t\tprint \" b = \" + str((10**b))\n\t\tprint \" r2 = \" + str(r2)\n\t\tprint \" r = \" + str((float(r2))**.5)\n\t\tprint \"y = \" + str((10**b)) + \"(x)^\" + str(m)\n\telse:\n\t\tprint \"there was an unequal number of list elements in x list and the y list\"", "title": "" }, { "docid": 
"278c9c7dae2d27e510c0711c71fbeb6b", "score": "0.511026", "text": "def find_maximum(self, log_scale):\n # JWH stuff\n h1 = self.data.hist\n tobjarray = self.stack.GetStack()\n max_histo_range = None\n\n max_data_range = h1.GetBinContent(h1.GetMaximumBin()) + h1.GetBinError(h1.GetMaximumBin())\n for h in tobjarray:\n if not isinstance(h, ROOT.TH1): continue\n if max_histo_range is None:\n # max_histo_range = h.GetBinContent(h.GetMaximumBin()) + h.GetBinError(h.GetMaximumBin())\n max_histo_range = h.GetBinContent(h.GetMaximumBin())\n else:\n # max_histo_range = max(max_histo_range, h.GetBinContent(h.GetMaximumBin()) + h.GetBinError(h.GetMaximumBin()))\n max_histo_range = max(max_histo_range, h.GetBinContent(h.GetMaximumBin()))\n max_histo_range = max(max_histo_range, max_data_range)\n\n if log_scale and max_histo_range <= 0:\n max_histo_range = 1e-2\n\n logging.debug(\"found content maximum of {}\".format(max_histo_range))\n return max_histo_range\n\n # h1 = self.data.hist\n # tobjarray = self.stack.GetStack()\n # h2 = tobjarray.Last()\n # maximum = max(h1.GetBinContent(h1.GetMaximumBin())+h1.GetBinError(h1.GetMaximumBin()), h2.GetBinContent(h2.GetMaximumBin()))\n # print \"found maximum of\", maximum\n # return maximum", "title": "" }, { "docid": "5b59c80267c27e8b4206ba7aae26415d", "score": "0.5102782", "text": "def lbfgs(initial_solution, model, verbose=True):\n L = initial_solution.shape[0]\n\n # Define objective function\n def obj(x):\n coords = x.reshape(L, 3)\n return -model.evaluate(coords)\n\n # Define gradient function\n def jac(x):\n coords = x.reshape(L, 3)\n grad = model.gradient(coords)\n return grad.flatten()\n\n # Define callback function\n def callback(x):\n if verbose:\n print('L-BFGS: %f' % obj(x))\n\n # Solve the optimization problem\n x0 = initial_solution.flatten()\n res = scipy.optimize.minimize(\n obj, x0, jac=jac, method='L-BFGS-B', callback=callback)\n return res.x.reshape(L, 3)", "title": "" }, { "docid": "a5d5ab6779225f64381d6557e3d374e3", "score": "0.51017237", "text": "def variances_log_chems(chemnames,logprior=1.0e20) :\n #senstraj = load('EndogenousEGFR3T3sensNoPriors')\n times = senstraj.timepoints \n jtjinv = scipy.linalg.inv(jtjtrunc+1.0/logprior**2*np.eye(\n len(jtjtrunc),len(jtjtrunc))) \n var = {}\n bestfit = {} \n optvarkeys = list(clc.optimizableVars.keys())\n first = optvarkeys[0]\n last = optvarkeys[-1]\n for name in chemnames :\n var[name] = [] \n bestfit[name] = [] \n chemindex = senstraj.key_column.get(name) \n index1sens = senstraj.key_column.get((name,first))\n index2sens = senstraj.key_column.get((name,last))\n sensarray_this_chem = copy.copy(senstraj.values[:,index1sens:(index2sens+1)])\n traj_this_chem = copy.copy(senstraj.values[:,chemindex]) \n for j, pname in enumerate(ovvarnames) :\n sensarray_this_chem[:,j] = sensarray_this_chem[:,j]*curp.get(pname)\n # need to scale each row by 1/chemvalue to mimic a derivative w.r.t. \n # log chemicals. 
Add a small value to chemvalue to avoid divide by zero\n for i in range(len(times)) :\n sensarray_this_chem[i,:] = old_div(sensarray_this_chem[i,:],(traj_this_chem[i]+1.0e-6))\n\n tmp = np.dot(sensarray_this_chem,jtjinv)\n for i in range(len(tmp[:,0])) :\n var[name].append(np.dot(tmp[i,:],sensarray_this_chem[i,:]))\n \n bestfit[name] = senstraj.values[:,chemindex]\n var[name] = np.asarray(var[name])\n return times,bestfit,var", "title": "" }, { "docid": "bc9d0a0ba7e3e076cc9a78d55dc31067", "score": "0.5099913", "text": "def _optimize_core(self, **kwargs):\n domain_bounding_box = self.domain.get_bounding_box()\n domain_list = [(interval.min, interval.max) for interval in domain_bounding_box]\n domain_numpy = numpy.array(domain_list * self._num_points)\n\n # Parameters defined above in :class:`~moe.optimal_learning.python.python_version.optimization.LBFGSBParameters` class.\n return scipy.optimize.fmin_l_bfgs_b(\n func=self._scipy_decorator(self.objective_function.compute_objective_function, **kwargs),\n x0=self.objective_function.current_point.flatten(),\n bounds=domain_numpy,\n fprime=self._scipy_decorator(self.objective_function.compute_grad_objective_function, **kwargs),\n **self.optimization_parameters.scipy_kwargs()\n )[0]", "title": "" }, { "docid": "1713e0d732d6c72e48777bf3c509b7f9", "score": "0.5096502", "text": "def m(f):\n return -2.5 * np.log10(f / f0)", "title": "" }, { "docid": "03585d9df5ae60ab58282bed5d617bf2", "score": "0.5095442", "text": "def determine_qm_software(fullpath):\n with open(fullpath, 'r') as f:\n line = f.readline()\n software_log = None\n while line != '':\n if 'gaussian' in line.lower():\n f.close()\n software_log = GaussianLog(fullpath)\n break\n elif 'qchem' in line.lower():\n f.close()\n software_log = QChemLog(fullpath)\n break\n elif 'molpro' in line.lower():\n f.close()\n software_log = MolproLog(fullpath)\n break\n line = f.readline()\n else:\n raise InputError(\n \"File at {0} could not be identified as a Gaussian, QChem or Molpro log file.\".format(fullpath))\n return software_log", "title": "" }, { "docid": "00773af072590057dbb7ffc076fa977c", "score": "0.50911415", "text": "def _compute_log_likelihood(self, X):\r\n \r\n #return array (X.length,2) where 1st row is wv=0(dgamma) and 2nd row is wv=1(geninvgauss)\r\n #these functions are passed in, but are essentially hard coded\r\n dg,gigaus = self.emissionprob_\r\n #print('log likelihood emissions prob:',self.emissionprob_)\r\n \r\n d=dg[0]\r\n g=gigaus[0]\r\n \r\n #print(d)\r\n #print(gigaus)\r\n \r\n #the log of the pdf is the log likelihood for each observation passed in X\r\n non = stats.dgamma.logpdf(X, *d)\r\n wv = stats.geninvgauss.logpdf(X, *g)\r\n \r\n return np.append(non,wv,axis=1)", "title": "" }, { "docid": "0083445fdbd231242caa05ce1f10398c", "score": "0.50881934", "text": "def maximize(mod, p0, limits=None, ipfix=None, verbose=1, useGrad=False):\n\n if limits is None:\n limits = [[-30,30] for x in p0]\n\n if ipfix is None:\n ipfix = []\n npar = len(p0)\n pfix = np.array([p0[i] for i in ipfix])\n ivar = [i for i in range(npar) if not i in ipfix]\n info = [npar, pfix, ipfix, ivar]\n\n # main negative log-likelihood function #\n def f(x, mod, info):\n npar, pfix, ipfix, ivar = info\n x = np.array([np.clip(xx, l[0], l[1]) for xx,l in zip(x,limits)])\n y = np.zeros(npar, np.double)\n y[ipfix] = pfix\n y[ivar ] = x\n #y = np.array([np.clip(xx, l[0], l[1]) for xx,l in zip(y,limits)])\n\n try:\n l = mod.loglikelihood(y)\n except np.linalg.LinAlgError:\n l = -1e6\n\n if verbose and not useGrad:\n 
print('%10.6g | %s \\r'%(l, ' '.join(['%10.3g'%xx for xx in x])), end=\"\")\n\n return -l\n\n # first derivative of the negative log-likelihood\n def fprime(x, mod, info):\n npar, pfix, ipfix, ivar = info\n x = np.array([np.clip(xx, l[0], l[1]) for xx,l in zip(x,limits)])\n y = np.zeros(npar, np.double)\n y[ipfix] = pfix\n y[ivar ] = x\n\n try:\n l, g = mod.loglikelihood_derivative(y, calc_fisher=False)\n g = g[ivar]\n except np.linalg.LinAlgError:\n l = -1e6\n g = x*0 - 1e6\n if verbose:\n #print('%10.6g | %s | %s\\r'%(l, \n # ' '.join(['%10.3g'%xx for xx in x]), ' '.join(['%10.3g'%xx for xx in g])), end=\"\")\n print('%10.6g | %s | %s\\r'%(l, \n ' '.join(['%10.3g'%xx for xx in x]), '%10.3g'%np.max(np.abs(g))), end=\"\")\n return -g\n\n if not useGrad:\n fprime = None\n res = opt.minimize(f, p0[ivar], args=(mod, info), method='BFGS', tol=1e-4, jac=fprime, \n options={'gtol':1e-4})\n\n # last print \n if verbose: \n print('%10.6g | %s | %s\\r'%(-res.fun, \n ' '.join(['%10.3g'%xx for xx in res.x]), '%10.3g'%np.max(np.abs(res.jac))), end=\"\")\n print('\\n** done **\\n')\n\n p, pe = res.x, np.diag(res.hess_inv)**0.5\n y, ye = np.zeros(npar, np.double), np.zeros(npar, np.double)\n y[ipfix] = pfix\n y[ivar ] = p\n ye[ivar] = pe \n y = np.array([np.clip(xx, l[0], l[1]) for xx,l in zip(y,limits)])\n\n return y, ye, res", "title": "" }, { "docid": "ddba57e4f1f263e74c86a9eb4b2004b7", "score": "0.507938", "text": "def get_log_vgr(self):\n for line in self.content :\n \n if not re.search(\"logarithmic VGR\", line, re.I) == None :\n \n log_vgr = float(re.findall(\"[0-9]+\\.*[0-9]*\", line)[0])\n return log_vgr\n \n return 0.0", "title": "" }, { "docid": "6fc1d30b701ee4ac64bad5d082359cb7", "score": "0.50783783", "text": "def getslopeFM_lnC(filename, plot=False):\n data = pd.read_csv(filename,encoding='iso-8859-1',index_col='ProfileID') \n pid = np.unique(data.index.values.astype(float)) # index of profile start\n pid = pid[np.logical_not(np.isnan(pid))]\n slp = np.zeros((len(pid),2))\n dum = 0\n if plot == True:\n nrow = 4; ncol = 5\n nfig = math.ceil(len(pid)/(nrow*ncol*1.))\n for i in pid:\n print \"profile %d ....\"%i\n d14C = np.array(data.loc[i:i,'D14C_BulkLayer'].values).astype(float)\n fm = d14C/1000. 
+ 1\n lnC = np.log(np.array(data.loc[i:i,'pct_C'].values).astype(float))\n slp[dum, 0] = i\n notNANs = ~np.any(np.isnan([lnC,fm]),0)\n if len(lnC[notNANs]) != 0:\n slp[dum, 1] = np.polyfit(lnC[notNANs], fm[notNANs], 1)[0]\n lsfit = np.poly1d(np.polyfit(lnC[notNANs], fm[notNANs], 1))\n else:\n slp[dum, 1] = np.polyfit(lnC, fm, 1)[0]\n lsfit = np.poly1d(np.polyfit(lnC, fm, 1))\n yfit = lsfit(lnC)\n # plot line\n if plot == True:\n axesn = int(math.fmod(dum, nrow*ncol))\n if axesn == 0:\n fig, axes = plt.subplots(nrows=nrow, ncols=ncol, figsize=(12,8))\n curfign = 0\n ax = fig.axes[axesn]\n ax.scatter(lnC, fm)\n ax.plot(lnC, yfit, '--')\n if isinstance(data.loc[i,'Site'], unicode):\n ax.set_title(str(int(i)) + \": \" + data.loc[i,'Site'])\n else:\n ax.set_title(str(int(i)) + \": \" + data.loc[i,'Site'].values[0])\n ax.set_xlabel(r\"ln C(%)\")\n ax.set_ylabel(r\"Fraction Modern\")\n curfign = curfign + 1\n if curfign == nrow*ncol or i == len(pid):\n matplotlib.rcParams.update({'font.size': 6}) \n plt.tight_layout() \n fig.savefig('./figures/FmvslnC_' + str(i) + '.png') \n plt.close()\n dum = dum + 1\n return slp", "title": "" }, { "docid": "c8c3ec5f21425ea29baf484f0300fad4", "score": "0.5072871", "text": "def fmax(func_to_maximize, initial_guess=0.5*V):\n func_to_minimize = lambda x : -func_to_maximize(x)\n return fmin(func_to_minimize, initial_guess, disp=False)[0]", "title": "" }, { "docid": "106db572432e2c221c9afe999791abc6", "score": "0.5072115", "text": "def min_RI_l(r_l, S, beta, severity):\n r = r_l*S**beta\n return -1./r*np.log(1-severity)", "title": "" }, { "docid": "e3317436d1e1f39f781d6b2d37458791", "score": "0.5064691", "text": "def log_feature(tX):\n for i in range(tX.shape[1]):\n tX[:,i] = np.log(tX[:,i] - min(tX[:,i]) + 1)\n return tX", "title": "" }, { "docid": "7f3cb567288d73416ecb645011047183", "score": "0.50583655", "text": "def log_linear_interpolation(src_params, dst_params, lamb, min_value):\n\n # clip to handle out-of-range end state values\n src_params = jnp.maximum(src_params, min_value)\n dst_params = jnp.maximum(dst_params, min_value)\n\n return jnp.exp(linear_interpolation(jnp.log(src_params), jnp.log(dst_params), lamb))", "title": "" }, { "docid": "1e53946e88ca722345f96931e422f441", "score": "0.50424355", "text": "def fmin_cg(f, x0, fprime=None, args=(), gtol=1e-5, norm=Inf, epsilon=_epsilon,\n maxiter=None, full_output=0, disp=1, retall=0, callback=None):\n x0 = asarray(x0).flatten()\n if maxiter is None:\n maxiter = len(x0)*200\n func_calls, f = wrap_function(f, args)\n if fprime is None:\n grad_calls, myfprime = wrap_function(approx_fprime, (f, epsilon))\n else:\n grad_calls, myfprime = wrap_function(fprime, args)\n gfk = myfprime(x0)\n k = 0\n N = len(x0)\n xk = x0\n old_fval = f(xk)\n old_old_fval = old_fval + 5000\n\n if retall:\n allvecs = [xk]\n sk = [2*gtol]\n warnflag = 0\n pk = -gfk\n gnorm = vecnorm(gfk,ord=norm)\n while (gnorm > gtol) and (k < maxiter):\n deltak = numpy.dot(gfk,gfk)\n\n # These values are modified by the line search, even if it fails\n old_fval_backup = old_fval\n old_old_fval_backup = old_old_fval\n\n alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \\\n line_search_wolfe1(f,myfprime,xk,pk,gfk,old_fval,\n old_old_fval,c2=0.4)\n if alpha_k is None: # line search failed -- use different one.\n alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \\\n line_search_wolfe2(f,myfprime,xk,pk,gfk,\n old_fval_backup,old_old_fval_backup)\n if alpha_k is None or alpha_k == 0:\n # This line search also failed to find a better solution.\n warnflag = 
2\n break\n xk = xk + alpha_k*pk\n if retall:\n allvecs.append(xk)\n if gfkp1 is None:\n gfkp1 = myfprime(xk)\n yk = gfkp1 - gfk\n beta_k = pymax(0,numpy.dot(yk,gfkp1)/deltak)\n pk = -gfkp1 + beta_k * pk\n gfk = gfkp1\n gnorm = vecnorm(gfk,ord=norm)\n if callback is not None:\n callback(xk)\n k += 1\n\n\n if disp or full_output:\n fval = old_fval\n if warnflag == 2:\n if disp:\n print \"Warning: Desired error not necessarily achieved due to precision loss\"\n print \" Current function value: %f\" % fval\n print \" Iterations: %d\" % k\n print \" Function evaluations: %d\" % func_calls[0]\n print \" Gradient evaluations: %d\" % grad_calls[0]\n\n elif k >= maxiter:\n warnflag = 1\n if disp:\n print \"Warning: Maximum number of iterations has been exceeded\"\n print \" Current function value: %f\" % fval\n print \" Iterations: %d\" % k\n print \" Function evaluations: %d\" % func_calls[0]\n print \" Gradient evaluations: %d\" % grad_calls[0]\n else:\n if disp:\n print \"Optimization terminated successfully.\"\n print \" Current function value: %f\" % fval\n print \" Iterations: %d\" % k\n print \" Function evaluations: %d\" % func_calls[0]\n print \" Gradient evaluations: %d\" % grad_calls[0]\n\n\n if full_output:\n retlist = xk, fval, func_calls[0], grad_calls[0], warnflag\n if retall:\n retlist += (allvecs,)\n else:\n retlist = xk\n if retall:\n retlist = (xk, allvecs)\n\n return retlist", "title": "" }, { "docid": "2bcecfc909a12e18728a95959a47e5d6", "score": "0.50389254", "text": "def mle(x):\n\n def mle_eq(p, z):\n \"\"\"\n MLE equation set\n\n Parameters\n ----------\n p : list\n distribution parameters a, b, c\n z : array_like\n data\n \"\"\"\n loc, scale = p # unpack parameters\n n = z.size\n\n out = [loc + scale * np.log(1. / n * np.sum(np.exp(z / scale))),\n z.mean() - np.sum(z * np.exp(z / scale)) / np.sum(np.exp(z / scale)) - scale]\n\n return out\n\n x = np.array(x)\n a0, b0 = msm(x) # initial guess\n a, b = fsolve(mle_eq, [a0, b0], args=x)\n\n return a, b", "title": "" }, { "docid": "61a47132fc961fc39314eaea02973c08", "score": "0.5038765", "text": "def test_l_bfgs_b_numjac(self):\n retval = optimize.fmin_l_bfgs_b(self.func, self.startparams,\n approx_grad=True,\n maxfun=self.maxiter)\n\n (params, fopt, d) = retval\n\n assert_allclose(self.func(params), self.func(self.solution),\n atol=1e-6)", "title": "" }, { "docid": "c4006552559ac127a03e5d1be6d8b5f6", "score": "0.5037766", "text": "def message(x, phi, psi, prior):\n \n minval = np.inf\n minarg = None\n for state in phi:\n val = -careful_log(phi[state])\n if pis != None:\n val -= careful_log(psi(state, x))\n if prior != None:\n val += prior[x]\n if val < minval:\n minval = val\n minarg = state\n \n return (minarg, minval)", "title": "" }, { "docid": "34523db55338e55c808f1d9afca4e62d", "score": "0.5034334", "text": "def optimization(likelihood, x0, args, method = 1, maxiter = 1000, xmin = [], xmax = [] ):\n Nfeval = 1\n #print(Nfeval, id(Nfeval))\n def callback(xk):\n nonlocal Nfeval\n xx = np.sqrt(np.exp(xk[:]))*1000\n #print(id(Nfeval))\n if Nfeval%10 == 0:\n if (len(xk) == 2):\n print('{0:4d} {1: 3.6f} {2: 3.6f}'.format(Nfeval, *xx))\n elif (len(xk) == 4):\n print('{0:4d} {1: 3.6f} {2: 3.6f} {3: 3.6f} {4: 3.6f}'.format(Nfeval, *xx))\n else:\n print('{0:4d} {1: 3.6f} ...'.format(Nfeval, *xx))\n\n Nfeval += 1\n\n if (method == 1):\n print('Nelder-Mead simplex method used for optimization')\n if (len(x0) == 2):\n print('{0:4s} {1:9s} {2:9s} '.format('Iter', ' X1', ' X2'))\n elif (len(x0) == 4):\n print('{0:4s} {1:9s} {2:9s} 
{3:9s} {4:9s} '.format('Iter', ' X1', ' X2', ' X3', ' X4'))\n else:\n print('{0:4s} {1:9s} ...'.format('Iter', ' X1'))\n xopt = opt.minimize(likelihood, x0, args, method='nelder-mead',\n options={'maxiter':maxiter,'disp': True}, callback = callback)\n elif (method == 2):\n #test = lambda x: 100*(x[1]-x[0]**2)**2+(1-x[0])**2\n #xopt = opt.fmin(func = likelihood, x0)\n print('BFGS method used for optimization')\n xopt = opt.minimize(likelihood, x0, args, method='BFGS',\n options={'maxiter':maxiter, 'disp': True}, callback = callback)\n elif (method == 3):\n print('L-BFGS-B method used for optimization')\n xopt = opt.minimize(likelihood, x0, args, method='L-BFGS-B',\n options={'maxiter':maxiter,'disp': None}, callback = callback)\n else:\n #x0 = [10., 10.] # the starting point\n #xmin = [1., 1.] # the bounds\n #xmax = [11., 11.]\n # rewrite the bounds in the way required by L-BFGS-B\n bounds = [(low, high) for low, high in zip(xmin, xmax)]\n # use method L-BFGS-B because the problem is smooth and bounded\n minimizer_kwargs = dict(method=\"L-BFGS-B\", bounds=bounds)\n xopt = opt.basinhopping(likelihood, x0, minimizer_kwargs=minimizer_kwargs) \n \n return xopt", "title": "" }, { "docid": "024eadbbc2c8dbdbe4acf249807e0b30", "score": "0.50315374", "text": "def main(empirical_data_path,plotdata_path,minx,maxx,miny,maxy):\n \n \n new_data = initAnalysis(empirical_data_path,plotdata_path,minx,maxx,miny,maxy)\n gvg,tt = loadVariogramFromData(plotdata_path,new_data)\n \n resum,gvgn,resultspd,results = fitGLSRobust(new_data,gvg,num_iterations=50,distance_threshold=1000000)\n \n \n \n\n #CovMat = buildSpatialStructure(new_data,gvg.model)\n #results,resum = calculateGLS(new_data,CovMat)\n\n \n logger.info(\"Writing to file\")\n f = open(\"gls1.txt\",'w')\n f.write(resum.as_text())\n f.close() \n \n logger.info(\"Finished! 
Results in: gls1.txt\")\n \n return {'dataframe':new_data,'variogram':gvgn,'modelGLS':resum,'results':resultspd,'model_results':results}", "title": "" }, { "docid": "f3a3e77730d932ce9e0ae5b7cc720a8f", "score": "0.5026697", "text": "def hessian_log_likelihood(xm,m,Q,sm2,H_mat):\n def G(x):\n if x=='e': dxe=1\n else: dxe=0\n dQ00x = H_mat['Q_{}'.format(x)][0,0]\n dm00x = H_mat['m_{}'.format(x)][0,0]\n Q00=Q[0,0];m00=m[0,0]\n return (-((dQ00x + dxe)*(Q00 + sm2)) + 2*dm00x*(Q00 + sm2)*(-m00 + xm)\\\n + (dQ00x + dxe)*(-m00 + xm)**2)/(2.*(Q00 + sm2)**2)\n def H(x,y):\n if x=='e': dxe=1\n else: dxe=0\n if y=='e': dye=1\n else: dye=0\n dQ00x = H_mat['Q_{}'.format(x)][0,0]\n dQ00xy = H_mat['Q_{}{}'.format(x,y)][0,0]\n dQ00y = H_mat['Q_{}'.format(y)][0,0]\n dm00x = H_mat['m_{}'.format(x)][0,0]\n dm00y = H_mat['m_{}'.format(y)][0,0]\n dm00xy = H_mat['m_{}{}'.format(x,y)][0,0]\n Q00=Q[0,0];m00=m[0,0]\n return ((dQ00x + dxe)*(dQ00y + dye)*(Q00 + sm2) - 2*dm00x*dm00y*(Q00 + sm2)**2 -\\\n dQ00xy*(Q00 + sm2)**2 - 2*dm00y*(dQ00x + dxe)*(Q00 + sm2)*(-m00 + xm) -\\\n 2*dm00x*(dQ00y + dye)*(Q00 + sm2)*(-m00 + xm) + 2*dm00xy*(Q00 + sm2)**2*(-m00 + xm) -\\\n 2*(dQ00x + dxe)*(dQ00y + dye)*(-m00 + xm)**2 + dQ00xy*(Q00 + sm2)*(-m00 + xm)**2)/\\\n (2.*(Q00 + sm2)**3)\n grad = np.array([G(x) for x in ['m','g','s','e']])[:,None]\n hess = np.array([H(x,y) for y in ['m','g','s','e'] for x in\\\n ['m','g','s','e']]).reshape(4,4)\n return grad, hess", "title": "" }, { "docid": "bc8b4cdf8f316c3d8c862f14624791f4", "score": "0.5026354", "text": "def test_l_bfgs_b_funjac(self):\n x, f, d = optimize.fmin_l_bfgs_b(self.fj, [0, -1], args=(2.0, ),\n bounds=self.bounds)\n assert_(d['warnflag'] == 0, d['task'])\n assert_allclose(x, self.solution, atol=1e-6)", "title": "" }, { "docid": "c006370b3fce92dd7ab5feaadc0356c5", "score": "0.50020134", "text": "def _log_prior(self):\n log_prob = scipy.stats.beta._logpdf(self.theta, 2., 1.).sum()\n if np.isneginf(log_prob):\n log_prob = SMALLEST_FLOAT\n return log_prob", "title": "" }, { "docid": "4e9e511f88d546bb1cd56dc879368aa0", "score": "0.49918845", "text": "def _load_cm_multiplier(self, log):\n cm_mult = None\n for line in log:\n if 'INFO:root:CM Multiplier:' in line:\n cm_mult = float(line.split(' ')[-1].strip())\n return cm_mult", "title": "" }, { "docid": "91b03db8c1c3377d14613acb4787db0f", "score": "0.4988446", "text": "def fmgls(non_linear_params):\n return -mgls(non_linear_params)[0]", "title": "" }, { "docid": "a09c9f58f308aa1c9580b239cede7323", "score": "0.49883476", "text": "def get_logM_bounds(self) -> Tuple[float, float]:", "title": "" }, { "docid": "45e9c27bab36d7179c66f69337006aff", "score": "0.4984155", "text": "def logistic_regression(y, tx, initial_w, max_iters, gamma):\n\tlosses = []\n\tw = initial_w\n\n\tfor i in range(max_iters):\n\t\tw, loss = learning_by_gradient_descent(y, tx, w, gamma)\n\t\t# if not (i % 10): \n\t\t# \tprint('Iteration {}, loss {}'.format(i, loss))\n\t\tlosses.append(loss)\n\n\treturn w, losses[-1]", "title": "" }, { "docid": "d78b549b7f7e8551f4d41d5e4e688edb", "score": "0.49798104", "text": "def CalcLogRegGradient(self, *args):\n return _snap.TLogRegFit_CalcLogRegGradient(self, *args)", "title": "" }, { "docid": "553d2a362222c54c7089ec178eea7fdd", "score": "0.49743974", "text": "def lse(x):\n x = np.sort(x)\n f = empirical_cdf(x.size, kind='median')\n fp = lambda v, z: 1. 
- np.exp(-np.exp((z - v[0]) / v[1])) # parametric Gumbel function\n e = lambda v, z, y: (fp(v, z) - y) # error function to be minimized\n a0, b0 = msm(x) # initial guess based on method of moments\n\n # least square fit\n p, cov, info, msg, ier = leastsq(e, [a0, b0], args=(x, f), full_output=1)\n\n return p[0], p[1]", "title": "" }, { "docid": "42fad586b8f5e0707ca5d291f9fd5178", "score": "0.4972404", "text": "def delta_loglike(self):\n try:\n # rewrite due to deprication of numpy.matrix\n # gm = np.matrix(self.gradient())\n # H = self.hessian()\n # return (gm * H.I * gm.T)[0,0]/4\n g = self.gradient()\n H = self.hessian()\n return np.dot( np.dot(g, np.linalg.inv(H)) , g)/4\n except Exception as msg:\n print ('Failed log likelihood estimate, returning 99.: %s' % msg)\n return 99.", "title": "" }, { "docid": "b116c94e5eb8d88e5a5e15962edf477c", "score": "0.49649793", "text": "def fitLinearLogLogModel(geodataframe):\n #linear model\n logger.info(\"Fitting OLS linear model: logBiomass ~ logSppN \")\n model = smf.ols(formula='logBiomass ~ logSppN',data=geodataframe)\n results = model.fit()\n param_model = results.params\n #summary = results.summary()\n return (model,results)", "title": "" }, { "docid": "47912fdb6c7f6a08e82ed1b366e8003f", "score": "0.49635294", "text": "def viterbi(self, demo, reg=True):\n\n nb_data, dim = demo.shape if isinstance(demo, np.ndarray) else demo['x'].shape\n\n logB = np.zeros((self.nb_states, nb_data))\n logDELTA = np.zeros((self.nb_states, nb_data))\n PSI = np.zeros((self.nb_states, nb_data)).astype(int)\n\n _, logB = self.obs_likelihood(demo)\n\n # forward pass\n logDELTA[:, 0] = np.log(self.init_priors + realmin * reg) + logB[:, 0]\n\n for t in range(1, nb_data):\n for i in range(self.nb_states):\n # get index of maximum value : most probables\n PSI[i, t] = np.argmax(logDELTA[:, t - 1] + np.log(self.Trans[:, i] + realmin * reg))\n logDELTA[i, t] = np.max(logDELTA[:, t - 1] + np.log(self.Trans[:, i] + realmin * reg)) + logB[i, t]\n\n assert not np.any(np.isnan(logDELTA)), \"Nan values\"\n\n # backtracking\n q = [0 for i in range(nb_data)]\n q[-1] = np.argmax(logDELTA[:, -1])\n for t in range(nb_data - 2, -1, -1):\n q[t] = PSI[q[t + 1], t + 1]\n\n return q", "title": "" }, { "docid": "a1502aec203c26755703891dd0f65bc6", "score": "0.49614376", "text": "def exponential_regression(xi_list,yi_list):\n\timport math\n\tif (len(xi_list) == len(yi_list)):\n\t\tm,r2 = 0,0\n\t\txsum = sum(xi_list)\n\t\tx2sum = sum(map(lambda x: x**2,xi_list))\n\t\txysum = sum(map(lambda x,y: x*math.log(y),xi_list,yi_list))\n\t\tysum = sum(map(lambda y: math.log(y),yi_list))\n\t\ty2sum = sum(map(lambda y: math.log(y)**2,yi_list))\n\t\tm = float((((len(xi_list) * float(xysum)) - (float(xsum) * float(ysum)))/((len(xi_list) * float(x2sum)) - (float(xsum) * float(xsum)))))\n\t\tb = float((float(ysum) - (float(m) * float(xsum)))/(len(xi_list)))\n\t\tr2 = float((((len(xi_list) * float(xysum)) - (float(xsum) * float(ysum)))/((((len(xi_list) * float(x2sum)) - (float(xsum) * float(xsum)))**.5) * (((len(xi_list) * float(y2sum)) - (float(ysum) * float(ysum)))**.5)))**2)\n\t\tprint \" \"\n\t\tprint \"sum of Xi = \" + str(xsum) + \"\\nsum of Yi = \" + str(ysum) + \"\\nsum of XiYi = \" + str(xysum) + \"\\nsum of X2i = \" + str(x2sum) + \"\\nsum of Y2i = \" + str(y2sum)\n\t\tprint \" m = \" + str(m)\n\t\tprint \" b = \" + str(math.exp(b))\n\t\tprint \" r2 = \" + str(r2)\n\t\tprint \" r = \" + str((float(r2))**.5)\n\t\tprint \"y = \" + str(math.exp(b)) + \"e^(\" + str(m) + \"(x)\"\n\telse:\n\t\tprint \"there 
was an unequal number of list elements in x list and the y list\"", "title": "" }, { "docid": "0bacb7c0ed27c4541afe2c8f3f80621a", "score": "0.4955185", "text": "def reg_logistic_regression(y, tx, initial_w, max_iters, gamma, lambda_):\n \n w = initial_w\n for n_iter in range(max_iters):\n loss = compute_loss_logistic_regularized(y, tx, w, lambda_)\n grad = compute_gradient_logistic_regularized(y,tx,w, lambda_)\n w = w - gamma * grad\n \n return w, loss", "title": "" }, { "docid": "1cce6b6189cf05fe16093bb58319c681", "score": "0.49539003", "text": "def _max_y_given_linear_input(c: float, w: torch.Tensor, b: torch.Tensor,\n x_coeffs, beta_coeffs, constants,\n x_hat) -> (float, np.ndarray):\n prog = gurobipy.Model()\n y_lower = torch.max(c * (w @ x_hat + b), w @ x_hat + b)\n y = prog.addVar(lb=y_lower.item())\n beta = prog.addVar(lb=0., ub=1.)\n prog.setObjective(gurobipy.LinExpr([1.], [y]), sense=gurobipy.GRB.MAXIMIZE)\n for i in range(len(x_coeffs)):\n prog.addLConstr(gurobipy.LinExpr([1., -beta_coeffs[i].item()],\n [y, beta]),\n sense=gurobipy.GRB.LESS_EQUAL,\n rhs=(x_coeffs[i] @ x_hat + constants[i]).item())\n prog.setParam(gurobipy.GRB.Param.OutputFlag, False)\n prog.optimize()\n assert (prog.status == gurobipy.GRB.Status.OPTIMAL)\n beta_val = beta.x\n y_max = y.x\n return (y_max, beta_val)", "title": "" }, { "docid": "14f5a449ff50c8b88225593e92c99809", "score": "0.49455962", "text": "def test_l_bfgs_b(self):\n retval = optimize.fmin_l_bfgs_b(self.func, self.startparams,\n self.grad, args=(),\n maxfun=self.maxiter)\n\n (params, fopt, d) = retval\n\n assert_allclose(self.func(params), self.func(self.solution),\n atol=1e-6)\n\n # Ensure that function call counts are 'known good'; these are from\n # Scipy 0.7.0. Don't allow them to increase.\n assert_(self.funccalls == 7, self.funccalls)\n assert_(self.gradcalls == 5, self.gradcalls)\n\n # Ensure that the function behaves the same; this is from Scipy 0.7.0\n assert_allclose(self.trace[3:5],\n [[0. , -0.52489628, 0.48753042],\n [0. 
, -0.52489628, 0.48753042]],\n atol=1e-14, rtol=1e-7)", "title": "" }, { "docid": "2df4246157105ee99fb48006a832e80f", "score": "0.49394545", "text": "def loglikelihood(self, x, previous=False):\n # testing of original fct: MC integrate must be one: mean(p(x_i)) * volume(where x_i are uniformely sampled)\n # for i in xrange(3): print mean([cma.likelihood(20*r-10, dim * [0], None, 3) for r in rand(10000,dim)]) * 20**dim\n # TODO: test this!!\n # c=cma.fmin...\n # c[3]['cma'].loglikelihood(...)\n\n if previous and hasattr(self, 'lastiter'):\n sigma = self.lastiter.sigma\n Crootinv = self.lastiter._Crootinv\n xmean = self.lastiter.mean\n D = self.lastiter.D\n elif previous and self.countiter > 1:\n raise _Error('no previous distribution parameters stored, check options importance_mixing')\n else:\n sigma = self.sigma\n Crootinv = self._Crootinv\n xmean = self.mean\n D = self.D\n\n dx = array(x) - xmean # array(x) - array(m)\n n = self.N\n logs2pi = n * log(2 * np.pi) / 2.\n logdetC = 2 * sum(log(D))\n dx = np.dot(Crootinv, dx)\n res = -sum(dx**2) / sigma**2 / 2 - logs2pi - logdetC / 2 - n * log(sigma)\n if 1 < 3: # testing\n s2pi = (2 * np.pi)**(n / 2.)\n detC = np.prod(D)**2\n res2 = -sum(dx**2) / sigma**2 / 2 - log(s2pi * abs(detC)**0.5 * sigma**n)\n assert res2 < res + 1e-8 or res2 > res - 1e-8\n return res", "title": "" }, { "docid": "2f2bc02cfc7028a163a6dc345c490677", "score": "0.493198", "text": "def test_l_bfgs_b_funjac(self):\n def fun(x):\n return self.func(x), self.grad(x)\n\n retval = optimize.fmin_l_bfgs_b(fun, self.startparams,\n maxfun=self.maxiter)\n\n (params, fopt, d) = retval\n\n assert_allclose(self.func(params), self.func(self.solution),\n atol=1e-6)", "title": "" }, { "docid": "67aaf7f6133bbf0e0a38f35ced275a0b", "score": "0.49313143", "text": "def train(self,X,y,reg=1e-5,num_iters=100):\n \n num_train,dim = X.shape\n theta = np.ones((dim,))\n\n\n # Run scipy's fmin algorithm to run the gradient descent\n theta_opt = scipy.optimize.fmin_bfgs(self.loss, theta, fprime = self.grad_loss, args=(X,y,reg),maxiter=num_iters)\n \n \n return theta_opt", "title": "" }, { "docid": "6af0aaba76f5c9e942089c6ceede0b62", "score": "0.49294364", "text": "def fmin_g(func,start,end,acc,L=None,R=None,ML=None,MR=None):\n if abs(start-end) < abs(acc):\n xM = 0.5*start+0.5*end\n return (xM,start,end)\n phi = (math.sqrt(5.0)-1.0)/2.0\n xML = phi*start+(1-phi)*end\n xMR = phi*end+(1-phi)*start\n if (bool(L is None)!=bool(R is None)):\n #Error occured\n #print(\"error occured\")\n return(None,start,end)\n if L is None:\n L = func(start)\n if R is None:\n R = func(end)\n if ML is None:\n ML = func(xML)\n def fmin_L():\n return fmin_g(func,start,xMR,acc,L=L,R=MR,MR=ML)\n def fmin_R():\n return fmin_g(func,xML,end,acc,L=ML,R=R,ML=MR)\n if L<=ML and R <=ML:\n xM = 0.5*start+0.5*end\n print(\"L={0} R={1} ML={2}\".format(L,R,ML))\n return (xM,start,end)\n if MR is None:\n MR = func(xMR)\n if MR > ML:\n return fmin_L()\n else:\n return fmin_R()", "title": "" }, { "docid": "af986bf410678ea7295e90263f795b74", "score": "0.49291587", "text": "def infhordec(x): \n E = []\n for i in range(x):\n E.append(g[i] + J[i])\n J = min(E) # Bellman's equation is the optimal solution for the cost problem if mu(x) minimizes in the equation. 
", "title": "" }, { "docid": "9bd2895ea6d65a15b468ba41f95ae64b", "score": "0.49269444", "text": "def logprior(param):\n d = -0.5 * np.sum(param**2/(10.0)**2)\n return d", "title": "" }, { "docid": "85816e0c70f7caa4ef2d2183c7bc9805", "score": "0.4925434", "text": "def logreg_coef(model,data):\n intercept = pd.DataFrame({'variable' : 'intercept', 'coefficient' : model.intercept_})\n coefficient = pd.DataFrame({'variable' : data.columns, 'coefficient' : model.coef_.transpose().flatten()})\n coefficient = coefficient.reindex(coefficient.coefficient.abs().sort_values(ascending = False).index)\n return(pd.concat([intercept,coefficient], axis = 0).reset_index(drop = True))", "title": "" }, { "docid": "778c8a94218a224621f02948783100b7", "score": "0.49224034", "text": "def find_min(open_dir, g):\r\n # pr_min_f isn't forward pr_min instead it's the f-value\r\n # of node with priority pr_min\r\n pr_min, pr_min_f = np.inf, np.inf\r\n for n in open_dir:\r\n f = g[n] + problem.h(n)\r\n pr = max(f, 2 * g[n])\r\n pr_min = min(pr_min, pr)\r\n pr_min_f = min(pr_min_f, f)\r\n return pr_min, pr_min_f, min(g.values())", "title": "" }, { "docid": "2a1f11c944b04cc23163a217cde6371c", "score": "0.4919689", "text": "def grad_log_likelihood(x,m,Q,sm2,grad_mat):\n den = sm2 + Q[0,0]\n def grad(m_d,Q_d,sd=0):\n dm0 = grad_mat['{}'.format(m_d)][0,0]\n dQ0 = grad_mat['{}'.format(Q_d)][0,0]\n return (x-m[0,0])*dm0/den\\\n +(x-m[0,0])**2/(2*den**2)*(dQ0+sd)\\\n -0.5/den*(dQ0+sd)\n return np.array([[grad('m_mlam','Q_mlam')],\\\n [grad('m_gamma','Q_gamma')],\\\n [grad('m_sl2','Q_sl2')],\\\n [grad('m_sm2','Q_sm2',1)],\\\n [grad('m_sd2','Q_sd2')],\\\n ])", "title": "" }, { "docid": "28e8f2aaf0bfd6ba97ea150aca9e3337", "score": "0.49106988", "text": "def least_squares_SGD(y, tx, gamma, max_iters,batch_size):\n # init parameters\n threshold = 1e-8\n losses = []\n w = np.zeros(tx.shape[1])\n\n # Perform regression on a data batch of size=100\n for iter in range(max_iters):\n for batch_y, batch_tx in batch_iter(y, tx,batch_size):\n gradient = calculate_gradient_mse(batch_y, batch_tx, w)\n # get loss and updated w\n loss = calculate_loss_mse(y, tx, w)\n w = w - gamma * gradient\n # log info\n if iter % 50 == 0:\n print(\"Current iteration={i}, the loss={l}\".format(i=iter, l=loss))\n # converge criteria\n losses.append(loss)\n if len(losses) > 1 and np.abs(losses[-1] - losses[-2]) < threshold:\n break\n print(\"The of loss iteration={i} is {l} \".format(l=calculate_loss_mse(y, tx, w), i=iter))\n return w", "title": "" }, { "docid": "376ac334ff110828709ad01f411a4e80", "score": "0.490798", "text": "def estimate_lognormal(self, data):\r\n\r\n if self.fit_method == 1:\r\n _results = LogNormal().maximum_likelihood_estimate(data,\r\n self.start_time,\r\n self.rel_time)\r\n\r\n self.n_suspensions = _results[3]\r\n self.n_failures = _results[4]\r\n elif self.fit_method == 2:\r\n _results = regression(data, self.start_time, self.rel_time,\r\n dist='lognormal')\r\n\r\n self.rho = _results[3]\r\n self.n_suspensions = _results[4]\r\n self.n_failures = _results[5]\r\n\r\n self.scale[1] = _results[0][0]\r\n self.shape[1] = _results[0][1]\r\n self.variance[0] = _results[1][0] # Scale\r\n self.variance[1] = _results[1][2] # Shape\r\n self.covariance[0] = _results[1][1] # Scale-Shape\r\n self.mle = _results[2][0]\r\n self.aic = _results[2][1]\r\n self.bic = _results[2][2]\r\n\r\n self.calculate_parameter_bounds(data)\r\n self.hazard_function()\r\n self.reliability_function()\r\n self.mean()\r\n\r\n return False", "title": "" }, { "docid": 
"c02ef7ba5c222da6dd67bbc68dc66d4b", "score": "0.49060988", "text": "def f1():\n \n # Initialize parameters\n cur_x = 3 # The algorithm starts at x=3\n rate = 0.01 # Learning rate\n precision = 0.000001 #This tells us when to stop the algorithm\n previous_step_size = 1 #\n max_iters = 10000 # maximum number of iterations\n iters = 0 #iteration counter\n \n df = lambda x: 2*(x+5) #Gradient of our function \n\n while previous_step_size > precision and iters < max_iters:\n prev_x = cur_x\n cur_x = cur_x - rate * df(prev_x) #Grad descent\n previous_step_size = abs(cur_x - prev_x) #Change in x\n iters = iters+1 \n print(\"Iteration\",iters,\"\\n X = \",cur_x) \n \n print(\"The local minimum occurs at\", cur_x)", "title": "" }, { "docid": "43927095b0d2d99dfbcedd831255f79d", "score": "0.49059555", "text": "def cm_check(self, f, line):\n if \"maxgcm\" in line:\n values = next(f) # grab the next line !\n values = values.split()\n maxgcm = int(values[2]) # pull the 3rd value for maxgcm\n self.add(\"maxgcm\", maxgcm)", "title": "" }, { "docid": "0bd132df28032580689144f43dda8870", "score": "0.4892994", "text": "def minimize_func(self, theta):\n # first scale the params based on the errors\n ntheta = (theta * self.fiterrs) + self.fitvals\n self.set_params(dict(zip(self.fitkeys, ntheta)))\n if not np.isfinite(self.lnprior(ntheta)):\n return np.inf\n phases = self.get_event_phases()\n lnlikelihood = marginalize_over_phase(phases, self.template,\n weights=self.weights)[1]\n print lnlikelihood, ntheta\n return -lnlikelihood", "title": "" }, { "docid": "f6ea6ca3baacdf3e2ede3e248eb99f4d", "score": "0.4892423", "text": "def log_prior(self, X):\n X = np.array(X, copy=False, ndmin=2)\n\n #not normalized\n #lp = np.zeros(X.shape[0])\n\n #normalize the prior\n lp = np.log( np.ones(X.shape[0]) / self.prior_volume )\n\n inside = np.all((X > self.min) & (X < self.max), axis=1)\n lp[~inside] = -np.inf\n\n return lp", "title": "" }, { "docid": "a74b522671b72b8edf4c0269af820e9e", "score": "0.48921713", "text": "def find_best_fit( self, r, nr ):\n # For higher sample frequencies the data points becomes horizontal\n # along line Nr=1. To create a more evident linear model in log-log\n # space, we average positive Nr values with the surrounding zero\n # values. (Church and Gale, 1991)\n\n if not r or not nr:\n # Empty r or nr?\n return\n\n zr = [ ]\n for j in range( len( r ) ):\n i = (r[ j - 1 ] if j > 0 else 0)\n k = (2 * r[ j ] - i if j == len( r ) - 1 else r[ j + 1 ])\n zr_ = 2.0 * nr[ j ] / (k - i)\n zr.append( zr_ )\n\n log_r = [ math.log( i ) for i in r ]\n log_zr = [ math.log( i ) for i in zr ]\n\n xy_cov = x_var = 0.0\n x_mean = 1.0 * sum( log_r ) / len( log_r )\n y_mean = 1.0 * sum( log_zr ) / len( log_zr )\n for (x, y) in zip( log_r, log_zr ):\n xy_cov += (x - x_mean) * (y - y_mean)\n x_var += (x - x_mean) ** 2\n self._slope = (xy_cov / x_var if x_var != 0 else 0.0)\n self._intercept = y_mean - self._slope * x_mean", "title": "" } ]
9ec554404ab602d2d537e9d7fe2ae34f
set internal self.params from a Parameters object or a list/tuple of Parameters
[ { "docid": "94a70df51a827fbfd24ce757ea6ce9ca", "score": "0.85300606", "text": "def __set_params(self, params):\n if params is None or isinstance(params, Parameters):\n self.params = params\n elif isinstance(params, (list, tuple)):\n _params = Parameters()\n for _par in params:\n if not isinstance(_par, Parameter):\n raise MinimizerException(self.err_nonparam)\n else:\n _params[_par.name] = _par\n self.params = _params\n else:\n raise MinimizerException(self.err_nonparam)", "title": "" } ]
[ { "docid": "2af869554d507f9e39fd156e99656bad", "score": "0.8027624", "text": "def set_parameters(self, **params):\n with self.parametersLock:\n for param in params:\n setattr(self, param, params[param])", "title": "" }, { "docid": "bc4ea99fdf77f79589f8ebc56bb8a71e", "score": "0.8016268", "text": "def set_params(self, **params):", "title": "" }, { "docid": "e98d842f39cf40978ec334fa9acb6aef", "score": "0.79551965", "text": "def set_parameters(self, params):\n if not isinstance(params, dict):\n try:\n params = params.get_parameters()\n except:\n raise TypeError(\"Wrong data type for set_parameters.\")\n\n assert type(params) == dict\n\n self._params = deepcopy(params)\n # Compute some values on top of the given input parameters\n self.compute_parameters()", "title": "" }, { "docid": "3f60a2c7e8ee402f23bb3c546beb80e6", "score": "0.795332", "text": "def set_params(self, params): \n pass", "title": "" }, { "docid": "08d2923ccf53c7bb4633aee3f2fc4767", "score": "0.79471844", "text": "def SetParameters(self, params):", "title": "" }, { "docid": "08d2923ccf53c7bb4633aee3f2fc4767", "score": "0.79471844", "text": "def SetParameters(self, params):", "title": "" }, { "docid": "08d2923ccf53c7bb4633aee3f2fc4767", "score": "0.79471844", "text": "def SetParameters(self, params):", "title": "" }, { "docid": "08d2923ccf53c7bb4633aee3f2fc4767", "score": "0.79471844", "text": "def SetParameters(self, params):", "title": "" }, { "docid": "08d2923ccf53c7bb4633aee3f2fc4767", "score": "0.79471844", "text": "def SetParameters(self, params):", "title": "" }, { "docid": "08d2923ccf53c7bb4633aee3f2fc4767", "score": "0.79471844", "text": "def SetParameters(self, params):", "title": "" }, { "docid": "08d2923ccf53c7bb4633aee3f2fc4767", "score": "0.79471844", "text": "def SetParameters(self, params):", "title": "" }, { "docid": "eb2a2cf554997ad3f7b32fc32c000d7e", "score": "0.7940367", "text": "def set_params(self, params, values):\r\n\t\tfor i, param in enumerate(params):\r\n\t\t\tself[param] = values[i]", "title": "" }, { "docid": "1a62d184492df03f435f32de7634293e", "score": "0.7871902", "text": "def set_params(self, params, values):\n for i, param in enumerate(params):\n self[param] = values[i]", "title": "" }, { "docid": "31254765a9e5d87bddf22b3de9a7063b", "score": "0.7854949", "text": "def set_params(self, **params):\n for parameter, value in params.items():\n setattr(self, parameter, value)\n return self", "title": "" }, { "docid": "88246772b6806f2c0878c4caaaa734ee", "score": "0.77793884", "text": "def set_params(self, **params) -> BTM:\n for param, value in params.items():\n self.__setattr__(param, value)\n return self", "title": "" }, { "docid": "489278d7dae8b1de17231052f579910c", "score": "0.77616614", "text": "def set_parameters(self,params):\n self.parameters = params", "title": "" }, { "docid": "e69e2045d40f4661728ce4440a8b327d", "score": "0.77615947", "text": "def set_params(self, **parameters):\n for parameter, value in parameters.items():\n setattr(self, parameter, value)\n return self", "title": "" }, { "docid": "e69e2045d40f4661728ce4440a8b327d", "score": "0.77615947", "text": "def set_params(self, **parameters):\n for parameter, value in parameters.items():\n setattr(self, parameter, value)\n return self", "title": "" }, { "docid": "b2810bed97717404c7eff846c374a2ed", "score": "0.77344745", "text": "def set_params(self, **parameters):\n for parameter, value in parameters.items():\n self.__setattr__(parameter, value)\n return self", "title": "" }, { "docid": "66d5cb8b08bbc1924d085b97bd0fe579", "score": 
"0.7729951", "text": "def set_parameters(self, params):\n for name, value in params.iteritems():\n self.set_parameter(name, value)", "title": "" }, { "docid": "b36f8f37dd35280c198e4d289ed5f187", "score": "0.77104175", "text": "def set_parameters(self, **params):\n for k, v in params.items():\n if k in self.parameters:\n setattr(self, k, v)\n else:\n log.warning(\"[{}] unknown parameter provided {} = {}\".format(self.name, k, v))", "title": "" }, { "docid": "f0f09a460960cdbc3a0917615f520af8", "score": "0.76986426", "text": "def set_params(**params):\n # will be useful for storing model information\n raise NotImplementedError", "title": "" }, { "docid": "34e5b6ac1cdf2c55cf5574f4b56670f4", "score": "0.7664566", "text": "def set_params(self, params):\n self.params = params", "title": "" }, { "docid": "395117e6869672fb17f86102d52c3f87", "score": "0.76344115", "text": "def set_params(self, params: dict) -> None:\n \n pass", "title": "" }, { "docid": "bbc715016504255845c8cad125705a40", "score": "0.7621637", "text": "def set_params(self, **params):\n if not params:\n # Simple optimization to gain speed(inspect is slow)\n return self\n valid_params = self.get_params(deep=True)\n from ..externals import six\n for key, value in six.iteritems(params):\n split = key.split('__', 1)\n if len(split) > 1:\n # nested objects case\n name, sub_name = split\n if name not in valid_params:\n raise ValueError('Invalid parameter %s for estimator %s. '\n 'Check the list of available parameters '\n 'with `estimator.get_params().keys()`.' %\n (name, self))\n sub_object = valid_params[name]\n sub_object.set_params(**{sub_name: value})\n else:\n # simple objects case\n if key not in valid_params:\n raise ValueError('Invalid parameter %s for estimator %s. '\n 'Check the list of available parameters '\n 'with `estimator.get_params().keys()`.' 
%\n (key, self.__class__.__name__))\n setattr(self, key, value)\n return self", "title": "" }, { "docid": "6c66bd5699c86e90ceabd385c9478ec9", "score": "0.7562209", "text": "def set_params(self, *single, **params):\n if len(single) > 0:\n self.params = single[0]\n else:\n self.params = params\n return self", "title": "" }, { "docid": "b14945bc29b19a4c7ebe726b11f339df", "score": "0.7544318", "text": "def set_params(self, **params):\n for name, value in params.items():\n if name.startswith(\"scaler__\"):\n assert hasattr(self.scaler, 'set_params'), \\\n \"Trying to set {} without scaler\".format(name)\n self.scaler.set_params(**{name[len(\"scaler__\"):]: value})\n elif name.startswith('layers__'):\n index = int(name[len('layers__'):])\n self.layers[index] = value\n elif name.startswith('initf__'):\n index = int(name[len('initf__'):])\n self.initf[index] = value\n elif name in NET_PARAMS:\n self.net_params[name] = value\n elif name in BASIC_PARAMS:\n setattr(self, name, value)\n else:\n self.train_params[name] = value", "title": "" }, { "docid": "afa07979c99687ac67eeb5760fb0c2ea", "score": "0.7502602", "text": "def setParams(self, **params):\n self.__params.update(params)\n self.paramStateChanged()", "title": "" }, { "docid": "3264c7672028098534a2a82b10eccf8a", "score": "0.7415884", "text": "def _set_params(self, args, kwargs):\n raise NotImplementedError(\"set_params\")", "title": "" }, { "docid": "5bf641e619e4408e0727cca0d33d43be", "score": "0.73267794", "text": "def set_params(self, params):\n\n param_error_msg = \"Error: supplied params must be a 2-tuple (alpha, sigma).\"\n if type(params) == str:\n\n if params not in ICV_PARAM_SELECTION_METHODS:\n sys.exit(\"Error: unrecognized param selection sting %s given.\" \\\n % params)\n\n elif params == 'auto':\n self.params = \\\n (2.42, max(5.06, 0.149 * self.data.shape[0]**(3./8.)))\n\n elif params == 'Savchuk2008':\n self.params = self._Savchuk2008(self.data.shape[0])\n\n elif type(params) == tuple:\n if len(params) != 2:\n sys.exit(param_error_msg)\n else:\n self.params = np.array(params)\n\n elif type(params) == np.ndarray:\n if params.flatten().shape != (2,):\n sys.exit(param_error_msg)\n else:\n self.params = params.flatten()\n\n else:\n sys.exit(\"Error: `params` value not recognized. 
Either specify a method string or a 2-tuple (alpha, sigma)\")", "title": "" }, { "docid": "bdf6fb2ef6ea2783b07bcffb17e7aab9", "score": "0.73008674", "text": "def setParams(self, **kwargs):\n raise NotImplementedError", "title": "" }, { "docid": "009ad6d223bcc272fb3713dfeddd31dc", "score": "0.728719", "text": "def set_parameters(self, parameters, **kwargs):\n pass", "title": "" }, { "docid": "0390c4a44259fd621a5c27ada71b9893", "score": "0.725725", "text": "def set_params(self, param_dict):\n # NOT IMPLEMENTED YET\n pass\n return None", "title": "" }, { "docid": "1a25fd9c98a47ca2209d118fbeffae27", "score": "0.72548175", "text": "def set_params(self,**kwargs):\n for k,v in kwargs.items():\n self.params[k] = v", "title": "" }, { "docid": "b2e579fa7974d78d1f3de3d0de160a14", "score": "0.72537345", "text": "def set_params(self, paramlist, params):\n for k,v in zip(paramlist, params):\n self.model.setParam(k,v)", "title": "" }, { "docid": "c638a601d801ee528078156103702fb3", "score": "0.72353226", "text": "def set_params(self):\n\n raise NotImplementedError", "title": "" }, { "docid": "5c8dc2587ea270d6d5dc1d58644ca235", "score": "0.71970177", "text": "def set_params(self, *, params: Params) -> None:\n super().set_params(params=params)", "title": "" }, { "docid": "43aba87e85e85e10f8eed2463efe7cca", "score": "0.715308", "text": "def set_params(self):\r\n pass", "title": "" }, { "docid": "82d92cb150ae0f2f22efa83a4026a684", "score": "0.7137484", "text": "def update_parameters(self, params):\n if not isinstance(params, dict):\n try:\n params = params.get_parameters()\n except:\n raise TypeError(\"Wrong data type for update_parameters.\")\n\n for key, value in params.items():\n self.__setitem__(key, value)\n # Compute some values on top of the given input parameters\n self.compute_parameters()", "title": "" }, { "docid": "605aa13dcbe8a583bcb8bbc784539a1c", "score": "0.711073", "text": "def setParams(self, ignoreErrors=True, **params):\n for paramName in params:\n try:\n self.paramInfo[paramName][\"set\"](params[paramName])\n except (NotImplementedError, AttributeError) as e:\n print('Unable to set param: {paramName}'.format(paramName=paramName))\n print('\\tReason: {reason}'.format(reason=str(e)))\n # traceback.print_exc()\n except KeyError as e:\n print('Unable to set param: {paramName}'.format(paramName=paramName))\n print('\\tReason: {reason}'.format(reason='This parameter is not valid - it will be ignored.'))\n except Exception as e:\n if ignoreErrors:\n # Print error message, then move on...\n traceback.print_exc()\n else:\n # Stop the train, we've got an error!\n raise e", "title": "" }, { "docid": "24b5f0e2e19bd5bcd253cc6a063b317a", "score": "0.71074474", "text": "def params(self, params):\n\n self._params = params", "title": "" }, { "docid": "24b5f0e2e19bd5bcd253cc6a063b317a", "score": "0.71074474", "text": "def params(self, params):\n\n self._params = params", "title": "" }, { "docid": "11286bf05b34cbf65f41f293de2fa91b", "score": "0.70757836", "text": "def _set_params(self, params):\n if params is None:\n if hasattr(self._model, 'guess') and callable(self._model.guess):\n self._params = self._model.guess(self._data, x=self._x)\n else:\n self._params = None\n else:\n self._params = params\n\n if self._params is not None:\n self._params.__class__ = InteractiveParameters\n self._params.on_noninteractive_update = lambda *args: self.set_params(self._params)\n self._param_pane = self._params.controls(value_callback=lambda *args: self.redraw())\n self._rhs_pane[-1] = self._param_pane\n\n # Clear the 
selector's default value\n self._selector_pane.value = 'None'", "title": "" }, { "docid": "4ccc0e2689b4defe2ff86a523dd306c9", "score": "0.70380586", "text": "def set_parameters(self, params):\n current_idx = 0\n with torch.no_grad():\n for param in self.parameters():\n param.copy_(torch.Tensor(params[current_idx:current_idx+param.nelement()].reshape(param.shape)))\n current_idx += param.nelement()", "title": "" }, { "docid": "adea6b4eb8a2ea7ba1490289d74135f0", "score": "0.70195603", "text": "def set_parameters(self,\n parameters=None):\n\n if parameters is None:\n parameters = OrderedDict()\n\n self.parameters = OrderedDict()\n\n if isinstance(parameters, dict):\n for key, values in six.iteritems(parameters):\n if len(values) == 5:\n self.add_parameter(\n lha_block=values[0],\n lha_id=values[1],\n parameter_name=key,\n parameter_range=[values[3], values[4]],\n morphing_max_power=values[2]\n )\n elif len(values) == 2:\n self.add_parameter(\n lha_block=values[0],\n lha_id=values[1],\n parameter_name=key\n )\n else:\n raise ValueError('Parameter properties has unexpected length: {0}'.format(values))\n\n else:\n for values in parameters:\n assert len(values) == 2, 'Parameter list entry does not have length 2: {0}'.format(values)\n self.add_parameter(values[0], values[1])\n\n # After manually adding parameters, the morphing information is not accurate anymore\n self.morpher = None", "title": "" }, { "docid": "dfd7add61e7f3068d3574aa436a09daa", "score": "0.70190275", "text": "def setParams(self):\n pass", "title": "" }, { "docid": "0e73a4576ef357525e6049870ed05eb0", "score": "0.7003915", "text": "def _init_params(self, params=None):\n if params:\n for k, v in params.items():\n if k != \"self\":\n setattr(self, k, v)", "title": "" }, { "docid": "28c5b419b96ab914d9d81f2b946d3a23", "score": "0.69973135", "text": "def setParameters(self, parameters):\n\t\t# 1.) check if parameter list length is correct\n\t\tif len(parameters) != len(self.parameter_format):\n\t\t\tsel.log.error(\"Parameter list needs to contain exactly %s elements. Not %s!\"\n\t\t\t\t\t\t\t% (len(self.parameter_format), len(parameters)))\n\t\t\treturn\n\t\t# 2.) Check if parameters are correct and put them into the itape files\n\t\tfor ii in range(0, len(parameters)):\n\t\t\tif self._checkParameter(parameters[ii], self.parameter_format[ii]) == None:\n\t\t\t\treturn\n\t\t\tself.param_lookup[ii][0].setValue(self.param_lookup[ii][1], parameters[ii])\n\t\t# 3.) 
Replace parameter list\n\t\tself.param_list = parameters", "title": "" }, { "docid": "3328ad1d60517d3424632045e2f571c9", "score": "0.6983962", "text": "def param_update(self, params):\n self.params = params\n # Update each parameter by name\n [setattr(self, self.param_l[i], params[i]) for i in range(self.dim)]", "title": "" }, { "docid": "ae96730a95ca74275b9029228a364f2f", "score": "0.6958958", "text": "def __set_params(self, params_in):\n self.__params = params_in\n return 0", "title": "" }, { "docid": "3c558cf1784b1dd1e2560944dc57e74f", "score": "0.695762", "text": "def set_parameters(self, editor, parameters):\n for name, value in six.iteritems(parameters):\n try:\n current_value = getattr(editor, name)\n except AttributeError:\n invalid_msg = \"{0} is not a valid parameter\"\n raise ValueError(invalid_msg.format(name))\n \n value = self.coerce_type(value, parameters[name])\n self.type_check(parameters, name, value)\n setattr(editor, name, value)", "title": "" }, { "docid": "7240a117e03639552649117a37dc3cb0", "score": "0.6929051", "text": "def _set_params(self, *args, **kwargs):\n log.trace(\"in _set_params\")\n # Retrieve required parameter.\n # Raise if no parameter provided, or not a dict.\n result = None\n startup = False\n try:\n params = args[0]\n except IndexError:\n raise InstrumentParameterException('Set command requires a parameter dict.')\n\n try:\n startup = args[1]\n except IndexError:\n pass\n\n log.trace(\"_set_params calling _verify_not_readonly ARGS = \" + repr(args))\n self._verify_not_readonly(*args, **kwargs)\n\n for (key, val) in params.iteritems():\n result = self._do_cmd_resp(InstrumentCmds.SET, key, val, **kwargs)\n log.trace(\"_set_params calling _update_params\")\n self._update_params()\n return result", "title": "" }, { "docid": "21e560aa4b2956698d14532db7f68c8f", "score": "0.6926886", "text": "def _set_params(self, args, kwargs):\n self._set_args(args)\n self._set_kwargs(kwargs)\n\n self._invalidate()", "title": "" }, { "docid": "afc6f5f7c326692adde5e8d6e7aa1ddc", "score": "0.68460965", "text": "def _update_params(self, params: dict):\n for name, value in params.items():\n # WARN: this might be potentially risky\n inner_name = '_{}'.format(name)\n assert inner_name in self.__dict__\n assert isinstance(value, type(self.__dict__[inner_name]))\n setattr(self, inner_name, value)", "title": "" }, { "docid": "38b2f7d496774f0acfa797bb93396f94", "score": "0.6828791", "text": "def update_params(self, params):\n pass", "title": "" }, { "docid": "1fbf4e58f636639dccff3ab9136f14ac", "score": "0.6784372", "text": "def construct_params(self, params=None):\r\n\r\n raise NotImplementedError()", "title": "" }, { "docid": "d3b0abf36a0548f2ca862bd70a0a5bcd", "score": "0.6784351", "text": "def set_params(self, **kwargs):\n # todo: manager Thread\n pass", "title": "" }, { "docid": "a826e531434bd8e0b8d42344cfcc4a18", "score": "0.6767251", "text": "def set_params(self, params):\n params = dict_to_namespace(params)\n\n self.params = Namespace()\n self.params.n_ensemble = getattr(params, 'n_ensemble', 5)\n self.params.hls = getattr(params, 'hls', (20, 30, 40))\n self.params.max_iter = getattr(params, 'max_iter', 500)\n self.params.alpha = getattr(params, 'alpha', 0.01)\n self.params.trans_x = getattr(params, 'trans_x', False)", "title": "" }, { "docid": "a11dead44897173b03d0618b346d5a29", "score": "0.67653966", "text": "def __init__(self, params=None):\n if params is not None and isinstance(params, (dict, )):\n self.set_from_dict(params)", "title": "" }, { "docid": 
"67f54ef94af4cd574bb155fd71428a13", "score": "0.6747192", "text": "def _set_params(self, params):\n result = self._ia_client.set_param(params)\n log.info(\"set result = %s\" % str(result))\n\n if result is None:\n # TODO check why self._ia_client.set_param returns None\n return\n\n assert isinstance(result, dict)\n\n # check all requested params are in the result\n for (p, v) in params.items():\n self.assertTrue(p in result)\n\n return result", "title": "" }, { "docid": "3908767f1708bf9b40adf73b542eeb60", "score": "0.67295367", "text": "def set_params(self, params, keys=['theta', 'd', 'a', 'alpha']):\n T = TransformationMatrix.from_numpy(params, keys)\n self.params = params\n if self.is_tensor:\n T.tensor_()\n self.matrix = self.copy_fnc(T.matrix)", "title": "" }, { "docid": "cb8c06916c7b9b33b478b432ba1a52a3", "score": "0.6723054", "text": "def _prepare_parameters(self):\n\n parameters = self.params\n if isinstance(parameters, (list, tuple)):\n names, combinations = parameters\n param_combinations = combinations\n values = zip(*combinations)\n par_dict = {}\n for name, par_values in zip(names, values):\n par_dict[name] = sorted(list(set(par_values)))\n parameters = par_dict\n self.params = parameters\n\n indexed_param_combinations = []\n for comb in combinations:\n one_indexed_par_comb = []\n for name, par_val in zip(names, comb):\n ind_par = self._closest(parameters[name], par_val)\n one_indexed_par_comb.append((ind_par, par_val))\n indexed_param_combinations.append(tuple(one_indexed_par_comb))\n\n elif isinstance(parameters, dict):\n # Sorte parameters values\n par_dict = {}\n for name, values in parameters.items():\n par_dict[name] = sorted(values)\n self.params = par_dict\n indexed_parameters = {}\n for name, values in parameters.items():\n indexed_parameters[name] = zip(range(len(values)), values) \n indexed_param_combinations = list(itertools.product(*indexed_parameters.values())) \n else:\n raise ValueError('Argument `parameters` must be a list or dictionary.')\n\n self.indexed_param_combinations = indexed_param_combinations", "title": "" }, { "docid": "145cf56b0e5ba257c96994eb53b52614", "score": "0.67115813", "text": "def set_params(self, **kwargs):\n self._check_deprecated_params(**kwargs)\n normal_params, cb_params, special_params = {}, {}, {}\n virtual_params = {}\n\n for key, val in kwargs.items():\n if self._is_virtual_param(key):\n virtual_params[key] = val\n elif key.startswith('callbacks'):\n cb_params[key] = val\n self._params_to_validate.add(key)\n elif any(key.startswith(prefix) for prefix in self.prefixes_):\n special_params[key] = val\n self._params_to_validate.add(key)\n elif '__' in key:\n special_params[key] = val\n self._params_to_validate.add(key)\n else:\n normal_params[key] = val\n\n self._apply_virtual_params(virtual_params)\n BaseEstimator.set_params(self, **normal_params)\n\n for key, val in special_params.items():\n if key.endswith('_'):\n raise ValueError(\n \"Something went wrong here. 
Please open an issue on \"\n \"https://github.com/skorch-dev/skorch/issues detailing what \"\n \"caused this error.\")\n setattr(self, key, val)\n\n if cb_params:\n # callbacks need special treatmeant since they are list of tuples\n self._initialize_callbacks()\n self._set_params_callback(**cb_params)\n vars(self).update(cb_params)\n\n # If the net is not initialized or there are no special params, we can\n # exit as this point, because the special_params have been set as\n # attributes and will be applied by initialize() at a later point in\n # time.\n if not self.initialized_ or not special_params:\n return self\n\n # if net is initialized, checking kwargs is possible\n self._validate_params()\n\n ######################################################\n # Below: Re-initialize parts of the net if necessary #\n ######################################################\n\n # if there are module params, reinit module, criterion, optimizer\n # if there are criterion params, reinit criterion, optimizer\n # optimizer params don't need to be checked, as they are virtual\n reinit_module = False\n reinit_criterion = False\n reinit_optimizer = False\n\n component_names = {key.split('__', 1)[0] for key in special_params}\n for prefix in component_names:\n if (prefix in self._modules) or (prefix == 'compile'):\n reinit_module = True\n reinit_criterion = True\n reinit_optimizer = True\n\n module_params = {k: v for k, v in special_params.items()\n if k.startswith(prefix)}\n msg_module = self._format_reinit_msg(\n \"module\", module_params, triggered_directly=True)\n msg_criterion = self._format_reinit_msg(\n \"criterion\", triggered_directly=False)\n msg_optimizer = self._format_reinit_msg(\n \"optimizer\", triggered_directly=False)\n\n # if any module is modified, everything needs to be\n # re-initialized, no need to check any further\n break\n\n if prefix in self._criteria:\n reinit_criterion = True\n reinit_optimizer = True\n\n criterion_params = {k: v for k, v in special_params.items()\n if k.startswith(prefix)}\n msg_criterion = self._format_reinit_msg(\n \"criterion\", criterion_params, triggered_directly=True)\n msg_optimizer = self._format_reinit_msg(\n \"optimizer\", triggered_directly=False)\n\n if not (reinit_module or reinit_criterion or reinit_optimizer):\n raise ValueError(\"Something went wrong, please open an issue on \"\n \"https://github.com/skorch-dev/skorch/issues\")\n\n if reinit_module:\n self._initialize_module(reason=msg_module)\n if reinit_criterion:\n self._initialize_criterion(reason=msg_criterion)\n if reinit_optimizer:\n self._initialize_optimizer(reason=msg_optimizer)\n\n return self", "title": "" }, { "docid": "bf35f1d9b3efb2099bcecd6286ee9cfb", "score": "0.66882664", "text": "def params(self, *args, **kwargs):\r\n if len(args) == 1:\r\n kwargs.update(args[0])\r\n elif len(args) > 0:\r\n raise sa_exc.ArgumentError(\r\n \"params() takes zero or one positional argument, \"\r\n \"which is a dictionary.\")\r\n self._params = self._params.copy()\r\n self._params.update(kwargs)", "title": "" }, { "docid": "103192510777fb8c4ac127f646fb6b94", "score": "0.6672488", "text": "def updateParameters(self, parameters):\r\n\t\treturn", "title": "" }, { "docid": "1386f67d6af3a54968d22c7eeaf97898", "score": "0.66699916", "text": "def set_params(self, params): \n self.base_learner.set_params(**params[\"base_learner\"])", "title": "" }, { "docid": "a9488518f61a317920acd572b4c19e0f", "score": "0.66179377", "text": "def set_attributes(self, params: List[object]=None) ->None:\n if params:\n for k, 
v in params.items():\n if k != 'self':\n setattr(self, k, v)", "title": "" }, { "docid": "fc71991ae7b13b9a02bcf8ca3dfdfd31", "score": "0.66136456", "text": "def setParams(self, paramDict):\n for name, val in paramDict.items():\n if name == 'name':\n self.name = val\n elif name == 'driver':\n self._driver = val\n elif name == 'tax':\n self._taxable = val\n elif name == 'inflation':\n self._inflation = val\n elif name == 'mult_target':\n self._multTarget = val\n elif name == 'multiply':\n self._multiplier = val\n elif name == 'alpha':\n self._alpha = np.atleast_1d(val)\n elif name == 'reference':\n self._reference = val\n elif name == 'X':\n self._scale = val\n elif name == 'depreciate':\n self._depreciate = val\n self.checkInitialization()", "title": "" }, { "docid": "8fc9613fcba7ac6684f79a625a09f0f3", "score": "0.66111773", "text": "def updateParameters(self, parameters):\r\n return", "title": "" }, { "docid": "8fc9613fcba7ac6684f79a625a09f0f3", "score": "0.66111773", "text": "def updateParameters(self, parameters):\r\n return", "title": "" }, { "docid": "8fc9613fcba7ac6684f79a625a09f0f3", "score": "0.66111773", "text": "def updateParameters(self, parameters):\r\n return", "title": "" }, { "docid": "8fc9613fcba7ac6684f79a625a09f0f3", "score": "0.66111773", "text": "def updateParameters(self, parameters):\r\n return", "title": "" }, { "docid": "8fc9613fcba7ac6684f79a625a09f0f3", "score": "0.66111773", "text": "def updateParameters(self, parameters):\r\n return", "title": "" }, { "docid": "bb228e514320c89d427a44eec02997fd", "score": "0.6610467", "text": "def set_parameters(X):", "title": "" }, { "docid": "81dda21e8c7428321cc0b0545a4628f1", "score": "0.6604119", "text": "def set_from_dict(self, params):\n for key in params:\n if hasattr(self, key):\n setattr(self, key, params[key])", "title": "" }, { "docid": "808ba9e3cf2ab015c7d78fb3bfc1ee09", "score": "0.6600274", "text": "def assign(params, **kwargs):\r\n pass", "title": "" }, { "docid": "213b159e9caaf3e9016b7726bf6ff99f", "score": "0.65963167", "text": "def parameters(self, value: Any):\n self._parameters = value", "title": "" }, { "docid": "d2ed651c725e90c8cc7c147106567d3e", "score": "0.6579817", "text": "def __set_param(self, name, value):\n\t\tself.__params.__setitem__(name, value)", "title": "" }, { "docid": "c47bb48e2496481142fe69fed8037abd", "score": "0.6563728", "text": "def setParams(self, params):\n for name, val in params.items():\n if name == 'DiscountRate':\n self._discountRate = val\n elif name == 'tax':\n self._tax = val\n elif name == 'inflation':\n self._inflation = val\n elif name == 'ProjectTime':\n self._projectTime = val + 1 # one for the construction year!\n elif name == 'Indicator':\n self._indicators = val['name']\n self._metricTarget = val.get('target', None)\n activeCf = val['active']\n self._activeComponents = defaultdict(list)\n for request in activeCf:\n try:\n comp, cf = request.split('|')\n except ValueError:\n raise IOError('Expected active components in <Indicators> to be formatted as Component|Cashflow, but got {}'.format(request))\n self._activeComponents[comp].append(cf)\n self.checkInitialization()", "title": "" }, { "docid": "371cc9ea7bdfefbb8eba253a3e4ae4ef", "score": "0.6557801", "text": "def set_parameters(self, parameters):\n if len(parameters) != len(self._parameters):\n raise ValueError(\n 'Wrong size parameter vector, expecting ('\n + str(len(self._parameters)) + ') values.')\n self._parameters = np.array(parameters, copy=True, dtype=float)\n self._cached_rates = None\n self._cached_matrix = 
None", "title": "" }, { "docid": "4f3cfc450b46efff467e314b11e76bcf", "score": "0.6541401", "text": "def __init__(self, parameters):\n self.params = parameters", "title": "" }, { "docid": "0f0598553779b16a2fc6a6932f9b5c35", "score": "0.65380144", "text": "def _set_params_spec(self, spec_params, global_params):\n for item_key, item_params in spec_params.items():\n if isinstance(item_params, _params):\n self[item_key]._set_params(item_params.args, dict(global_params, **item_params.kwargs))\n elif isinstance(item_params, dict):\n self[item_key]._set_params_spec(item_params, global_params)\n else:\n raise ValueError(\"Unsupported parameter specification `%s`\" % type(item_params))", "title": "" }, { "docid": "c0d0f51adb70737af3b517db654f44b7", "score": "0.65322846", "text": "def _set_params(self):\n if not self.query_handler:\n return\n for (param, val_tuple) in self.params_dict.iteritems():\n (p_type, _, p_set, p_err_str) = val_tuple\n try:\n p_set(p_type(self.query_handler.get_param(param)))\n except ValueError, e:\n QtWidgets.QMessageBox.warning(self, 'Wrong parameter type',\n '%s: %s!' % (str(e), p_err_str))\n except KeyError, e:\n # No such param in JSON, pass\n pass", "title": "" }, { "docid": "c420cfcaa71fa4157d467cd238fd1884", "score": "0.65311545", "text": "def params(self, *arg, **kw):\r\n raise NotImplementedError(\r\n \"params() is not supported for INSERT/UPDATE/DELETE statements.\"\r\n \" To set the values for an INSERT or UPDATE statement, use\"\r\n \" stmt.values(**parameters).\")", "title": "" }, { "docid": "7ef258e713dccfdf7eb89c3ef93bd20e", "score": "0.65247166", "text": "def setParameters(self, p):\n for nodeNr in range(len(self.nodes)):\n for parameterName in self.nodes[nodeNr].parameters.keys():\n self.nodes[nodeNr].parameters[parameterName].value=p[str(nodeNr)+parameterName]", "title": "" }, { "docid": "67db64b5edbea422c58f1b9a81e2cba8", "score": "0.651911", "text": "def updateParameters(self, parameters):\n return", "title": "" }, { "docid": "67db64b5edbea422c58f1b9a81e2cba8", "score": "0.651911", "text": "def updateParameters(self, parameters):\n return", "title": "" }, { "docid": "67db64b5edbea422c58f1b9a81e2cba8", "score": "0.651911", "text": "def updateParameters(self, parameters):\n return", "title": "" }, { "docid": "b48cf5c9e1cd470dec91ef8bfd542448", "score": "0.6513755", "text": "def set(self, *args, **kwargs):\n self._set_params(args, kwargs)", "title": "" }, { "docid": "08a71426d7788fa4605a92f2480f7071", "score": "0.6513513", "text": "def resetparams(self, parameters):\n try:\n utils.update_dictionary_items(self.params,parameters)\n except AttributeError:\n # Variable self.params does not exist, so not updated\n # Create an empty set of params for future reference\n self.params = {}", "title": "" }, { "docid": "eadafa6d769a0c39f1eb1e2d915034df", "score": "0.6499835", "text": "def _set_params(self, *args, **kwargs):\n try:\n params = args[0]\n except IndexError:\n raise InstrumentParameterException('Set command requires a parameter dict.')\n\n self._verify_not_readonly(*args, **kwargs)\n\n for (key, val) in params.iteritems():\n log.debug(\"KEY = %s VALUE = %s\", key, val)\n\n if(key in ConfirmedParameter.list()):\n # We add a write delay here because this command has to be sent\n # twice, the write delay allows it to process the first command\n # before it receives the beginning of the second.\n response = self._do_cmd_resp(Command.SET, key, val, write_delay=0.2)\n else:\n response = self._do_cmd_resp(Command.SET, key, val, **kwargs)\n\n log.debug(\"set 
complete, update params\")\n self._update_params()", "title": "" }, { "docid": "4da76007a306ea072c35b1d7d48cb5ae", "score": "0.64943093", "text": "def _set_from_params(self, params):\n\n self._goal_x = params[\"goal\"][\"x\"]\n self._goal_y = params[\"goal\"][\"y\"]\n self._goal_psi = params[\"goal\"][\"psi\"]", "title": "" }, { "docid": "5602fd55457463587df63904841e3875", "score": "0.6485536", "text": "def params(self, values):\n raise NotImplementedError()", "title": "" }, { "docid": "52a41537d757e88af18f861bdb7b2c79", "score": "0.64744776", "text": "def update_params(self) -> None:\n raise NotImplementedError", "title": "" }, { "docid": "57bb24bc522cd17453a5d4d0f4467170", "score": "0.6473153", "text": "def set_params(self, arg_params, aux_params):\n for exec_ in self.execs:\n exec_.copy_params_from(arg_params, aux_params)", "title": "" }, { "docid": "4c533f2a695f6889b50b0ba10ca000f4", "score": "0.6468461", "text": "def updateParameters(self, parameters):\n\n return", "title": "" }, { "docid": "2c2a607f2c8b849030c13ff3a02a541b", "score": "0.64575356", "text": "def _set_params(self, **kwargs):\n for key in self._needed_kwargs:\n if key in kwargs:\n # pop key-value-pairs!\n setattr(self, key, kwargs.pop(key))\n # return modified kwargs\n return kwargs", "title": "" }, { "docid": "66d8dc73b1a5f99d4af7eff0748503ca", "score": "0.64526224", "text": "def set_parameters(self, graph, parameters):\n with self.transact() as tr:\n tr.set_parameters(graph, parameters)", "title": "" }, { "docid": "27e4aac3f88b7c92abdf34cd9b991572", "score": "0.64404774", "text": "def set_init_parameters(parameters, values):\n if type(parameters) == str:\n parameters = [parameters]\n elif not is_iterable(parameters):\n return\n if type(values) == str or not is_iterable(values):\n values = [values]\n assert len(parameters) == len(values)\n # Make sure that all values and parameters are strings for future comparison\n parameters = [str(p) for p in parameters]\n values = [str(v) for v in values]\n\n change_counter = 0\n for i, par in enumerate(parameters):\n if (not service_vars[par] == values[i]) or (par not in service_vars):\n service_vars[par] = values[i]\n change_counter += 1\n # If nothing new appeared or same values are already stored do nothing\n if change_counter == 0:\n return", "title": "" }, { "docid": "1c331d8788ea82d2db2f3df70536eac3", "score": "0.6438876", "text": "def from_params(cls, params):\r\n result = cls()\r\n result.params = params\r\n return result", "title": "" } ]
3242bfb4350542e64fc09c95bb72fb58
Sets the fontsize of the legend.
[ { "docid": "574f2950504e126970ecef1b51abdb6e", "score": "0.8113846", "text": "def set_legend_fontsize(command):\n try:\n plt.rc('legend', fontsize=int(command))\n except ValueError:\n print \"couldn't convert {} to int.\".format(command)", "title": "" } ]
[ { "docid": "61a6071a42ba742408afd723536ffc16", "score": "0.7425878", "text": "def setcafontsize(fontsize=20):\n ax = pl.gca()\n for tick in ax.xaxis.get_major_ticks():\n tick.label1.set_fontsize(20)\n for tick in ax.yaxis.get_major_ticks():\n tick.label1.set_fontsize(20)", "title": "" }, { "docid": "932c366db22a6903d12cf8a8bdb3e12e", "score": "0.7414001", "text": "def setFontAndLabelSize(base_size=9):\n\tfrom matplotlib import rcParams\n\trcParams['font.size'] = base_size\t#default is 12\n\trcParams['legend.fontsize'] = int(base_size*1.6)\t#default is large \n\t#rcParams['text.fontsize'] = 6\t#deprecated. use font.size instead\n\trcParams['axes.labelsize'] = base_size\t#default is medium\n\trcParams['axes.titlesize'] = int(base_size*1.6)\t#default is large \n\trcParams['xtick.labelsize'] = base_size\t#default is medium\n\trcParams['ytick.labelsize'] = base_size\t#default is medium", "title": "" }, { "docid": "449daeaeb1797c8a4883fd28f81721c9", "score": "0.7123725", "text": "def set_font_size(self, fontsize):\n if output_formatting == 'latex':\n self.set_matplotlib_global_latex_parameters(fontsize=fontsize)\n elif output_formatting == 'matlab':\n self.set_matplotlib_global_matlab_parameters(fontsize=fontsize)", "title": "" }, { "docid": "bac8563cbb95e3f140d11b8722bd8e10", "score": "0.69836456", "text": "def setFontSize(self, fontsize):\n Text.setFontSize(self, fontsize)\n self._resize()", "title": "" }, { "docid": "6db633c5642c4baaa8b0781b7aa16570", "score": "0.68419456", "text": "def setFontSize(self, fontsize):\n if not isinstance(fontsize, (int, float)):\n raise TypeError('fontsize must be numeric')\n if fontsize <= 0:\n raise ValueError('fontsize must be positive')\n\n self._size = fontsize\n self._update({'font size': self._size})", "title": "" }, { "docid": "668caedfc1d32678a749007d2e89efd5", "score": "0.6480325", "text": "def setFontSize(text: unicode, ptSize: int) -> unicode:\n ...", "title": "" }, { "docid": "2aa93cbf97a99037adb1556cafa8af15", "score": "0.6406514", "text": "def size(self, val):\n self.chart_style[\"size\"] = val", "title": "" }, { "docid": "f9c31561dbfd75c76f6f4c490f9ac519", "score": "0.63846284", "text": "def fontSize(self, fontSize):\n self._fontSize = fontSize", "title": "" }, { "docid": "f9c31561dbfd75c76f6f4c490f9ac519", "score": "0.63846284", "text": "def fontSize(self, fontSize):\n self._fontSize = fontSize", "title": "" }, { "docid": "3e4548c7bc6c681787c8e5c3cadd9448", "score": "0.63007694", "text": "def set_figure_params(serif=True, fontsize=9):\n\n params = {\n 'font.serif': ['Times',\n 'Palatino',\n 'New Century Schoolbook',\n 'Bookman',\n 'Computer Modern Roman'] + rcParams['font.serif'],\n 'font.sans-serif': ['Times',\n 'Helvetica',\n 'Avant Garde',\n 'Computer Modern Sans serif'] + rcParams['font.sans-serif'],\n 'font.family': 'serif',\n 'text.usetex': True,\n # Make sure mathcal doesn't use the Times style\n 'text.latex.preamble':\n r'\\DeclareMathAlphabet{\\mathcal}{OMS}{cmsy}{m}{n}',\n\n 'axes.labelsize': fontsize,\n 'axes.linewidth': .75,\n\n 'font.size': fontsize,\n 'legend.fontsize': fontsize,\n 'xtick.labelsize': fontsize * 8 / 9,\n 'ytick.labelsize': fontsize * 8 / 9,\n\n # 'figure.dpi': 150,\n # 'savefig.dpi': 600,\n 'legend.numpoints': 1,\n }\n\n if not serif:\n params['font.family'] = 'sans-serif'\n\n rcParams.update(params)", "title": "" }, { "docid": "19a8a6f7a6c6c9d245bdf37a95ec7b95", "score": "0.6255134", "text": "def setLegendRatio(self, ratio):\r\n self.setLegendPosition(self.legendPosition(), ratio)", "title": "" }, { "docid": 
"19424c851e0a530abe641a0e85f4012a", "score": "0.6227961", "text": "def _setFontSize(self, size):\n value = str(size)\n idx = self._size.findText(value)\n if idx != -1:\n self._size.setCurrentIndex(idx)\n else:\n self._size.setEditText(value)", "title": "" }, { "docid": "dffe27e569829a874dbe8e6eda5bcc44", "score": "0.6205801", "text": "def set_font(self, family: str = \"Yu Gothic\", size: int = 14):\n self.setStyleSheet('font-family: \"{}\"; font-size: {}px;'.format(family, size))", "title": "" }, { "docid": "ddf3636072e30dfa7168c8dc485d01dc", "score": "0.6195247", "text": "def set_fontsize(self, by='scores', custom_sizes=None, \n apply_regularization=True, \n regularization_factor=FONTSIZE_REG_FACTOR):\n\n if custom_sizes is not None:\n assert len(custom_sizes)==len(self.keywords)\n self.fontsizes_norm = np.array(custom_sizes)\n elif by=='scores':\n self.fontsizes_norm = self.scores/self.scores.sum() \n elif by=='constant':\n self.fontsizes_norm = np.full(len(self.keywords), 1)\n else:\n raise ValueError()\n \n #applying regularization\n if apply_regularization:\n self.fontsizes_norm = regularize(self.fontsizes_norm, \n regularization_factor)\n \n #normalize\n self.fontsizes_norm = self.fontsizes_norm/self.fontsizes_norm.sum()\n \n #raise flag indicating that the fontsizes have been modified\n self._flag_fontsizes = True", "title": "" }, { "docid": "e7bef4019584c03d95f364908d29e19d", "score": "0.6172227", "text": "def setup_legend(legend):\n\n # set style\n legendLabelStyle = {'color': '#FFF', 'size': '10pt'}\n # loop through legend items\n for item in legend.items:\n for single_item in item:\n # set style\n if isinstance(single_item, pg.graphicsItems.LabelItem.LabelItem):\n single_item.setText(single_item.text, **legendLabelStyle)", "title": "" }, { "docid": "a8165a02d7dc5eb9c4a0b22887d4d34e", "score": "0.616729", "text": "def set_size(self, new_size):\n # clamp the size\n self.font_size = min(80, max(2, round(new_size)))\n\n # clear all caches\n self.render_char.cache_clear()\n self.render_text.cache_clear()\n for dep in self._dependant_caches:\n dep.cache_clear()\n\n font = pygame.font.Font(self.font_name, self.font_size)\n self.char_size = Pos(font.size(\".\"))\n self.font = font\n return font", "title": "" }, { "docid": "781eb429d6116247f4b21ab89b9fa905", "score": "0.61361545", "text": "def set_font(self):\n size = 0\n if self.data.get(\"size\"):\n try:\n size = self.data[\"size\"].get()\n except tk.TclError:\n size = 0\n if int(size) > 100: size = 0\n if not size: size = 12\n\n if self.data.get(\"font_family\"):\n family = self.data[\"font_family\"]\n self.font_entry.delete(0,tk.END)\n self.font_entry.insert(0,family)\n else:\n family = \"Arial\"\n\n self.test_entry.config(font=(family, size, \"\"))", "title": "" }, { "docid": "f5c35dfc431ca90ab9d58b6c4c7593c6", "score": "0.6060346", "text": "def change_size(self, delta):\n self.set_size(self.font_size + delta)", "title": "" }, { "docid": "fa05241a8f6b73988780e94923e84b13", "score": "0.6048163", "text": "def getLegendLabelFont(self):\n return self.getAttribute('legend_label_font')", "title": "" }, { "docid": "31a1d777fc8e7a6f9e12e5d1c1a88b6a", "score": "0.6025795", "text": "def fl_set_browser_fontsize(ptr_flobject, size):\n _fl_set_browser_fontsize = library.cfuncproto(\n library.load_so_libforms(), \"fl_set_browser_fontsize\",\n None, [cty.POINTER(xfdata.FL_OBJECT), cty.c_int],\n \"\"\"void fl_set_browser_fontsize(FL_OBJECT * ob, int size)\"\"\")\n library.check_if_flinitialized()\n library.verify_flobjectptr_type(ptr_flobject)\n 
i_size = library.convert_to_intc(size)\n library.keep_elem_refs(ptr_flobject, size, i_size)\n _fl_set_browser_fontsize(ptr_flobject, i_size)", "title": "" }, { "docid": "6fe21d321dd1e492c5ed7b37adca6d09", "score": "0.60224885", "text": "def SetFontSizeMapping(*args, **kwargs):\n return _richtext.RichTextHTMLHandler_SetFontSizeMapping(*args, **kwargs)", "title": "" }, { "docid": "28e7e5e75c4da0c4707515b4e157d1fa", "score": "0.59444743", "text": "def update_font_size(self, font=False, size=False):\n if font is not False:\n self.font = font\n if size is not False:\n self.size = size\n self.font_obj = pygame.font.Font(self.font, self.size)", "title": "" }, { "docid": "6ca84c539405f5c4b72862a053d28c2d", "score": "0.59226555", "text": "def setup_mpl(font_size=15):\n mpl.rcParams['font.size'] = font_size\n mpl.rcParams['axes.labelsize'] = 15\n mpl.rcParams['axes.labelpad'] = 10\n\n plt.gcf().set_size_inches(7.2, 7.2)", "title": "" }, { "docid": "9ac619a87d68a35f1a42469fafde1f57", "score": "0.5848525", "text": "def setLegendRect(self, rect):\r\n self.__data.legendRect = rect", "title": "" }, { "docid": "5c78a7074881d1e826386a9865736e22", "score": "0.58405215", "text": "def legend(self):", "title": "" }, { "docid": "a128814d65cef0d590bde910e713bc13", "score": "0.5832805", "text": "def font_size(self):\n return self._label.font_size", "title": "" }, { "docid": "7b63c53a8cd5cda46c175b55f8874b81", "score": "0.58296406", "text": "def figureSetting():\n #set figure linewidth\n plt.rcParams['axes.linewidth']=2\n\n #set font size for labels on axes\n plt.rcParams['axes.labelsize'] = 16\n\n #set size of numbers on x-axis\n plt.rcParams['xtick.labelsize'] = 16\n #set size of numbers on y-axis\n plt.rcParams['ytick.labelsize'] = 16\n\n #set size of ticks on x-axis\n plt.rcParams['xtick.major.size'] = 7\n #set size of ticks on y-axis\n plt.rcParams['ytick.major.size'] = 7\n\n #set size of markers, e.g., circles representing points\n #set numpoints for legend\n plt.rcParams['legend.numpoints'] = 1", "title": "" }, { "docid": "3eac3e01dff9853b79003c3661f65c31", "score": "0.58232653", "text": "def legend(self, **kwargs):\n handles, labels = self.figax[1][0, 0].get_legend_handles_labels()\n args = {\n \"loc\": \"center\",\n \"borderaxespad\": 0.0,\n \"frameon\": False,\n }\n args.update(kwargs)\n self.figax[1][0, -1].legend(handles, labels, **args)", "title": "" }, { "docid": "aef6befb3f2ed53dc40b5aa127264011", "score": "0.5750643", "text": "def set_font():\n # Add Roboto font\n fonts_dir = pkgr.resource_filename(\"gfdlvitals\", \"resources/fonts\")\n\n font_dirs = [fonts_dir]\n font_files = font_manager.findSystemFonts(fontpaths=font_dirs)\n\n for font_file in font_files:\n font_manager.fontManager.addfont(font_file)\n\n # Define fonts and sizes\n matplotlib.rcParams[\"font.family\"] = \"Roboto\"\n matplotlib.rcParams.update({\"font.size\": 14})", "title": "" }, { "docid": "c134d7f6ec021317aa6926a296dd6d22", "score": "0.57484037", "text": "def add_legend(self):\n\n def format_legend(self, leg):\n for itext, text in enumerate(leg.get_texts()):\n text.set_color(self.legend.font_color)\n if self.plot_func not in ['plot_hist', 'plot_bar']:\n leg.legendHandles[itext]. \\\n _legmarker.set_markersize(self.legend.marker_size)\n if self.legend.marker_alpha is not None:\n leg.legendHandles[itext]. 
\\\n _legmarker.set_alpha(self.legend.marker_alpha)\n\n leg.get_title().set_fontsize(self.legend.font_size)\n leg.get_frame().set_facecolor(self.legend.fill_color.get(0))\n leg.get_frame().set_alpha(self.legend.fill_alpha)\n leg.get_frame().set_edgecolor(self.legend.edge_color.get(0))\n leg.get_frame().set_linewidth(self.legend.edge_width)\n\n if self.legend.on and len(self.legend.values) > 0:\n\n # Format the legend keys\n #self.format_legend_values()\n\n # Sort the legend keys\n if 'NaN' in self.legend.values.keys():\n del self.legend.values['NaN']\n\n if self.ref_line.on:\n ref_line, ref_line_legend_text = [], []\n for iref, ref in enumerate(self.ref_line.column.values):\n ref_line += self.legend.values[self.ref_line.legend_text.get(iref)]\n ref_line_legend_text += [self.ref_line.legend_text.get(iref)]\n del self.legend.values[self.ref_line.legend_text.get(iref)]\n else:\n ref_line = None\n if 'fit_line' in self.legend.values.keys():\n fit_line = self.legend.values['fit_line']\n del self.legend.values['fit_line']\n else:\n fit_line = None\n\n if self.axes.twin_x or self.axes.twin_y:\n keys = self.legend.values.keys()\n else:\n keys = natsorted(list(self.legend.values.keys()))\n lines = [self.legend.values[f][0] for f in keys\n if self.legend.values[f] is not None]\n if ref_line is not None:\n keys = ref_line_legend_text + keys\n lines = ref_line + lines\n\n if len(lines) == 0:\n print('Legend contains no elements...skipping')\n return\n\n # Set the font properties\n fontp = {}\n fontp['family'] = self.legend.font\n fontp['size'] = self.legend.font_size\n fontp['style'] = self.legend.font_style\n fontp['weight'] = self.legend.font_weight\n\n if self.legend.location == 0:\n self.legend.obj = \\\n self.fig.obj.legend(lines, keys, loc='upper right',\n title=self.legend.text if self.legend is not True else '',\n bbox_to_anchor=(self.legend.position[1],\n self.legend.position[2]),\n numpoints=self.legend.points,\n prop=fontp)\n format_legend(self, self.legend.obj)\n\n else:\n for irow, row in enumerate(self.axes.obj):\n for icol, col in enumerate(row):\n if self.legend.nleg == 1 and \\\n not(irow == 0 and icol == self.ncol - 1):\n continue\n leg = \\\n col.legend(lines, keys, loc=self.legend.location,\n title = self.legend.text if self.legend is not True else '',\n numpoints=self.legend.points,\n prop=fontp)\n leg.set_zorder(102)\n format_legend(self, leg)", "title": "" }, { "docid": "69fc6c3a0f9b76ab9caf04a22019d857", "score": "0.574725", "text": "def _setup_legend(self):\n leg = r.TLegend(*self.legpos)\n leg.SetFillColor(0)\n leg.SetFillStyle(0)\n leg.SetTextFont(42)\n leg.SetTextSize(0.035)\n leg.SetBorderSize(0)\n\n return leg", "title": "" }, { "docid": "957d060d5badb47a3399bd9279c5b5c7", "score": "0.5738898", "text": "def fontsize(size=None):\n global _fontsize\n if size is not None:\n _fontsize = size\n return _fontsize", "title": "" }, { "docid": "9486af79ac902be016e0c7590d209d0a", "score": "0.57093334", "text": "def set_font(self, font):\n pass", "title": "" }, { "docid": "da957a0116d9c9df063c4919acc31865", "score": "0.5700024", "text": "def legend(self, **kwargs):\n args = {\n \"loc\": \"best\",\n }\n args.update(kwargs)\n self.figax[1][0, 0].legend(**args)", "title": "" }, { "docid": "f6fc03aca73e26cc6d14535cc0d77372", "score": "0.569207", "text": "def setFont(text: unicode, color: java.awt.Color, ptSize: int) -> unicode:\n ...", "title": "" }, { "docid": "8616997ce58836c2d7393c3ff049ed39", "score": "0.5657972", "text": "def _fontChanged(self):\n blocked = 
self._size.blockSignals(True)\n old_size = self._size.currentText()\n family = self._family.currentFont().family()\n sizes = Q.QFontDatabase().pointSizes(family)\n sizes = [str(size) for size in sizes]\n self._size.clear()\n self._size.addItems(sizes)\n self._setFontSize(old_size)\n self._size.blockSignals(blocked)", "title": "" }, { "docid": "789d2e63b2c9917d89108d85055bf68a", "score": "0.56358665", "text": "def label_axes(ax, xlabel, ylabel, fontsize):\n ax.set_xlabel(xlabel, fontsize = fontsize)\n ax.set_ylabel(ylabel, fontsize = fontsize)\n return", "title": "" }, { "docid": "748c626e12af2e3862adb000fd4629d1", "score": "0.56130075", "text": "def set_rcParams(self):\n super().set_rcParams()\n\n plt.rcParams[\"font.family\"] = \"serif\"", "title": "" }, { "docid": "089a1b741b6b8d153bec1dc19c7ecf5a", "score": "0.5585016", "text": "def set_font(\n self,\n font: int,\n scale: int = 0,\n bold: int = 0,\n trans: int = 0,\n scroll: int = 0,\n /,\n ) -> None:", "title": "" }, { "docid": "91d2fe76d8283d3766aa67f480de921a", "score": "0.55841786", "text": "def c_symb_legend(cls, mview, x1, y1, font_size, symb_scale, file, title, sub_title):\n gxapi_cy.WrapMVU._c_symb_legend(GXContext._get_tls_geo(), mview, x1, y1, font_size, symb_scale, file.encode(), title.encode(), sub_title.encode())", "title": "" }, { "docid": "6db2922570a96848da95449ecd8b2153", "score": "0.5579555", "text": "def FontSetSize(Size: int) -> None:\n pass", "title": "" }, { "docid": "3341d26ef0ab4fdaee4699a145bcb394", "score": "0.5567199", "text": "def addLegend():\n box_y = -.50*0.65\n return plt.legend(loc='upper center', bbox_to_anchor=(0.5, box_y),\n fontsize=legtx_size, frameon=True, ncol=4)", "title": "" }, { "docid": "3a8c28b252ea532198e550faaf421c92", "score": "0.55503744", "text": "def style(self, i, j, **kwargs):\n attributes = {}\n for k,v in kwargs.items():\n if k in (\"font\", \"fontname\"):\n attributes[\"font_name\"] = v\n elif k == \"fontsize\":\n attributes[\"font_size\"] = v\n elif k in (\"bold\", \"italic\", \"align\"):\n attributes[k] = v\n elif k == \"fontweight\":\n attributes.setdefault(\"bold\", BOLD in v)\n attributes.setdefault(\"italic\", ITALIC in v)\n elif k == \"lineheight\":\n attributes[\"line_spacing\"] = v * self._label.font_size\n elif k == \"fill\":\n attributes[\"color\"] = [int(ch*255) for ch in v]\n else:\n attributes[k] = v\n self._dirty = True\n self._label.begin_update()\n self._label.document.set_style(i, j, attributes)", "title": "" }, { "docid": "b8dcda6b6d9c580ffcc1c490289f81e3", "score": "0.55388415", "text": "def legend_place(self):\n self.ax1.legend(loc=\"upper right\", ncol=2)", "title": "" }, { "docid": "b62581c257faa2276aba400a1ef7551c", "score": "0.5526164", "text": "def on_font_style_font_set(self, fbtn):\n self.client.set_string(KEY('/style/font/style'), fbtn.get_font_name())", "title": "" }, { "docid": "9ee0d855113c56a5957c69bb037f1ab9", "score": "0.5523606", "text": "def prop_symb_legend(cls, mview, x1, y1, font_size, symb_scale, base, n_symb, start, increment, title, sub_title):\n gxapi_cy.WrapMVU._prop_symb_legend(GXContext._get_tls_geo(), mview, x1, y1, font_size, symb_scale, base, n_symb, start, increment, title.encode(), sub_title.encode())", "title": "" }, { "docid": "171e939c008a66c2e1838c8ce5dba81d", "score": "0.5510543", "text": "def setFont(self, font):\r\n super().setFont(font)\r\n\r\n self.update_line_number_area_width()", "title": "" }, { "docid": "d7cc882037dd65fa1120421309df5c6b", "score": "0.54745233", "text": "def update_font(self):\n font = 
self.get_font()\n self.get_widget().update_font(font)", "title": "" }, { "docid": "b83101f718040a4c08237a17bd87fe1e", "score": "0.5474328", "text": "def legend(self):\n\n # Set the legend on the top right axis\n ax = self._axes[0, -1]\n ax.legend()", "title": "" }, { "docid": "19042a6d175298a9aae7b6caf04f05f3", "score": "0.5433523", "text": "def fontResize(self, incr=None, actual=None):\n try:\n family, size, style = self.currentFont()\n resize = int(size) + incr if incr else actual\n self.text.config(font=(family, resize, style))\n except:\n my_showerror(self, 'Font', 'Cannot resize current font')", "title": "" }, { "docid": "0f7afa0a02fd98b27de52dcd5e67ab8a", "score": "0.54260075", "text": "def setFont(self, font, log=None):\r\n setAttribute(self, 'font', font, log)", "title": "" }, { "docid": "baf5bfe746e611c1638de2153ac8052d", "score": "0.5415139", "text": "def change_font(event):\n if font_option.get() == 'none':\n my_font = (font_family.get(), font_size.get())\n else:\n my_font = (font_family.get(), font_size.get(), font_option.get())\n\n #change the font style\n input_text.config(font=my_font)", "title": "" }, { "docid": "eccaba933fc09a9fa04c6528b86a891b", "score": "0.5363993", "text": "def set_rich_text_font(self, font, fixed_font):\n\n self.rich_text.set_font(font, fixed_font=fixed_font)", "title": "" }, { "docid": "55260fdec36bd25a40b6724e1b2dc080", "score": "0.5357717", "text": "def font(self, size, bold):\n name = self.fonts[\"name\"]\n if isinstance(size, str):\n size = self.fonts[\"size\"][size]\n return Utils.font(name, size, bold)", "title": "" }, { "docid": "aeea4b9a6939f72a8bbf3a22882e86ac", "score": "0.5346458", "text": "def plot_setup(labels=['X', 'Y'], fsize=14, setlimits=False,\n title=None, legend=True, limits=(0,1,0,1), colorbar=False):\n plt.xlabel(str(labels[0]), fontsize=fsize)\n plt.ylabel(str(labels[1]), fontsize=fsize)\n #fig = plt.gcf()\n #fig.set_size_inches(6, 4)\n if title:\n plt.title(title, fontsize=fsize)\n if legend:\n plt.legend(fontsize=fsize-4)\n if setlimits:\n plt.xlim((limits[0], limits[1]))\n plt.ylim((limits[2], limits[3]))\n if colorbar:\n plt.colorbar()", "title": "" }, { "docid": "648b1c49ba52be91d7bec8314334df0a", "score": "0.5341836", "text": "def SetFont(self, font):\n wxPanel.SetFont(self, font)\n self.SetLabel(self.label)\n self.Draw(wxClientDC(self))", "title": "" }, { "docid": "6413503ddce5d7f76864d39d0675a2be", "score": "0.53366446", "text": "def _plot_legend(fig, ax):\n box = ax.get_position()\n ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])\n ax.legend(loc='upper left', bbox_to_anchor=(1.02, 1), framealpha=0)", "title": "" }, { "docid": "0df8759e24dbe60be56a4d05c5428367", "score": "0.53337634", "text": "def set_hint_text_font_size(self, font_size: float) -> None:\n\n if self.mode != \"round\":\n Animation(\n _hint_text_font_size=font_size, duration=0.2, t=\"out_quad\"\n ).start(self)", "title": "" }, { "docid": "f00c186f21853e8d80021d89fc85378a", "score": "0.53329694", "text": "def _on_change(self):\n font = self.__generate_font_tuple()\n self._example_label.configure(font=font)", "title": "" }, { "docid": "759402f2d3b8bc7fdb788e0063dcee7a", "score": "0.5325875", "text": "def fit_fontsize(self, heightlimit):\n self.ctx.set_font_size(heightlimit)\n asc, desc, height, _, _ = self.font_extents()\n self.ctx.set_font_size(int(heightlimit * heightlimit / height))\n return self.font_extents()", "title": "" }, { "docid": "0bdb4f5e10e28668d74f2dea59d3ae32", "score": "0.53253627", "text": "def FontSetSizeMedium() -> None:\n 
pass", "title": "" }, { "docid": "b0e0ccc32ae84f2f95d50c69208ea164", "score": "0.5324703", "text": "def sizeHint(self, option, index):\n size = QtCore.QSize(option.rect.width(), option.fontMetrics.lineSpacing() * 5)\n return size", "title": "" }, { "docid": "66e9acfd7b3edd747f6dcae114e650a7", "score": "0.53113306", "text": "def _jiggleFontSize(self):\n\t\tself.Freeze()\n\t\tself.FontSize += 1\n\t\tself.FontSize -= 1\n\t\tself.Thaw()", "title": "" }, { "docid": "1d973bf6c0a56cfd62fd6a75f059f195", "score": "0.5308444", "text": "def apply_plotting_arguments(ax, legend_loc=None, ncol=1, legend_size=16, \n title=None, xlim=None, ylim=None, **kwargs):\n if legend_loc is not None:\n ax.legend(loc=legend_loc, ncol=ncol, prop={'size':legend_size}, numpoints=1)\n ax.minorticks_on()\n ax.tick_params(which='major', width=2, length=8)\n ax.tick_params(which='minor', width=1, length=4)\n \n if title is not None:\n ax.set_title(title, fontsize=args.title_size)\n \n # change the axis limits\n if xlim is not None:\n ax.set_xlim(*xlim)\n if ylim is not None:\n ax.set_ylim(*ylim)", "title": "" }, { "docid": "9f8e185149e300182581118e653311a6", "score": "0.53077894", "text": "def font_size(self):\n return self._font_size", "title": "" }, { "docid": "4b4b63a5ffaf2dbb3e5c666c5166d6cb", "score": "0.52977455", "text": "def set_fontcolor(self, by='label', colorscale='Set3', \n custom_colors=None):\n \n if by=='label' and (custom_colors is None):\n scales = cl.scales['8']['qual']\n #All colorscales in 'scales.keys()' can be used\n \n assert colorscale in ['Pastel2','Paired','Pastel1',\n 'Set1','Set2','Set3','Dark2','Accent']\n colors = scales[colorscale].copy()\n colors.reverse()\n \n color_mapping={key:colors[i] for i,key in enumerate(self.text_dict)}\n fontcolors = list(map(color_mapping.get, self.labels))\n \n Wordmesh.set_fontcolor(self, custom_colors=fontcolors)\n \n else:\n #change default colorscale to a quantitative one\n colorscale = 'YlGnBu' if (colorscale=='Set3') else colorscale\n Wordmesh.set_fontcolor(self, by=by, colorscale=colorscale,\n custom_colors=custom_colors)", "title": "" }, { "docid": "dc16d40d14fb8cca3b0c11c162561dad", "score": "0.52973455", "text": "def setValue(self, font):\n self._family.setCurrentFont(font)\n self._setFontSize(font.pointSize())\n self._fontChanged()", "title": "" }, { "docid": "e748d73afee8a87e31af5f1de99a0ecb", "score": "0.5295958", "text": "def plot_formatting(self,fam='serif',fam_font='Computer Modern Roman',font_size=14,tick_size=14):\n\t\t\"\"\"like, if you want bold text or not.\t\t\t\t\t\t\t\t \"\"\"\n\t\n\t\tplt.rc('text',usetex=True)\n\t\taxis_font={'family': fam,'serif':[fam_font],'size':font_size}\n\t\tplt.rc('font',**axis_font)\n\t\tplt.rc('font',weight ='bold')\n\t\t#plt.rcParams['text.latex.preamble']=[r'\\boldmath']\n\t\tplt.xticks(fontsize=tick_size)\n\t\tplt.yticks(fontsize=tick_size)", "title": "" }, { "docid": "8155a0909415555e2fad6812c061c84f", "score": "0.5289767", "text": "def setup_font_styles(self):\n self.title_font = QtGui.QFont()\n self.title_font.setBold(True)\n self.title_font.setPixelSize(15)\n\n self.subtitle_font = QtGui.QFont()\n self.subtitle_font.setPixelSize(12)\n self.subtitle_font.setBold(True)\n\n self.text_font = QtGui.QFont()\n self.text_font.setPixelSize(10)\n\n self.monitor_number_font = QtGui.QFont()\n self.monitor_number_font.setPixelSize(14)\n self.monitor_number_font.setBold(True)", "title": "" }, { "docid": "800f2b51befbaa9c7aff1b41e12636e9", "score": "0.52517295", "text": "def plot_legend(ax_in):\n\n purple_patch = 
mpatches.Patch(color='purple', label='Chicken')\n yellow_patch = mpatches.Patch(color='yellow', label='Duck')\n ax_in.legend(handles=[purple_patch, yellow_patch])", "title": "" }, { "docid": "78042bf129695c4bbe574e87c6ae75fd", "score": "0.5250003", "text": "def FontSetSizeLarge() -> None:\n pass", "title": "" }, { "docid": "88752f2e045f655992db30c22e3696dc", "score": "0.5248312", "text": "def setSize(self, newSize, operation='', units=None, log=None):\r\n if units==None: units=self.units#need to change this to create several units from one\r\n setAttribute(self, 'size', val2array(newSize, False), log, operation)", "title": "" }, { "docid": "1045bcbf66eabeb3237d79608534a447", "score": "0.5246121", "text": "def setFieldSize(self, value, operation='', log=None):\r\n setAttribute(self, 'fieldSize', value, log, operation) # call attributeSetter\r", "title": "" }, { "docid": "af8af65bc0610e004de1f12b03d47417", "score": "0.5232166", "text": "def get_label_font_size(max_dim):\n\n label_font_sizes = {1: 8, 2: 7}\n return label_font_sizes[max_dim] if max_dim in label_font_sizes else 6", "title": "" }, { "docid": "6d842a31cdd8430aad90b259e1089383", "score": "0.52212363", "text": "def FontSetSizeSmall() -> None:\n pass", "title": "" }, { "docid": "cc258a009769889638a415b2a852c024", "score": "0.5215327", "text": "def getViewFonts() -> Dict:\n plotFonts: Dict = {}\n plotFonts[\"suptitle\"] = 14\n plotFonts[\"title\"] = 12\n plotFonts[\"axisLabel\"] = 12\n plotFonts[\"axisTicks\"] = 12\n plotFonts[\"legend\"] = 12\n return plotFonts", "title": "" }, { "docid": "2c8c736727d34278852f6e3b838173a3", "score": "0.52112734", "text": "def set_rcparams(width=6.69291, fontsize=16, for_article=True, for_beamer=False):\n\n height = width / 1.618\n\n if for_article or for_beamer:\n params = {\n #'backend': 'pdf',\n 'axes.labelsize': fontsize,\n 'font.size': fontsize,\n 'figure.figsize': (width, height),\n 'legend.fontsize': fontsize,\n 'axes.titlesize': fontsize,\n 'xtick.labelsize': fontsize,\n 'ytick.labelsize': fontsize,\n 'xtick.major.pad': fontsize,\n 'xtick.major.pad': fontsize,\n 'text.usetex': True,\n 'font.sans-serif' : 'Helvetica Neue',\n 'font.family': 'sans-serif',\n 'image.cmap' : 'viridis',\n 'image.interpolation' : 'bilinear',\n 'image.resample' : False }\n #'font.serif': 'Times New Roman',\n #'font.sans-serif': 'Times New Roman'}\n# 'ps.usedistiller': 'xpdf'}\n\n if for_beamer:\n# params['font.family'] = 'sans-serif'\n preamble = r'''\\usepackage[cm]{sfmath}'''\n plt.rc('text.latex', preamble=preamble)\n\n if for_article or for_beamer:\n plt.rcParams.update(params)", "title": "" }, { "docid": "ae6d16fc36f1838b9c4e65f2c1e62fa2", "score": "0.5202016", "text": "def _set_size(self):\n if 'Size' in self.anno_df.columns: return\n w_label = (self.anno_df.XMax - self.anno_df.XMin).values\n h_label = (self.anno_df.YMax - self.anno_df.YMin).values\n self.anno_df['Size'] = w_label * h_label", "title": "" }, { "docid": "ea3ed0c6646304cbc65da8a554adcbcc", "score": "0.5182702", "text": "def open_legend(self, legend, props):\n pass", "title": "" }, { "docid": "b01d96f4ef316c11a84a0abd38fb8a80", "score": "0.51737106", "text": "def set_layer_size(self, new_size):\n self.hidden_layer_size = new_size", "title": "" }, { "docid": "2d3e4db9733d2723f80792e2566bacec", "score": "0.51683617", "text": "def OnSpinCtrl_InsetLabelsTextSize(self, event):\r\n self.ChangeNumericGeneralPlotOption ( event, 'InsetLabelsTextSize')", "title": "" }, { "docid": "48330a731897f1cd6237e512adf31e38", "score": "0.516547", "text": "def 
set_pen_size(self, pen_size):\n self.pen_size = pen_size", "title": "" }, { "docid": "395b04c9afce74afd5dc7e6016163ad3", "score": "0.51513004", "text": "def get_font_size(self):\r\n while self.font_size > 12:\r\n self.date_label.config(font=(\"SFUIText\", self.font_size, \"bold\"))\r\n self.date_label.update()\r\n self.date_label_width = self.date_label.winfo_width()\r\n self.date_label_height = self.date_label.winfo_height()\r\n if self.date_label_width > self.target_width or self.date_label_height > self.target_height:\r\n self.font_size -= 1\r\n else:\r\n #self.logger.debug(f'Target widget width {self.target_width}')\r\n #self.logger.debug(f'Real widget width {int(self.date_label_width)}')\r\n #self.logger.debug(f'Target widget height {self.target_height}')\r\n #self.logger.debug(f'Real widget height {int(self.date_label_height)}')\r\n break", "title": "" }, { "docid": "05e599b681085353ed6540a9a0082df7", "score": "0.5137848", "text": "def set_style_pers():\n\n from cycler import cycler\n matplotlib.rcParams.update(\n {\n 'axes.titlesize': 25, # axe title\n 'axes.labelsize': 20,\n 'axes.prop_cycle': cycler('color',\n ['#0D8295', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b', '#e377c2',\n '#7f7f7f', '#bcbd22', '#17becf']), # lines colors\n 'lines.linewidth': 3,\n 'lines.markersize': 150,\n 'xtick.labelsize': 15,\n 'ytick.labelsize': 15,\n 'font.family': 'Century Gothic'\n }\n )", "title": "" }, { "docid": "b2d3ce09df24fe4ae07934317b96fa26", "score": "0.51367974", "text": "def legend_artist(self, legend, orig_handle,\n fontsize, handlebox):\n xdescent, ydescent, width, height = self.adjust_drawing_area(\n legend, orig_handle,\n handlebox.xdescent, handlebox.ydescent,\n handlebox.width, handlebox.height,\n fontsize)\n artists = self.create_artists(legend, orig_handle,\n xdescent, ydescent, width, height,\n fontsize, handlebox.get_transform())\n\n # create_artists will return a list of artists.\n for a in artists:\n handlebox.add_artist(a)\n\n # we only return the first artist\n return artists[0]", "title": "" }, { "docid": "affabdcb6673a119b15664d18fd2b4b5", "score": "0.5135309", "text": "def GetConstrainedFontSize(self, string, vtkTextProperty, p_int, p_int_1, p_int_2, p_int_3):\n ...", "title": "" }, { "docid": "a0938260376dad3462e215147ca4a1bc", "score": "0.51311845", "text": "def _show_legend(ax):\n leg = ax.legend(loc=1, shadow=True, fancybox=True, labelspacing=0.2,\n borderpad=0.15)\n ltext = leg.get_texts()\n llines = leg.get_lines()\n\n from matplotlib.artist import setp\n setp(ltext, fontsize='small')\n setp(llines, linewidth=1)", "title": "" }, { "docid": "d04cd6497edbb0ffa69011fefba662e3", "score": "0.5120697", "text": "def changeValue(self, value):\r\n self.parent.textSizeChange(value)", "title": "" }, { "docid": "22c002587e8df76c4167de877b452aaa", "score": "0.511946", "text": "def set_font(font):\n if font not in plt.rcParams[\"font.family\"]:\n font_manager.fontManager.ttflist.extend(\n font_manager.createFontList(font_manager.findSystemFonts()))\n plt.rcParams[\"font.family\"] = font", "title": "" }, { "docid": "4a71336027c4e28a6568c6e11c50db8c", "score": "0.5116817", "text": "def _drawLegend(self):\n treatAsData = self._findOption(drawoptions.TreatAsData, default=drawoptions.TreatAsData())\n legendPosition = self._findOption(drawoptions.LegendPosition, default=drawoptions.LegendPosition(doDraw=True))\n if legendPosition.doDraw():\n if legendPosition.hasUserLimits():\n xLow,yLow,xHigh,yHigh = legendPosition.calculateLegendLimits()\n opt = \"brNDC\"\n else:\n 
xLow,yLow,xHigh,yHigh = self._calculateLegendLimits()\n opt = \"br\"\n #print xLow,yLow,xHigh,yHigh\n leg = ROOT.TLegend(xLow,yLow,xHigh,yHigh, \"\", opt)\n leg.SetFillStyle(1001)\n leg.SetFillColor(ROOT.kWhite)\n for effRej in self.effRejGraphs:\n graph = effRej.getGraph()\n opt = \"LF\"\n leg.AddEntry(graph,effRej.title, opt)\n leg.Draw()\n #store the legend in the histogram collection\n self.legend = leg\n return", "title": "" }, { "docid": "42a7c99834fe9435bfac7727363d8b5b", "score": "0.51054686", "text": "def apply_conf(rc):\n rc('text', usetex=True)\n font = {'family' : 'serif',\n 'serif':['Times'],\n 'weight' : 'bold',\n 'size' : 11}\n rc('font', **font)", "title": "" }, { "docid": "f548b6a9b30f8a7d1a27a18ec65ad604", "score": "0.5105193", "text": "def setFontMetrics():\n global SEQUENCEFONT\n global SEQUENCEFONTMETRICS\n global SEQUENCEFONTCHARWIDTH\n global SEQUENCEFONTCHARHEIGHT\n global SEQUENCEFONTEXTRAWIDTH\n global SEQUENCETEXTXCENTERINGOFFSET\n global SEQUENCETEXTYCENTERINGOFFSET\n SEQUENCEFONT = QFont(\"Monaco\")\n if hasattr(QFont, 'Monospace'):\n SEQUENCEFONT.setStyleHint(QFont.Monospace)\n SEQUENCEFONT.setFixedPitch(True)\n SEQUENCEFONTH = int(PATH_BASE_WIDTH / 3.)\n SEQUENCEFONT.setPixelSize(SEQUENCEFONTH)\n SEQUENCEFONTMETRICS = QFontMetricsF(SEQUENCEFONT)\n SEQUENCEFONTCHARWIDTH = SEQUENCEFONTMETRICS.width(\"A\")\n SEQUENCEFONTCHARHEIGHT = SEQUENCEFONTMETRICS.height()\n SEQUENCEFONTEXTRAWIDTH = PATH_BASE_WIDTH - SEQUENCEFONTCHARWIDTH\n SEQUENCEFONT.setLetterSpacing(QFont.AbsoluteSpacing,\n SEQUENCEFONTEXTRAWIDTH)\n SEQUENCETEXTXCENTERINGOFFSET = SEQUENCEFONTEXTRAWIDTH / 4.\n SEQUENCETEXTYCENTERINGOFFSET = PATH_BASE_WIDTH * 0.6", "title": "" }, { "docid": "3cefbca08bc94c41e8b4284fde1cc095", "score": "0.5103869", "text": "def getFontSize(self):\n return self._size", "title": "" }, { "docid": "0674d721d952059192b6bdc413e8cf0c", "score": "0.5103313", "text": "def getPlotFonts() -> Dict:\n plotFonts: Dict = {}\n plotFonts[\"suptitle\"] = 18\n plotFonts[\"title\"] = 16\n plotFonts[\"axisLabel\"] = 16\n plotFonts[\"axisTicks\"] = 14\n plotFonts[\"legend\"] = 14\n return plotFonts", "title": "" }, { "docid": "15014d4c8aeaa59900e59e68ca7ca00c", "score": "0.5094148", "text": "def add_legend(self, ax, cmap, **legend_kwargs):\n leg = create_legend(ax, cmap, np.arange(cmap.N), self.common_unique)\n\n extra = dict(bbox_to_anchor=(0.9, 0.05, 0.1, 0.1), ncol=cmap.N / 2,\n mode='scale', fancybox=False, shadow=False, fontsize=3.0,\n columnspacing=1.0, loc='center right', markerscale=0.7,\n framealpha=1.0, borderpad=0.5, handleheight=0.5,\n frameon=True)\n legend_kwargs.update(extra)\n\n ax.legend(handles=leg, **legend_kwargs)", "title": "" }, { "docid": "4dadcc6be7431e35d3438c706ccd4c80", "score": "0.5087669", "text": "def set_up_graph (x_label, y_label, is_legend):\n # Change size and font of tick labels\n fontsize = 12\n ax = gca()\n for tick in ax.xaxis.get_major_ticks():\n tick.label1.set_fontsize(fontsize)\n tick.label1.set_fontweight('bold')\n for tick in ax.yaxis.get_major_ticks():\n tick.label1.set_fontsize(fontsize)\n tick.label1.set_fontweight('bold')\n # Set the axis labels\n xlabel(x_label, fontsize=16, fontweight='bold')\n ylabel(y_label, fontsize=16, fontweight='bold')\n if is_legend:\n # Make the legend\n legend (fontsize = 14)", "title": "" }, { "docid": "9e028e9e3fd865c00bf1f76da7212756", "score": "0.50805175", "text": "def setSize(self, value, operation='', log=None):\r\n setAttribute(self, 'size', value, log, operation) # calls attributeSetter\r", "title": "" }, { 
"docid": "6c42a72957389b873d4fd0b3a0f3910a", "score": "0.5064782", "text": "def legend_text(ax, x, y, text, **kwargs):\n\n tr = ax.get_xlim()\n yr = ax.get_ylim()\n\n plt.text(tr[0] + (tr[1]-tr[0]) * x, yr[0] + (yr[1]-yr[0]) * y,\n text, **kwargs)", "title": "" }, { "docid": "1fa0b5062f67a67b3d0d36951c7b493b", "score": "0.506106", "text": "def __Font(self):\n font = askfont(self, \"ABCD abcd\", title='Font', family=self.__textFont['family'], size=self.__textFont['size'], weight=self.__textFont['weight'], slant=self.__textFont['slant'], underline=self.__textFont['underline'], overstrike=self.__textFont['overstrike'])\n if font:\n font_ = Font(family=font['family'], size=font['size'], weight=font['weight'], slant=font['slant'], underline=font['underline'], overstrike=font['overstrike'])\n self.textBox.config(font=font_)\n self.__textFont = font", "title": "" }, { "docid": "96d7e87d19c5f4a62e708892f3cdb2eb", "score": "0.5048518", "text": "def set_font(font):\n global _font\n _font = font", "title": "" } ]
3f97a90acbb37dfb912487589401e8d8
Serialize a dict of cloudinit templates. It will serialize the names of cloudinit templates, thus allowing nailgun to request a particular version of every template to be rendered during provisioning.
[ { "docid": "2fcca3281cf15d1f30a48e690ffb7a6e", "score": "0.8214958", "text": "def serialize_cloud_init_templates(cls, release):\n cloud_init_templates = {}\n for k in (consts.CLOUD_INIT_TEMPLATES.boothook,\n consts.CLOUD_INIT_TEMPLATES.cloud_config,\n consts.CLOUD_INIT_TEMPLATES.meta_data):\n cloud_init_templates[k] = '{0}_fuel_{1}_{2}.jinja2'.format(\n k, release.environment_version,\n release.operating_system.lower())\n return cloud_init_templates", "title": "" } ]
[ { "docid": "0d781a4bebda31651e06ba83460827b4", "score": "0.58914036", "text": "def templates(self):\r\n params = {\"f\" : \"json\"}\r\n exportURL = self._url + \"/templates\"\r\n return self._con.get(path=exportURL,\r\n params=params, token=self._token)", "title": "" }, { "docid": "cb9a37f8a0da15441910060f312c259f", "score": "0.5867848", "text": "def set_templates(self):\n templates = {\n\n \"os.linux\": \"Template OS Linux SNMPv2\",\n \"os.win\": \"Template OS Windows SNMPv2\",\n \"net.cisco\": \"Template Net Cisco IOS SNMPv2\",\n \"net.ubnt\": \"Template Net Ubiquiti AirOS SNMPv1\",\n \"net.mikrotik\": \"Template Net Mikrotik SNMPv2\",\n \"net.juniper\": \"Template Net Juniper SNMPv2\",\n \"net.brocade.fc\": \"Template Net Brocade FC SNMPv2\",\n \"net.arista\": \"Template Net Arista SNMPv2\",\n \"net.alcatel\": \"Template Net Alcatel Timetra TiMOS SNMPv2\",\n \"net.extreme\": \"Template Net Extreme EXOS SNMPv2\",\n \"net.huawei.vrp\": \"Template Net Huawei VRP SNMPv2\",\n \"net.dlink.des72\": \"Template Net D-Link DES 7200 SNMPv2\",\n \"net.dlink\": \"Template Net D-Link DES_DGS Switch SNMPv2\",\n \"net.qtech\": \"Template Net QTech QSW SNMPv2\",\n \"net.dell\": \"Template Net Dell Force S-Series SNMPv2\",\n \"net.mellanox\": \"Template Net Mellanox SNMPv2\",\n \"net.qlogic\": \"Template Net Intel_Qlogic Infiniband SNMPv2\",\n \"net.tplink\": \"Template Net TP-LINK SNMPv2\",\n \"net.foundry\": \"Template Net Brocade_Foundry Nonstackable SNMPv2\",\n \"net.brocade\": \"Template Net Brocade_Foundry Nonstackable SNMPv2\",\n \"net.brocade.stack\": \"Template Net Brocade_Foundry Stackable SNMPv2\",\n \"net.netgear\": \"Template Net Netgear Fastpath SNMPv2\",\n \"net.hp.comware\": \"Template Net HP Comware HH3C SNMPv2\",\n \"net.hp\": \"Template Net HP Enterprise Switch SNMPv2\",\n \"server.dell\": \"Template Server Dell iDRAC SNMPv2\",\n \"server.supermicro\": \"Template Server Supermicro Aten SNMPv2\",\n \"server.ibm\": \"Template Server IBM IMM SNMPv1\",\n \"server.hp\": \"Template Server HP iLO SNMPv2\",\n \"server.cisco\": \"Template Server Cisco UCS SNMPv2\",\n\n }\n for search_string, template_name in templates.items():\n\n if search_string in get_hostname_from_path(self.path):\n # print(\"Found match {} in {}\".format(search_string, template))\n template_id = self.zapi.get_id('template', item=template_name)\n if template_id:\n template = {\n \"name\": template_name,\n \"templateid\": template_id\n }\n self.params['templates'].append(template)\n else:\n raise ZabbixNotFoundException(\n \"No such template found: {}\".format(template_name))\n break", "title": "" }, { "docid": "83f290256089b8a0a6b66aafb7a89e12", "score": "0.5631751", "text": "def tmpl_get_info(self, client, kwargs_dict):\n id = self._get_arg(\"template_id\", kwargs_dict)\n tmpl = client.collections.templates(id)\n tmpl.reload(attributes='operating_system')\n result = {\n 'operating_system': tmpl.operating_system['product_name'],\n 'name': tmpl.name\n }\n return result", "title": "" }, { "docid": "37bbba13114fe8604e40ceb8d49dbf7b", "score": "0.5495", "text": "def _makeTemplate(descriptors_dict):\n template = {'data': []}\n for key in descriptors_dict:\n template['data'].append({'name': key, 'value': descriptors_dict[key]})\n return {'template': template}", "title": "" }, { "docid": "a113aea4f575dda373bc74fed028e1cb", "score": "0.54393315", "text": "def create_template_values(name, spec):\n\n host_url, full_url = get_urls(spec)\n # All we need for template rendering, alphabetically listed\n template_values = {\n \"auth\": 
spec[\"auth\"],\n \"authentication_plugin_cookie_secret\": base64.urlsafe_b64encode(\n os.urandom(32)\n ).decode(),\n \"full_url\": full_url,\n \"host_url\": host_url,\n \"ingress_annotations\": json.dumps(spec[\"routing\"][\"ingressAnnotations\"]),\n \"jupyter_server\": spec[\"jupyterServer\"],\n \"jupyter_server_app_token\": spec[\"auth\"].get(\"token\", os.urandom(32).hex()),\n \"jupyter_server_cookie_secret\": os.urandom(32).hex(),\n \"name\": name,\n \"oidc\": spec[\"auth\"][\"oidc\"],\n \"path\": os.path.join(\"/\", spec[\"routing\"][\"path\"].rstrip(\"/\")),\n \"pvc\": spec[\"storage\"][\"pvc\"],\n \"routing\": spec[\"routing\"],\n \"storage\": spec[\"storage\"],\n }\n\n return template_values", "title": "" }, { "docid": "a4614ade3e9a6653e3439f8a1411264f", "score": "0.5425902", "text": "def tofile(self, file):\n tpls = [page_to_dict(x) for x in self.templates]\n json.dump({'templates': tpls}, file)", "title": "" }, { "docid": "0799e53b2e0989cc28a13b7fa8ca3977", "score": "0.5398179", "text": "def cloudformation_template(definitions):\n template = Template()\n for document in definitions:\n template.add_resource(DefinitionTroposphereAdapter(document))\n for resource in document.get_complimentary_cfn_resources():\n template.add_resource(resource)\n\n return template", "title": "" }, { "docid": "80ebd00989262c1ae7efb6d3f29d0695", "score": "0.5390497", "text": "def get_all_templates(self):\n conn = None\n data = {'status': 'KO'}\n try:\n conn = lite.connect(self.database_file)\n data['templates'] = database.get_all_templates(conn)\n data['status'] = 'OK'\n except IPOLBlobsDataBaseError as ex:\n self.logger.exception(\"DB error while reading all the templates\")\n print(\"Failed reading all the templates. Error: {}\".format(ex))\n except Exception as ex:\n self.logger.exception(\"*** Unhandled exception while reading all the templates\")\n print(\"*** Unhandled exception while reading all the templates. 
Error: {}\".format(ex))\n finally:\n if conn is not None:\n conn.close()\n return json.dumps(data).encode()", "title": "" }, { "docid": "206c9a17abb0a3ab6b3e8e8822b3463f", "score": "0.5368829", "text": "def get_crud_template_dict():\n return CRUD_TEMPLATE_DICT", "title": "" }, { "docid": "e6ea5c5a65e3b8ec6c29b1665d6774f9", "score": "0.52457017", "text": "def push_cf_templates_to_s3(template_dir='cloudformation'):\n\n abs_template_dir = os.path.abspath(template_dir)\n\n templates = []\n\n for file in os.listdir(abs_template_dir):\n\n if 'cf-template.json' != file:\n templates.append(os.path.join(abs_template_dir, file))\n\n print('Uploading templates:')\n print(templates)\n\n\n s3_conn = S3Connection(config.AWS_API_KEY, config.AWS_SECRET_KEY)\n\n for bucket_name in config.AWS_CF_S3_BUCKETS:\n bucket = s3_conn.get_bucket(bucket_name)\n\n for k in bucket.get_all_keys():\n k.delete()\n\n for t in templates:\n k = Key(bucket)\n k.key = os.path.basename(t)\n k.set_contents_from_filename(os.path.abspath(t))\n\n print('Upload complete')", "title": "" }, { "docid": "fec09712a1fec4837a42283d264f76a9", "score": "0.5244598", "text": "def translate_templates(env, loader, settings, verbose=False, debug=False):\n languages = {}\n res = {}\n\n locale_dir = pkg_resources.resource_filename(__name__, 'locale')\n\n for lang in pkg_resources.resource_listdir(__name__, 'locale'):\n lang_dir = os.path.join(locale_dir, lang)\n if not os.path.isdir(lang_dir):\n if debug:\n sys.stderr.write(\"Not a directory: {!r}\\n\".format(lang_dir))\n continue\n if verbose:\n languages[lang] = 1\n\n translations = Translations.load(locale_dir, [lang], settings['gettext_domain'])\n env.install_gettext_translations(translations)\n\n for template_file in loader.list_templates():\n if template_file.endswith('.swp'):\n continue\n template = env.get_template(template_file)\n translated = template.render(settings=settings)\n\n if not template_file in res:\n res[template_file] = {}\n res[template_file][lang] = translated.encode('utf-8')\n\n if debug:\n sys.stderr.write(\"Lang={!s} :\\n{!s}\\n\\n\".format(lang, translated.encode('utf-8')))\n\n if verbose:\n print(\"\\nLanguages : {!r}\\nGenerated templates : {!r}\\n\".format(\n sorted(languages.keys()), sorted(res.keys())))\n\n return res", "title": "" }, { "docid": "94a7704f21347dc4f1612a74c18961e6", "score": "0.52026105", "text": "def _json(self):\n response = {\n 'template': {\n 'name': self.template.rec_name,\n 'id': self.template.id,\n 'list_price': self.list_price,\n },\n 'code': self.code,\n 'description': self.description,\n }\n return response", "title": "" }, { "docid": "aae329d9a3eb3ad3bb6a2f8e66b9daeb", "score": "0.51913154", "text": "def get_instance_template_names(request):\n print('Finding Orchestrate templates by name {name}'.format(name=request.name))\n result = compute.instanceTemplates().list(\n project=request.project,\n filter='name = {name}-*'.format(name=request.name),\n ).execute()\n names = [item['name'] for item in result.get('items', [])]\n return names", "title": "" }, { "docid": "22d8a9ee4d0c77c952394a567fe1bb73", "score": "0.5178449", "text": "def list_init_cfg_templates():\n all_templates = list()\n\n try:\n db_templates = Template.query.filter(Template.type == 'init-cfg')\n for t in db_templates:\n db_template = dict()\n db_template['name'] = t.name\n db_template['description'] = t.description\n db_template['type'] = t.type\n all_templates.append(db_template)\n\n except SQLAlchemyError as sqe:\n print('Could not list init-cfg templates')\n print(sqe)\n 
finally:\n return all_templates", "title": "" }, { "docid": "86c724669f1edca81cd983a8711dc859", "score": "0.5178123", "text": "def get_children_templates(pvc_enabled=False):\n children_templates = {\n \"service\": \"service.yaml\",\n \"ingress\": \"ingress.yaml\",\n \"statefulset\": \"statefulset.yaml\",\n \"configmap\": \"configmap.yaml\",\n \"secret\": \"secret.yaml\",\n }\n if pvc_enabled:\n children_templates[\"pvc\"] = \"pvc.yaml\"\n\n return children_templates", "title": "" }, { "docid": "f9f95818989f97444126e548ca9730cf", "score": "0.5165234", "text": "def templates(self):\n results = self._connect.get(\"templates\")\n return [Template(self._connect, data['id'], data, Configuration, User)\n for data in results]", "title": "" }, { "docid": "31c6abfd4fe80e17c7ff23bf5428093d", "score": "0.5132547", "text": "def prepare_template_data(self):\n return {}", "title": "" }, { "docid": "8fd8ae20b3889c4f1812a896d3bc41f9", "score": "0.51243794", "text": "def customise_cloudinit(tenant, job):\n cpus = job.launch.instance.cpus\n ip_addr = tenant.public_ip\n domain = tenant.domain\n d = {'ip_addr': ip_addr, 'cpus': cpus, 'domain': domain}\n\n filein = open(ProvisionerConfig().cloudinit_file)\n src = Template(filein.read())\n\n result = src.substitute(d)\n return result", "title": "" }, { "docid": "f1aec0e6081d4dc18afc1fd461b810b1", "score": "0.5122537", "text": "def index(self, **kwargs):\n user = kwargs.get('user')\n templates = user.templates.order_by(Template.label.asc())\n\n return response([template.to_dict() for template in templates])", "title": "" }, { "docid": "f8601eb997ab9e258c2fa07955f9cddf", "score": "0.510873", "text": "def templates(self):\n return self._templates", "title": "" }, { "docid": "f8601eb997ab9e258c2fa07955f9cddf", "score": "0.510873", "text": "def templates(self):\n return self._templates", "title": "" }, { "docid": "57e0c38643a0a645be7930038308ed45", "score": "0.5105398", "text": "def templater(\n template: Text = typer.Argument(\n ...,\n help=\"Path to Jinja2 template (absolute, or relative to user home)\",\n show_default=False,\n ),\n env_filter: Text = typer.Option(\n None,\n \"--filter\",\n \"-f\",\n help=\"Environment variable filter (ignored when mapping is taken from JSON file)\",\n show_default=False,\n ),\n mapping: Text = typer.Option(\n None,\n \"--mapping\",\n \"-m\",\n help=\"path to JSON mappings (absolute, or relative to user home)\",\n show_default=False,\n ),\n write: bool = typer.Option(\n False,\n \"--write\",\n \"-w\",\n help=\"Write out templated file alongside Jinja2 template\",\n ),\n) -> None:\n mappings = {}\n if mapping:\n mappings.update(makester.templater.get_json_values(mapping))\n else:\n mappings.update(makester.templater.get_environment_values(token=(env_filter)))\n\n log.info(\"Template mapping values sourced:\\n%s\", json.dumps(mappings, indent=2))\n\n makester.templater.build_from_template(mappings, template, write_output=write)", "title": "" }, { "docid": "82a16060a84e023a9c3e3411c8dd13c2", "score": "0.5101036", "text": "def initialize_vm_and_template_names(request, storage):\n self = request.node.cls\n\n self.test_templates = [\n \"{0}_{1}\".format(\n storage_helpers.create_unique_object_name(\n self.__class__.__name__, config.OBJECT_TYPE_TEMPLATE\n )[:33], \"single\"\n ),\n \"{0}_{1}\".format(\n storage_helpers.create_unique_object_name(\n self.__class__.__name__, config.OBJECT_TYPE_TEMPLATE\n )[:35], \"both\"\n )\n ]\n self.vm_names = [\n \"{0}_{1}\".format(\n storage_helpers.create_unique_object_name(\n self.__class__.__name__, 
config.OBJECT_TYPE_VM\n ), \"from_single\"\n ),\n \"{0}_{1}\".format(\n storage_helpers.create_unique_object_name(\n self.__class__.__name__, config.OBJECT_TYPE_VM\n ), \"from_both\"\n )\n ]", "title": "" }, { "docid": "50d874e353b81bf058ffc4eb190ba601", "score": "0.5100742", "text": "def _generate_template_dict(dict_ips, equipment):\n\n key_dict = dict()\n\n # TODO Separate differet vendor support if needed for gateway redundancy\n key_dict['VLAN_NUMBER'] = dict_ips['vlan_num']\n key_dict['VLAN_NAME'] = dict_ips['vlan_name']\n key_dict['IP'] = dict_ips[equipment].get('ip')\n key_dict['USE_GW_RED'] = dict_ips['gateway_redundancy']\n key_dict['GW_RED_ADDR'] = dict_ips['gateway']\n key_dict['GW_RED_PRIO'] = dict_ips[equipment].get('prio')\n key_dict['CIDR_BLOCK'] = dict_ips['cidr_block']\n key_dict['NETWORK_MASK'] = dict_ips['mask']\n key_dict['NETWORK_WILDMASK'] = dict_ips['wildmask']\n key_dict['IP_VERSION'] = dict_ips['ip_version']\n key_dict['FIRST_NETWORK'] = dict_ips['first_network']\n\n if dict_ips['is_vxlan']:\n key_dict['VXLAN'] = dict_ips.get('is_vxlan')\n key_dict['VXLAN_ANYCAST_IP'] = utils.get_local_tunnel_ip(equipment.id)\n\n if 'vrf' in dict_ips.keys():\n key_dict['VRF'] = dict_ips['vrf']\n\n if 'dhcprelay_list' in dict_ips.keys():\n key_dict['DHCPRELAY_LIST'] = dict_ips['dhcprelay_list']\n else:\n key_dict['DHCPRELAY_LIST'] = []\n # key_dict[\"ACL_IN\"] = \"\"\n # key_dict[\"ACL_OUT\"] = \"\"\n\n return key_dict", "title": "" }, { "docid": "29d935e65e23a7e7688972397ff90543", "score": "0.50983274", "text": "def save_templates(self, templates):\r\n for template in templates:\r\n points = self._preprocess([(p[0], p[1]) for p in template.points])\r\n self._templates.append(self.Template(name=template.name, points=points))\r\n\r\n # Save them to our \"database\" of known templates of shapes\r\n with open(self._TEMPLATES_FILE, 'w') as out:\r\n json.dump([dict(t._asdict()) for t in self._templates], out)", "title": "" }, { "docid": "1da4f656efb07144451365a255584738", "score": "0.50593185", "text": "def _publish(self):\n print(f'Loading template...')\n template_str, template_type, template_data = self._load_template(self.template)\n\n # Convert from SAM to CFN if desired\n template_data = self._normalize_template_format(template_data)\n\n if self.verbose:\n print(f'Stack template \"{os.path.abspath(self.template)}\": ')\n print('-----------------------------------------------------------------------')\n print(dump_cfn_template_yaml(template_data))\n print('-----------------------------------------------------------------------')\n\n # Process each included template in order\n for include_template in self.include_templates:\n print(f'Loading included template \"{os.path.abspath(include_template)}\"...')\n include_str, include_type, include_data = self._load_template(include_template)\n\n # We must run \"aws cloudformation package\" on the included template to expand\n # references to local resources (like a CodeUri of \"./deployment.zip\") before\n # we can apply transforms. Applying transforms may require normalizing\n # from SAM to CFN and that will fail if \"./deployment.zip\" is still in the\n # template. 
It doesn't hurt to run \"cloudformation package\" again later in\n # this function, since it will compute the same resource names the second time\n # and skip uploading them based on S3 ETag.\n print(f'Packaging included template \"{os.path.abspath(include_template)}\"...')\n p_include_str, p_include_type, p_include_data = self._aws_cfn_package_and_upload_extras(include_str)\n\n # Convert from SAM to CFN if desired\n p_include_data = self._normalize_template_format(p_include_data)\n\n if self.verbose:\n print(f'Included template \"{os.path.abspath(include_template)}\": ')\n print('-----------------------------------------------------------------------')\n print(dump_cfn_template_yaml(p_include_data))\n print('-----------------------------------------------------------------------')\n\n print(f'Including resources from \"{os.path.abspath(include_template)}\"...')\n template_data = self._apply_includes(template_data, p_include_data)\n\n # If we applied includes, dump the template data back to a string for later use\n if self.include_templates:\n if self.verbose:\n print(f'Stack template \"{os.path.abspath(self.template)}\" after includes applied: ')\n print('-----------------------------------------------------------------------')\n print(dump_cfn_template_yaml(template_data))\n print('-----------------------------------------------------------------------')\n\n if len(template_data.get('IncludedResources', {})) > 0:\n raise CaricaCfnToolsError(\n 'The following IncludedResources did not match a resource in any included '\n 'templates: ' + ', '.join(template_data['IncludedResources'].keys()))\n\n del template_data['IncludedResources']\n\n if template_type == 'yaml':\n template_str = dump_cfn_template_yaml(template_data)\n else:\n template_str = dump_cfn_template_json(template_data)\n\n print(f'Packaging template resources...')\n p_template_str, p_template_type, p_template_data = self._aws_cfn_package_and_upload_extras(template_str)\n\n print(f'Uploading template...')\n template_key = self._upload_template(p_template_str)\n print(f'Template uploaded at s3://{self.bucket}/{template_key}')\n\n # Return the full HTTPS URL to the template in the S3 bucket\n return get_s3_https_url(self.region, self.bucket, template_key)", "title": "" }, { "docid": "63a8403c2d5b824deb2845438b9d6cf1", "score": "0.50262487", "text": "def cloud_config_dict(self):\n return {\n \"url\": self.generate_temp_url(),\n }", "title": "" }, { "docid": "402242e62392880a94defc2c3fd15e4a", "score": "0.50137264", "text": "def inject_template_vars():\n\n if \"ADDITIONAL_ADMIN_PAGES\" in current_app.config:\n additionalAdminPages = current_app.config['ADDITIONAL_ADMIN_PAGES']\n else:\n additionalAdminPages = None\n\n tableNames = []\n for t in db.metadata.tables:\n tableNames.append(t)\n\n questionnairesSystem = []\n\n if path.exists(current_app.root_path + \"/questionnaires\"):\n for q in listdir(current_app.root_path + \"/questionnaires\"):\n if q.endswith(\".json\"):\n questionnairesSystem.append(q.replace(\".json\", \"\"))\n\n tableNames = sorted(tableNames)\n questionnairesLive = current_app.page_list.get_questionnaire_list(True)\n questionnairesLiveUntagged = sorted(current_app.page_list.get_questionnaire_list())\n questionnairesSystem = sorted(questionnairesSystem)\n\n return dict(\n additionalAdminPages=additionalAdminPages,\n tableNames=tableNames,\n questionnairesLive=questionnairesLive,\n questionnairesLiveUntagged=questionnairesLiveUntagged,\n questionnairesSystem=questionnairesSystem,\n 
logGridClicks=current_app.config['LOG_GRID_CLICKS']\n )", "title": "" }, { "docid": "2391256f2e11ca8aae853c2583b8ce76", "score": "0.49814987", "text": "def update_templates(self):\n if self.time > 3:\n self.templates = self.init_templates(self.template)", "title": "" }, { "docid": "9174662b3b5ff3db716f99b47685f7d1", "score": "0.49621838", "text": "def list(self):\n # self._loader = jinja2.FileSystemLoader(self._path)\n # self._env = jinja2.Environment(loader=self._loader, trim_blocks=True)\n ret_loader = {}\n for x in self._loader.list_templates():\n if (unicode(x).endswith(\".yml\")):\n if (unicode(x).split(\"/\")[-1] == \"init.yml\"):\n key = unicode(x)[:-9].replace(\"/\", \".\")\n if(key):\n ret_loader[key] = unicode(x)\n else:\n key = unicode(x)[:-4].replace(\"/\", \".\")\n if(key):\n ret_loader[key] = unicode(x)\n # lib.debug.debug(ret_loader)\n return (ret_loader)", "title": "" }, { "docid": "9180ac9d90afa0de81bf8974a250c353", "score": "0.49323073", "text": "def get_all_oceancd_verification_templates(self):\n response = self.send_get(\n url=self.__base_oceancd_vt_url,\n entity_name=\"oceancdVerificationTemplate\")\n\n formatted_response = self.convert_json(\n response, self.camel_to_underscore)\n\n return formatted_response[\"response\"][\"items\"]", "title": "" }, { "docid": "42d5c926d704b1819ecd599c846f258a", "score": "0.49305764", "text": "def available_templates():\n return listdir(PLUGIN_PRE_COMMIT_TEMPLATE_DIR)", "title": "" }, { "docid": "8e32810ad5575aefb959a5e9988d8c7d", "score": "0.49295282", "text": "def _list_templates(settings):\n for idx, option in enumerate(settings.config.get(\"project_templates\"), start=1):\n puts(\" {0:5} {1:36}\\n {2}\\n\".format(\n colored.yellow(\"[{0}]\".format(idx)),\n colored.cyan(option.get(\"name\")),\n option.get(\"url\")\n ))", "title": "" }, { "docid": "81ce0eb95d6d281214f56faed3384da5", "score": "0.4925827", "text": "async def get_templates(current_user: User = Depends(Authentication.get_current_user_and_bot)):\n return {\"data\": {\"use-cases\": Utility.list_directories(\"./template/use-cases\")}}", "title": "" }, { "docid": "901e60004caff3898f1a461c633616a7", "score": "0.49177292", "text": "def templates(self, templates):\n\n self._templates = templates", "title": "" }, { "docid": "901e60004caff3898f1a461c633616a7", "score": "0.49177292", "text": "def templates(self, templates):\n\n self._templates = templates", "title": "" }, { "docid": "901e60004caff3898f1a461c633616a7", "score": "0.49177292", "text": "def templates(self, templates):\n\n self._templates = templates", "title": "" }, { "docid": "fa655a0aaea15c1541dd2451c5383f3e", "score": "0.49009877", "text": "def _input_templates(self):\n foo = self._config.read([self._pathfile])\n if len(foo) == 1:\n for k, v in self._config.items('paths'):\n self.templates[k] = v\n else:\n raise ValueError(\"Could not read {0}!\".format(self._pathfile))\n return", "title": "" }, { "docid": "7260c91ff4df8e8e9d3b2bd34e929f7c", "score": "0.48978633", "text": "def dumps(self, pretty=True):\n self['AWSTemplateFormatVersion'] = '2010-09-09'\n return json.JSONEncoder(indent=2 if pretty else None,\n sort_keys=True).encode(self)", "title": "" }, { "docid": "c17a76b564795c12689ed8981aef2f98", "score": "0.48819992", "text": "def get_templates_dirs(self):\r\n return [resource_filename(__name__, 'templates')]", "title": "" }, { "docid": "b20a760c2a20bafb119b2d0a904dd7a0", "score": "0.48695523", "text": "def templater(ctx, config, template='k8s/templates/all-in-one.yaml'):\n\n if config[-5:] != '.yaml':\n 
config += '.yaml'\n\n # Get path of tasks.py file to allow independence from CWD\n dir_path = os.path.dirname(os.path.realpath(__file__))\n\n if not os.path.isabs(config):\n config = os.path.join(dir_path, config)\n if not os.path.isabs(template):\n template = os.path.join(dir_path, template)\n\n with open(config, 'r') as stream:\n config_dict = yaml.load(stream)\n\n with open(template, 'r') as myfile:\n template_str = myfile.read()\n\n formatted = format_yaml(template_str, config_dict)\n output_dir = os.path.join(dir_path, 'k8s', config_dict['NAMESPACE'])\n output_path = os.path.join(output_dir, 'all-in-one.yaml')\n if os.path.isfile(output_path):\n print('Deployment config already exists. Aborting.')\n else:\n os.mkdir(output_dir)\n with open(output_path, 'w') as myfile:\n myfile.write(formatted)", "title": "" }, { "docid": "87a88c39590ce52eaa4206d1cf30dd48", "score": "0.48314163", "text": "def req_yaml_template(pip=False, version=True, build=False):\n template_str = '{name}'\n if version:\n template_str += '=={version}' if pip else '={version}'\n if build and not pip:\n if not version:\n template_str += '=*'\n template_str += '={build_string}'\n return template_str", "title": "" }, { "docid": "ba8942010aedb0861dc0c5e2107bd411", "score": "0.48205176", "text": "def dumpTemplates(self):\n print \"Template Tree:\",\n self.rootTemplateNode.dump(1)", "title": "" }, { "docid": "4b32a1bc42c477e881e3cde948983efd", "score": "0.47987002", "text": "def get_template_content(path):\n template_dict = {}\n\n _filename, file_extension = os.path.splitext(path)\n file_extension = file_extension.replace('.', '')\n if file_extension in consts.TEMPLATING_EXTS:\n try:\n template_content = {}\n abs_path = os.path.abspath(os.path.expandvars(path))\n with open(abs_path, 'r') as stream:\n if file_extension in consts.JSON_EXTS:\n template_content = json.load(stream) #nosec\n elif file_extension in consts.YMAL_EXTS:\n template_content = yaml.safe_load(stream) #nosec\n template_dict.update(template_content)\n except Exception as e:\n logger.errorout(\"Error reading templating file\",\n file=path, error=e.message)\n else:\n logger.errorout(\"No templating file found\",\n file=path)\n\n return template_dict", "title": "" }, { "docid": "b279c119d77fc5d7ac1fa6cbd755d19b", "score": "0.47841996", "text": "def get_project_templates(program, project):\n file_format = html.escape(flask.request.args.get(\"format\", \"tsv\"))\n template = utils.transforms.graph_to_doc.get_all_template(\n file_format,\n program=program,\n project=project,\n categories=html.escape(flask.request.args.get(\"categories\", \"\")),\n exclude=html.escape(flask.request.args.get(\"exclude\", \"\")),\n )\n response = flask.make_response(template)\n suffix = \"json\" if file_format == \"json\" else \"tar.gz\"\n response.headers[\n \"Content-Disposition\"\n ] = \"attachment; filename=submission_templates.{}\".format(suffix)\n return response", "title": "" }, { "docid": "39c4e19b2b9e7e773f90e459e0f6564e", "score": "0.47841004", "text": "def get_actions_template_dict():\n return ACTIONS_TEMPLATE_DICT", "title": "" }, { "docid": "8a9479b0496bd10cd33b0d4540f2dfab", "score": "0.47816598", "text": "def build_task_templates(\n self, task_specifications: Dict[str, TaskSpecification]\n ) -> Dict[str, TaskTemplate]:\n task_templates = {}\n for task_name, task_specification in task_specifications.items():\n task_templates[task_name] = self.task_template_classes[task_name](\n task_name, task_specification\n )\n return task_templates", "title": "" }, { "docid": 
"f9aae82ab4c6542b15dc3f02c292a261", "score": "0.4771485", "text": "def get_template_values(self, stac_object: \"STACObject_Type\") -> Dict[str, Any]:\n return OrderedDict(\n [(k, self._get_template_value(stac_object, k)) for k in self.template_vars]\n )", "title": "" }, { "docid": "61156f85e7056a18bc52f8c3dea283e4", "score": "0.4766067", "text": "def templates(self, workspace, table):\n return self.get([workspace, table, \"templates\"])", "title": "" }, { "docid": "8b613184a11ce5b93cc88ad90609333b", "score": "0.47491765", "text": "def stack_template_key_name(blueprint):\n return \"%s-%s.json\" % (blueprint.name, blueprint.version)", "title": "" }, { "docid": "0399666be55c598da2726659422b58cb", "score": "0.4746444", "text": "def get_templates_dirs(self):\n return [resource_filename(__name__, 'templates')]", "title": "" }, { "docid": "0399666be55c598da2726659422b58cb", "score": "0.4746444", "text": "def get_templates_dirs(self):\n return [resource_filename(__name__, 'templates')]", "title": "" }, { "docid": "c901e4575f34057e459e8cc6c728c025", "score": "0.47438046", "text": "def test_serialization(self):\n template = TemplateHandle(\n identifier='ABC',\n base_dir='XYZ',\n workflow_spec=dict(),\n parameters=[\n TemplateParameter(pd.parameter_declaration('A')),\n TemplateParameter(pd.parameter_declaration('B', data_type=pd.DT_LIST)),\n TemplateParameter(pd.parameter_declaration('C', parent='B'))\n ]\n )\n doc = DefaultTemplateLoader().to_dict(template)\n parameters = DefaultTemplateLoader().from_dict(doc).parameters\n assert len(parameters) == 3\n assert 'A' in parameters\n assert 'B' in parameters\n assert len(parameters['B'].children) == 1\n template = DefaultTemplateLoader().from_dict(doc)\n assert template.identifier == 'ABC'\n # The base directory is not materialized\n assert template.base_dir is None\n # Invalid resource descriptor serializations\n with pytest.raises(err.InvalidTemplateError):\n ResourceDescriptor.from_dict(dict())\n with pytest.raises(err.InvalidTemplateError):\n ResourceDescriptor.from_dict({LABEL_ID: 'A', 'noname': 'B'})", "title": "" }, { "docid": "d848455d749a63bbffa666058fc34948", "score": "0.4740226", "text": "def list(self, args):\n try:\n admin = self._context.getAdministrationService()\n enterprise = admin.getCurrentEnterprise()\n templates = enterprise.listTemplates()\n pprint_templates(templates)\n except (AbiquoException, AuthorizationException), ex:\n print \"Error: %s\" % ex.getMessage()", "title": "" }, { "docid": "971030d4262292c57d0925d0d51548ad", "score": "0.4739123", "text": "def template(self) -> typing.Mapping[typing.Any, typing.Any]:\n return self._values.get('template')", "title": "" }, { "docid": "363f97607701cf1840b3045e93d75b3b", "score": "0.4734091", "text": "def templates(self):\n template = lib.EnvGetNextDeftemplate(self._env, ffi.NULL)\n\n while template != ffi.NULL:\n yield Template(self._env, template)\n\n template = lib.EnvGetNextDeftemplate(self._env, template)", "title": "" }, { "docid": "9cf945cfae9ca289dcd44391413c613b", "score": "0.47315523", "text": "def configuration_templates(self):\n return self._configuration_templates", "title": "" }, { "docid": "914603f27f580b7bbb62ef83470f871e", "score": "0.472707", "text": "def get_def_templates_dict_from_list(def_temps_list):\n\n Template = namedtuple('Template', 'data')\n dict = {}\n for num, item in zip(range(len(def_temps_list)), def_temps_list):\n dict[num] = Template(item)\n\n return dict", "title": "" }, { "docid": "edba4549dae73fc14c18ac13d62071f2", "score": "0.47256705", "text": 
"def get_template_names(self):\n names = []\n\n self.template_name_suffix = \"{0}_{1}\".format(\n self.template_name_suffix,\n self.object.type\n )\n\n if hasattr(self.object, '_meta'):\n app_label = self.object._meta.app_label\n object_name = self.object._meta.object_name.lower()\n elif hasattr(self, 'model') and hasattr(self.model, '_meta'):\n app_label = self.model._meta.app_label\n object_name = self.model._meta.object_name.lower()\n\n if self.object.channel:\n long_slug = self.object.channel.long_slug\n\n # site specific template folder\n # sitename/infographics/\n if self.site.id > 1:\n app_label = \"{0}/{1}\".format(self.site, app_label)\n\n # 1. try channel/infographic template\n # opps_infographic/channel-slug/infographic-slug.html\n names.append('{0}/{1}/{2}.html'.format(\n app_label, long_slug, self.kwargs['slug']\n ))\n # 2. try a generic channel template\n # opps_infographic/channel-slug/<model>_detail.html\n names.append('{0}/{1}/{2}{3}.html'.format(\n app_label, long_slug, object_name, self.template_name_suffix\n ))\n\n # 3. try infographic template (all channels)\n # opps_infographic/infographic-slug.html\n names.append('{0}/{1}.html'.format(\n app_label, self.kwargs['slug']\n ))\n\n # The least-specific option is the default <app>/<model>_detail.html;\n # only use this if the object in question is a model.\n if hasattr(self.object, '_meta'):\n names.append(\"%s/%s%s.html\" % (\n self.object._meta.app_label,\n self.object._meta.object_name.lower(),\n self.template_name_suffix\n ))\n elif hasattr(self, 'model') and hasattr(self.model, '_meta'):\n names.append(\"%s/%s%s.html\" % (\n self.model._meta.app_label,\n self.model._meta.object_name.lower(),\n self.template_name_suffix\n ))\n\n return names", "title": "" }, { "docid": "5ba1097349ea58b03bd5761949ab8185", "score": "0.4723949", "text": "def templates(self) -> Path:\n return self._templates_path or self.data / self.suffix_templates", "title": "" }, { "docid": "3a0040a47501fb9897d82fcd01d08d5c", "score": "0.47210553", "text": "def write_config(template, path, verbose=False):\n # Write json to file\n with open(path, \"w\") as file:\n if isinstance(template, dict):\n file.write(json.dumps(template, indent=4))\n else:\n file.write(template)", "title": "" }, { "docid": "f478e8efb8a1d148ff5d2a63ae13dc81", "score": "0.47174114", "text": "def template(self) -> pulumi.Input['ServiceTemplateArgs']:\n return pulumi.get(self, \"template\")", "title": "" }, { "docid": "7af0f8c06982109c2cac5d149a4b3ccb", "score": "0.4715727", "text": "def initNameTemplate(self):\n\n nameTemplate = {\n \"locations\": [\"L\", \"R\", \"M\"],\n \"mirrorMap\": {\n \"L\": \"R\",\n \"R\": \"L\",\n \"M\": \"M\"\n },\n \"separator\": \"_\",\n \"types\": {\n \"default\": \"null\",\n \"Component\": \"\",\n \"ComponentGroup\": \"cmp\",\n \"ComponentInput\": \"cmpIn\",\n \"ComponentOutput\": \"cmpOut\",\n \"Container\": \"\",\n \"Control\": \"an\",\n \"FKControl\": \"fk\",\n \"IKControl\": \"ik\",\n \"MCControl\": \"mc\",\n \"PivotControl\": \"piv\",\n \"Curve\": \"crv\",\n \"HierarchyGroup\": \"hrc\",\n \"Joint\": \"def\",\n \"RefJoint\": \"ref\",\n \"Layer\": \"\",\n \"Locator\": \"loc\",\n \"Transform\": \"xfo\",\n \"CtrlSpace\": \"ctrlspace\",\n \"Space\": \"space\",\n \"OrientationConstraint\": \"oriCns\",\n \"PoseConstraint\": \"poseCns\",\n \"ParentConstraint\": \"parCns\",\n \"PositionConstraint\": \"posCns\",\n \"ScaleConstraint\": \"sclCns\",\n \"KLOperator\": \"klOp\",\n \"CanvasOperator\": \"canvasOp\"\n },\n \"formats\":\n {\n \"default\" : 
[\"component\", \"sep\", \"location\", \"sep\", \"name\", \"sep\", \"type\"],\n \"Container\" : [\"name\"],\n \"Layer\" : [\"container\", \"sep\", \"name\"],\n \"Control\" : [\"location\", \"sep\", \"name\", \"sep\", \"type\"],\n \"CtrlSpace\" : [\"location\", \"sep\", \"name\", \"sep\", \"type\"],\n \"Space\" : [\"location\", \"sep\", \"name\", \"sep\", \"type\"],\n \"FKControl\" : [\"location\", \"sep\", \"name\", \"sep\", \"type\"],\n \"IKControl\" : [\"location\", \"sep\", \"name\", \"sep\", \"type\"],\n \"MCControl\" : [\"location\", \"sep\", \"name\", \"sep\", \"type\"],\n \"Pivot\" : [\"location\", \"sep\", \"name\", \"sep\", \"type\"],\n \"Joint\" : [\"location\", \"sep\", \"name\", \"sep\", \"type\"],\n \"Transform\" : [\"location\", \"sep\", \"name\", \"sep\", \"type\"],\n \"KLOperator\" : [\"location\", \"sep\", \"name\", \"sep\", \"solverName\", \"sep\", \"type\"],\n \"CanvasOperator\" : [\"location\", \"sep\", \"name\", \"sep\", \"solverName\", \"sep\", \"type\"]\n }\n }\n\n return nameTemplate", "title": "" }, { "docid": "df72de50f8cdec59f57c583b543f8e5c", "score": "0.47126195", "text": "def templates_ntp(self, data, tenant_id=None, api_version=\"v2.0\"):\n\n if tenant_id is None and self._parent_class.tenant_id:\n # Pull tenant_id from parent namespace cache.\n tenant_id = self._parent_class.tenant_id\n elif not tenant_id:\n # No value for tenant_id.\n raise TypeError(\"tenant_id is required but not set or cached.\")\n cur_ctlr = self._parent_class.controller\n\n url = str(cur_ctlr) + \"/{}/api/tenants/{}/templates/ntp\".format(api_version,\n tenant_id)\n\n api_logger.debug(\"URL = %s\", url)\n return self._parent_class.rest_call(url, \"post\", data=data)", "title": "" }, { "docid": "eb90ada09909bd299e7185ab59ead110", "score": "0.47067723", "text": "def get_invoice_templates(self):\n return self._invoice_templates", "title": "" }, { "docid": "05e247dfe0457cc1dec7a384a883c4c7", "score": "0.46991655", "text": "def _init_cloudformation_template(self):\n\n if not self.resource_template:\n self.cloudformation_template = None\n self.cloudformation_filetype = None\n return\n\n _, self.cloudformation_filetype = os.path.splitext(self.resource_template)\n\n try:\n resource_template = open(self.resource_template, 'r', errors='replace')\n except FileNotFoundError:\n eprint(\"error: could not find CloudFormation template in: {}\", self.resource_template)\n raise SystemExit(2)\n\n with resource_template:\n try:\n self.cloudformation_template = AwsProvider.TEMPLATE_LOADERS[self.cloudformation_filetype](resource_template, default_region=self.default_region)\n except ValueError as e:\n eprint(\"error: invalid CloudFormation template:\\n{}\", e)\n raise SystemExit(-1)", "title": "" }, { "docid": "faafc1176f83171ae88e3371f48a4f57", "score": "0.4698663", "text": "def ListTemplates(*args):\n return _Interface.Interface_InterfaceModel_ListTemplates(*args)", "title": "" }, { "docid": "14b72aa7a50c532bc30fe09bb5f3254d", "score": "0.46978334", "text": "def _composerTemplatesPath(self):\r\n regConfig = RegistryConfig()\r\n keyName = \"ComposerTemplates\"\r\n \r\n valueCollection = regConfig.read([keyName])\r\n \r\n if len(valueCollection) == 0:\r\n return None\r\n \r\n else:\r\n return valueCollection[keyName]", "title": "" }, { "docid": "ecb3d410865db495b435ade24927843f", "score": "0.46881557", "text": "def response_template_data(pytestconfig: pytest.Config\n\t\t\t ) -> dict[str, primitive | list[primitive |\n\t\t\t\t dict[str, object] | list[object]] | dict[object, object]]:\n\tprereq_path = 
pytestconfig.getoption(\"--response-template\")\n\tif not isinstance(prereq_path, str):\n\t\t# unlike the configuration file, this must be present\n\t\traise ValueError(\"prereqisites path not configured\")\n\n\t# Response keys for api endpoint\n\tresponse_template: dict[\n\t\tstr,\n\t\tlist[dict[str, object] | list[object] | primitive] |\\\n\t\t\tdict[object, object] |\\\n\t\t\tprimitive\n\t\t] |\\\n\tprimitive = None\n\twith open(prereq_path, encoding=\"utf-8\", mode=\"r\") as prereq_file:\n\t\tresponse_template = json.load(prereq_file)\n\tif not isinstance(response_template, dict):\n\t\traise TypeError(f\"Response template data must be an object, not '{type(response_template)}'\")\n\n\treturn response_template", "title": "" }, { "docid": "1fca0b4eaad6acb3bafde211ef82422b", "score": "0.46871", "text": "def template(self) -> pulumi.Output['outputs.ServiceTemplate']:\n return pulumi.get(self, \"template\")", "title": "" }, { "docid": "9395a1314550858d3537f98ce06bee87", "score": "0.46860084", "text": "def set_config_template_paths(self):\n # self.data['paths'] = assure_obj_child_dict(self.data['paths'], 'templates')\n if not 'templates' in self.paths:\n self.paths.templates = {}\n templates = {}\n\n if not 'templates' in self:\n return None\n\n for template in self.templates:\n temp_path = self.templates[template]\n if '$' in temp_path:\n s = string.Template(temp_path)\n temp_path = s.substitute(self['paths'])\n if os.path.isdir(temp_path):\n self.paths.templates[template] = temp_path\n else:\n logging.error(\"Config: Could not set template path for %s, not a directory: %s\" % (template, temp_path))", "title": "" }, { "docid": "9622e639fa32f98beae461c62f21d888", "score": "0.46859026", "text": "def get_demo_templates(self, demo_id):\n data = {\"status\": \"KO\"}\n conn = None\n try:\n conn = lite.connect(self.database_file)\n\n db_response = database.get_demo_templates(conn, demo_id)\n data[\"templates\"] = db_response\n data[\"status\"] = \"OK\"\n\n except IPOLBlobsDataBaseError as ex:\n self.logger.exception(\"Fails obtaining the owned templates from demo #{}\".format(demo_id))\n print(\"Couldn't obtain owned templates from demo #{}. Error: {}\".format(demo_id, ex))\n except Exception as ex:\n self.logger.exception(\"*** Unhandled exception while obtaining the owned templates from demo #{}\"\n .format(demo_id))\n print(\"*** Unhandled exception while obtaining the owned templates from demo #{}. 
Error: {}\" \\\n .format(demo_id, ex))\n finally:\n if conn is not None:\n conn.close()\n return json.dumps(data).encode()", "title": "" }, { "docid": "83d2dc4440591ac621972e7ee81578b5", "score": "0.4680466", "text": "def config(self):\n return cloudpickle.dumps(self.serializers)", "title": "" }, { "docid": "6c803dd8495150d34d8426e5b01e74d2", "score": "0.4668071", "text": "def build_pvc_dict(self, varname=None):\n kind = self.task_vars.get(str(varname) + '_kind')\n if kind:\n kind = self._templar.template(kind)\n create_pv = self.task_vars.get(str(varname) + '_create_pv')\n if create_pv:\n create_pv = self._templar.template(create_pv)\n create_pvc = self.task_vars.get(str(varname) + '_create_pvc')\n if create_pvc:\n create_pvc = self._templar.template(create_pvc)\n if kind != 'object' and create_pv and create_pvc:\n volume, size, _, access_modes = self.build_common(varname=varname)\n storageclass = self.task_vars.get(str(varname) + '_storageclass')\n if storageclass:\n storageclass = self._templar.template(storageclass)\n elif storageclass is None and kind != 'dynamic':\n storageclass = ''\n return dict(\n name=\"{0}-claim\".format(volume),\n capacity=size,\n access_modes=access_modes,\n storageclass=storageclass)\n return None", "title": "" }, { "docid": "d33373129b15765dd0d27b041417a8d6", "score": "0.46636617", "text": "def compare_templates(self, deployed: str, generated: str) -> DiffType:", "title": "" }, { "docid": "2ef304e4391519befa07c6637f1deb48", "score": "0.46608227", "text": "def template_options(self) -> \"ITemplateOptions\":\n return jsii.get(self, \"templateOptions\")", "title": "" }, { "docid": "75f0225ab9a2dd0111129bdf1cc87fd2", "score": "0.46534625", "text": "def generate_templates(self, update=True):\n something_written = False\n if not self.env.config.no_template:\n for kind in self.env.xmlfiles:\n _, write_happend = self.make_or_get_template(\n kind, do_write=True, update=update)\n if write_happend:\n something_written = True\n return something_written", "title": "" }, { "docid": "2e44156aab2690bf7efe58f6e95f70aa", "score": "0.46524787", "text": "def _load_conf_templates(self, name=None, in_path=None):\n if name:\n templates = [{\"name\": name, \"value\": in_path}]\n else:\n templates = self._list_templates(True)\n\n for template in templates:\n conf = self.server_conf.clone()\n conf.set_template(True)\n path = os.path.join(self.templates_path, template[\"name\"])\n if template[\"name\"] not in self._templates_conf:\n conf.add_file(path)\n conf.set_default(path)\n conf.parse()\n self._templates_conf[template[\"name\"]] = conf", "title": "" }, { "docid": "dc3985fcce05590f188d351ce1b92aa5", "score": "0.46462923", "text": "def s3_page_dict(self):\n pages = {}\n for page in self.pages.values():\n pages[page.s3_key] = render(self, page)\n return pages", "title": "" }, { "docid": "6d921046d5bff0cbe097a4c2eac58caa", "score": "0.46372885", "text": "def module_config_template():\n\n template = {\n \"GCEBillingInfo\": {\n \"module\": \"modules.GCE.sources.GCEBillingInfo\",\n \"name\": \"GCEBillingInfo\",\n \"parameters\": {\n 'projectId': 'Blah',\n 'lastKnownBillDate': '01/01/18 00:00', # '%m/%d/%y %H:%M'\n 'balanceAtDate': 100.0, # $\n 'accountName': 'Blah',\n 'accountNumber': 1111,\n 'credentialsProfileName': 'BillingBlah',\n 'applyDiscount': True, # DLT discount does not apply to credits\n 'botoConfig': \"path_to_file\",\n 'locaFileDir': \"dir_for_billing_files\"\n },\n \"schedule\": 24 * 60 * 60,\n }\n }\n\n print(\"GCE Billing Info\")\n pprint.pprint(template)", "title": "" 
}, { "docid": "7bf1238c61c78dc6511e271616253028", "score": "0.46356177", "text": "def __map_os_cluster_template(os_tmplt):\n return ClusterTemplate(\n id=os_tmplt.uuid,\n name=os_tmplt.name,\n image=os_tmplt.image_id,\n keypair=os_tmplt.keypair_id,\n network_driver=os_tmplt.network_driver,\n external_net=os_tmplt.external_network_id,\n floating_ip_enabled=os_tmplt.floating_ip_enabled,\n docker_volume_size=os_tmplt.docker_volume_size,\n server_type=os_tmplt.server_type,\n flavor=os_tmplt.flavor_id,\n master_flavor=os_tmplt.master_flavor_id,\n coe=os_tmplt.coe,\n fixed_net=os_tmplt.fixed_network,\n fixed_subnet=os_tmplt.fixed_subnet,\n registry_enabled=os_tmplt.registry_enabled,\n insecure_registry=os_tmplt.insecure_registry,\n docker_storage_driver=os_tmplt.docker_storage_driver,\n dns_nameserver=os_tmplt.dns_nameserver,\n public=os_tmplt.public,\n tls_disabled=os_tmplt.tls_disabled,\n http_proxy=os_tmplt.http_proxy,\n https_proxy=os_tmplt.https_proxy,\n no_proxy=os_tmplt.no_proxy,\n volume_driver=os_tmplt.volume_driver,\n master_lb_enabled=os_tmplt.master_lb_enabled,\n labels=os_tmplt.labels\n )", "title": "" }, { "docid": "70abe6ae294effbd7aa1cb3739b88e0f", "score": "0.46278247", "text": "def create_template(self):\n template = self.template\n template.add_version('2010-09-09')\n template.add_description(\n \"Onica - ECS Service - (1.0.0)\"\n )\n self.add_ecs_service()", "title": "" }, { "docid": "cec68099e3ce533b6a6b37cd5deea604", "score": "0.46195582", "text": "def main():\n os.chdir(APPENGINE_DIRECTORY)\n\n bundled_change_times = get_file_modified_times('templates')\n first_bundled_time = min(bundled_change_times) if bundled_change_times else 0\n latest_unbundled_time = max(get_file_modified_times('private'))\n if latest_unbundled_time < first_bundled_time:\n print('App Engine templates are up to date.')\n return\n\n print('Building templates for App Engine...')\n\n if not os.path.exists('templates'):\n os.mkdir('templates')\n\n template_names = os.listdir(os.path.join('private', 'templates'))\n pool = multiprocessing.Pool(max(multiprocessing.cpu_count() // 2, 1))\n result = pool.map(build_file, template_names)\n\n if not all(result):\n print('Failed to build App Engine templates.')\n sys.exit(1)\n\n print('App Engine templates built successfully.')", "title": "" }, { "docid": "69b07fc61640f2f5a85521d62fc337ca", "score": "0.46134996", "text": "def get_deployment_template_contents(\n self,\n all_modules: list,\n module_name: str):\n pass", "title": "" }, { "docid": "e1979aa7413433424c411612058a9c89", "score": "0.46108767", "text": "def render_template(cls, template_name_or_list=None, *wp_args, **context):\n template = cls._prefix_template(template_name_or_list or cls._template)\n if getattr(cls, 'ALLOW_JSON', True) and request.is_xhr:\n return jsonify_template(template, _render_func=cls.render_template_func, **context)\n else:\n context['_jinja_template'] = template\n return cls(g.rh, *wp_args, **context).display()", "title": "" }, { "docid": "3599e249da930564d78ce9c98299ae78", "score": "0.46089613", "text": "def _createFilenameTemplates(self):\n \n myDict = {\n 'partSet': 'sets/inputSet.lst',\n 'partFlipSet': 'sets/inputSet__ctf_flip.lst',\n 'volume': self._getExtraPath('volume.hdf'),\n }\n \n self._updateFilenamesDict(myDict)", "title": "" }, { "docid": "62f200d4ffa35f80dddd6712172865fa", "score": "0.46050486", "text": "def get_all_configuration_templates(self):\n url: str = f\"{self.url}/config-template\"\n\n for template in self.send_message_json(\"GET\",\n \"Get configuration 
templates\",\n url):\n yield ConfigurationTemplate(\n self.rb_name,\n self.rb_version,\n template[\"template-name\"],\n template.get(\"description\")\n )", "title": "" }, { "docid": "1ab717977b836083ef1ce5ecad558e83", "score": "0.4602624", "text": "def serialize(self):\n data = copy.deepcopy(self.kwargs)\n keys = (\n \"advanced\",\n \"active\",\n \"summary\",\n \"export\",\n \"effective\",\n \"decision\",\n \"default\",\n \"images\",\n \"html\",\n )\n payload = dict(((k, v) for k, v in data.items() if k in keys))\n for k, v in payload.items():\n if isinstance(v, bool):\n if v == True:\n payload[k] = \"1\"\n else:\n payload[k] = \"\"\n payload[\"effective\"] = data[\"effective\"].strftime(\"%m/%d/%Y\")\n html = data.pop(\"template\")\n payload[\"html\"] = html\n return payload", "title": "" }, { "docid": "5e8fc142ee4c2939dd012d30b3327fd0", "score": "0.46023148", "text": "def template_version(compiler, filename):\n global _templatev\n if filename not in _templatev:\n from os import path\n from fortpy.utility import get_fortpy_templates_dir\n tempath = path.join(get_fortpy_templates_dir(), filename)\n _templatev[filename] = get_fortpy_version(compiler, tempath, attribute=\"codeversion\")\n\n return _templatev[filename]", "title": "" }, { "docid": "9d89ba85b4836e4ff7cb3ebb2cc7b551", "score": "0.46015143", "text": "def __init__(self, templates: List[Dict] = None):\n if templates is None:\n r = requests.get(URL)\n r.raise_for_status()\n templates = r.json()\n self.templates = templates", "title": "" }, { "docid": "510ff5ae73d211446dc9b690f7b1d77f", "score": "0.4594645", "text": "def register_template(self,path,options,overwrite=False):\n l=[]\n from asenzor.widgets import WidgetBox\n if path not in templates or overwrite:\n templates[path]=options\n elif path in templates and not overwrite:\n raise Exception(f\"El template '{path}'ya posee su configuración\")\n\n\n if type(options)==dict:\n for option in options:\n for elem in options[option]:\n\n if isinstance(elem,WidgetBox) and elem.name not in l:\n pass\n elif not isinstance(elem,WidgetBox) and elem[\"name\"] not in l:\n pass\n else:\n raise Exception(f\"El campo '{elem['name']}' ya existe, este deber ser unico\")", "title": "" }, { "docid": "e9aa3d99155a775c445825fa629c64e7", "score": "0.45914528", "text": "def template_version(self):", "title": "" }, { "docid": "6214e5974126ed8d1bb337ebad156ded", "score": "0.4590325", "text": "def render(self, data_dict, template=None):\r\n LOG.debug(\"rendering output as Json via %s\" % self.__module__)\r\n sys.stdout = backend.__saved_stdout__\r\n sys.stderr = backend.__saved_stderr__\r\n return json.dumps(data_dict)", "title": "" }, { "docid": "31690f9d1f2fa09563c249e281cbde87", "score": "0.458902", "text": "def get_task_templates(self, obj):\n templates = Task.objects.filter(is_template=True)\n serializer = TemplateDetailsSerializer(templates, many=True)\n return serializer.data", "title": "" }, { "docid": "ac393c496a8cb7b14e3413897901fe70", "score": "0.45876276", "text": "def index_templator(parts, opts):\n script_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))\n mapping_file = os.path.join(script_dir, \"templates\", parts[0] + \".json\")\n types = load_types(parts[0])\n names = load_types(parts[0], part=\"names\")\n template = {\n \"name\": parts[0],\n \"index_name\": opts[\"hub-separator\"].join(parts),\n \"mapping\": tofile.load_yaml(mapping_file),\n \"types\": types,\n \"names\": names,\n }\n return template", "title": "" }, { "docid": "174c5be2ff6f1a9d99511eaabaa4a96c", 
"score": "0.4583124", "text": "def get_template_names(self):\n template_names = []\n template_name = getattr(self, 'template_name', None)\n if template_name:\n template_names.append(template_name)\n\n if getattr(self, 'model', None):\n template_names.append('%s/%s%s.html' % (\n self.model._meta.app_label,\n self.model._meta.model_name,\n '_' + self.urlname,\n ))\n\n default_template_name = getattr(self, 'default_template_name', None)\n if default_template_name:\n template_names.append(default_template_name)\n return template_names", "title": "" }, { "docid": "22243694701acb381be5e7fd2210780c", "score": "0.4582601", "text": "def get_arm_template(resources):\n # run each resource translator:\n for resource in resources:\n resource.translate()\n\n # also, let all the resource translators apply any changes\n # to the context they require:\n for resource in resources:\n resource.update_context()\n\n template_data = CTX.get_template_data()\n template_data.update({\n \"$schema\": constants.ARM_SCHEMA_URL,\n \"contentVersion\": constants.ARM_TEMPLATE_VERSION\n })\n\n return collections.OrderedDict([\n (\"contentVersion\", constants.ARM_TEMPLATE_VERSION),\n (\"$schema\", constants.ARM_SCHEMA_URL),\n (\"parameters\", template_data[\"parameters\"]),\n (\"variables\", template_data[\"variables\"]),\n (\"resources\", template_data[\"resources\"])\n ])", "title": "" }, { "docid": "31625e9aabad5542a9be2930782cc9f6", "score": "0.45822716", "text": "def parse_templated_fields(metadata: Dict) -> Dict:\n parse_dict = {}\n for field in metadata:\n if \"configurations\" not in field:\n parse_dict.update({field: metadata[field]})\n else:\n parse_dict.update(get_config(metadata, field))\n \n def _recursive_render(s, cur_key):\n if s is None:\n return s\n counter = 0\n while \"{{ \" in s and \" }}\" in s:\n s = jinja2.Template(s).render(**parse_dict)\n counter += 1\n if counter > 100:\n raise(ValueError(f\"Cannot parse templated field {cur_key}\"))\n return s\n\n # looping over config sections:\n for config_sec, configs in metadata.items():\n if\"configurations\" not in config_sec:\n continue\n # looping over each field in the current config section\n for cur_key, cur_val in configs.items():\n if cur_val[\"type\"] in [\"string\", \"str\"]:\n cur_val[\"value\"] = _recursive_render(cur_val[\"value\"], cur_key)\n elif cur_val[\"type\"] == \"array\":\n for index, s in enumerate(cur_val[\"value\"]): \n cur_val[\"value\"][index] = _recursive_render(s, cur_key)\n elif cur_val[\"type\"] == \"object\": # a dict\n # convert to json string\n object_str = json.dumps(cur_val[\"value\"]).replace(\"\\\\\", \"\")\n # parse it like a string\n object_str = _recursive_render(object_str, cur_key)\n # convert it back to dict\n cur_val[\"value\"] = json.loads(object_str)\n \n metadata[config_sec][cur_key][\"value\"] = cur_val[\"value\"]\n \n return metadata", "title": "" }, { "docid": "4ee87473b2355a6b0701ab8f546b733b", "score": "0.4574512", "text": "def _upload_template(self, template_str):\n s3 = boto3.client('s3', region_name=self.region)\n\n base, ext = os.path.splitext(self.template)\n if not ext:\n ext = '.txt'\n\n key = f'{self.stack_name}/{self.stack_name}{ext}'\n s3.put_object(Bucket=self.bucket, Key=key, Body=bytes(template_str, 'utf-8'))\n return key", "title": "" }, { "docid": "605b7539408fcfd3b8db58059ecba6dd", "score": "0.4572792", "text": "def templates(self) -> List[\"Template\"]:\n for template in self.manifest.templates:\n template.templates_source = self\n template.validate(skip_files=False)\n\n return 
self.manifest.templates", "title": "" } ]
f9406bcd7c389dd26528a55309644db0
This is the customizable part of derived classes, used to set parameters for a particular type of implicit solvation
[ { "docid": "f9438647cfc9d1e5c4ede606537bddb7", "score": "0.0", "text": "def setup(self):\n pass", "title": "" } ]
[ { "docid": "9d312f1507d143e7fd675c7906961b22", "score": "0.6681285", "text": "def define_param(self, *pargs): ###\r\n self._subclass_paramdefs = getattr(self, \"_subclass_paramdefs\", [])\r\n self._subclass_paramdefs += list(pargs)", "title": "" }, { "docid": "f92f0b2b9b13eb0026b184480bb2e5f0", "score": "0.6573209", "text": "def _define_params(self):\n pass", "title": "" }, { "docid": "d7b1722da4987f80b5168409493a6dea", "score": "0.65635693", "text": "def set_params(self, **prameters):\n pass", "title": "" }, { "docid": "c8f71c3fa74222e49d149a63e6254bb1", "score": "0.65052146", "text": "def parameters(self):\n raise NotImplementedError(\"Subclass must override parameters(self).\")", "title": "" }, { "docid": "6dbc35fdea72f3b60ba8e5ce1933919a", "score": "0.6490133", "text": "def param_types(self):", "title": "" }, { "docid": "9dcc9553444d7896d6fca937884440ef", "score": "0.6446535", "text": "def _set_transformation_parameters(self):\n raise NotImplementedError(\"Abstract method should not be declared \"\n \"in derivate classes.\")", "title": "" }, { "docid": "88185664b8bf3273a9d9128509b53c0f", "score": "0.63372034", "text": "def set_params(self, **kwargs):\r\n pass", "title": "" }, { "docid": "3edd6e83fa9b5b51f219e0c182395f96", "score": "0.6254607", "text": "def _overwrite_parameters(self): \r\n pass", "title": "" }, { "docid": "2a5d5f6ab75646542232933a7301b791", "score": "0.62477267", "text": "def set_params(self, **kwargs):\n raise NotImplementedError()", "title": "" }, { "docid": "bee4886f7ef257ee55905c76b96d42b9", "score": "0.62369025", "text": "def init_params(self):\n raise NotImplementedError", "title": "" }, { "docid": "4a086e3be4e65541fd7f40b1ef86eb30", "score": "0.6226857", "text": "def _setParams(self, *params):\n pass", "title": "" }, { "docid": "461c0e5e9596bd95f0c21c22d708d59f", "score": "0.6147022", "text": "def _update_params(self):\n raise NotImplementedException()", "title": "" }, { "docid": "14074e714770d6cf2a80080b71d97e23", "score": "0.61399585", "text": "def __init__(self, *args):\n _Dynamic.Dynamic_RealParameter_swiginit(self,_Dynamic.new_Dynamic_RealParameter(*args))", "title": "" }, { "docid": "2d2589002a55338a58ee88ad9af73d0e", "score": "0.6130425", "text": "def __init__(self,params):\n self.setParams(params)", "title": "" }, { "docid": "2d2589002a55338a58ee88ad9af73d0e", "score": "0.6130425", "text": "def __init__(self,params):\n self.setParams(params)", "title": "" }, { "docid": "2d2589002a55338a58ee88ad9af73d0e", "score": "0.6130425", "text": "def __init__(self,params):\n self.setParams(params)", "title": "" }, { "docid": "51012f5a9e80954a35f47ed9f9ace49c", "score": "0.61168116", "text": "def dual_parameters(self):\n pass", "title": "" }, { "docid": "9aa292de9bd5d0cb2081b87a393841e9", "score": "0.6097648", "text": "def parameters(self, **kwargs):\n raise NotImplementedError", "title": "" }, { "docid": "e2dddf4528ac58450aaa65a04b1a70cf", "score": "0.60857797", "text": "def set_params(self, params):\n pass", "title": "" }, { "docid": "13e352f17973b356c2899b716a71b743", "score": "0.608567", "text": "def setParameters(self, izParameters): #$NON-NLS-1$\r", "title": "" }, { "docid": "184b91cfd9e62024d234a2e020921e55", "score": "0.6065205", "text": "def __init__(self, params):\n self.setParams(params)", "title": "" }, { "docid": "94c9fecbf3122a2deffbad6dbe46713e", "score": "0.60613495", "text": "def init_parameters(self):\n pass", "title": "" }, { "docid": "588ef722c5cda0de400abb51efd2f51e", "score": "0.60579973", "text": "def set_params(self, params):\n params = 
dict_to_namespace(params)\n\n # Set self.params\n self.params = Namespace()\n self.params.name = getattr(params, 'name', 'Base')\n self.params.verbose = getattr(params, 'verbose', self.verbose_init_arg)", "title": "" }, { "docid": "5ba0d085b2b3b6dfcc93992f290057ee", "score": "0.6047688", "text": "def parameters(self):\n raise NotImplementedError()", "title": "" }, { "docid": "f7872e3abdf6b45975575555ee608fe2", "score": "0.6045701", "text": "def setParameters(self, **kwargs):\n\t\tself.__setParams(**kwargs)", "title": "" }, { "docid": "5c8dc2587ea270d6d5dc1d58644ca235", "score": "0.60420346", "text": "def set_params(self, *, params: Params) -> None:\n super().set_params(params=params)", "title": "" }, { "docid": "db2fc50b41d14193065c62297e0151bf", "score": "0.60177714", "text": "def set_parameters(\n self, bools: Optional[List[Dict]] = None,\n ints: Optional[List[Dict]] = None,\n strs: Optional[List[Dict]] = None,\n doubles: Optional[List[Dict]] = None,\n groups: Optional[List[Dict]] = None) -> Dict:\n raise NotImplementedError('What is the utility ?')", "title": "" }, { "docid": "410f0da2dd110b392f57403e8f150ed0", "score": "0.6010537", "text": "def __init__(self,delta,max_width,sod,odd,cutoff_FWHM,param_x,param_y,param_type,norm_pow,warn=True):\n self.sod = sod\n self.odd = odd\n self.warn = warn\n self.max_width = max_width\n self.cutoff_FWHM = cutoff_FWHM\n self.norm_pow = norm_pow\n super().__init__(delta)\n self.set_params(param_x,param_y,param_type)", "title": "" }, { "docid": "d1aa9bba35c63d8e105d9bbdbaa8e884", "score": "0.59914905", "text": "def __init__(self):\n self.defaults = PARAM_DEFAULTS.copy()\n self.params = PARAM_DEFAULTS.copy()\n self.codes = PARAM_CODES.copy()", "title": "" }, { "docid": "06952b5a8848ca04a604c0fcc1f70d4c", "score": "0.5981454", "text": "def __init__(self, parameter_dictionary):\n super().__init__(parameter_dictionary)\n self.model_string = \"multizone\"\n model_dictionary = self._get_model_dict(__class__.default_parameters)\n self.me = [n for n in model_dictionary[\"me\"]]\n self.we = model_dictionary[\"we\"]\n self.aU = model_dictionary[\"aU\"]\n self.bU = model_dictionary[\"bU\"]\n self.mU = [n for n in model_dictionary[\"mU\"]]", "title": "" }, { "docid": "4b7c6ed794a48cb5ab268bd37c7eec25", "score": "0.5978037", "text": "def set_params(self, params):\n super().set_params(params)\n params = dict_to_namespace(params)\n\n self.params.name = getattr(params, \"name\", \"AcqViz1D\")\n self.params.figsize = getattr(params, \"figsize\", (8, 4))\n self.params.n_path_max = getattr(params, \"n_path_max\", None)\n self.params.xlabel = getattr(params, \"xlabel\", \"x\")\n self.params.ylabel = getattr(params, \"ylabel\", \"y\")\n self.params.lims = getattr(params, \"lims\", None)", "title": "" }, { "docid": "23e08232a902d502ca56611d3f688874", "score": "0.59776086", "text": "def addParameters(self):\n # type: () -> None\n # pylint: disable=attribute-defined-outside-init\n\n # self.pType = self.addParam(\"mode\", \"long\", 0, 0)\n self.pBlend = self.addParam(\"blend\", \"double\", 1, 0, 1)\n # self.pBladeOffset = self.addParam(\"bladeOffset\", \"float\", 0, 0)\n self.pNeutralPose = self.addParam(\"neutralpose\", \"bool\", False)\n self.pIkRefArray = self.addParam(\"ikrefarray\", \"string\", \"\")\n\n self.pUseIndex = self.addParam(\"useIndex\", \"bool\", False)\n self.pParentJointIndex = self.addParam(\"parentJointIndex\", \"long\", -1, None, None)\n\n # TODO: if have IK or IK/FK lock the axis position to force 2D Planar IK solver\n # Create a a method to lock and 
unlock while changing options in the PYSIDE component Settings", "title": "" }, { "docid": "036a932e85a3887fd2a1dd469cfded86", "score": "0.59639764", "text": "def initializeParameters(self):", "title": "" }, { "docid": "603848a734f3b6494d54c20668dcbff0", "score": "0.59528655", "text": "def set_params(self, params: Dict):\n raise NotImplementedError", "title": "" }, { "docid": "3ecb141ab7bff77387ef137354641985", "score": "0.594809", "text": "def __init__(self, type):\n \n self.parameters = []\n self.weights = []\n self.opt_values = []\n self.configuration = {}\n\n if type not in [0, 1]:\n raise ValueError(\"Parameter type has not valid value.\")\n else:\n self._type = type", "title": "" }, { "docid": "b62580a801c046034f538b1a3986dc24", "score": "0.5931429", "text": "def _setExplicitParameter(self,par,val):\n if par in Replicate._isPurePlateParameter and Replicate._isPurePlateParameter[par]:\n raise RuntimeError(\"_setExplicitParameter: parameter \"+par+\" cannot be set as this is a plate-wide parameter\")\n if par not in self._inheritableParameters:\n raise RuntimeError(\"_setExplicitParameter: unknown parameter \"+par)\n self._inheritableParameters[par]=val\n self._parametersUpdated(par)", "title": "" }, { "docid": "fa82e4688736d4cea55f5251d83fbb3d", "score": "0.5923829", "text": "def set_params(self, **values):\n pt, pe, pn = {}, {}, {}\n for k, v in values.items():\n if k.startswith(\"e_\"):\n pe[k[2:]] = v\n elif k.startswith(\"t_\"):\n pt[k[2:]] = v\n elif k.startswith(\"n_\"):\n pn[k[2:]] = v\n else:\n raise ValueError(\"Unexpected parameter name '{0}'.\".format(k))\n self.transformer.set_params(**pt)\n self.estimator.set_params(**pe)", "title": "" }, { "docid": "4f3cfc450b46efff467e314b11e76bcf", "score": "0.58940685", "text": "def __init__(self, parameters):\n self.params = parameters", "title": "" }, { "docid": "4f3cfc450b46efff467e314b11e76bcf", "score": "0.58940685", "text": "def __init__(self, parameters):\n self.params = parameters", "title": "" }, { "docid": "4f3cfc450b46efff467e314b11e76bcf", "score": "0.58940685", "text": "def __init__(self, parameters):\n self.params = parameters", "title": "" }, { "docid": "4f3cfc450b46efff467e314b11e76bcf", "score": "0.58940685", "text": "def __init__(self, parameters):\n self.params = parameters", "title": "" }, { "docid": "4f3cfc450b46efff467e314b11e76bcf", "score": "0.58940685", "text": "def __init__(self, parameters):\n self.params = parameters", "title": "" }, { "docid": "4f3cfc450b46efff467e314b11e76bcf", "score": "0.58940685", "text": "def __init__(self, parameters):\n self.params = parameters", "title": "" }, { "docid": "4f3cfc450b46efff467e314b11e76bcf", "score": "0.58940685", "text": "def __init__(self, parameters):\n self.params = parameters", "title": "" }, { "docid": "4f3cfc450b46efff467e314b11e76bcf", "score": "0.58940685", "text": "def __init__(self, parameters):\n self.params = parameters", "title": "" }, { "docid": "4f3cfc450b46efff467e314b11e76bcf", "score": "0.58940685", "text": "def __init__(self, parameters):\n self.params = parameters", "title": "" }, { "docid": "4f3cfc450b46efff467e314b11e76bcf", "score": "0.58940685", "text": "def __init__(self, parameters):\n self.params = parameters", "title": "" }, { "docid": "4f3cfc450b46efff467e314b11e76bcf", "score": "0.58940685", "text": "def __init__(self, parameters):\n self.params = parameters", "title": "" }, { "docid": "4f3cfc450b46efff467e314b11e76bcf", "score": "0.58940685", "text": "def __init__(self, parameters):\n self.params = parameters", "title": "" }, { 
"docid": "3fd5402ed9682c61b15ea50bdac947e8", "score": "0.5881887", "text": "def param():\n raise NotImplementedError", "title": "" }, { "docid": "5348b1d9c8546707cafb300724d1f255", "score": "0.5870972", "text": "def make_params(self, config):\n raise NotImplementedError", "title": "" }, { "docid": "ca0336092d7ce075cd8fe57d29da3b0a", "score": "0.5858396", "text": "def __setParams(self, delta=0.5, T=20, deltaT=0.8, coolingMethod=coolDelta, **ukwargs):\n\t\tself.delta, self.T, self.curT, self.deltaT = delta, T, T, deltaT\n\t\tself.cool = coolingMethod\n\t\tif ukwargs: logger.info('Unused arguments: %s' % (ukwargs))", "title": "" }, { "docid": "cc63cb884f10c797c78525edf6c59eb1", "score": "0.5857526", "text": "def set_params(self, params):\n # Reset internal params\n self._params = {}\n\n # Get the paramater template to check against\n param_temp = get_param_template()\n\n # Go over the keys to check each param\n for key in param_temp.keys():\n val = params.get(key)\n if val is None:\n raise KeyError(\"Param {} was not specified.\".format(key))\n\n # Convert param to the correct type\n param_type = param_temp(key)[0]\n val = param_type(val)\n\n # Add it to the param dictionary\n self._params[key] = val\n\n # Also add it as private fields to the algorithm object\n setattr(self, '_' + key, val)", "title": "" }, { "docid": "003a8d3f12ec113b7f908ede4ca552b9", "score": "0.585688", "text": "def construct_free_params(self):\n raise NotImplementedError()\n pass", "title": "" }, { "docid": "311b7a7a6bbc2e29659a87d5c87f92b7", "score": "0.5852243", "text": "def __init__(self, *args, **kwargs):\n \n cname = self.__class__.__name__\n modulename = op.splitext(op.basename(__file__))[0]\n \n # Make sure no keywords are in class attributes prior to initialization\n for key in kwargs:\n if key not in dict(self._class_attributes()):\n raise ParametersError('Invalid parameter \"%s\" is not a class '\n 'attribute of %s.%s' % (key, modulename, cname) )\n\n super(ABCParameters, self).__init__(*args, **kwargs)", "title": "" }, { "docid": "36380490f094ec5e37f45114b3352159", "score": "0.58120227", "text": "def _set_params_vals_to_class(self, params, init=False, skip_phases=False):\n for p in params:\n if init:\n setattr(self, p, params[p].value)\n else:\n if hasattr(self, p):\n setattr(self, p, params[p].value)\n\n if not skip_phases:\n\n updated_lp = False\n\n for p in self.phases:\n mat = self.phases[p]\n \"\"\"\n PART 1: update the lattice parameters\n \"\"\"\n lp = []\n lpvary = False\n pre = p + \"_\"\n\n name = [f\"{pre}{x}\" for x in wppfsupport._lpname]\n\n for nn in name:\n if nn in params:\n if params[nn].vary:\n lpvary = True\n lp.append(params[nn].value)\n elif nn in self.params:\n lp.append(self.params[nn].value)\n\n if not lpvary:\n pass\n else:\n lp = self.phases[p].Required_lp(lp)\n mat.lparms = np.array(lp)\n mat._calcrmt()\n updated_lp = True\n\n if updated_lp:\n self.calctth()", "title": "" }, { "docid": "94c0311a0f946845023a01dbfa0536eb", "score": "0.580728", "text": "def set_params(self, params: np.ndarray):\n raise NotImplementedError", "title": "" }, { "docid": "16edd6b401b1c08ed649eb8caff60a65", "score": "0.5806448", "text": "def reset_parameters(self):\n raise NotImplementedError(self.__class__.__name__)", "title": "" }, { "docid": "1a0c9636165488abc62a781f21abb44d", "score": "0.5803955", "text": "def parameters(self):\n pass", "title": "" }, { "docid": "69a291330efd8819d4105d723ce381d5", "score": "0.57838356", "text": "def _init_internal_params(self):\n # reuse base class function\n 
super(PoreSurfaceCalculation, self)._init_internal_params()\n\n self._default_parser = 'phtools.surface'", "title": "" }, { "docid": "55b42fee0aa1b3b5cb1b391bc0984719", "score": "0.5783599", "text": "def build_params(self):\n raise NotImplementedError()", "title": "" }, { "docid": "aeaa71fe757da214464dae0b85ee35b4", "score": "0.5781906", "text": "def __init__(self, *args):\n _Dynamic.Dynamic_InstanceParameter_swiginit(self,_Dynamic.new_Dynamic_InstanceParameter(*args))", "title": "" }, { "docid": "740eeb025312579fc4a0bf7b5f9073be", "score": "0.5777267", "text": "def setParameters(self, NP=25, C1=2.0, C2=2.0, w=0.7, vMin=-4, vMax=4, **ukwargs):\n\t\tself.NP, self.C1, self.C2, self.w, self.vMin, self.vMax = NP, C1, C2, w, vMin, vMax\n\t\tif ukwargs: logger.info('Unused arguments: %s' % (ukwargs))", "title": "" }, { "docid": "333c9ff385285bc3ee5456521749c8b5", "score": "0.5771174", "text": "def updateParameters(self, parameters):", "title": "" }, { "docid": "333c9ff385285bc3ee5456521749c8b5", "score": "0.5771174", "text": "def updateParameters(self, parameters):", "title": "" }, { "docid": "333c9ff385285bc3ee5456521749c8b5", "score": "0.5771174", "text": "def updateParameters(self, parameters):", "title": "" }, { "docid": "333c9ff385285bc3ee5456521749c8b5", "score": "0.5771174", "text": "def updateParameters(self, parameters):", "title": "" }, { "docid": "de691f5441548e424961ff1efa958e49", "score": "0.5770644", "text": "def SetParameters(self, params):\n pass", "title": "" }, { "docid": "ffd76b8312b0a0d6da5341b94b699f00", "score": "0.5763571", "text": "def set_params(self, numticks=None, symthresh=None,\n base=None, subs=None):\n if numticks is not None:\n self.numticks = numticks\n if symthresh is not None:\n self.symthresh = symthresh\n if base is not None:\n self.base = base\n if subs is not None:\n self.subs = subs if len(subs) > 0 else None", "title": "" }, { "docid": "6662591dfda18fb4ec1333746d02abbe", "score": "0.5755703", "text": "def set_parameters(problem,xleft,xright,IC,tfinal,num_output_times):\n\n from importlib import import_module\n prb = import_module(problem)\n \n paramtrs = prb.Parameters()\n paramtrs.xleft = xleft\n paramtrs.xright = xright\n paramtrs.IC = IC\n paramtrs.tfinal = tfinal\n paramtrs.num_output_times = num_output_times\n \n return paramtrs", "title": "" }, { "docid": "879815468ba9ffc0f31924a3a0d5ae08", "score": "0.5750623", "text": "def __init__(self,params,impactFunctionClass):\n self.impactFunctionClass=impactFunctionClass\n self.setParams(params)", "title": "" }, { "docid": "778ac42a170eae053be2029b2d4b7f1f", "score": "0.57500285", "text": "def setParameters(self, NP=40, F=0.7, R=0.3, C=3, FC=0.5, **ukwargs):\n\t\tself.NP, self.F, self.R, self.C, self.FC = NP, F, R, C, FC\n\t\tif ukwargs: logger.info('Unused arguments: %s' % (ukwargs))", "title": "" }, { "docid": "a781ef3ba4abf34f86a3fff121c84cda", "score": "0.57196766", "text": "def __init__(self, *args):\n _Dynamic.Dynamic_ObjectParameter_swiginit(self,_Dynamic.new_Dynamic_ObjectParameter(*args))", "title": "" }, { "docid": "262daca94c41d3ad26eb615bf2191489", "score": "0.5717583", "text": "def addParameters(self):\r\n\r\n # Default Values\r\n self.pBlend = self.addParam(\"blend\", \"double\", 1, 0, 1)\r\n self.pFull3BoneIK = self.addParam(\"full3BonesIK\", \"double\", 1, 0, 1)\r\n self.pIkRefArray = self.addParam(\"ikrefarray\", \"string\", \"\")\r\n self.pUpvRefArray = self.addParam(\"upvrefarray\", \"string\", \"\")\r\n self.pMaxStretch = self.addParam(\"maxstretch\", \"double\", 1.5, 1, None)\r\n\r\n 
self.pIKSolver = self.addEnumParam(\r\n \"ikSolver\", [\"IK Spring\", \"IK Rotation Plane\"], 0)\r\n\r\n self.pIKOrient = self.addParam(\"ikOri\", \"bool\", True)\r\n\r\n # Divisions\r\n self.pDiv0 = self.addParam(\"div0\", \"long\", 2, 1, None)\r\n self.pDiv1 = self.addParam(\"div1\", \"long\", 2, 1, None)\r\n self.pDiv1 = self.addParam(\"div2\", \"long\", 2, 1, None)\r\n\r\n # FCurves\r\n self.pSt_profile = self.addFCurveParam(\r\n \"st_profile\", [[0, 0], [.5, -1], [1, 0]])\r\n\r\n self.pSq_profile = self.addFCurveParam(\r\n \"sq_profile\", [[0, 0], [.5, 1], [1, 0]])\r\n\r\n self.pUseIndex = self.addParam(\"useIndex\", \"bool\", False)\r\n\r\n self.pParentJointIndex = self.addParam(\r\n \"parentJointIndex\", \"long\", -1, None, None)", "title": "" }, { "docid": "bf37f039dcc1860cd7d4667083308af9", "score": "0.57135636", "text": "def __init__(self, params):\n\n # create generic technology object\n super().__init__(params)\n # input params UNITS ARE COMMENTED TO THE RIGHT\n self.technology_type = 'Generator'\n self.rated_power = params['rated_capacity'] # kW/generator\n self.p_min = params['min_power'] # kW/generator\n self.variable_om = params['variable_om_cost'] # $/kwh\n self.fixed_om = params['fixed_om_cost'] # $/yr\n\n self.capital_cost_function = [params['ccost'], # $/generator\n params['ccost_kW']]\n\n self.n = params['n'] # generators\n\n self.is_electric = True\n self.is_fuel = True", "title": "" }, { "docid": "74cef6ba1821e738513af34dba1689a8", "score": "0.5711947", "text": "def configure(self, paramDict):\n\t\traise NotImplementedError( \"Should have implemented this\" )", "title": "" }, { "docid": "df4137032bec96e9c54a4ac0a4c8eb1b", "score": "0.57083553", "text": "def set_arg_types( self ):\n if self.mode == 'grad':\n self.function = terms.dw_biot_grad\n use_method_with_name( self, self.get_fargs_grad, 'get_fargs' )\n self.use_caches = {'state_in_volume_qp' : [['state',\n {'state' : (-1,-1)}]]}\n elif self.mode == 'div':\n self.function = terms.dw_biot_div\n use_method_with_name( self, self.get_fargs_div, 'get_fargs' )\n self.use_caches = {'cauchy_strain' : [['state',\n {'strain' : (-1,-1)}]]}\n else:\n raise NotImplementedError", "title": "" }, { "docid": "1f548711d5ff07783178ef21095fe988", "score": "0.57043725", "text": "def define_parameters(self):\n\n self.add_argument('--conversion_type', dest='conversion_type', type=str, optional=False,\n help='which type of conversion you want 1. To jpg 2. To numpy')", "title": "" }, { "docid": "cf496f233bd68187a5649f9b70c69a0b", "score": "0.57004184", "text": "def setup_parameters(self):\n structure = self.ctx.structure_initial_primitive\n meshcutoff = 0.0\n\n for kind in structure.get_kind_names():\n try:\n cutoff = self.ctx.protocol['atom_heuristics'][kind]['cutoff']\n meshcutoff = max(meshcutoff, cutoff)\n except:\n pass # No problem. 
No heuristics, no info\n\n meshcutoff = max(\n self.ctx.protocol['min_meshcutoff'],\n meshcutoff) # In case we did not get anything, set a minimum value\n\n self.ctx.inputs['parameters'] = {\n 'dm-tolerance': self.ctx.protocol['dm_convergence_threshold'],\n 'md-max-force-tol':\n self.ctx.protocol['forces_convergence_threshold'],\n 'mesh-cutoff': \"{} Ry\".format(meshcutoff),\n 'electronic-temperature':\n self.ctx.protocol['electronic_temperature'],\n 'md-type-of-run': self.ctx.protocol['md-type-of-run'],\n 'md-num-cg-steps': self.ctx.protocol['md-num-cg-steps']\n }", "title": "" }, { "docid": "418809032511f64ecc5ff5f590a69906", "score": "0.5700236", "text": "def set_params(self,param_1,param_2,weight_1,param_type='scale'):\n param_1 = np.abs(param_1)\n if param_type == 'FWHM':\n self.FWHM_1 = param_1\n self.scale_1 = get_scale(self.FWHM_1,self.norm_pow)\n elif param_type == 'scale':\n self.scale_1 = param_1\n self.FWHM_1 = get_FWHM(self.scale_1,self.norm_pow)\n else:\n raise ValueError(\"param_type is invalid\")\n\n if param_2 is not None:\n param_2 = np.abs(param_2)\n if param_type == 'FWHM':\n self.FWHM_2 = param_2\n self.scale_2 = get_scale(self.FWHM_2,self.norm_pow)\n elif param_type == 'scale':\n self.scale_2 = param_2\n self.FWHM_2 = get_FWHM(self.scale_2,self.norm_pow)\n else:\n raise ValueError(\"param_type is invalid\")\n \n if weight_1 is not None:\n weight_1 = 0.0 if weight_1<0.0 else weight_1\n weight_1 = 1.0 if weight_1>1.0 else weight_1\n self.weight_1 = weight_1 \n \n cutoff_width = max(self.cutoff_FWHM_1*self.FWHM_1,self.cutoff_FWHM_2*self.FWHM_2)/2.0\n if cutoff_width>self.max_width and self.warn:\n print(\"WARN: The maximum width {} specified for detector PSF is less than the cutoff width {}\".format(self.max_width,cutoff_width))\n cutoff_width = min(cutoff_width,self.max_width)\n\n psf_func = self.psf_function() \n psf_grad_funcs = self.psf_grad_function() \n \n super().set_pars(cutoff_width,psf_func,psf_grad_funcs)", "title": "" }, { "docid": "0730ed0b98f9bf047642ebd4e2dc1281", "score": "0.569411", "text": "def __init__(self,delta,max_width,cutoff_FWHM_1,cutoff_FWHM_2,param_1,param_2,weight_1,param_type,norm_pow,warn=True):\n super().__init__(delta)\n self.max_width = max_width\n self.cutoff_FWHM_1 = cutoff_FWHM_1\n self.cutoff_FWHM_2 = cutoff_FWHM_2\n self.norm_pow = norm_pow\n self.warn = warn\n self.set_params(param_1,param_2,weight_1,param_type)", "title": "" }, { "docid": "21ce0f760a72ab7646abc31cfa1a5918", "score": "0.56916815", "text": "def from_params(self, params):\r\n raise NotImplementedError()", "title": "" }, { "docid": "3f55cdae1a1994906ac9d59d878614b0", "score": "0.5690055", "text": "def _build_param_dict(self):\n # Add parameter handlers to parameter dict.\n self._param_dict = ProtocolParameterDict()\n\n self._param_dict.add(Parameter.CYCLE_TIME,\n r'(\\d+)\\s+= Cycle Time \\(.*\\)\\r\\n(0|1)\\s+= Minutes or Seconds Cycle Time',\n lambda match: self._to_seconds(int(match.group(1)),\n int(match.group(2))),\n self._int_to_string,\n type=ParameterDictType.INT,\n display_name=\"Cycle Time\",\n visibility=ParameterDictVisibility.READ_WRITE,\n startup_param=True,\n direct_access=True,\n default_value=20,\n range=(15, 3600),\n menu_path_read=SubMenu.SHOW_PARAM,\n submenu_read=[],\n menu_path_write=SubMenu.CHANGE_PARAM,\n units=Units.SECOND,\n description='Sample interval (15 - 3600), where time greater than 59 is rounded down to '\n 'the nearest minute.',\n submenu_write=[[\"1\", Prompt.CYCLE_TIME_PROMPT]])\n\n self._param_dict.add(Parameter.VERBOSE,\n 
r'bogusdatadontmatch', # Write-only\n lambda match: None,\n self._int_to_string,\n type=ParameterDictType.INT,\n display_name=\"Verbose\",\n visibility=ParameterDictVisibility.IMMUTABLE,\n startup_param=True,\n direct_access=True,\n range={'On':1, 'Off':0},\n init_value=0,\n value=0,\n # TODO - HAD PROBLEMS COMPARING VALUES BEFORE SETTING DURING INIT BECAUSE VALUE WASN'T SET IN UPDATE PARAMS (NO WAY TO GET VALUE FROM INSTRUMENT)\n description=\"Enable verbosity with data points\",\n menu_path_write=SubMenu.CHANGE_PARAM,\n submenu_write=[[\"2\", Prompt.VERBOSE_PROMPT]])\n\n self._param_dict.add(Parameter.METADATA_POWERUP,\n r'(0|1)\\s+= Metadata Print Status on Power up',\n lambda match: int(match.group(1)),\n self._int_to_string,\n type=ParameterDictType.INT,\n display_name=\"Metadata on Powerup\",\n visibility=ParameterDictVisibility.IMMUTABLE,\n startup_param=True,\n direct_access=True,\n range={'On':1, 'Off':0},\n init_value=0,\n description=\"Enable display of metadata at startup\",\n menu_path_write=SubMenu.CHANGE_PARAM,\n submenu_write=[[\"3\", Prompt.METADATA_PROMPT]])\n\n self._param_dict.add(Parameter.METADATA_RESTART,\n r'(0|1)\\s+= Metadata Print Status on Restart Data Collection',\n lambda match: int(match.group(1)),\n self._int_to_string,\n type=ParameterDictType.INT,\n display_name=\"Metadata on Restart\",\n visibility=ParameterDictVisibility.IMMUTABLE,\n startup_param=True,\n direct_access=True,\n range={'On':1, 'Off':0},\n init_value=0,\n description=\"Enable display of metadata at restart\",\n menu_path_write=SubMenu.CHANGE_PARAM,\n submenu_write=[[\"4\", Prompt.METADATA_PROMPT]])\n\n self._param_dict.add(Parameter.RES_SENSOR_POWER,\n r'(0|1)\\s+= Res Power',\n lambda match: int(match.group(1)),\n self._int_to_string,\n type=ParameterDictType.INT,\n display_name=\"Res Sensor Power\",\n visibility=ParameterDictVisibility.READ_ONLY,\n startup_param=False,\n direct_access=False,\n range={'On':1, 'Off':0},\n menu_path_read=SubMenu.SHOW_PARAM,\n submenu_read=[],\n description=\"Enable res sensor power\",\n menu_path_write=SubMenu.SENSOR_POWER,\n submenu_write=[[\"1\"]])\n\n self._param_dict.add(Parameter.INST_AMP_POWER,\n r'(0|1)\\s+= Thermocouple & Hydrogen Amp Power',\n lambda match: int(match.group(1)),\n self._int_to_string,\n type=ParameterDictType.INT,\n display_name=\"Instrumentation Amp Power\",\n visibility=ParameterDictVisibility.READ_ONLY,\n startup_param=False,\n direct_access=False,\n range={'On':1, 'Off':0},\n description=\"Enable instrumentation amp power\",\n menu_path_read=SubMenu.SHOW_PARAM,\n submenu_read=[],\n menu_path_write=SubMenu.SENSOR_POWER,\n submenu_write=[[\"2\"]])\n\n self._param_dict.add(Parameter.EH_ISOLATION_AMP_POWER,\n r'(0|1)\\s+= eh Amp Power',\n lambda match: int(match.group(1)),\n self._int_to_string,\n type=ParameterDictType.INT,\n display_name=\"eH Isolation Amp Power\",\n visibility=ParameterDictVisibility.READ_ONLY,\n startup_param=False,\n direct_access=False,\n range={'On':1, 'Off':0},\n description=\"Enable eH isolation amp power\",\n menu_path_read=SubMenu.SHOW_PARAM,\n submenu_read=[],\n menu_path_write=SubMenu.SENSOR_POWER,\n submenu_write=[[\"3\"]])\n\n self._param_dict.add(Parameter.HYDROGEN_POWER,\n r'(0|1)\\s+= Hydrogen Sensor Power',\n lambda match: int(match.group(1)),\n self._int_to_string,\n type=ParameterDictType.INT,\n display_name=\"Hydrogen Sensor Power\",\n visibility=ParameterDictVisibility.READ_ONLY,\n startup_param=False,\n direct_access=False,\n range={'On':1, 'Off':0},\n description=\"Enable hydrogen sensor 
power\",\n menu_path_read=SubMenu.SHOW_PARAM,\n submenu_read=[],\n menu_path_write=SubMenu.SENSOR_POWER,\n submenu_write=[[\"4\"]])\n\n self._param_dict.add(Parameter.REFERENCE_TEMP_POWER,\n r'(0|1)\\s+= Reference Temperature Power',\n lambda match: int(match.group(1)),\n self._int_to_string,\n type=ParameterDictType.INT,\n display_name=\"Reference Temp Power\",\n visibility=ParameterDictVisibility.READ_ONLY,\n startup_param=False,\n direct_access=False,\n range={'On':1, 'Off':0},\n description=\"Enable reference temperature power\",\n menu_path_read=SubMenu.SHOW_PARAM,\n submenu_read=[],\n menu_path_write=SubMenu.SENSOR_POWER,\n submenu_write=[[\"5\"]])\n\n self._param_dict.add(Parameter.RUN_ACQUIRE_STATUS_INTERVAL,\n \"fakeregexdontmatch\",\n lambda match: match.group(0),\n str,\n type=ParameterDictType.STRING,\n expiration=None,\n visibility=ParameterDictVisibility.READ_WRITE,\n display_name=\"Acquire Status Interval\",\n description='Time interval for running acquiring status.',\n default_value='00:00:00',\n units='HH:MM:SS',\n startup_param=True,\n direct_access=False)", "title": "" }, { "docid": "bb99fa455637be6a844d3cdfb4edd07e", "score": "0.56883425", "text": "def __init__(self, params):\n # base class is ICE\n super().__init__(params)\n self.tag = 'DieselGenset'\n self.can_participate_in_market_services = False", "title": "" }, { "docid": "0eec46ff694d2a95af40e71e6859c4cd", "score": "0.5679218", "text": "def __init__(self):\n super().__init__(une_classe = 2, une_mana_requise = 8)", "title": "" }, { "docid": "86af1ce7267fe05ebb0ca93756c0e553", "score": "0.56787694", "text": "def __init__(self, params: Union[Dict[str, ZfitParameter]] = None,\n name: str = \"BaseConstraint\", dtype=ztypes.float,\n **kwargs):\n super().__init__(name=name, dtype=dtype, params=params, **kwargs)", "title": "" }, { "docid": "4938547320a62ac3688fcca00d1ffeb5", "score": "0.5674202", "text": "def _setCalculator(self):\n for pname in self.__class__._parnames:\n self.addParameter(\n Parameter(pname, value=self.meta[pname])\n )\n self.processMetaData()\n return", "title": "" }, { "docid": "ad76864feca5954540dbec5765ad8319", "score": "0.5672512", "text": "def _update_param(self):\n # Update policy parameters\n for target_param, param in zip(self.target_actor.parameters(), self.actor.parameters()):\n target_param.data.copy_(param.data)\n\n # Update critic parameters\n for target_param, param in zip(self.target_critic.parameters(), self.critic.parameters()):\n target_param.data.copy_(param.data)", "title": "" }, { "docid": "d0ec9aae9b594028456f7b59cf910942", "score": "0.5662788", "text": "def _viz_params(self):\n raise AssertionError('Method in subclass expected to have been invoked')", "title": "" }, { "docid": "3b17d17697e156a0c4bbd610ecae4c74", "score": "0.5657436", "text": "def __init__(self, name, param_dict, param_seq, c_type='float'):\n self.code_params = {\n \"name\" :name,\n \"param_dict\": param_dict,\n \"param_seq\": param_seq,\n \"c_type\": c_type}", "title": "" }, { "docid": "98ab611d27868a142eabcfd721c2ba43", "score": "0.564859", "text": "def __init__(self, name, n, neurontype):\n self.code_params = {\n \"name\": name,\n \"n\": n,\n \"neurontype\": neurontype,\n \"para\": \"{}_params\".format(name),\n \"ini\": \"{}_ini_params\".format(name)}", "title": "" }, { "docid": "72f43b814cf45f0d5b2c80befa60fa26", "score": "0.5645379", "text": "def _set_parameters(self, params: dict) -> None:\n variable_code = \"self.{0} = tf.Variable({1}, name='{0}', dtype=tf.float32)\"\n for k, v in params.items():\n 
byte_code = compile(variable_code.format(k, v), filename=\"<inline code>\", mode=\"exec\")\n exec(byte_code)\n return", "title": "" }, { "docid": "271d2658606b160822667540685315a9", "score": "0.5644841", "text": "def __init__(self):\n self.params = arcpy.GetParameterInfo()\n\n self.coretbl_param = self.params[11]\n self.expertcoor_param = self.params[14]\n self.core_params = [12, 13, 14]\n self.climrast_param = self.params[15]\n self.modadvance_param = self.params[16]\n self.climate_params = range(17, 28)\n self.expcoorwt_param = self.params[31]\n self.climgradwt_param = self.params[32]", "title": "" }, { "docid": "f2947f2b4ef4a9ef24315ddcc91ba101", "score": "0.5644656", "text": "def set_fortran_parameters(self,solution):\n self.set_method(solution.state)\n classic = __import__(self.so_name)\n # The reload here is necessary because otherwise the common block\n # cparam in the Riemann solver doesn't get flushed between running\n # different tests in a single Python session.\n reload(classic)\n solution.state.set_cparam(classic)", "title": "" }, { "docid": "873c64c8e4f92c4aa5a2ca118d8fb92c", "score": "0.5643392", "text": "def __init__(**kw):", "title": "" }, { "docid": "0f5556e9d461c5c71da4495d976dfe9a", "score": "0.56430715", "text": "def set_params(self, **params: Dict[str, Any]) -> BaseVolumeRegressor:\n return self", "title": "" }, { "docid": "272518b2194bed6c27748b8f53346346", "score": "0.5642341", "text": "def __init__(self, type_of):\n self.type = type_of", "title": "" }, { "docid": "5cd44619b1be3bdc280575919cbcd8fb", "score": "0.5636607", "text": "def _parameters(self):\n return NotImplemented", "title": "" }, { "docid": "ea007050d2c8a8df92fb147af84c5b34", "score": "0.5636208", "text": "def set_parameters(self, params):\n\n for param in self.PARAMETERS:\n if param in params:\n setattr(self, param, params[param])\n else:\n setattr(self, param, None)\n\n for i in range(1, 4):\n if 'Kd%d'%i in params:\n setattr(self, 'ku%d'%i, params['kb%d'%i] / params['Kd%d'%i])\n\n for i in [12, 23, 13, 123]:\n if 'omega%d'%i in params:\n setattr(self, 'ucoop%d'%i, params['omega%d'%i] / params['bcoop%d'%i])\n\n self.ss.clear()", "title": "" }, { "docid": "9680a6d3ed3760edf216bb174995a1f8", "score": "0.56319565", "text": "def __init__(self, base, **kwargs):\r\n self.base = base\r\n self.kwargs = kwargs", "title": "" }, { "docid": "d1851bd4bde83ffa8d69e2dd337d9790", "score": "0.5628374", "text": "def setParameters(self, NP=10, lt=3, al=10, lsc=1, gsc=1, tr=0.3, **ukwargs):\n\t\tAlgorithm.setParameters(self, NP=NP, **ukwargs)\n\t\tself.lt, self.al, self.lsc, self.gsc, self.tr = lt, al, lsc, gsc, tr", "title": "" } ]
18772f8e3ebd9b6ba811cbfd85ece254
Ensures that Renderman is loaded, and channels are expected values.
[ { "docid": "ee5f1d66f0bd1cc64a73ff437c03c358", "score": "0.5228379", "text": "def validate(self):\n valid = self.valid_channels()\n filtered_list = []\n \n if not self.channels: raise PassMakerError, 'must select at least one channel'\n \n for chan in self.channels: \n if chan not in valid: raise PassMakerError,'invalid channel: [%s]' % chan\n # remove potential duplicates\n if chan not in filtered_list: filtered_list.append(chan)\n self.channels = filtered_list", "title": "" } ]
[ { "docid": "8f7b5096222aaf3f14153ade9dc5b7b0", "score": "0.5716942", "text": "def _checkInitialization(self):\n if not(self.general and self.runtime and self.groups):\n raise RuntimeError(\"Settings are not completely initialized\")", "title": "" }, { "docid": "1a08789e4e308a6aff6531a68331b0ce", "score": "0.5478793", "text": "def test_get_available_renderings(self):\n pass", "title": "" }, { "docid": "b622efd375d90358c655f847f8c36a49", "score": "0.5461889", "text": "def test_surface_initialization(self):\n reaction_system = self.rmg.reaction_systems[0]\n reaction_system.attach(self.listener)\n reaction_model = self.rmg.reaction_model\n\n core_species = reaction_model.core.species\n core_reactions = reaction_model.core.reactions\n surface_species = [core_species[7], core_species[6]]\n surface_reactions = [core_reactions[0], core_reactions[2], core_reactions[3]]\n\n reaction_system.initialize_model(core_species, core_reactions,\n reaction_model.edge.species, reaction_model.edge.reactions, surface_species,\n surface_reactions)\n\n self.assertEquals(len(surface_species), 1) # only H should be left\n self.assertEquals(len(surface_reactions), 2) # all the reactions with H should stay", "title": "" }, { "docid": "8a63abbe77f0939ab0c97e446a8c502c", "score": "0.54246956", "text": "def UndefinedMaterials():", "title": "" }, { "docid": "e0cd31a00b839d7cb055ba766dfc155e", "score": "0.5275234", "text": "def loadRender():\n pm.mel.eval('setRenderingEngineInModelPanel \"{}\";'.format(pcfg.maya_default_rendering_api))\n tone_maps = pm.colorManagementPrefs(q=True, vts=True)\n\n if pcfg.maya_default_tone_map not in tone_maps or not store.get(pcfg.use_tone_map):\n return\n\n pm.colorManagementPrefs(e=True, vtn=pcfg.maya_default_tone_map)\n pm.modelEditor('modelPanel4', e=True, vtn=pcfg.maya_default_tone_map)", "title": "" }, { "docid": "b02f83d448b291fd5febf1c22a811ce9", "score": "0.5241606", "text": "async def on_ready(self):\n self._ch_out = self.validate_channel(self._ch_out_id)\n self._ch_test = self.validate_channel(self._ch_test_id)\n self.core.set_cog_ready(self)", "title": "" }, { "docid": "8e43d157e943d845d47a1d7388f7970a", "score": "0.5226501", "text": "def verify_channel(data):\n channel = data[\"channel\"]\n if channel not in channels:\n emit(\"default channel\")\n else:\n # do nothing if the channel exists\n pass", "title": "" }, { "docid": "4449f81708a650aac84e1fa631e30323", "score": "0.5219593", "text": "def _channel_control(channel):\r\n if channel not in ['red', 'green', 'blue']:\r\n raise NotAllowedChannel('Not a valid channel for '\r\n 'RGB color scheme!')", "title": "" }, { "docid": "de0e096075fa2b789635f5558041f2d4", "score": "0.5156255", "text": "def rmanLoaded():\n if not cmds.pluginInfo('RenderMan_for_Maya', q=True, loaded=True):\n melPrint('// loading RenderMan for Maya plug-in...')\n try:\n cmds.loadPlugin('RenderMan_for_Maya')\n return True\n except RuntimeError, err:\n return False\n else:\n return True", "title": "" }, { "docid": "352743fae82a960fe5594789cfab9bf7", "score": "0.50059444", "text": "def test_loadColorsDefSystemNames():\n colors = strains.load_colors()\n assert len(colors) > 0", "title": "" }, { "docid": "aeae3d3f3d69b88896001dcd02e3def2", "score": "0.4996556", "text": "def load_channels(channels, upper_bound=99):\n if channels is None:\n raise LoadError(\"channels argument is not set\")\n\n try:\n converted_channels = int(channels)\n except ValueError:\n raise LoadError(\"channels argument must be a number\")\n\n if converted_channels < 2:\n raise LoadError(\"must 
have more than 1 channel\")\n\n if converted_channels > upper_bound:\n raise LoadError(\n f\"must have fewer than {upper_bound + 1} channels (arbitrary)\",\n )\n\n return converted_channels", "title": "" }, { "docid": "6b2cce6959b481bdecc38a937a4cf6f6", "score": "0.49713627", "text": "def _handle_channels_reading(channels_fname, raw):\n logger.info(\"Reading channel info from {}.\".format(channels_fname))\n channels_dict = _from_tsv(channels_fname)\n ch_names_tsv = channels_dict[\"name\"]\n\n # Now we can do some work.\n # The \"type\" column is mandatory in BIDS. We can use it to set channel\n # types in the raw data using a mapping between channel types\n channel_type_bids_mne_map = dict()\n\n # Get the best mapping we currently have from BIDS to MNE nomenclature\n bids_to_mne_ch_types = _get_ch_type_mapping(fro=\"bids\", to=\"mne\")\n ch_types_json = channels_dict[\"type\"]\n for ch_name, ch_type in zip(ch_names_tsv, ch_types_json):\n # We don't map MEG channels for now, as there's no clear 1:1 mapping\n # from BIDS to MNE coil types.\n if ch_type.upper() in (\n \"MEGGRADAXIAL\",\n \"MEGMAG\",\n \"MEGREFGRADAXIAL\",\n \"MEGGRADPLANAR\",\n \"MEGREFMAG\",\n \"MEGOTHER\",\n ):\n continue\n\n # Try to map from BIDS nomenclature to MNE, leave channel type\n # untouched if we are uncertain\n updated_ch_type = bids_to_mne_ch_types.get(ch_type, None)\n\n if updated_ch_type is None:\n # XXX Try again with uppercase spelling – this should be removed\n # XXX once https://github.com/bids-standard/bids-validator/issues/1018 # noqa:E501\n # XXX has been resolved.\n # XXX x-ref https://github.com/mne-tools/mne-bids/issues/481\n updated_ch_type = bids_to_mne_ch_types.get(ch_type.upper(), None)\n if updated_ch_type is not None:\n msg = (\n \"The BIDS dataset contains channel types in lowercase \"\n \"spelling. This violates the BIDS specification and \"\n \"will raise an error in the future.\"\n )\n warn(msg)\n\n if updated_ch_type is None:\n # We don't have an appropriate mapping, so make it a \"misc\" channel\n channel_type_bids_mne_map[ch_name] = \"misc\"\n warn(\n f'No BIDS -> MNE mapping found for channel type \"{ch_type}\". '\n f'Type of channel \"{ch_name}\" will be set to \"misc\".'\n )\n else:\n # We found a mapping, so use it\n channel_type_bids_mne_map[ch_name] = updated_ch_type\n\n # Special handling for (synthesized) stimulus channel\n synthesized_stim_ch_name = \"STI 014\"\n if (\n synthesized_stim_ch_name in raw.ch_names\n and synthesized_stim_ch_name not in ch_names_tsv\n ):\n logger.info(\n f'The stimulus channel \"{synthesized_stim_ch_name}\" is present in '\n f\"the raw data, but not included in channels.tsv. Removing the \"\n f\"channel.\"\n )\n raw.drop_channels([synthesized_stim_ch_name])\n\n # Rename channels in loaded Raw to match those read from the BIDS sidecar\n if len(ch_names_tsv) != len(raw.ch_names):\n warn(\n f\"The number of channels in the channels.tsv sidecar file \"\n f\"({len(ch_names_tsv)}) does not match the number of channels \"\n f\"in the raw data file ({len(raw.ch_names)}). 
Will not try to \"\n f\"set channel names.\"\n )\n else:\n for bids_ch_name, raw_ch_name in zip(ch_names_tsv, raw.ch_names.copy()):\n if bids_ch_name != raw_ch_name:\n raw.rename_channels({raw_ch_name: bids_ch_name})\n\n # Set the channel types in the raw data according to channels.tsv\n channel_type_bids_mne_map_available_channels = {\n ch_name: ch_type\n for ch_name, ch_type in channel_type_bids_mne_map.items()\n if ch_name in raw.ch_names\n }\n ch_diff = set(channel_type_bids_mne_map.keys()) - set(\n channel_type_bids_mne_map_available_channels.keys()\n )\n if ch_diff:\n warn(\n f\"Cannot set channel type for the following channels, as they \"\n f'are missing in the raw data: {\", \".join(sorted(ch_diff))}'\n )\n raw.set_channel_types(channel_type_bids_mne_map_available_channels)\n\n # Set bad channels based on _channels.tsv sidecar\n if \"status\" in channels_dict:\n bads_tsv = _get_bads_from_tsv_data(channels_dict)\n bads_avail = [ch_name for ch_name in bads_tsv if ch_name in raw.ch_names]\n\n ch_diff = set(bads_tsv) - set(bads_avail)\n if ch_diff:\n warn(\n f'Cannot set \"bad\" status for the following channels, as '\n f\"they are missing in the raw data: \"\n f'{\", \".join(sorted(ch_diff))}'\n )\n\n raw.info[\"bads\"] = bads_avail\n\n return raw", "title": "" }, { "docid": "8093e21f82cfb99d12ca5549ecdc3416", "score": "0.49689618", "text": "def test_get_channels(self):\n url = '/' + version + '/collection/col1/experiment/exp1/channel/'\n\n # Get an existing channel\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.data['channels'][0], 'channel1')", "title": "" }, { "docid": "6b82599332c9bf09e286d3c17a3f7227", "score": "0.4957093", "text": "def test_mismatched_channels(self):\n print(\"Running test_mismatched_channels\")\n device = DsiConnector(connection_params=self.connection_params(),\n device_spec=spec(channels=['ch1', 'ch2']))\n self.assertEqual(len(device.channels), 2)\n device.connect()\n\n with pytest.raises(Exception):\n device.acquisition_init()", "title": "" }, { "docid": "c3de1cc7ea532c6001298ea28336a435", "score": "0.4944292", "text": "def _load_renderings(self, config):", "title": "" }, { "docid": "3229b7cd1a100c7ba63a3f6d6030a877", "score": "0.49287942", "text": "def testInitWorld(self):\n raytracer = Rangefinder()\n raytracer.add_photoreceptors([0], 0.01, 0.01)\n self.assertRaises(BVException, raytracer.render, RigidBodyState())", "title": "" }, { "docid": "776909f96843dcc4ddf6824e5a2bfcf1", "score": "0.49238923", "text": "def test_valid_init(self) -> None:\n self.record.cof_name = \"T2A2HTH\"\n # Large numbers are OK if they fit within unsigned 32-bit\n self.record.frames_per_direction = 0xFFFFFFFF\n self.record.animation_speed = 0xFFFFFFFF\n self.record.triggers = {0: 1, 1: 2}", "title": "" }, { "docid": "68b06f3dd5e0c69c198ee50a55f09b32", "score": "0.49211928", "text": "def check_init(self):\n self.capsule_ensure()\n _imager_lib.check_init(self._capsule)", "title": "" }, { "docid": "f4ad687c31cb66b4cbdc9b7ede22c964", "score": "0.49132943", "text": "def test():\n\t\tglobal _TEST_FRAME_\n\t\t_TEST_FRAME_ = dict(zip(\n\t\t\t_RECOGNIZED_ELEMENTS_,\n\t\t\tlen(_RECOGNIZED_ELEMENTS_) * [1.]\n\t\t))\n\t\ttry:\n\t\t\t_TEST_FRAME_ = channel_entrainment(_TEST_FRAME_)\n\t\texcept:\n\t\t\treturn False\n\t\treturn isinstance(_TEST_FRAME_, channel_entrainment)", "title": "" }, { "docid": "60425d53da11293ebf962d253574c6a2", "score": "0.49087375", "text": "def __init__(self, num_channels):\n 
pygame.mixer.set_reserved(num_channels)\n self.unused_channel_list = []\n for id in range(0, num_channels):\n self.unused_channel_list.append(pygame.mixer.Channel(id))\n \n self.used_channel_map = {}", "title": "" }, { "docid": "e4840e0ca5d97c310f5f4d597b4265a6", "score": "0.49073067", "text": "def test_load_glb(self):\n input_path = os.path.join(TEST_PATH, \"bed.glb\")\n model = GltfModel.load_from_glb(input_path)\n # note: these values were manually checked against the text part of\n # the glb file. The sizes are regression tests.\n self.assertEqual(model.num_primitive_meshes(), 3)\n self.assertEqual(model.num_images(), 3)\n self.assertEqual(model.image_size(0), 196608)\n self.assertEqual(model.image_size(1), 196608)\n # Note: The fact that these URIs are empty after reading from a glb\n # means that we have to make up URI's when creating a glb.\n self.assertEqual(model.image_uri(0), \"0.png\")\n self.assertEqual(model.image_uri(1), \"1.png\")", "title": "" }, { "docid": "bc97140405d58c661316c2f6ab826c0c", "score": "0.49027652", "text": "def setup_boxes(self):\n self.ui.psdMethod.addItem('welch')\n self.ui.psdMethod.addItem('multitaper')\n chans = Counter([mne.io.pick.channel_type(self.data.info, i)\n for i in range(self.data.info[\"nchan\"])])\n if chans['eeg']:\n self.ui.typeBox.addItem('eeg')\n if chans['mag']:\n self.ui.typeBox.addItem('mag')\n if chans['grad']:\n self.ui.typeBox.addItem('grad')", "title": "" }, { "docid": "d7dea699b8fe074ad407226ced90f3e6", "score": "0.4899702", "text": "def test_configuration_loader_get_codec_formats_named(mock_empty_config_file, valid_codec_format_name) -> None:\n assert len(Configuration().get_codec_formats([valid_codec_format_name])) == 1", "title": "" }, { "docid": "7c2d8154f4f88b04a2ab937122197e9a", "score": "0.48744828", "text": "def _check_setup(self):\n self.errors = []", "title": "" }, { "docid": "109ccff07c337a1e1696160482bafaa2", "score": "0.48481873", "text": "async def test_load_pipette_96_channels(\n decoy: Decoy,\n model_utils: ModelUtils,\n hardware_api: HardwareControlAPI,\n state_store: StateStore,\n action_dispatcher: ActionDispatcher,\n loaded_static_pipette_data: LoadedStaticPipetteData,\n subject: EquipmentHandler,\n) -> None:\n pipette_dict = cast(PipetteDict, {\"model\": \"hello\", \"pipette_id\": \"world\"})\n\n decoy.when(state_store.config.use_virtual_pipettes).then_return(False)\n decoy.when(model_utils.generate_id()).then_return(\"unique-id\")\n decoy.when(hardware_api.get_attached_instrument(mount=HwMount.LEFT)).then_return(\n pipette_dict\n )\n decoy.when(\n pipette_data_provider.get_pipette_static_config(pipette_dict)\n ).then_return(loaded_static_pipette_data)\n\n decoy.when(hardware_api.get_instrument_max_height(mount=HwMount.LEFT)).then_return(\n 42.0\n )\n\n result = await subject.load_pipette(\n pipette_name=\"p1000_96\",\n mount=MountType.LEFT,\n pipette_id=None,\n )\n\n assert result == LoadedPipetteData(pipette_id=\"unique-id\")\n\n decoy.verify(\n await hardware_api.cache_instruments({HwMount.LEFT: \"p1000_96\"}),\n action_dispatcher.dispatch(\n AddPipetteConfigAction(\n pipette_id=\"unique-id\",\n serial_number=\"world\",\n config=loaded_static_pipette_data,\n )\n ),\n )", "title": "" }, { "docid": "7fc73db99d57d2e68614c6d0530cd625", "score": "0.48429143", "text": "def testInitSensor(self):\n raytracer = Rangefinder()\n raytracer.set_map(example_world)\n self.assertRaises(BVException, raytracer.render, RigidBodyState())", "title": "" }, { "docid": "51eb21bc7bf01410bb5343375a8eb920", "score": "0.48154598", 
"text": "def validate(self):\n variables = ['layerThickness', 'normalVelocity', 'ssh']\n compare_variables(test_case=self, variables=variables,\n filename1='forward/output.nc')", "title": "" }, { "docid": "0f51eda645312aada40b943915f41e95", "score": "0.4795046", "text": "def _initialize_data(self):\n self.channels = {1: Channel(Channel.NITROGEN), 2: Channel(Channel.HELIUM), 3: Channel(Channel.HELIUM_CONT)}\n self.cycle = True # Whether the device will continuously cycle through fill states", "title": "" }, { "docid": "932b4ab941218143ed36070201e1c28a", "score": "0.47922906", "text": "def test_meshes_no_base_color(self):\n input_path = os.path.join(TEST_PATH, \"blind.glb\")\n model = GltfModel.load_from_glb(input_path)\n self.assertEqual(model.num_primitive_meshes(), 6)\n meshes = model.primitive_meshes()\n self.assertEqual(len(meshes), 6)\n for i in [0, 1, 3, 4]:\n self.assertIsInstance(meshes[i], TexturedMesh)\n for i in [2, 5]:\n self.assertIsInstance(meshes[i], Mesh)", "title": "" }, { "docid": "e5f44b23459ab55563b9ff12cac3ff79", "score": "0.4789424", "text": "def test_cdef_no_inputs(self):\n with self.assertRaises(TypeError):\n glymur.jp2box.ChannelDefinitionBox()", "title": "" }, { "docid": "01494aa402d8c9a336ee9740761e747b", "score": "0.47890764", "text": "def GetTextureDefaults(channel: Any) -> BaseContainer:\n ...", "title": "" }, { "docid": "0e9917d312b0ea529e8db21901252114", "score": "0.47725597", "text": "def validate_properties(self):\n # TODO: Add Boss specific validation\n # Verify Collection\n\n # Verify Experiment\n\n # Verify Channel\n\n # If channel already exists, check corners to see if data exists. If so question user for overwrite\n\n # Check tile size - error if too big\n\n # Check backend connectivity\n\n return ['Parameter Validation Passed'], [], []", "title": "" }, { "docid": "d4e94dfbc6c1f7ab099c7b867c7d8aa1", "score": "0.4756722", "text": "def check_if_can_run(self):\n\t\tmessage = \" setting must be assigned before run. 
\" + \\\n\t\t \"Add it as keyword argument when calling the constructor or the methods run or set_settings.\"\n\n\t\tif self.video_path is None:\n\t\t\traise AssertionError(\"video_path\" + message)\n\t\tif self.output_folder is None:\n\t\t\traise AssertionError(\"output_folder\" + message)\n\t\tif self.openpose_path is None:\n\t\t\traise AssertionError(\"openpose_path\" + message)\n\t\tif self.bones_defs is None:\n\t\t\traise AssertionError(\"bones_defs\" + message)", "title": "" }, { "docid": "3463a53a11e26dc0edea0db6116358b6", "score": "0.47555706", "text": "def testValidateGPUSettings(self):\n with self.assertRaises(WMSpecFactoryException):\n StdBase.validateGPUSettings({\"RequiresGPU\": \"optional\"})\n with self.assertRaises(WMSpecFactoryException):\n StdBase.validateGPUSettings({\"RequiresGPU\": \"required\"})\n with self.assertRaises(WMSpecFactoryException):\n StdBase.validateGPUSettings({\"RequiresGPU\": \"optional\", \"GPUParams\": json.dumps(\"\")})\n with self.assertRaises(WMSpecFactoryException):\n StdBase.validateGPUSettings({\"RequiresGPU\": \"required\", \"GPUParams\": json.dumps(None)})\n\n # now input that passes the validation\n self.assertTrue(StdBase.validateGPUSettings({\"RequiresGPU\": \"forbidden\"}))\n self.assertTrue(StdBase.validateGPUSettings({\"RequiresGPU\": \"optional\",\n \"GPUParams\": json.dumps(\"blah\")}))\n self.assertTrue(StdBase.validateGPUSettings({\"RequiresGPU\": \"required\",\n \"GPUParams\": json.dumps({\"Key1\": \"value1\"})}))", "title": "" }, { "docid": "4cb4ec381f93edf5267b80229f6a70be", "score": "0.47545567", "text": "def scene_setting_init(use_gpu):\n sce = bpy.context.scene.name\n bpy.data.scenes[sce].render.engine = g_engine_type\n bpy.data.scenes[sce].render.film_transparent = g_use_film_transparent\n #output\n bpy.data.scenes[sce].render.image_settings.color_mode = g_rgb_color_mode\n bpy.data.scenes[sce].render.image_settings.color_depth = g_rgb_color_depth\n bpy.data.scenes[sce].render.image_settings.file_format = g_rgb_file_format\n\n #dimensions\n bpy.data.scenes[sce].render.resolution_x = g_resolution_x\n bpy.data.scenes[sce].render.resolution_y = g_resolution_y\n bpy.data.scenes[sce].render.resolution_percentage = g_resolution_percentage\n\n if use_gpu:\n bpy.data.scenes[sce].render.engine = 'CYCLES' #only cycles engine can use gpu\n bpy.data.scenes[sce].render.tile_x = g_hilbert_spiral\n bpy.data.scenes[sce].render.tile_x = g_hilbert_spiral\n bpy.context.scene.cycles.device = 'GPU'\n bpy.data.scenes[sce].cycles.device = 'GPU'", "title": "" }, { "docid": "b97bccd82a0385f05c47effdb8cb05b6", "score": "0.47537827", "text": "def check_settings(self): \r\n # assert\r\n assert self.model_tag is not None, 'model_tag not assigned' \r\n assert self.is_train is not None, 'is_train not assigned'\r\n \r\n # gpu\r\n num_gpu = len(self.gpu_available.split(\",\"))\r\n if num_gpu > 1:\r\n #\r\n if self.gpu_batch_split is None:\r\n self.gpu_batch_split = [self.batch_size//num_gpu] * num_gpu\r\n #\r\n str_info = \"make sure that num_gpu == len(self.gpu_batch_split)\"\r\n assert num_gpu == len(self.gpu_batch_split), str_info\r\n str_info = \"make sure that self.batch_size == sum(self.gpu_batch_split)\"\r\n assert self.batch_size == sum(self.gpu_batch_split), str_info\r\n \r\n # directories\r\n if self.model_dir is None:\r\n self.model_dir = os.path.join(self.base_dir, 'model_' + self.model_tag)\r\n if self.model_dir_best is None:\r\n self.model_dir_best = self.model_dir + \"_best\"\r\n if self.log_dir is None:\r\n self.log_dir = 
os.path.join(self.base_dir, 'log')\r\n #\r\n if not os.path.exists(self.base_dir): os.mkdir(self.base_dir)\r\n if not os.path.exists(self.model_dir): os.mkdir(self.model_dir)\r\n if not os.path.exists(self.model_dir_best): os.mkdir(self.model_dir_best)\r\n if not os.path.exists(self.log_dir): os.mkdir(self.log_dir)\r\n #\r\n # files\r\n if self.model_name is None:\r\n self.model_name = 'model_' + self.model_tag\r\n if self.pb_file is None:\r\n self.pb_file = os.path.join(self.model_dir_best, 'model_frozen.pb')\r\n #\r\n # logger\r\n str_datetime = time.strftime(\"%Y-%m-%d-%H-%M-%S\")\r\n if self.log_path is None: self.log_path = os.path.join(\r\n self.log_dir, self.model_name + \"_\" + str_datetime +\".txt\")\r\n #\r\n self.logger = self.create_logger(self.log_path)\r\n print(\"settings checked\")\r\n #\r\n self.logger.info(self.trans_info_to_dict())\r\n self.display()\r\n #\r", "title": "" }, { "docid": "84a7d7e512876591a5bc705ff36f1fa7", "score": "0.4752472", "text": "def test_init(self):\n\n exclude_mods=['KnownRVSurvey', 'tieredScheduler']\n\n required_modules = [\n 'BackgroundSources', 'Completeness', 'Observatory', 'OpticalSystem',\n 'PlanetPhysicalModel', 'PlanetPopulation', 'PostProcessing', \n 'SimulatedUniverse', 'TargetList', 'TimeKeeping', 'ZodiacalLight' ]\n \n for mod in self.allmods:\n if mod.__name__ in exclude_mods:\n continue\n \n with RedirectStreams(stdout=self.dev_null):\n sim = mod(scriptfile=self.script)\n\n self.assertIsInstance(sim._outspec, dict)\n # check for presence of a couple of class attributes\n self.assertIn('DRM', sim.__dict__)\n\n for rmod in required_modules:\n self.assertIn(rmod, sim.__dict__)\n self.assertEqual(getattr(sim,rmod)._modtype,rmod)", "title": "" }, { "docid": "ec58c344a79cda92e71fba32780f65be", "score": "0.47488916", "text": "def test_init(self):\n ini_gr = GolGrid()\n gol = game_of_life.GameOfLife(rule_sets.RuleSetStandard(), ini_gr)\n # Assert the game engine has been initialised.\n assert gol\n\n rs = gol._rule_set\n # Assert the game engine's rule set has been correctly initialised.\n assert isinstance(rs, rule_sets.RuleSetStandard)", "title": "" }, { "docid": "11ad36e291f7713f1219d30788cf300f", "score": "0.47398448", "text": "def get_channels(self):\n self._read_data(self.data_path)\n\n print(\"The following channels were found in the file:\")\n for channel in self.parm_dict['channels']:\n print(channel)\n\n print('You may specify which channels to use when calling translate.')\n\n return", "title": "" }, { "docid": "744e989d5ee281bdbf47a6afdc2b01f1", "score": "0.47342414", "text": "def test_default(self):\n measure_channel = MeasureChannel(123)\n\n self.assertEqual(measure_channel.index, 123)\n self.assertEqual(measure_channel.name, 'm123')", "title": "" }, { "docid": "d851862239d0037b2a054fd0ef515329", "score": "0.4733378", "text": "def validate(self):\n variables = ['thickness', 'bedTopography', 'iceMask',\n 'beta']\n compare_variables(test_case=self, variables=variables,\n filename1='mesh/Thwaites.nc')", "title": "" }, { "docid": "1125452ad8ef74677eb81ed6350631db", "score": "0.47322938", "text": "def test_read1(self):\n self.transmitter.read_channel_method = 0\n\n self.transmitter.open()\n channel = self.transmitter.channels[0]\n\n with self.assertRaises(InvalidReadMethodError):\n channel.read()", "title": "" }, { "docid": "f945d4928fa9eb1f25aeedb460d77ad8", "score": "0.47290015", "text": "def test_get_channel_rect(self):\r\n system_rect = RECT()\r\n system_rect.left = 8\r\n system_rect.top = 19\r\n system_rect.right = 249\r\n 
system_rect.bottom = 23\r\n self.assert_channel_rect(self.ctrl.get_channel_rect(), system_rect)", "title": "" }, { "docid": "21b07e3aec4c2c23726143b20d15bfbc", "score": "0.47284964", "text": "def test_get_channel_configuration(self):\n pass", "title": "" }, { "docid": "31c48e2c12bbb701f1e79f16cdc4b858", "score": "0.472598", "text": "def testInit(self):\n self.assertEqual(self.c1.c, u' ')\n self.assertEqual(self.c1.f, DEFAULT_FORE_COLOR)\n self.assertEqual(self.c1.b, DEFAULT_BACK_COLOR)\n self.assertEqual(self.c1.r, DEFAULT_RENDITION)", "title": "" }, { "docid": "d76cc9528bb63c4efd32b9dd768cc18f", "score": "0.4725825", "text": "def test_get_channel_exist(self):\n url = '/' + version + '/collection/col1/experiment/exp1/channel/channel1/'\n\n # Get an existing experiment\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.data['name'], 'channel1')\n self.assertEqual(response.data['downsample_status'], 'NOT_DOWNSAMPLED')", "title": "" }, { "docid": "8431d85f525aec858376f81680d06c8f", "score": "0.47236434", "text": "def _setup(self):\n self.reset()\n self._spawn_mugs()", "title": "" }, { "docid": "941b9b4416626a7276d36c5cbd5b0ba1", "score": "0.47231436", "text": "def test_initialization(self):\n f = frames.StandardFrame()\n assert(all([f.roll_one is None, f.roll_two is None, f.frame_score is None, f.is_strike is False,\n f.is_spare is False, f.frame_complete() is False, not hasattr(f, 'roll_three')]))", "title": "" }, { "docid": "f5b2b7dbe824f3226ffb556ef84fcb44", "score": "0.47209316", "text": "def test_send_message_channels(self):\n self.assertEqual(self.send_message_channels_static,\n self.send_message_channels)", "title": "" }, { "docid": "d82bebad2773b66dcb813688e276f924", "score": "0.47138196", "text": "def test_initialize(self):\n self.game_controller.initialize()\n # The spaces which the random tiles occupy are based on the random gene-\n # rator seed and thus are always equal in tests.\n self.assertEqual(\n self.game_field.field_data[2][1].tile,\n self.tile_collection.get_tile('value', value=2)\n )\n self.assertEqual(\n self.game_field.field_data[2][3].tile,\n self.tile_collection.get_tile('value', value=4)\n )", "title": "" }, { "docid": "89558e49d175ffb571f6b74f723a6dfb", "score": "0.4711043", "text": "def testIsVisualizationModuleLoaded(self):\n\t\tpass", "title": "" }, { "docid": "001539a0b4e3904d43a076b1357ad930", "score": "0.47097236", "text": "def test_one_channel():\n viewer = ViewerModel()\n np.random.seed(0)\n data = np.random.random((15, 10, 1))\n viewer.add_image(data, channel_axis=-1)\n assert len(viewer.layers) == data.shape[-1]\n for i in range(data.shape[-1]):\n assert np.all(viewer.layers[i].data == data.take(i, axis=-1))\n assert viewer.layers[i].colormap[0] == two_colormaps[i]", "title": "" }, { "docid": "bb150aefb74ae521ee2c7101e4f4ae78", "score": "0.47011745", "text": "def _check_game_correctly_initiated(self):\n raise NotImplementedError", "title": "" }, { "docid": "f1947533bae4ccc469356f0fc0f7a20c", "score": "0.4699429", "text": "def test_basic(self):\n result = ResNorm(ResolutionWorkspace=self._res_ws,\n VanadiumWorkspace=self._van_ws,\n Version=2)\n self._validate_result(result)", "title": "" }, { "docid": "fbbfb2f16d868469de4976d98f3beb52", "score": "0.4696081", "text": "def test_create_channel_configuration(self):\n pass", "title": "" }, { "docid": "0b139523c1901a556c137c95dc1ee4d1", "score": "0.469584", "text": "def sanity_check(self):\n pass", "title": "" }, { "docid": 
"57070faeee97c142aa4b33ee38e3a6ad", "score": "0.46877772", "text": "def test_error_shown_camera_module_wrong(self, tmp_path, controller):\n\n # always set both values, otherwise the controller will ask for the \n # missing value which is not intendet in this test\n controller.configuration.setValue(pylo.controller.CONFIG_DEVICE_GROUP, \n \"camera\", \n \"NoFileDummyCamera\")\n controller.configuration.setValue(pylo.controller.CONFIG_DEVICE_GROUP, \n \"microscope\", \"Dummy Microscope\")\n\n with pytest.raises(DummyViewShowsError):\n # DummyView raises DummyViewShowsError when showError() is called\n self.init_start_program_test(controller, tmp_path, change_camera=False)\n\n found = False\n for e in controller.view.error_log:\n if \"Could not import the device 'NoFileDummyCamera'\" in str(e[0]):\n found = True\n break\n \n assert found", "title": "" }, { "docid": "23af51c0fafb3a1fa2b696b9e2b4e68b", "score": "0.46829215", "text": "def IsRenderSupported(self, vtkRenderWindow, vtkVolumeProperty):\n ...", "title": "" }, { "docid": "2c7f3ed0fa6ae35fa6bbba7c4e21d362", "score": "0.46767727", "text": "def check(self, *chs):\n if not chs:\n chs = range(1, self._num_channels+1)\n if self.tree.shot != -1:\n raise MDSplus.TreeNOWRITESHOT # only check model trees\n if self.site == 1 and not self.on:\n raise MDSplus.DevINV_SETUP # module 1 is master module\n pre, post = self._pre, self._post\n for ch in chs:\n if not self.getchannel(ch).on:\n continue\n rang = self._range(ch)\n if not isinstance(rang, MDSplus.Range) or rang.delta < 1:\n raise MDSplus.DevRANGE_MISMATCH\n if -pre > rang.begin:\n raise MDSplus.DevBAD_STARTIDX\n if post < rang.ending:\n raise MDSplus.DevBAD_ENDIDX\n return chs", "title": "" }, { "docid": "2c7f3ed0fa6ae35fa6bbba7c4e21d362", "score": "0.46767727", "text": "def check(self, *chs):\n if not chs:\n chs = range(1, self._num_channels+1)\n if self.tree.shot != -1:\n raise MDSplus.TreeNOWRITESHOT # only check model trees\n if self.site == 1 and not self.on:\n raise MDSplus.DevINV_SETUP # module 1 is master module\n pre, post = self._pre, self._post\n for ch in chs:\n if not self.getchannel(ch).on:\n continue\n rang = self._range(ch)\n if not isinstance(rang, MDSplus.Range) or rang.delta < 1:\n raise MDSplus.DevRANGE_MISMATCH\n if -pre > rang.begin:\n raise MDSplus.DevBAD_STARTIDX\n if post < rang.ending:\n raise MDSplus.DevBAD_ENDIDX\n return chs", "title": "" }, { "docid": "de8dfa9eb33f14887edd3bfd9468bd3e", "score": "0.46707767", "text": "def test_list_cubes_correct_values(self):\n cm = model_registry.ModelRegistry()\n models = list(cm.list_models())\n self.assertEquals(len(models), 1, 'no dataset was loaded')\n self.assertEquals(models[0], MODEL_NAME, 'dataset with wrong name')", "title": "" }, { "docid": "e239d4c15563e157cd670d70a390d2ac", "score": "0.46688938", "text": "def test_generate_gpu_scene_with_references_before_generating_gpu_of_references_first(\n create_test_data,\n store_local_session,\n create_pymel,\n create_maya_env,\n):\n data = create_test_data\n gen = RepresentationGenerator(version=data[\"building1_yapi_look_dev_main_v001\"])\n\n with pytest.raises(RuntimeError) as cm:\n gen.generate_gpu()\n\n assert str(\n cm.value\n ) == \"Please generate the GPU Representation of the references first!!!\\n{}\".format(\n data[\"building1_yapi_model_main_v003\"].absolute_full_path\n )", "title": "" }, { "docid": "b6d18119dda61fba9550852b0f80dc43", "score": "0.4667882", "text": "def _check_before_run(self):\n if not osp.exists(self.train_dir_img):\n raise RuntimeError(\"'{}' is 
not available\".format(self.train_dir_img))\n if not osp.exists(self.train_dir_den):\n raise RuntimeError(\"'{}' is not available\".format(self.train_dir_den))\n if not osp.exists(self.test_dir_img):\n raise RuntimeError(\"'{}' is not available\".format(self.test_dir_img))\n if not osp.exists(self.test_dir_den):\n raise RuntimeError(\"'{}' is not available\".format(self.test_dir_den))", "title": "" }, { "docid": "c9121eab1987a8f9b064a52476901c4d", "score": "0.4664668", "text": "def test_init(self):\n # Test energy compatibility\n width_orig = self._backgrounds[0]._energy_width\n self._backgrounds[0]._energy_width *= 0.5\n self.assertRaises(CompatibilityError,\n limit_setting.LimitSetting,\n self._signal,\n self._backgrounds)\n self._backgrounds[0]._energy_width = width_orig\n\n # Test radial compatibility\n width_orig = self._backgrounds[0]._radial_width\n self._backgrounds[0]._radial_width *= 0.5\n self.assertRaises(CompatibilityError,\n limit_setting.LimitSetting,\n self._signal,\n self._backgrounds)\n self._backgrounds[0]._radial_width = width_orig\n\n # Test time compatibility\n width_orig = self._backgrounds[0]._time_width\n self._backgrounds[0]._time_width *= 0.5\n self.assertRaises(CompatibilityError,\n limit_setting.LimitSetting,\n self._signal,\n self._backgrounds)\n self._backgrounds[0]._time_width = width_orig", "title": "" }, { "docid": "c53f2a58c367c706e58a798223673676", "score": "0.46646097", "text": "def _check_registered(self):\n if self._socket_client is None:\n raise ControllerNotRegistered(\n (\n \"Trying to use the controller without it being registered \"\n \"with a Cast object.\"\n )\n )", "title": "" }, { "docid": "5fa50e7f3b7867ee2b72878f7a9f8a00", "score": "0.46623838", "text": "def pre_check(self):\n if ( len(self.teams) is not len(self.metrics) ) or \\\n ( len(self.teams) is not len(self.colors) ) or \\\n ( len(self.metrics) is not len(self.colors) ):\n print(\"Pre-check condition not met.\")\n print(\"Please check teams, metrics or colors lists for appropriate length and try again! Aborting ...\")\n sys.exit(1)\n else:\n print(\"Pre-check condition valid. 
Continue procedure ...\\n\")", "title": "" }, { "docid": "79da4d1c8ed71a1dcf3282d41cc5811b", "score": "0.4658781", "text": "def test_can_create():\n NGLViewer(object=\"1CRN\", background=\"yellow\", height=500)", "title": "" }, { "docid": "2f6cec8d2b79a69351e1a6093a911b81", "score": "0.4656259", "text": "def minimal_init():\n init_sound(config.num_channels)\n voice.init(config)\n set_screen(fullscreen)\n update_display_caption()\n pygame.key.set_repeat(500, 100)", "title": "" }, { "docid": "baae24c2e7f419344412a78818e086f0", "score": "0.465499", "text": "def test_microscope_and_camera_are_valid(self, tmp_path, controller):\n self.init_start_program_test(controller, tmp_path)\n\n # check mircoscope and camera are valid\n # this test does not work, objects have different classes because they\n # are loaded differently, does not matter in the \"real\" application\n # assert isinstance(controller.microscope, DummyMicroscope)\n # assert isinstance(controller.camera, DummyCamera)\n\n assert controller.microscope.__class__.__module__ in os.path.basename(__file__)\n assert controller.microscope.__class__.__name__ == \"DummyMicroscope\"\n\n assert controller.camera.__class__.__module__ in os.path.basename(__file__)\n assert controller.camera.__class__.__name__ == \"DummyCamera\"", "title": "" }, { "docid": "e852f09c7ebafc1407ce91f5a0c12bf1", "score": "0.46494904", "text": "async def test_setup_missing_basic_config(hass: HomeAssistant) -> None:\n assert await async_setup_component(\n hass, BINARY_SENSOR_DOMAIN, {BINARY_SENSOR_DOMAIN: {\"platform\": DOMAIN}}\n )\n await hass.async_block_till_done()\n assert len(hass.states.async_all(BINARY_SENSOR_DOMAIN)) == 0", "title": "" }, { "docid": "a907dd1b115f973f6ceeff1027580665", "score": "0.46481845", "text": "def test_configuration_loader_get_codec_valid_format_names(mock_empty_config_file, valid_codec_format_name) -> None:\n assert isinstance(Configuration().get_codec(valid_codec_format_name), Codec)", "title": "" }, { "docid": "31dd8e270817b963da201753350c9a7c", "score": "0.46480066", "text": "def detect_fail_channel(self):", "title": "" }, { "docid": "898cd0c2a5e7c2ad965b3bd546f41379", "score": "0.4645169", "text": "def test_default(self):\n control_channel = ControlChannel(123)\n\n self.assertEqual(control_channel.index, 123)\n self.assertEqual(control_channel.name, 'u123')", "title": "" }, { "docid": "8d41f7ab9ee5bd6dd08cab929f5787ff", "score": "0.46439725", "text": "def check_headers(self):\n\n # Check length of channels header\n n = len(self.channel_names)\n m = len(self.cols_to_process)\n if n != m:\n msg = (\n f\"Number of channel names specified ({n}) does not equal number of \"\n f\"channels to process ({m}) for {self.logger_id}.\"\n )\n raise LoggerError(msg)\n\n # Check length of units header\n n = len(self.channel_units)\n if n != m:\n msg = (\n f\"Number of units specified ({n}) does not equal number of \"\n f\"channels to process ({m}) for {self.logger_id}.\"\n )\n raise LoggerError(msg)", "title": "" }, { "docid": "11b0f3851bd156539cf21e700501b689", "score": "0.46435672", "text": "def checkIsValid(self):\r\n files = os.listdir(self.dir)\r\n has_ini = [f for f in files if f.endswith('variables.ini')]\r\n has_raw = [f for f in files if f.endswith('.raw')]\r\n if not (has_ini and has_raw):\r\n raise RuntimeError(\"Recording directory {} is not valid (does not have a metadata or image file).\"\r\n .format(self.dir))", "title": "" }, { "docid": "2945657f3cc2786e90da64bcc9eec273", "score": "0.46428514", "text": "def test_manual(self):\r\n 
fromfile = configParser.MultiWorldParser()\r\n fromfile.parse(\"test/data/settings/settings_test_1.py\")\r\n\r\n self.s.set_config_item(\"worlds\", {\r\n 'test': \"test/data/settings/test_world\",\r\n })\r\n self.s.set_config_item(\"renders\", OrderedDict([\r\n (\"myworld\", {\r\n \"title\": \"myworld title\",\r\n \"world\": \"test\",\r\n \"rendermode\": rendermodes.normal,\r\n \"northdirection\": \"upper-left\",\r\n }),\r\n\r\n (\"otherworld\", {\r\n \"title\": \"otherworld title\",\r\n \"world\": \"test\",\r\n \"rendermode\": rendermodes.normal,\r\n \"bgcolor\": \"#ffffff\"\r\n }),\r\n ]))\r\n self.s.set_config_item(\"outputdir\", \"/tmp/fictional/outputdir\")\r\n self.assertEquals(fromfile.get_validated_config(), self.s.get_validated_config())", "title": "" }, { "docid": "d13fe98642eb340411744ffd93ca6b4a", "score": "0.46341673", "text": "def checkinit():\n args = parse_args()\n paths = ScatterPath(args.infile)\n init = initdat(paths)\n\n fig, axes = plt.subplots(init.dim, 2)\n for d in range(init.dim):\n ax = axes[d, 0] if init.dim > 1 else axes[0]\n ax.hist(init.r0[:,d], bins=60)\n ax = axes[d, 1] if init.dim > 1 else axes[1]\n ax.hist(init.p0[:, d], bins=60)\n\n ## -- save fig -- ##\n if not args.save and HAVE_DISPLAY:\n plt.show()\n else:\n saveto = 'init.png' if not args.save else args.save\n fig.savefig(saveto, dpi=fig.dpi * 2)", "title": "" }, { "docid": "02c77a0bb7ae7c89ccd93591b6c95911", "score": "0.46196654", "text": "def test_GWspect_test(self):\n spec = GWSpecs()\n spec.test()\n self.assertEqual(len(spec.warnings), 0)\n self.assertEqual(len(spec.errors), 0)", "title": "" }, { "docid": "e16f63c394264270ef098ec9390cee71", "score": "0.46146637", "text": "async def load_data(self):\n\n await self.bot.wait_until_ready()\n guild = discord.utils.get(\n self.bot.guilds, name='Hatventures Community'\n )\n\n if guild is not None:\n channel = discord.utils.find(\n lambda c: c.name.startswith('hatbot'),\n guild.channels\n )\n feesh = discord.utils.get(guild.emojis, name='feesh')\n\n self.guild = guild\n self.channel_msg = channel\n self.feesh_emoji = feesh\n\n else:\n self.guild = None\n self.channel_msg = None\n self.feesh_emoji = ':fish:'", "title": "" }, { "docid": "16184164f2894a625a6eaa48bc8af3f6", "score": "0.46119907", "text": "def checkDisplay(self):\n try:\n wcs = self._display.readInfo()\n except:\n return False\n return True", "title": "" }, { "docid": "8987b1143bf12eaf134ed94ef9e0cafa", "score": "0.46118513", "text": "def check_init():\n global inited\n if inited == False:\n raise KeyError(\"Initial file name not provided. 
Please run init() first.\")\n return", "title": "" }, { "docid": "d035733cea6c8913bca47353b6a1cc9f", "score": "0.46107167", "text": "def prepare(self):\n # if `inception_pkl` is provided, read mean and cov stat\n if self.inception_pkl is not None and mmcv.is_filepath(\n self.inception_pkl):\n with open(self.inception_pkl, 'rb') as f:\n reference = pickle.load(f)\n self.real_mean = reference['mean']\n self.real_cov = reference['cov']\n mmcv.print_log(\n f'Load reference inception pkl from {self.inception_pkl}',\n 'mmgen')\n self.num_real_feeded = self.num_images", "title": "" }, { "docid": "41dd92da4e607983d6b7055d2d1cc969", "score": "0.46089545", "text": "def test_load_level_exception(self):\n\n from bravo.plugins.serializers import Alpha\n def raiser(self, level):\n raise SerializerReadException(\"testing\")\n self.patch(Alpha, \"load_level\", raiser)\n\n w = bravo.world.World(self.name)\n w.start()\n w.stop()", "title": "" }, { "docid": "3035321d734022a3259edfc20e99f316", "score": "0.46085367", "text": "def __init__(self, ch_group):\n self.name = ch_group['name']\n self.position_set = False\n self.automation_blocks = ch_group.get('automation', [])\n self.velocity = VelocityFilter()\n self.clear_velocity() \n self.subgroups = ch_group.get('subgroups', [])\n self.position = SmoothVal(np.array([0,0,0]), 0.01)\n logging.debug(\"Creating channel descriptor %s:\\n%s\" % (self.name, yaml.dump(ch_group)))\n self.muted = ch_group.get('mute', False)\n self.gain = SmoothVal(ch_group.get('gain', 0.0), 0.01, linear=True)\n self.frequency = SmoothVal(ch_group.get('frequency', 1.0), 0.05) \n self.parent = None\n self.filter_val = SmoothVal(ch_group.get('filter', 48000), 0.01)\n lp_filter = system.create_dsp_by_type(FMOD_DSP_TYPE_LOWPASS) \n lp_filter.set_param(0, self.filter_val.state)\n self.sub_group_channels = []\n self.transient_channels = ch_group.get('transient_channels', 0)\n self.sub_channels = []\n self.automations = AutomationGroup()\n self.sounds = []\n self.group = system.create_channel_group(self.name) \n add_dsp_channel_group(self.group, lp_filter) \n self.lp_filter = lp_filter\n \n # attach to the master\n system.master_channel_group.add_group(self.group)\n \n for i in range(self.transient_channels): \n chan = new_channel()\n self.sub_channels.append(chan)\n \n self.update(0.0)", "title": "" }, { "docid": "99917d9b1500b3167b87adf3577735fc", "score": "0.4606006", "text": "def test_permutate_keeping_channel_order(self):\n values_r = [color.channels['red'] for color in self.palette.colors]\n values_g = [color.channels['green'] for color in self.palette.colors]\n values_b = [color.channels['blue'] for color in self.palette.colors]\n result = self.palette.retrieve_matching_palette()\n self.assertTrue(result.colors[0].channels['red'] in values_r)\n self.assertTrue(result.colors[0].channels['green'] in values_g)\n self.assertTrue(result.colors[0].channels['blue'] in values_b)", "title": "" }, { "docid": "908b17453df8670dfdc6815bbd614d49", "score": "0.46054354", "text": "def test_init(self):\n\n req_atts = [\n \"missionStart\",\n \"missionPortion\",\n \"missionLife\",\n \"missionFinishAbs\",\n \"currentTimeNorm\",\n \"currentTimeAbs\",\n \"OBnumber\",\n \"OBduration\",\n \"OBstartTimes\",\n \"OBendTimes\",\n \"cachedir\",\n ]\n\n for mod in self.allmods:\n with RedirectStreams(stdout=self.dev_null):\n obj = mod(**copy.deepcopy(self.spec))\n\n # verify that all attributes are there\n for att in req_atts:\n self.assertTrue(\n hasattr(obj, att),\n \"Missing attribute {} for {}\".format(att, 
mod.__name__),\n )", "title": "" }, { "docid": "94ca49d9b3be42ff16832639efe7bcf1", "score": "0.45973316", "text": "def test_default(self):\n acquire_channel = AcquireChannel(123)\n\n self.assertEqual(acquire_channel.index, 123)\n self.assertEqual(acquire_channel.name, 'a123')", "title": "" }, { "docid": "77eea398b29a9c4d82b31dc3dc8c84fe", "score": "0.45972517", "text": "def check_properlysetup(self):\n if self.readertype is None or self.readertype is ReaderType.NEITHER or self.targeturl is None or self.reader is None:\n return False\n else:\n return True", "title": "" }, { "docid": "e0c799c62fea5314d377c6866b2a9d28", "score": "0.45921466", "text": "def test_load(self):\n\t\tg = Game()\n\t\tg.ruleset = ruleset", "title": "" }, { "docid": "3126d8ee38e0ab7a184622307bbbf445", "score": "0.45909813", "text": "def check_channel_scaling(self,channel_number=1):\n #Get and convert data usual way\n series=self.get_multiple_traces(n_traces=1,\n channel_number=channel_number)\n converted_data=series.converted_data[0,0]\n #Get data converted by scope to ascii string for comparison\n self.write(\"WAV:FORM ASCII\")\n self.write(\":WAV:DATA? CHAN%d\" % channel_number)\n number_digits=self.read(2)\n number_digits=int(number_digits[1])\n number_data_points=int(self.read(number_digits))\n ascii_data= self.read(25).split(',')[0]\n #Print data for comparison\n print \"ASCII: %s, and converted: %f\" % (ascii_data, converted_data)\n #Unlock scope\n self.unlock()\n return", "title": "" }, { "docid": "775acdd73599ffe4a6acc23dae340430", "score": "0.4587437", "text": "def _display_setup(self):\n display_file = \"{}/display.json\".format(self.settings_dir)\n with open(display_file) as json_file:\n win_settings = json.load(json_file)\n self.win = visual.Window(**win_settings)\n self.mouse = event.Mouse(visible=False, win=self.win)", "title": "" }, { "docid": "1115263b3275d8574b2564cf3c026fd7", "score": "0.45766273", "text": "def __init__(self):\n self.channels = []\n self.chan_mask = np.full(MAX_CHANS, -1)", "title": "" }, { "docid": "21edbbf6b8a48f7df612b12b7417f1e8", "score": "0.45724767", "text": "def get_num_channels (self):\n return len([True for v in self._channels_setup if v[0]])", "title": "" }, { "docid": "0110240a6c79a8b49d9d95eeb1af3305", "score": "0.456968", "text": "def test_configuration_loader_get_codec_valid_formats(mock_empty_config_file, valid_codec_format) -> None:\n assert isinstance(Configuration().get_codec(valid_codec_format), Codec)", "title": "" }, { "docid": "f3dbdbbf551acf025f48adf84d297bea", "score": "0.45689282", "text": "def verifyConfiguration(self):\n logEvent = \"%sverify\" % self._loggingPrefix\n self._eventLogger.eventBegin(logEvent)\n\n Integrator.verifyConfiguration(self)\n self.materialObj.verifyConfiguration()\n\n if self.mesh().dimension() != self.materialObj.dimension():\n raise ValueError(\"Mesh dimension is '%d' but material '%s' of type \" \\\n \"'%s' applies to dimension '%d'.\" % \\\n (self.mesh().dimension(),\n self.materialObj.label(),\n self.materialObj,\n self.materialObj.dimension()))\n self._verifyConfiguration()\n self.output.verifyConfiguration(self.mesh())\n\n self._eventLogger.eventEnd(logEvent) \n return", "title": "" }, { "docid": "57ccf6915e28346a368f5fadc9028950", "score": "0.45633274", "text": "def test_vs_level_status_false(self, cleanup):\n netscaler_conv(config_file_name=setup.get('config_file_name'),\n controller_version=setup.get('controller_version_v17'))", "title": "" }, { "docid": "bdef447e7e955b2396526a43eca13792", "score": "0.45625502", "text": "def 
test():\n\t\tglobal _TEST_FRAME_\n\t\t_TEST_FRAME_ = dict(zip(\n\t\t\t_RECOGNIZED_ELEMENTS_,\n\t\t\tlen(_RECOGNIZED_ELEMENTS_) * [0.]\n\t\t))\n\t\ttry:\n\t\t\t_TEST_FRAME_ = evolutionary_settings(_TEST_FRAME_, \"test\")\n\t\texcept:\n\t\t\treturn False\n\t\treturn isinstance(_TEST_FRAME_, evolutionary_settings)", "title": "" }, { "docid": "bdae01004c5e6bc413cf05d84cf90ece", "score": "0.45585018", "text": "def num_channels(self):\n return 3", "title": "" }, { "docid": "b2bf70ec5e73aa109a33e4242ac7a40b", "score": "0.45563757", "text": "def test_RandomJitterColorChannels(self):\n t = preprocessing.RandomJitterColorChannels(max_shift=2)\n image_transformed = t(self.image)\n\n # Check if image has the right mode\n self.assertEqual('RGB', image_transformed.mode)\n\n # Check if image has still the old size\n self.assertEqual(self.image.size, image_transformed.size)", "title": "" }, { "docid": "82195f17b7aa21ae0ef30c8d5735426c", "score": "0.4555458", "text": "def test_control_channel(self):\n with pulse.build(self.backend):\n self.assertEqual(pulse.control_channels(0, 1)[0], pulse.ControlChannel(0))", "title": "" } ]
772ff500c953b77f98c57c3de2a8c6fb
Removes detections with lower score than 'score_thres' and performs Non-Maximum Suppression to further filter detections.
[ { "docid": "9409c3f76fb8eaac6f934602edc839a7", "score": "0.70985776", "text": "def non_max_suppression(prediction,\n num_classes,\n score_thres=0.5,\n nms_thres=0.4):\n\n # From (center x, center y, width, height) to (x1, y1, x2, y2)\n box_corner = prediction.new(prediction.shape)\n box_corner[..., 0] = prediction[..., 0] - prediction[..., 2] / 2\n box_corner[..., 1] = prediction[..., 1] - prediction[..., 3] / 2\n box_corner[..., 2] = prediction[..., 0] + prediction[..., 2] / 2\n box_corner[..., 3] = prediction[..., 1] + prediction[..., 3] / 2\n prediction[..., :4] = box_corner[..., :4]\n\n output = [None for _ in range(len(prediction))]\n for image_i, image_pred in enumerate(prediction):\n # Get scores for every box\n # xyxy = image_pred[:, :4].view(-1, 4)\n conf_prob = image_pred[:, 4].view(-1, 1)\n cls_prob = image_pred[:, 5:].view(-1, num_classes)\n score = conf_prob * cls_prob\n # Mask\n # conf_mask = conf_prob >= score_thres\n # cls_prob_max = torch.max(cls_prob, 1, keepdim=True)[0]\n # cls_mask = cls_prob_max >= score_thres\n # score_mask = (conf_mask * cls_mask).squeeze()\n # Mask out boxes with lower score\n score_max = torch.max(score, 1, keepdim=False)[0]\n score_mask = (score_max >= score_thres)\n image_pred = image_pred[score_mask]\n score = score[score_mask]\n # If none are remaining => process next image\n if not image_pred.size(0):\n continue\n # Get score and class with highest confidence\n class_score, class_pred = torch.max(score, 1, keepdim=True)\n # Detections ordered as (x1, y1, x2, y2, obj_prob, cls_score, cls_pred)\n # entries must be the same type in a Tensor\n detections = torch.cat(\n (image_pred[:, :5], class_score.float(), class_pred.float()), 1)\n\n # Iterate through all predicted classes\n unique_labels = detections[:, -1].cpu().unique()\n if prediction.is_cuda:\n unique_labels = unique_labels.cuda()\n for c in unique_labels:\n # Get the detections with the particular class\n detections_class = detections[detections[:, -1] == c]\n # Sort the detections by\n # score (p(obj)*p(cls|obj)) rather than cls_prob!!!\n _, conf_sort_index = torch.sort(detections_class[:, 5],\n descending=True)\n detections_class = detections_class[conf_sort_index]\n\n # Perform non-maximum suppression\n max_detections = []\n while detections_class.size(0):\n # Get detection with highest confidence\n max_detections.append(detections_class[0].unsqueeze(0))\n # Stop if we're at the last detection\n if len(detections_class) == 1:\n break\n # Get the IOUs for all boxes with lower confidence\n ious = bbox_iou(max_detections[-1], detections_class[1:])\n # Remove detections with IoU >= NMS threshold\n detections_class = detections_class[1:][ious < nms_thres]\n\n max_detections = torch.cat(max_detections).data\n\n # Add max detections to outputs\n if output[image_i] is None:\n output[image_i] = max_detections\n else:\n output[image_i] = torch.cat((output[image_i], max_detections))\n\n return output # Its entry could be None or torch.Tensor", "title": "" } ]
[ { "docid": "7217d95319326f924337efc1a6a0a023", "score": "0.68844444", "text": "def non_max_suppression(prediction, conf_thres=0.5, nms_thres=0.4):\n\n # From (center x, center y, width, height) to (x1, y1, x2, y2)\n prediction[..., :4] = xywh2xyxy(prediction[..., :4])\n output = [None for _ in range(len(prediction))]\n for image_i, image_pred in enumerate(prediction):\n # Filter out confidence scores below threshold\n image_pred = image_pred[image_pred[:, 4] >= conf_thres]\n # If none are remaining => process next image\n if not image_pred.size(0):\n continue\n # Object confidence times class confidence\n score = image_pred[:, 4] * image_pred[:, 5:].max(1)[0]\n # Sort by it\n image_pred = image_pred[(-score).argsort()]\n class_confs, class_preds = image_pred[:, 5:].max(1, keepdim=True)\n detections = torch.cat((image_pred[:, :5], class_confs.float(), class_preds.float()), 1)\n # Perform non-maximum suppression\n keep_boxes = []\n while detections.size(0):\n large_overlap = bbox_iou(detections[0, :4].unsqueeze(0), detections[:, :4]) > nms_thres\n label_match = detections[0, -1] == detections[:, -1]\n # Indices of boxes with lower confidence scores, large IOUs and matching labels\n invalid = large_overlap & label_match\n weights = detections[invalid, 4:5]\n # Merge overlapping bboxes by order of confidence\n detections[0, :4] = (weights * detections[invalid, :4]).sum(0) / weights.sum()\n keep_boxes += [detections[0]]\n detections = detections[~invalid]\n if keep_boxes:\n output[image_i] = torch.stack(keep_boxes)\n\n return output", "title": "" }, { "docid": "8eee32bd9c783e534f4261dbfdcc294b", "score": "0.68122953", "text": "def non_max_suppression(prediction, num_classes, conf_thres=0.5, nms_thres=0.4):\n\n # From (center x, center y, width, height) to (x1, y1, x2, y2)\n box_corner = prediction.new(prediction.shape)\n box_corner[:, :, 0] = prediction[:, :, 0] - prediction[:, :, 2] / 2\n box_corner[:, :, 1] = prediction[:, :, 1] - prediction[:, :, 3] / 2\n box_corner[:, :, 2] = prediction[:, :, 0] + prediction[:, :, 2] / 2\n box_corner[:, :, 3] = prediction[:, :, 1] + prediction[:, :, 3] / 2\n prediction[:, :, :4] = box_corner[:, :, :4]\n\n output = [None for _ in range(len(prediction))]\n for image_i, image_pred in enumerate(prediction):\n # Filter out confidence scores below threshold\n conf_mask = (image_pred[:, 4] >= conf_thres).squeeze()\n image_pred = image_pred[conf_mask]\n # If none are remaining => process next image\n if not image_pred.size(0):\n continue\n # Get score and class with highest confidence\n class_conf, class_pred = torch.max(image_pred[:, 5 : 5 + num_classes], 1, keepdim=True)\n # Detections ordered as (x1, y1, x2, y2, obj_conf, class_conf, class_pred)\n detections = torch.cat((image_pred[:, :5], class_conf.float(), class_pred.float()), 1)\n # Iterate through all predicted classes\n unique_labels = detections[:, -1].cpu().unique()\n if prediction.is_cuda:\n unique_labels = unique_labels.cuda()\n for c in unique_labels:\n # Get the detections with the particular class\n detections_class = detections[detections[:, -1] == c]\n # Sort the detections by maximum objectness confidence\n _, conf_sort_index = torch.sort(detections_class[:, 4], descending=True)\n detections_class = detections_class[conf_sort_index]\n # Perform non-maximum suppression\n max_detections = []\n while detections_class.size(0):\n # Get detection with highest confidence and save as max detection\n max_detections.append(detections_class[0].unsqueeze(0))\n # Stop if we're at the last detection\n if 
len(detections_class) == 1:\n break\n # Get the IOUs for all boxes with lower confidence\n ious = bbox_iou(max_detections[-1], detections_class[1:])\n # Remove detections with IoU >= NMS threshold\n detections_class = detections_class[1:][ious < nms_thres]\n\n max_detections = torch.cat(max_detections).data\n # Add max detections to outputs\n output[image_i] = (\n max_detections if output[image_i] is None else torch.cat((output[image_i], max_detections))\n )\n\n return output", "title": "" }, { "docid": "2c5efb4591043d72e22b40dec086671d", "score": "0.6796", "text": "def non_max_suppression(self, filtered_boxes, box_classes, box_scores):", "title": "" }, { "docid": "f7eed58d68958be4d46103fc734187a0", "score": "0.643234", "text": "def get_detections_after_soft_non_maximum_suppression(detections, sigma, score_threshold):\n\n areas = (detections[:, 2] - detections[:, 0] + 1) * (detections[:, 3] - detections[:, 1] + 1)\n # expand detections with areas, so that the second dimension is\n # x_min, y_min, x_max, y_max, score, area\n detections = np.concatenate([detections, areas.reshape(-1, 1)], axis=1)\n\n retained_detections = []\n\n while detections.size > 0:\n\n # Get index for detection with max score, then swap that detection with detection at index 0.\n # This way we will get detection with max score at index 0 in detections array\n max_score_index = np.argmax(detections[:, 4], axis=0)\n detections[[0, max_score_index]] = detections[[max_score_index, 0]]\n\n # Save max score detection to retained detections\n retained_detections.append(detections[0])\n\n # Compute intersection over union between top score box and all other boxes\n min_x = np.maximum(detections[0, 0], detections[1:, 0])\n min_y = np.maximum(detections[0, 1], detections[1:, 1])\n max_x = np.minimum(detections[0, 2], detections[1:, 2])\n max_y = np.minimum(detections[0, 3], detections[1:, 3])\n\n overlap_width = np.maximum(max_x - min_x + 1, 0.0)\n overlap_height = np.maximum(max_y - min_y + 1, 0.0)\n\n intersection_area = overlap_width * overlap_height\n intersection_over_union = intersection_area / (detections[0, 5] + detections[1:, 5] - intersection_area)\n\n # Update detections scores for all detections other than max score - we don't want to affect its score.\n # Scores are updated using an exponential function such that detections that have no intersection with top\n # score detection aren't affected, and boxes that have iou of 1 with top score detection have their\n # scores set to zero\n detections[1:, 4] *= np.exp(-(intersection_over_union * intersection_over_union) / sigma)\n\n # Discard detections with scores below score threshold. 
Take care to shift indices by +1 to account for fact\n # we are leaving out top score detection at index 0\n retained_detections_indices = np.where(detections[1:, 4] >= score_threshold)[0] + 1\n detections = detections[retained_detections_indices]\n\n return np.array(retained_detections)", "title": "" }, { "docid": "aa5874af1ce7f57320e4fea3ae230f90", "score": "0.6274116", "text": "def non_max_suppression(self, filtered_boxes, box_classes, box_scores):\n tmp_boxes = []\n tmp_classes = []\n tmp_scores = []\n\n for clase in np.unique(box_classes):\n indexes = np.where(box_classes == clase)\n boxes_ofclas = filtered_boxes[indexes]\n classes_ofclas = box_classes[indexes]\n scores_ofclas = box_scores[indexes]\n\n x1 = boxes_ofclas[:, 0]\n y1 = boxes_ofclas[:, 1]\n x2 = boxes_ofclas[:, 2]\n y2 = boxes_ofclas[:, 3]\n\n areas = (x2 - x1 + 1) * (y2 - y1 + 1)\n order = scores_ofclas.argsort()[::-1]\n\n keep = []\n while order.size > 0:\n i = order[0]\n keep.append(i)\n xx1 = np.maximum(x1[i], x1[order[1:]])\n yy1 = np.maximum(y1[i], y1[order[1:]])\n xx2 = np.minimum(x2[i], x2[order[1:]])\n yy2 = np.minimum(y2[i], y2[order[1:]])\n\n w = np.maximum(0.0, xx2 - xx1 + 1)\n h = np.maximum(0.0, yy2 - yy1 + 1)\n inter = w * h\n ovr = inter / (areas[i] + areas[order[1:]] - inter)\n\n inds = np.where(ovr <= self.nms_t)[0]\n order = order[inds + 1]\n\n tmp_boxes.append(boxes_ofclas[keep])\n tmp_classes.append(classes_ofclas[keep])\n tmp_scores.append(scores_ofclas[keep])\n\n boxes_predic = np.concatenate(tmp_boxes, axis=0)\n classes_predic = np.concatenate(tmp_classes, axis=0)\n scores_predic = np.concatenate(tmp_scores, axis=0)\n\n return boxes_predic, classes_predic, scores_predic", "title": "" }, { "docid": "2139603a72953299368fe24111a33a04", "score": "0.61954445", "text": "def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, labels=()):\n nc = prediction.shape[2] - 5\n xc = prediction[..., 4] > conf_thres\n min_wh, max_wh = 2, 4096\n time_limit = 10.0\n redundant = True\n multi_label = nc > 1\n merge = False\n t = time.time()\n output = [torch.zeros((0, 6), device=prediction.device)] * prediction.shape[0]\n for xi, x in enumerate(prediction):\n x = x[xc[xi]]\n if labels and len(labels[xi]):\n l = labels[xi]\n v = torch.zeros((len(l), nc + 5), device=x.device)\n v[:, :4] = l[:, 1:5]\n v[:, 4] = 1.0\n v[range(len(l)), l[:, 0].long() + 5] = 1.0\n x = torch.cat((x, v), 0)\n if not x.shape[0]:\n continue\n x[:, 5:] *= x[:, 4:5]\n box = xywh2xyxy(x[:, :4])\n if multi_label:\n i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T\n x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)\n else:\n conf, j = x[:, 5:].max(1, keepdim=True)\n x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]\n if classes is not None:\n x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]\n n = x.shape[0]\n if not n:\n continue\n x = x[x[:, 4].argsort(descending=True)]\n c = x[:, 5:6] * (0 if agnostic else max_wh)\n boxes, scores = x[:, :4] + c, x[:, 4]\n i = torchvision.ops.nms(boxes, scores, iou_thres)\n if merge and 1 < n < 3000.0:\n iou = box_iou(boxes[i], boxes) > iou_thres\n weights = iou * scores[None]\n x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True)\n if redundant:\n i = i[iou.sum(1) > 1]\n output[xi] = x[i]\n if time.time() - t > time_limit:\n None\n break\n return output", "title": "" }, { "docid": "b89e5cb5e674c5b29db56d45fa3f4a0b", "score": "0.61135346", "text": "def nonmaxsup(scores, ksize):\n 
suppressed = np.copy(scores)\n filtered = maximum_filter(suppressed, (ksize, ksize))\n maxima = (suppressed == filtered)\n suppressed[np.logical_not(maxima)] = 0\n return suppressed", "title": "" }, { "docid": "40b196cf49a05de092073a75b5e2d64f", "score": "0.6053268", "text": "def my_nms(pred,nms_thres=0.5):\n # Get detections sorted by decreasing confidence scores\n pred = pred[(-pred[:, 4]).argsort()]\n\n det_max = []\n nms_style = 'MERGE' # 'OR' (default), 'AND', 'MERGE' (experimental)\n for c in pred[:, -1].unique():\n dc = pred[pred[:, -1] == c] # select class c\n n = len(dc)\n if n == 1:\n det_max.append(dc) # No NMS required if only 1 prediction\n continue\n elif n > 100:\n dc = dc[:100] # limit to first 100 boxes: https://github.com/ultralytics/yolov3/issues/117\n\n # Non-maximum suppression\n if nms_style == 'OR': # default\n # METHOD1\n # ind = list(range(len(dc)))\n # while len(ind):\n # j = ind[0]\n # det_max.append(dc[j:j + 1]) # save highest conf detection\n # reject = (bbox_iou(dc[j], dc[ind]) > nms_thres).nonzero()\n # [ind.pop(i) for i in reversed(reject)]\n\n # METHOD2\n while dc.shape[0]:\n det_max.append(dc[:1]) # save highest conf detection\n if len(dc) == 1: # Stop if we're at the last detection\n break\n iou = bbox_iou(dc[0], dc[1:]) # iou with other boxes\n dc = dc[1:][iou < nms_thres] # remove ious > threshold\n\n elif nms_style == 'AND': # requires overlap, single boxes erased\n while len(dc) > 1:\n iou = bbox_iou(dc[0], dc[1:]) # iou with other boxes\n if iou.max() > 0.5:\n det_max.append(dc[:1])\n dc = dc[1:][iou < nms_thres] # remove ious > threshold\n\n elif nms_style == 'MERGE': # weighted mixture box\n while len(dc):\n if len(dc) == 1:\n det_max.append(dc)\n break\n i = bbox_iou(dc[0], dc) > nms_thres # iou with other boxes\n weights = dc[i, 4:5]\n dc[0, :4] = (weights * dc[i, :4]).sum(0) / weights.sum()\n det_max.append(dc[:1])\n dc = dc[i == 0]\n\n elif nms_style == 'SOFT': # soft-NMS https://arxiv.org/abs/1704.04503\n sigma = 0.5 # soft-nms sigma parameter\n while len(dc):\n if len(dc) == 1:\n det_max.append(dc)\n break\n det_max.append(dc[:1])\n iou = bbox_iou(dc[0], dc[1:]) # iou with other boxes\n dc = dc[1:]\n dc[:, 4] *= torch.exp(-iou ** 2 / sigma) # decay confidences\n\n if len(det_max):\n det_max = torch.cat(det_max) # concatenate\n output = det_max[(-det_max[:, 4]).argsort()] # sort\n\n return output", "title": "" }, { "docid": "a6021205af826d0240c6d3b08d7f6b5a", "score": "0.6035794", "text": "def _preprocess_logits(\n self,\n scores,\n top_k=None,\n top_p=None,\n min_tokens_to_keep=1,\n filter_value=-float(\"Inf\"),\n ):\n if top_k is not None and top_k != 0:\n top_k = min(max(top_k, min_tokens_to_keep), scores.size(-1)) # Safety check\n # Remove all tokens with a probability less than the last token of the top-k\n indices_to_remove = scores < torch.topk(scores, top_k)[0][..., -1, None]\n scores = scores.masked_fill(indices_to_remove, filter_value)\n if top_p is not None and top_p < 1.0:\n sorted_logits, sorted_indices = torch.sort(scores, descending=True)\n cumulative_probs = sorted_logits.softmax(dim=-1).cumsum(dim=-1)\n\n # Remove tokens with cumulative top_p above the threshold (token with 0 are kept)\n sorted_indices_to_remove = cumulative_probs > top_p\n if min_tokens_to_keep > 1:\n # Keep at least min_tokens_to_keep (set to min_tokens_to_keep-1 because we add the first one below)\n sorted_indices_to_remove[..., : min_tokens_to_keep - 1] = 0\n # Shift the indices to the right to keep also the first token above the threshold\n 
sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[\n ..., :-1\n ].clone()\n sorted_indices_to_remove[..., 0] = 0\n\n # scatter sorted tensors to original indexing\n indices_to_remove = sorted_indices_to_remove.scatter(\n 1, sorted_indices, sorted_indices_to_remove\n )\n scores = scores.masked_fill(indices_to_remove, filter_value)\n\n return scores", "title": "" }, { "docid": "10bdfa496a7b1f3afc4ab59391e1d036", "score": "0.5963352", "text": "def my_nms(pred,nms_thres=0.5):\n # Get detections sorted by decreasing confidence scores\n pred = pred[(-pred[:, 4]).argsort()]\n\n det_max = []\n nms_style = 'MERGE' # 'OR' (default), 'AND', 'MERGE' (experimental)\n for c in pred[:, -1].unique():\n dc = pred[pred[:, -1] == c] # select class c\n n = len(dc)\n if n == 1:\n det_max.append(dc) # No NMS required if only 1 prediction\n continue\n elif n > 100:\n dc = dc[:100] # limit to first 100 boxes: https://github.com/ultralytics/yolov3/issues/117\n\n # Non-maximum suppression\n if nms_style == 'OR': # default\n while dc.shape[0]:\n det_max.append(dc[:1]) # save highest conf detection\n if len(dc) == 1: # Stop if we're at the last detection\n break\n iou = bbox_iou(dc[0], dc[1:]) # iou with other boxes\n dc = dc[1:][iou < nms_thres] # remove ious > threshold\n\n elif nms_style == 'AND': # requires overlap, single boxes erased\n while len(dc) > 1:\n iou = bbox_iou(dc[0], dc[1:]) # iou with other boxes\n if iou.max() > 0.5:\n det_max.append(dc[:1])\n dc = dc[1:][iou < nms_thres] # remove ious > threshold\n\n elif nms_style == 'MERGE': # weighted mixture box\n while len(dc):\n if len(dc) == 1:\n det_max.append(dc)\n break\n i = bbox_iou(dc[0], dc) > nms_thres # iou with other boxes\n weights = dc[i, 4:5]\n dc[0, :4] = (weights * dc[i, :4]).sum(0) / weights.sum()\n det_max.append(dc[:1])\n dc = dc[i == 0]\n\n elif nms_style == 'SOFT': # soft-NMS https://arxiv.org/abs/1704.04503\n sigma = 0.5 # soft-nms sigma parameter\n while len(dc):\n if len(dc) == 1:\n det_max.append(dc)\n break\n det_max.append(dc[:1])\n iou = bbox_iou(dc[0], dc[1:]) # iou with other boxes\n dc = dc[1:]\n dc[:, 4] *= torch.exp(-iou ** 2 / sigma) # decay confidences\n\n if len(det_max):\n det_max = torch.cat(det_max) # concatenate\n output = det_max[(-det_max[:, 4]).argsort()] # sort\n\n return output", "title": "" }, { "docid": "c83b974bbcb001ccb25cd6ada06e2855", "score": "0.5952654", "text": "def non_max_suppression(prediction, conf_thres=0.5, nms_thres=0.5):\r\n # prediction: torch.Size([1, 8190, 8]) 第一维bs是图片数,第二维是所有的proposal,第三维是xywh + conf + classes(这里是三类)\r\n min_wh = 2 # (pixels) minimum box width and height\r\n output = [None] * len(prediction)\r\n for image_i, pred in enumerate(prediction):\r\n # Experiment: Prior class size rejection\r\n # x, y, w, h = pred[:, 0], pred[:, 1], pred[:, 2], pred[:, 3]\r\n # a = w * h # area\r\n # ar = w / (h + 1e-16) # aspect ratio\r\n # n = len(w)\r\n # log_w, log_h, log_a, log_ar = torch.log(w), torch.log(h), torch.log(a), torch.log(ar)\r\n # shape_likelihood = np.zeros((n, 60), dtype=np.float32)\r\n # x = np.concatenate((log_w.reshape(-1, 1), log_h.reshape(-1, 1)), 1)\r\n # from scipy.stats import multivariate_normal\r\n # for c in range(60):\r\n # shape_likelihood[:, c] =\r\n # multivariate_normal.pdf(x, mean=mat['class_mu'][c, :2], cov=mat['class_cov'][c, :2, :2])\r\n\r\n if prediction.numel() == 0: # for multi-scale filtered result , in case of 0\r\n continue\r\n\r\n # Multiply conf by class conf to get combined confidence\r\n # max(1)是按照1维搜索,对每个proposal取出多分类分数,得到最大的那个值\r\n # 
返回值class_conf和索引class_pred,索引就是类别所属\r\n class_conf, class_pred = pred[:, 6:].max(1) # max(1) 是每行找最大的,即当前proposal最可能是哪个类\r\n pred[:, 5] *= class_conf # 乘以conf才是真正的得分,赋值到conf的位置\r\n\r\n # Select only suitable predictions\r\n # 先创造一个满足要求的索引bool矩阵,然后据此第二步进行索引\r\n # 条件为:1.最大类的conf大于预设值 2.该anchor的预测wh大于2像素 3.非nan或无穷\r\n i = (pred[:, 5] > conf_thres) & (pred[:, 2:4] > min_wh).all(1) & torch.isfinite(pred).all(1)\r\n pred = pred[i]\r\n\r\n # If none are remaining => process next image\r\n if len(pred) == 0:\r\n continue\r\n\r\n # Select predicted classes\r\n class_conf = class_conf[i] # bool向量筛掉False的conf\r\n class_pred = class_pred[i].unsqueeze(1).float() # torch.Size([num_of_proposal]) --> torch.Size([num_of_proposal,1])便于后面的concat\r\n\r\n use_cuda_nms = True\r\n # use_cuda时方案是不限于100个,因为有可能产生很多的高得分proposal,会误删\r\n if use_cuda_nms:\r\n det_max = []\r\n pred = torch.cat((pred[:, :6], class_conf.unsqueeze(1), class_pred), 1)\r\n pred = pred[(-pred[:, 5]).argsort()]\r\n for c in pred[:, -1].unique():\r\n dc = pred[pred[:, -1] == c]\r\n dc = dc[(-dc[:, 5]).argsort()]\r\n # if len(dc)>100: # 如果proposal实在太多,取100个\r\n # dc = dc[:100]\r\n\r\n # Non-maximum suppression\r\n inds = r_nms(dc[:,:6], nms_thres)\r\n \r\n det_max.append(dc[inds])\r\n if len(det_max):\r\n det_max = torch.cat(det_max) # concatenate\r\n output[image_i] = det_max[(-det_max[:, 5]).argsort()] # sort\r\n\r\n else:\r\n # Detections ordered as (x1y1x2y2, obj_conf, class_conf, class_pred)\r\n pred = torch.cat((pred[:, :6], class_conf.unsqueeze(1), class_pred), 1)\r\n\r\n # Get detections sorted by decreasing confidence scores\r\n pred = pred[(-pred[:, 5]).argsort()]\r\n\r\n det_max = []\r\n nms_style = 'OR' # 'OR' (default), 'AND', 'MERGE' (experimental)\r\n\r\n for c in pred[:, -1].unique():\r\n dc = pred[pred[:, -1] == c] # select class c # shape [num,7] 7 = (x1, y1, x2, y2, object_conf, class_conf)\r\n n = len(dc)\r\n if n == 1:\r\n det_max.append(dc) # No NMS required if only 1 prediction\r\n continue\r\n elif n > 100:\r\n dc = dc[:100] # limit to first 100 boxes: https://github.com/ultralytics/yolov3/issues/117\r\n\r\n # Non-maximum suppression\r\n if nms_style == 'OR': # default\r\n # METHOD1\r\n # ind = list(range(len(dc)))\r\n # while len(ind):\r\n # j = ind[0]\r\n # det_max.append(dc[j:j + 1]) # save highest conf detection\r\n # reject = (skew_bbox_iou(dc[j], dc[ind]) > nms_thres).nonzero()\r\n # [ind.pop(i) for i in reversed(reject)]\r\n\r\n # METHOD2\r\n while dc.shape[0]:\r\n det_max.append(dc[:1]) # save highest conf detection\r\n if len(dc) == 1: # Stop if we're at the last detection\r\n break\r\n iou = skew_bbox_iou(dc[0], dc[1:]) # iou with other boxes\r\n dc = dc[1:][iou < nms_thres] # remove ious > threshold\r\n\r\n elif nms_style == 'AND': # requires overlap, single boxes erased\r\n while len(dc) > 1:\r\n iou = skew_bbox_iou(dc[0], dc[1:]) # iou with other boxes\r\n if iou.max() > 0.5:\r\n det_max.append(dc[:1])\r\n dc = dc[1:][iou < nms_thres] # remove ious > threshold\r\n\r\n elif nms_style == 'MERGE': # weighted mixture box\r\n while len(dc):\r\n if len(dc) == 1:\r\n det_max.append(dc)\r\n break\r\n # 有个bug:如果当前一批box中和最高conf(排序后是第一个也就是dc[0])的iou都小于nms_thres,\r\n # 那么i全为False,导致weights=[],从而weights.sum()=0导致dc[0]变成nan!\r\n i = skew_bbox_iou(dc[0], dc) > nms_thres # iou with other boxes, 返回的也是boolean,便于后面矩阵索引和筛选\r\n weights = dc[i, 5:6] # 大于nms阈值的重复较多的proposal,取出conf\r\n assert len(weights)>0, 'Bugs on MERGE NMS!!'\r\n dc[0, :5] = (weights * dc[i, :5]).sum(0) / weights.sum() # 
将最高conf的bbox代之为大于阈值的所有bbox加权结果(conf不变,变了也没意义)\r\n det_max.append(dc[:1])\r\n dc = dc[i == 0] # bool的false等价于0,这一步将dc中的已经计算过的predbox剔除掉\r\n\r\n elif nms_style == 'SOFT': # soft-NMS https://arxiv.org/abs/1704.04503\r\n sigma = 0.5 # soft-nms sigma parameter\r\n while len(dc):\r\n if len(dc) == 1:\r\n det_max.append(dc)\r\n break\r\n det_max.append(dc[:1])\r\n iou = skew_bbox_iou(dc[0], dc[1:]) # iou with other boxes\r\n dc = dc[1:]\r\n dc[:, 4] *= torch.exp(-iou ** 2 / sigma) # decay confidences\r\n # dc = dc[dc[:, 4] > nms_thres] # new line per https://github.com/ultralytics/yolov3/issues/362\r\n\r\n if len(det_max):\r\n det_max = torch.cat(det_max) # concatenate\r\n import ipdb; ipdb.set_trace()\r\n output[image_i] = det_max[(-det_max[:, 5]).argsort()] # sort\r\n \r\n\r\n return output", "title": "" }, { "docid": "e749d4229da35082cafd2486cae60054", "score": "0.59475535", "text": "def non_max_suppression(boxlist, thresh, max_output_size):\n if not 0 <= thresh <= 1.0:\n raise ValueError('thresh must be between 0 and 1')\n if not isinstance(boxlist, box_list.BoxList):\n raise ValueError('boxlist must be a BoxList')\n if not boxlist.has_field('scores'):\n raise ValueError('input boxlist must have \\'scores\\' field')\n\n bbox = boxlist.get()\n scores = boxlist.get_field('scores')\n scores = scores.reshape(-1, 1)\n bbox_new = np.concatenate((bbox, scores), 1)\n\n selected_indices = nms_op(bbox_new, thresh)\n\n scores = scores.reshape(-1)[selected_indices]\n Z = zip(scores, selected_indices)\n Z = sorted(Z, reverse=True)\n _, indices_new = zip(*Z)\n max_size = np.arange(max_output_size)\n indices_new_1 = np.array(indices_new)[max_size]\n\n return gather(boxlist, indices_new_1)", "title": "" }, { "docid": "aa4a66b4533274633aefc0ef0926ffb7", "score": "0.59081316", "text": "def filter_by_score(data, threshold):\n\n if not data:\n print(\"Input to filter_by_score is empty.\")\n return data\n\n if 'score' not in data[0]:\n print(\"Data must be scored to run filter_by_score - bypassing this filter.\")\n print(\"Look into the 'scoring' directory for more info\")\n return data\n\n out = []\n filtered_out = 0\n\n for d in data:\n if d['score'] > threshold:\n out.append(d)\n else:\n filtered_out += 1\n\n print(\"Filtering by score filtered out {} articles, {} remain\".format(filtered_out, len(out)))\n return out", "title": "" }, { "docid": "d056aea11bf96f5addddb3c60cbcc4f8", "score": "0.59064317", "text": "def remove_negativescores_nodes(self):\n gravity_items = self.parser.css_select(\n self.top_node, \"*[gravityScore]\")\n for item in gravity_items:\n score = self.parser.getAttribute(item, 'gravityScore')\n score = float(score) if score else 0\n if score < 1:\n item.getparent().remove(item)", "title": "" }, { "docid": "a9a37d8c5580e8f8d8daff92d40aadbb", "score": "0.5865797", "text": "def unmold_detections_filter(self, detections, mrcnn_mask, image_shape, window,superpixel_map):\n # How many detections do we have?\n # Detections array is padded with zeros. Find the first class_id == 0.\n zero_ix = np.where(detections[:,4] == 0)[0]\n N = zero_ix[0] if zero_ix.shape[0] > 0 else detections.shape[0]\n \n # Extract boxes, class_ids, scores, and class-specific masks\n boxes = detections[:N, :4]\n class_ids = detections[:N, 4].astype(np.int32)\n scores = detections[:N, 5]\n masks = mrcnn_mask[np.arange(N), :, :, class_ids]\n \n # Filter out detections with zero area. 
Often only happens in early\n # stages of training when the network weights are still a bit random.\n exclude_ix = np.where((boxes[:, 2] - boxes[:, 0]) * (boxes[:, 2] - boxes[:, 0]) <= 0)[0]\n if exclude_ix.shape[0] > 0:\n boxes = np.delete(boxes, exclude_ix, axis=0)\n class_ids = np.delete(class_ids, exclude_ix, axis=0)\n scores = np.delete(scores, exclude_ix, axis=0)\n masks = np.delete(masks, exclude_ix, axis=0)\n N = class_ids.shape[0]\n \n # Compute scale and shift to translate coordinates to image domain.\n h_scale = image_shape[0] / (window[2] - window[0])\n w_scale = image_shape[1] / (window[3] - window[1])\n scale = min(h_scale, w_scale)\n shift = window[:2] # y, x\n scales = np.array([scale, scale, scale, scale])\n shifts = np.array([shift[0], shift[1], shift[0], shift[1]])\n \n # Translate bounding boxes to image domain\n boxes = np.multiply(boxes - shifts, scales).astype(np.int32)\n \n # Resize masks to original image size and set boundary threshold.\n full_masks = []\n for i in range(N):\n # Convert neural network mask to full size mask\n full_mask = utils.unmold_mask_superpixel_filter(masks[i], boxes[i], image_shape,superpixel_map)\n full_masks.append(full_mask)\n full_masks = np.stack(full_masks, axis=-1)\\\n if full_masks else np.empty((0,) + masks.shape[1:3])\n \n return boxes, class_ids, scores, full_masks", "title": "" }, { "docid": "24dacc1539b6da63f76a3998d0951ef2", "score": "0.58614933", "text": "def non_max_suppression(boxes, scores, threshold):\n assert boxes.shape[0] > 0\n if boxes.dtype.kind != \"f\":\n boxes = boxes.astype(np.float32)\n\n # Compute box areas\n y1 = boxes[:, 0]\n x1 = boxes[:, 1]\n y2 = boxes[:, 2]\n x2 = boxes[:, 3]\n area = (y2 - y1) * (x2 - x1)\n\n # Get indicies of boxes sorted by scores (highest first)\n ixs = scores.argsort()[::-1]\n\n pick = []\n while len(ixs) > 0:\n # Pick top box and add its index to the list\n i = ixs[0]\n pick.append(i)\n # Compute IoU of the picked box with the rest\n iou = compute_iou(boxes[i], boxes[ixs[1:]], area[i], area[ixs[1:]])\n # Identify boxes with IoU over the threshold. This\n # returns indices into ixs[1:], so add 1 to get\n # indices into ixs.\n remove_ixs = np.where(iou > threshold)[0] + 1\n # Remove indices of the picked and overlapped boxes.\n ixs = np.delete(ixs, remove_ixs)\n ixs = np.delete(ixs, 0)\n return np.array(pick, dtype=np.int32)", "title": "" }, { "docid": "24dacc1539b6da63f76a3998d0951ef2", "score": "0.58614933", "text": "def non_max_suppression(boxes, scores, threshold):\n assert boxes.shape[0] > 0\n if boxes.dtype.kind != \"f\":\n boxes = boxes.astype(np.float32)\n\n # Compute box areas\n y1 = boxes[:, 0]\n x1 = boxes[:, 1]\n y2 = boxes[:, 2]\n x2 = boxes[:, 3]\n area = (y2 - y1) * (x2 - x1)\n\n # Get indicies of boxes sorted by scores (highest first)\n ixs = scores.argsort()[::-1]\n\n pick = []\n while len(ixs) > 0:\n # Pick top box and add its index to the list\n i = ixs[0]\n pick.append(i)\n # Compute IoU of the picked box with the rest\n iou = compute_iou(boxes[i], boxes[ixs[1:]], area[i], area[ixs[1:]])\n # Identify boxes with IoU over the threshold. 
{ "docid": "776fb2d38b63d51819cf683095dd0e78", "score": "0.58027905", "text": "def non_max_suppression(self, filtered_boxes, box_classes, box_scores):\n if len(filtered_boxes) == 0:\n return []\n x1 = filtered_boxes[:, 0]\n y1 = filtered_boxes[:, 1]\n x2 = filtered_boxes[:, 2]\n y2 = filtered_boxes[:, 3]\n ind = np.lexsort((-box_scores, box_classes))\n _, class_count = np.unique(box_classes, return_counts=True)\n i = 0\n keep_i = []\n for c in class_count:\n c_boxes = ind[i:i + c]\n while len(c_boxes):\n fix = c_boxes[0]\n keep_i += [fix]\n c_boxes = c_boxes[1:]\n keep_tmp = []\n for b in c_boxes:\n xA = max(x1[fix], x1[b])\n yA = max(y1[fix], y1[b])\n xB = min(x2[fix], x2[b])\n yB = min(y2[fix], y2[b])\n interArea = max(0, xB - xA) * max(0, yB - yA)\n boxAArea = (x2[fix] - x1[fix]) * (y2[fix] - y1[fix])\n boxBArea = (x2[b] - x1[b]) * (y2[b] - y1[b])\n overlap = interArea / ((boxAArea + boxBArea) - interArea)\n if overlap > self.nms_t:\n pass\n else:\n keep_tmp += [b]\n c_boxes = keep_tmp\n i += c\n return filtered_boxes[keep_i], box_classes[keep_i], box_scores[keep_i]", "title": "" }, { "docid": "b8363cf219b98a99116759a5a0cb876f", "score": "0.57892257", "text": "def non_max_suppression(yolo_feats, yolo_max_boxes, yolo_iou_threshold, yolo_score_threshold):\n bbox_per_stage, objectness_per_stage, class_probs_per_stage = [], [], []\n\n for stage_feats in yolo_feats:\n num_boxes = (\n stage_feats[0].shape[1] * stage_feats[0].shape[2] * stage_feats[0].shape[3]\n ) # num_anchors * grid_x * grid_y\n bbox_per_stage.append(\n tf.reshape(\n stage_feats[0],\n (tf.shape(stage_feats[0])[0], num_boxes, stage_feats[0].shape[-1]),\n )\n ) # [None,num_boxes,4]\n objectness_per_stage.append(\n tf.reshape(\n stage_feats[1],\n (tf.shape(stage_feats[1])[0], num_boxes, stage_feats[1].shape[-1]),\n )\n ) # [None,num_boxes,1]\n class_probs_per_stage.append(\n tf.reshape(\n stage_feats[2],\n (tf.shape(stage_feats[2])[0], num_boxes, stage_feats[2].shape[-1]),\n )\n ) # [None,num_boxes,num_classes]\n\n bbox = tf.concat(bbox_per_stage, axis=1)\n objectness = tf.concat(objectness_per_stage, axis=1)\n class_probs = tf.concat(class_probs_per_stage, axis=1)\n\n boxes, scores, classes, valid_detections = tf.image.combined_non_max_suppression(\n boxes=tf.expand_dims(bbox, axis=2),\n scores=objectness * class_probs,\n max_output_size_per_class=yolo_max_boxes,\n max_total_size=yolo_max_boxes,\n iou_threshold=yolo_iou_threshold,\n score_threshold=yolo_score_threshold,\n )\n\n return [boxes, scores, classes, valid_detections]", "title": "" }, { "docid": "d313d019914488ffdce6c901dcc52086", "score": "0.5775584", "text": "def non_max_suppression(boxes_offsets, scores, classes_pred, fm_size, iou_threshold=0.6, max_boxes=10):\n boxes_offsets = cxcywh_to_xyxy(boxes_offsets, *fm_size)\n \n keep_indices = nms(boxes_offsets, scores, iou_threshold)\n \n return (xyxy_to_xywh(boxes_offsets[keep_indices[:max_boxes]]), scores[keep_indices[:max_boxes]], classes_pred[keep_indices[:max_boxes]])", "title": "" }, { "docid": "749a09ef8cfe07cb4575279d1a395689", "score": "0.57553893", "text": "def yolo_non_max_suppression(scores, boxes,max_boxes = 10, iou_threshold = 0.5):\r\n \r\n # create max_box_tensor to be used in tf.image.non_max_suppression()\r\n 
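# (tf.image.non_max_suppression returns the indices of the boxes to keep, not the boxes themselves)\r\n 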
max_boxes_tensor = K.variable(max_boxes, dtype='int32') # tensor to be used in tf.image.non_max_suppression()\r\n K.get_session().run(tf.variables_initializer([max_boxes_tensor]))\r\n\r\n # initialize variable max_boxes_tensor\r\n nms_indices = tf.image.non_max_suppression(boxes, scores, max_output_size=max_boxes, iou_threshold=iou_threshold)\r\n\r\n # Use tf.image.non_max_suppression() to get the list of indices corresponding to boxes you keep\r\n \r\n # Use K.gather() to select only nms_indices from scores, boxes and classes\r\n scores = K.gather(scores, nms_indices)\r\n boxes = K.gather(boxes, nms_indices)\r\n \r\n return scores, boxes", "title": "" }, { "docid": "28b82fb454080b04459ab7432a0fe692", "score": "0.57473624", "text": "def non_max_suppression_rotated_bbox(prediction, conf_thres=0.95, nms_thres=0.4):\n\n output = [None for _ in range(len(prediction))]\n for image_i, image_pred in enumerate(prediction):\n # Filter out confidence scores below threshold\n image_pred = image_pred[image_pred[:, 6] >= conf_thres]\n # If none are remaining => process next image\n if not image_pred.size(0):\n continue\n # Object confidence times class confidence\n score = image_pred[:, 6] * image_pred[:, 7:].max(1)[0]\n # Sort by it\n image_pred = image_pred[(-score).argsort()]\n class_confs, class_preds = image_pred[:, 7:].max(1, keepdim=True)\n detections = torch.cat((image_pred[:, :7].float(), class_confs.float(), class_preds.float()), 1)\n # Perform non-maximum suppression\n keep_boxes = []\n while detections.size(0):\n #large_overlap = rotated_bbox_iou(detections[0, :6].unsqueeze(0), detections[:, :6], 1.0, False) > nms_thres # not working\n large_overlap = rotated_bbox_iou_polygon(detections[0, :6], detections[:, :6]) > nms_thres\n # large_overlap = torch.from_numpy(large_overlap.astype('uint8'))\n large_overlap = torch.from_numpy(large_overlap)\n label_match = detections[0, -1] == detections[:, -1]\n # Indices of boxes with lower confidence scores, large IOUs and matching labels\n invalid = large_overlap & label_match\n weights = detections[invalid, 6:7]\n # Merge overlapping bboxes by order of confidence\n detections[0, :6] = (weights * detections[invalid, :6]).sum(0) / weights.sum()\n keep_boxes += [detections[0]]\n detections = detections[~invalid]\n if keep_boxes:\n output[image_i] = torch.stack(keep_boxes)\n\n return output", "title": "" }, { "docid": "9531695077275e34bccae19c13f0f740", "score": "0.5731023", "text": "def non_max_suppression(predictions, confidence_threshold=0.5, iou_threshold=0.5):\n predictions = np.asarray(predictions)\n conf_mask = np.expand_dims((predictions[:, :, 4] >= confidence_threshold), -1)\n predictions = predictions * conf_mask\n\n results = []\n for i, image_pred in enumerate(predictions):\n result = {}\n shape = image_pred.shape\n non_zero_idxs = np.nonzero(image_pred)\n image_pred = image_pred[non_zero_idxs]\n image_pred = image_pred.reshape(-1, shape[-1])\n\n bbox_attrs = image_pred[:, :5]\n classes = image_pred[:, 5:]\n classes = np.argmax(classes, axis=-1)\n\n unique_classes = list(set(classes.reshape(-1)))\n\n for cls in unique_classes:\n cls_mask = classes == cls\n cls_boxes = bbox_attrs[np.nonzero(cls_mask)]\n cls_boxes = cls_boxes[cls_boxes[:, -1].argsort()[::-1]]\n cls_scores = cls_boxes[:, -1]\n cls_boxes = cls_boxes[:, :-1]\n\n while len(cls_boxes) > 0:\n box = cls_boxes[0]\n score = cls_scores[0]\n if not cls in result:\n result[cls] = []\n result[cls].append((box, score))\n cls_boxes = cls_boxes[1:]\n # update, remove bbox iou < iou_threshold\n 
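# keep only the remaining candidate boxes whose IoU with the box just kept is below iou_threshold\n                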
ious = np.array([box_iou(box, x) for x in cls_boxes])\n print(ious)\n iou_mask = ious < iou_threshold\n cls_boxes = cls_boxes[np.nonzero(iou_mask)]\n cls_scores = cls_scores[np.nonzero(iou_mask)]\n results.append(result)\n return results", "title": "" }, { "docid": "d1f0f994b73c1396d78b078d5c2dc1ad", "score": "0.57159203", "text": "def filterOutlier(self, z_score_threshold):\n\t\tdata = np.array(self.average)\n\t\tmedian = np.median(data)\n\t\tdeviation = np.median(np.abs(data - median))\n\t\tz_scores = 0.675*(data - median)/deviation\n\t\tdata_out = data[np.where(np.abs(z_scores) < z_score_threshold)].tolist()\n\t\toutput = data_out if len(data_out) > 0 else self.average\n\t\treturn output", "title": "" }, { "docid": "a7d1efaa7474130a55fa170a0556e9e9", "score": "0.569207", "text": "def yolo_non_max_suppression(scores, boxes, classes, max_boxes = 10, iou_threshold = 0.5):\n\n max_boxes_tensor = K.variable(max_boxes, dtype='int32') # tensor to be used in tf.image.non_max_suppression()\n K.get_session().run(tf.variables_initializer([max_boxes_tensor])) # initialize variable max_boxes_tensor\n\n\n nms_indices = tf.image.non_max_suppression(boxes,scores,max_boxes,iou_threshold)\n\n\n\n scores = K.gather(scores,nms_indices)\n boxes = K.gather(boxes,nms_indices)\n classes = K.gather(classes,nms_indices)\n\n return scores, boxes, classes", "title": "" }, { "docid": "67af3844983c7ad58aa6a4d21c11a4a8", "score": "0.5679401", "text": "def filter_prediction(self, prediction, max_boxes=10, min_score=0.5):\n boxes = prediction['detection_boxes']\n classes = prediction['detection_class_entities']\n scores = prediction['detection_scores']\n predictions = []\n for i in range(boxes.shape[0]):\n predictions.append(\n (boxes[i], classes[i].decode('utf-8'), scores[i]))\n predictions.sort(key=lambda x: x[2], reverse=True)\n predictions = [(b, c, s) for (b, c, s) in predictions\n if c in self.fruits and s > min_score]\n return predictions[:max_boxes]", "title": "" }, { "docid": "df2dfbc1c94b8fb7583fecfd4fb6c0f1", "score": "0.56513774", "text": "def _non_maximum_suppression(self, boxes, class_probs, max_detections):\n assert len(boxes) == len(class_probs)\n\n max_detections = min(max_detections, len(boxes))\n max_probs = np.amax(class_probs, axis=1)\n max_classes = np.argmax(class_probs, axis=1)\n\n areas = boxes[:, 2] * boxes[:, 3]\n\n selected_boxes = []\n selected_classes = []\n selected_probs = []\n\n while len(selected_boxes) < max_detections:\n # Select the prediction with the highest probability.\n i = np.argmax(max_probs)\n if max_probs[i ] < self.prob_threshold:\n break\n\n # Save the selected prediction\n selected_boxes.append(boxes[i])\n selected_classes.append(max_classes[i])\n selected_probs.append(max_probs[i])\n\n box = boxes[i]\n other_indices = np.concatenate((np.arange(i), np.arange(i + 1, len(boxes))))\n other_boxes = boxes[other_indices]\n\n # Get overlap between the 'box' and 'other_boxes'\n x1 = np.maximum(box[0], other_boxes[:, 0])\n y1 = np.maximum(box[1], other_boxes[:, 1])\n x2 = np.minimum(box[0] + box[2], other_boxes[:, 0] + other_boxes[:, 2])\n y2 = np.minimum(box[1] + box[3], other_boxes[:, 1] + other_boxes[:, 3])\n w = np.maximum(0, x2 - x1)\n h = np.maximum(0, y2 - y1)\n\n # Calculate Intersection Over Union (IOU)\n overlap_area = w * h\n iou = overlap_area / (areas[i] + areas[other_indices] - overlap_area)\n\n # Find the overlapping predictions\n overlapping_indices = other_indices[np.where(iou > self.IOU_THRESHOLD)[0]]\n overlapping_indices = np.append(overlapping_indices, i)\n\n 
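# index i itself was appended above so that the chosen box's probability for this class is also zeroed below\n            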
# Set the probability of overlapping predictions to zero, and udpate max_probs and max_classes.\n class_probs[overlapping_indices, max_classes[i]] = 0\n max_probs[overlapping_indices] = np.amax(class_probs[overlapping_indices], axis=1)\n max_classes[overlapping_indices] = np.argmax(class_probs[overlapping_indices], axis=1)\n\n assert len(selected_boxes) == len(selected_classes) and len(selected_boxes) == len(selected_probs)\n return selected_boxes, selected_classes, selected_probs", "title": "" }, { "docid": "1db7040c065865eabf94158e4d009547", "score": "0.5645559", "text": "def soft_nms(x1, y1, x2, y2, scores, thresh, sigma2=0.5): \r\n areas = (x2 - x1 + 1) * (y2 - y1 + 1) \r\n order = scores.argsort()[::-1] \r\n nmsed_scrs = scores\r\n keep = [] \r\n threshed = []\r\n while order.size > 0: \r\n i = order[0] \r\n keep.append(i) \r\n xx1 = np.maximum(x1[i], x1[order[1:]]) \r\n yy1 = np.maximum(y1[i], y1[order[1:]]) \r\n xx2 = np.minimum(x2[i], x2[order[1:]]) \r\n yy2 = np.minimum(y2[i], y2[order[1:]]) \r\n \r\n w = np.maximum(0.0, xx2 - xx1 + 1) \r\n h = np.maximum(0.0, yy2 - yy1 + 1) \r\n inter = w * h \r\n ovr = inter / (areas[i] + areas[order[1:]] - inter) \r\n \r\n inds = np.where(ovr <= thresh)[0] \r\n greater = np.where(ovr > thresh)[0] \r\n nmsed_scrs[order[inds + 1]] *= 1\r\n nmsed_scrs[order[greater + 1]] *= np.exp(-(ovr[greater]**2)/sigma2)\r\n threshed.extend(order[greater + 1])\r\n order = order[inds + 1] \r\n\r\n k = np.where(nmsed_scrs[threshed] > 0.05)[0]\r\n keep.extend(np.array(threshed)[k])\r\n\r\n return keep", "title": "" }, { "docid": "5d17a48d44003e2ffd9f9c1e15f7a1f0", "score": "0.5642232", "text": "def non_maxima_suppression(self, imgname, overlap_thr=0.01, to_print=True):\n \n if to_print:\n print \"[INFO] Non-maxima suppression... 
\"\n \n overlap_thr = self.dataset.config[\"NMS_overlap_thr\"]\n # nacteni pozitivnich bounding boxu a jejich ppsti\n boxes, probs = self.create_boxes_nms(imgname)\n # nacteni jednotlivych souradnic bounding boxu\n ys, hs, xs, ws = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3]\n \n bb_area = (hs-ys+1)*(ws-xs+1)\n # serazeni indexu podle pravdepodobnosti\n indexes = np.argsort(probs)\n new_indexes = list()\n \n while True:\n # vytazeni indexu bounding boxu s nejvyssi ppsti\n i = indexes[-1]\n new_indexes.append(i)\n index = indexes[:-1]\n \n ys2 = np.maximum(ys[i], ys[index])\n hs2 = np.minimum(hs[i], hs[index])\n xs2 = np.maximum(xs[i], xs[index])\n ws2 = np.minimum(ws[i], ws[index])\n \n new_area = np.maximum(0, ws2-xs2+1) * np.maximum(0, hs2-ys2+1)\n # vypocet prekryti\n overlap = new_area.astype(float) / bb_area[index]\n # odstraneni indexu boxu s vysokym prekrytim\n indexes_to_delete = np.where(overlap > overlap_thr)[0]\n indexes = np.delete(index, indexes_to_delete)\n \n # zastavit po vyprazdneni\n if len(indexes) == 0:\n break\n \n if to_print:\n print \"[RESULT] Pocet pozitivnich bounding boxu zredukovan na \", len(new_indexes)\n #print new_indexes\n # vrati vybrane bounding boxy \n return boxes[new_indexes].astype(int)", "title": "" }, { "docid": "9204a3b77cdc0e9c0707ac42fb999fbe", "score": "0.5627586", "text": "def nonmaxsuppression(criterion, threshold):\n\n #\n # You code here\n criterion_max_filter = maximum_filter(criterion, size=5, mode='constant')\n\n criterion_non_max = np.zeros(criterion_max_filter.shape)\n criterion_non_max[np.where(criterion == criterion_max_filter)] = \\\n criterion[np.where(criterion == criterion_max_filter)]\n\n rows, cols = np.nonzero(criterion_non_max > threshold)\n\n # throw away interest points near by image boundary\n for i in range(rows.size):\n if (rows[i] < 5 or rows[i] >= criterion_max_filter.shape[0] - 5) or \\\n (cols[i] < 5 or cols[i] >= criterion_max_filter.shape[1] - 5):\n rows[i] = 0\n cols[i] = 0\n\n return rows[rows.nonzero()], cols[cols.nonzero()]\n #", "title": "" }, { "docid": "58f00aaddd2d1fe57871faa738e1ec13", "score": "0.56077796", "text": "def thresh_scale(self, thres_value):\n\t\tself.img= (self.img>=thres_value)*(self.img-thres_value+1.0)", "title": "" }, { "docid": "fb320d44af408c2975a9fd06ff530544", "score": "0.56059515", "text": "def thresh(self, thres_value=255):\n\t\tself.img= (self.img>=thres_value)*1.0", "title": "" }, { "docid": "99c09c1748bce7e172637b97c424e87a", "score": "0.5581341", "text": "def non_max_suppression(predictions_with_boxes, confidence_threshold, iou_threshold=0.4):\n conf_mask = np.expand_dims((predictions_with_boxes[:, :, 4] > confidence_threshold), -1)\n predictions = predictions_with_boxes * conf_mask\n\n results = []\n for i, image_pred in enumerate(predictions):\n result = {}\n shape = image_pred.shape\n non_zero_idxs = np.nonzero(image_pred)\n image_pred = image_pred[non_zero_idxs]\n image_pred = image_pred.reshape(-1, shape[-1])\n\n bbox_attrs = image_pred[:, :5]\n classes = image_pred[:, 5:]\n classes = np.argmax(classes, axis=-1)\n\n unique_classes = list(set(classes.reshape(-1)))\n\n for cls in unique_classes:\n cls_mask = classes == cls\n cls_boxes = bbox_attrs[np.nonzero(cls_mask)]\n cls_boxes = cls_boxes[cls_boxes[:, -1].argsort()[::-1]]\n cls_scores = cls_boxes[:, -1]\n cls_boxes = cls_boxes[:, :-1]\n\n while len(cls_boxes) > 0:\n box = cls_boxes[0]\n score = cls_scores[0]\n if not cls in result:\n result[cls] = []\n result[cls].append((box, score))\n cls_boxes = cls_boxes[1:]\n 
ious = np.array([_iou(box, x) for x in cls_boxes])\n iou_mask = ious < iou_threshold\n cls_boxes = cls_boxes[np.nonzero(iou_mask)]\n cls_scores = cls_scores[np.nonzero(iou_mask)]\n\n results.append(result)\n return results", "title": "" }, { "docid": "207f9e5fd5e362344558c7d1f05a91d5", "score": "0.55561775", "text": "def prune_values(self, threshold):\n raise NotImplementedError()", "title": "" }, { "docid": "66c934525dbfa1541447509e9edb728a", "score": "0.55446434", "text": "def process_remove_outliers_others(df: pd.DataFrame) -> pd.DataFrame:\n feature_names = df.columns\n z_scores = pd.DataFrame()\n for column in feature_names:\n z_scores['zscore_'+column] = (df[column] - df[column].mean())/df[column].std(ddof=0)\n z_scores_names = [col for col in df.columns if col.startswith('zscore')]\n return df.loc[(abs(z_scores<1.5) + (z_scores != z_scores)).all(axis=1)]", "title": "" }, { "docid": "c1768752854565b501222e894cc59306", "score": "0.5532525", "text": "def _check_if_score_above_threshold(self, score_value, score_type, threat_types):\n # if the endpoint didn't provide the score value then don't do any filtering based on the score\n if score_value < 0:\n return threat_types\n\n if score_value >= self.score_threshold:\n threat_types.add(score_type)\n log.debug(u\"'{}' classification was added because its score value '{}' is higher \"\n \"than the score_threshold value '{}'\".format(score_type, score_value, self.score_threshold))\n else:\n if score_type in threat_types:\n threat_types.remove(score_type)\n log.debug(u\"'{}' classification was removed because its score value '{}' is lower \"\n \"than the score_threshold value '{}'\".format(score_type, score_value, self.score_threshold))\n return threat_types", "title": "" }, { "docid": "acbedac72ba0284437aa23fa8f778cd3", "score": "0.5521811", "text": "def remove_high_correlated_features(file, threshold_value):\r\n return", "title": "" }, { "docid": "7541b1058e5b52873d30dc5df1a6f10c", "score": "0.55135053", "text": "def nonmaxsuppression(criterion, threshold):\r\n #find maximum in 5*5 Window\r\n data_max = maximum_filter(criterion, size=5,mode='mirror')\r\n #all the other equas 0\r\n criterion[criterion != data_max] = 0\r\n #find the interest points with thershold\r\n rows, cols = np.nonzero(criterion > threshold)\r\n print('rows',rows)\r\n criterion_thresh = np.logical_and(data_max > threshold,criterion >= data_max)\r\n mask = np.zeros_like(criterion_thresh)\r\n mask[5:-5,5:-5] = criterion_thresh[5:-5,5:-5] \r\n rows2, cols2 = np.nonzero(mask)\r\n print('rows2',rows2)\r\n return rows2, cols2", "title": "" }, { "docid": "a101f39aebaa08597bb5254da8cdd479", "score": "0.54890937", "text": "def non_max_suppression(boxes, max_bbox_overlap, scores=None):\n if len(boxes) == 0:\n return []\n\n boxes = boxes.astype(np.float)\n pick = []\n\n x1 = boxes[:, 0]\n y1 = boxes[:, 1]\n x2 = boxes[:, 2] + boxes[:, 0]\n y2 = boxes[:, 3] + boxes[:, 1]\n\n area = (x2 - x1 + 1) * (y2 - y1 + 1)\n if scores is not None:\n idxs = np.argsort(scores)\n else:\n idxs = np.argsort(y2)\n\n while len(idxs) > 0:\n last = len(idxs) - 1\n i = idxs[last]\n pick.append(i)\n\n xx1 = np.maximum(x1[i], x1[idxs[:last]])\n yy1 = np.maximum(y1[i], y1[idxs[:last]])\n xx2 = np.minimum(x2[i], x2[idxs[:last]])\n yy2 = np.minimum(y2[i], y2[idxs[:last]])\n\n w = np.maximum(0, xx2 - xx1 + 1)\n h = np.maximum(0, yy2 - yy1 + 1)\n\n overlap = (w * h) / area[idxs[:last]]\n\n idxs = np.delete(\n idxs, np.concatenate(\n ([last], np.where(overlap > max_bbox_overlap)[0])))\n\n return pick", "title": 
"" }, { "docid": "7c61bc425964bfffe0d0d8ca5b82a815", "score": "0.5473743", "text": "def filter_det(scores, boxes, start_ind=0, max_per_img=100, thresh=0.001, pre_nms_topn=6000,\n post_nms_topn=300, nms_thresh=0.3, nms_filter_duplicates=True):\n\n valid_cls = (scores[:, 1:].data.max(0)[0] > thresh).nonzero() + 1\n if valid_cls.dim() == 0:\n return None\n\n nms_mask = scores.data.clone()\n nms_mask.zero_()\n\n for c_i in valid_cls.squeeze(1).cpu():\n scores_ci = scores.data[:, c_i]\n boxes_ci = boxes.data[:, c_i]\n keep = apply_nms(scores_ci, boxes_ci,\n pre_nms_topn=pre_nms_topn, post_nms_topn=post_nms_topn,\n nms_thresh=nms_thresh)\n nms_mask[:, c_i][keep] = 1\n\n #dists_all = Variable(nms_mask * scores.data, volatile=True)\n with torch.no_grad():\n dists_all = Variable(nms_mask * scores.data)\n\n if nms_filter_duplicates:\n scores_pre, labels_pre = dists_all.data.max(1)\n inds_all = scores_pre.nonzero()\n assert inds_all.dim() != 0\n inds_all = inds_all.squeeze(1)\n\n labels_all = labels_pre[inds_all]\n scores_all = scores_pre[inds_all]\n else:\n nz = nms_mask.nonzero()\n assert nz.dim() != 0\n inds_all = nz[:, 0]\n labels_all = nz[:, 1]\n scores_all = scores.data.view(-1)[inds_all * scores.data.size(1) + labels_all]\n\n # dists_all = dists_all[inds_all]\n # dists_all[:,0] = 1.0-dists_all.sum(1)\n\n # # Limit to max per image detections\n vs, idx = torch.sort(scores_all, dim=0, descending=True)\n idx = idx[vs > thresh]\n if max_per_img < idx.size(0):\n idx = idx[:max_per_img]\n\n inds_all = inds_all[idx] + start_ind\n #!!!!scores_all = Variable(scores_all[idx], volatile=True)\n #!!!!labels_all = Variable(labels_all[idx], volatile=True)\n with torch.no_grad():\n scores_all = Variable(scores_all[idx])\n labels_all = Variable(labels_all[idx])\n # dists_all = dists_all[idx]\n\n return inds_all, scores_all, labels_all", "title": "" }, { "docid": "ba72a5adb85fc26dd6425c18a905c957", "score": "0.5467921", "text": "def filter_detections(\n boxes,\n classification,\n other = [],\n class_specific_filter = True,\n nms = True,\n score_threshold = 0.05,\n max_detections = 300,\n nms_threshold = 0.5\n):\n def _filter_detections(scores, labels):\n # threshold based on score\n indices = tf.where(keras.backend.greater(scores, score_threshold))\n\n if nms:\n filtered_boxes = tf.gather_nd(boxes, indices)\n filtered_scores = keras.backend.gather(scores, indices)[:, 0]\n\n # perform NMS\n nms_indices = tf.image.non_max_suppression(filtered_boxes, filtered_scores, max_output_size=max_detections, iou_threshold=nms_threshold)\n\n # filter indices based on NMS\n indices = keras.backend.gather(indices, nms_indices)\n\n # add indices to list of all indices\n labels = tf.gather_nd(labels, indices)\n indices = keras.backend.stack([indices[:, 0], labels], axis=1)\n\n return indices\n\n if class_specific_filter:\n all_indices = []\n # perform per class filtering\n for c in range(int(classification.shape[1])):\n scores = classification[:, c]\n labels = c * tf.ones((keras.backend.shape(scores)[0],), dtype='int64')\n all_indices.append(_filter_detections(scores, labels))\n\n # concatenate indices to single tensor\n indices = keras.backend.concatenate(all_indices, axis=0)\n else:\n scores = keras.backend.max(classification, axis = 1)\n labels = keras.backend.argmax(classification, axis = 1)\n indices = _filter_detections(scores, labels)\n\n # select top k\n scores = tf.gather_nd(classification, indices)\n labels = indices[:, 1]\n scores, top_indices = tf.nn.top_k(scores, k=keras.backend.minimum(max_detections, 
keras.backend.shape(scores)[0]))\n\n # filter input using the final set of indices\n indices = keras.backend.gather(indices[:, 0], top_indices)\n boxes = keras.backend.gather(boxes, indices)\n labels = keras.backend.gather(labels, top_indices)\n other_ = [keras.backend.gather(o, indices) for o in other]\n\n # zero pad the outputs\n pad_size = keras.backend.maximum(0, max_detections - keras.backend.shape(scores)[0])\n boxes = tf.pad(boxes, [[0, pad_size], [0, 0]], constant_values=-1)\n scores = tf.pad(scores, [[0, pad_size]], constant_values=-1)\n labels = tf.pad(labels, [[0, pad_size]], constant_values=-1)\n labels = keras.backend.cast(labels, 'int32')\n other_ = [tf.pad(o, [[0, pad_size]] + [[0, 0] for _ in range(1, len(o.shape))], constant_values=-1) for o in other_]\n\n # set shapes, since we know what they are\n boxes.set_shape([max_detections, 4])\n scores.set_shape([max_detections])\n labels.set_shape([max_detections])\n for o, s in zip(other_, [list(keras.backend.int_shape(o)) for o in other]):\n o.set_shape([max_detections] + s[1:])\n\n return [boxes, scores, labels] + other_", "title": "" }, { "docid": "9684432d76bb573db64bb9f1f2bef343", "score": "0.546727", "text": "def prune_stats(stats, big_stats, threshold):\n for item,freq in list(stats.items()):\n if freq < threshold:\n del stats[item]\n if freq < 0:\n big_stats[item] += freq\n else:\n big_stats[item] = freq", "title": "" }, { "docid": "1d99ac1ac290958d966d3f6b52d7def6", "score": "0.5458985", "text": "def non_maxima_suppression(boxes, probs, classes_num, thr=0.2):\n for i, box in enumerate(boxes):\n if probs[i] == 0:\n continue\n for j in range(i+1, len(boxes)):\n if classes_num[i] == classes_num[j] and iou(box, boxes[j]) > thr:\n probs[j] = 0.0\n\n return probs", "title": "" }, { "docid": "5351f97ed61eb6f74ef92b72991bdc8d", "score": "0.5439366", "text": "def nms_allcls(x1, y1, x2, y2, scores, thresh): \r\n areas = (x2 - x1 + 1) * (y2 - y1 + 1) \r\n order = scores.argsort()[::-1] \r\n keep = [] \r\n while order.size > 0: \r\n i = order[0] \r\n keep.append(i) \r\n xx1 = np.maximum(x1[i], x1[order[1:]]) \r\n yy1 = np.maximum(y1[i], y1[order[1:]]) \r\n xx2 = np.minimum(x2[i], x2[order[1:]]) \r\n yy2 = np.minimum(y2[i], y2[order[1:]]) \r\n w = np.maximum(0.0, xx2 - xx1 + 1) \r\n h = np.maximum(0.0, yy2 - yy1 + 1) \r\n inter = w * h \r\n ovr = inter / (areas[i] + areas[order[1:]] - inter) \r\n inds = np.where(ovr <= thresh)[0] \r\n # indd = np.where(ovr > thresh)[0] \r\n # print(str(i)+' <- '+str(indd))\r\n order = order[inds + 1] \r\n return keep", "title": "" }, { "docid": "67046cb371c4648d4c09549f000d287a", "score": "0.5361539", "text": "def nms(x1, y1, x2, y2, scores, cls, thresh): \r\n\r\n areas = (x2 - x1 + 1) * (y2 - y1 + 1) \r\n order = scores.argsort()[::-1] \r\n keep = []\r\n while order.size > 0: \r\n i = order[0]\r\n clas = cls[i]\r\n keep.append(i)\r\n in_cls_order = np.array([ii for ii in order[1:] if cls[ii] == clas])\r\n if in_cls_order.size == 0: \r\n order = order[1:]\r\n continue\r\n xx1 = np.maximum(x1[i], x1[in_cls_order]) \r\n yy1 = np.maximum(y1[i], y1[in_cls_order]) \r\n xx2 = np.minimum(x2[i], x2[in_cls_order]) \r\n yy2 = np.minimum(y2[i], y2[in_cls_order]) \r\n \r\n w = np.maximum(0.0, xx2 - xx1 + 1) \r\n h = np.maximum(0.0, yy2 - yy1 + 1) \r\n inter = w * h \r\n ovr = inter / (areas[i] + areas[in_cls_order] - inter) \r\n \r\n # inds = np.where(ovr <= thresh)[0] \r\n indd = np.where(ovr > thresh)[0]\r\n order = np.setdiff1d(order,in_cls_order[indd])[1:]\r\n # print(str(i)+' <- '+str(indd))\r\n # order = 
order[inds + 1] \r\n \r\n return keep", "title": "" }, { "docid": "e8a00ab8d71b33fa421933a46997c32b", "score": "0.5358012", "text": "def remove_outliers(Z):\n N,M = Z.shape\n\n print('Started with %d workers and %d scores.\\n'%(np.size(Z,0),np.sum(~np.isnan(Z))))\n\n mu = np.nanmean(Z) # MOS for each sentence\n s = np.nanstd(Z) # std dev for each sentence\n\n mu_norm = abs(Z-mu)/s # normalized scores\n outlying_scores = (mu_norm > 3.0)\n outlying_workers = np.sum(outlying_score,1) > .05*np.sum(~np.isnan(Z),1)\n\n Z[outlying_score] = np.NaN # remove outlying scores (greater than 2.5 std devs away from the mean)\n Z = Z[~outlying_workers] # remove subjects who have more than 5% of outlying scores\n\n print('Removed %d outlying scores.\\n'%np.sum(outlying_scores))\n print('Removed %d outlying workers.\\n'%np.sum(outlying_workers))\n\n print('Finished with %d workers and %d scores.\\n\\n'%(np.size(Z,0), np.sum(~np.isnan(Z))))\n\n return Z", "title": "" }, { "docid": "1345827c64e2b8765ee54b85df3b4f33", "score": "0.5355537", "text": "def prune_stats(stats, threshold: int):\n for item,freq in list(stats.items()):\n if freq < threshold:\n del stats[item]", "title": "" }, { "docid": "7b4b49adb13bda09b193c176393486d3", "score": "0.5350756", "text": "def cut_video(self):\n global_thresh = np.array(list(map((lambda x: np.percentile(x, 95)),\n self.current_video)))\n self.current_video[self.current_video < global_thresh[:,None,None]] = 0", "title": "" }, { "docid": "be18ae2a7202b95594de848ba37ba775", "score": "0.53304553", "text": "def is_outlier(score_vector):\n assert (score_vector.ndim==1), \"Pass 1D array of vector of scores \"\n if SIMILARITY_MERTIC=='ncc':\n\t\tmax_score = np.max(score_vector)\n\t\tif np.sum(score_vector>=0.8*max_score)>=O_F_THRESHOLD:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n if SIMILARITY_MERTIC=='ssd':\n\t\tmin_distance = np.min(score_vector)\n\t\tif np.sum(score_vector<=1.4*min_score)>=O_F_THRESHOLD:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n return True", "title": "" }, { "docid": "b826551a653ea50ee96cf788804fa5b5", "score": "0.530742", "text": "def filter_det(scores, boxes, start_ind=0, max_per_img=100, thresh=0.001, pre_nms_topn=6000,\n post_nms_topn=300, nms_thresh=0.3, nms_filter_duplicates=True):\n\n valid_cls = (scores[:, 1:].data.max(0)[0] > thresh).nonzero() + 1\n if valid_cls.dim() == 0:\n return None\n\n nms_mask = scores.data.clone()\n nms_mask.zero_()\n\n for c_i in valid_cls.squeeze(1).cpu():\n scores_ci = scores.data[:, c_i]\n boxes_ci = boxes.data[:, c_i]\n\n keep = apply_nms(scores_ci, boxes_ci,\n pre_nms_topn=pre_nms_topn, post_nms_topn=post_nms_topn,\n nms_thresh=nms_thresh)\n nms_mask[:, c_i][keep] = 1\n\n dists_all = Variable(nms_mask * scores.data, volatile=True)\n\n if nms_filter_duplicates:\n scores_pre, labels_pre = dists_all.data.max(1)\n inds_all = scores_pre.nonzero()\n assert inds_all.dim() != 0\n inds_all = inds_all.squeeze(1)\n\n labels_all = labels_pre[inds_all]\n scores_all = scores_pre[inds_all]\n else:\n nz = nms_mask.nonzero()\n assert nz.dim() != 0\n inds_all = nz[:, 0]\n labels_all = nz[:, 1]\n scores_all = scores.data.view(-1)[inds_all * scores.data.size(1) + labels_all]\n\n # dists_all = dists_all[inds_all]\n # dists_all[:,0] = 1.0-dists_all.sum(1)\n\n # # Limit to max per image detections\n vs, idx = torch.sort(scores_all, dim=0, descending=True)\n idx = idx[vs > thresh]\n if max_per_img < idx.size(0):\n idx = idx[:max_per_img]\n\n inds_all = inds_all[idx] + start_ind\n scores_all = Variable(scores_all[idx], 
volatile=True)\n labels_all = Variable(labels_all[idx], volatile=True)\n # dists_all = dists_all[idx]\n\n return inds_all, scores_all, labels_all", "title": "" }, { "docid": "01c438f6714d7b7f42a942458e41a608", "score": "0.52979475", "text": "def remove_by_threshold(self, threshold: int = 5) -> None:\n to_remove = [k for k, v in self._dictionary.items() if v <= threshold]\n self.remove_words(to_remove)", "title": "" }, { "docid": "a7c290e34f84000bed428fddc5024739", "score": "0.5273788", "text": "def remove_outlier(x, thresh=3.5):\n if len(x.shape) == 1: x = x[:,None]\n median = np.median(x, axis=0)\n diff = np.sqrt(((x - median)**2).sum(axis=-1))\n modified_z_score = 0.6745 * diff / np.median(diff)\n x_filtered = x[modified_z_score <= thresh]\n return x_filtered", "title": "" }, { "docid": "a989770f61f441d6bc8a37c11061172d", "score": "0.5262682", "text": "def remove_anomalies(top_ranked, threshold):\n return dict(filter(lambda doc: doc[1] > threshold, top_ranked.items()))", "title": "" }, { "docid": "5d4f85e8202f6836cf696f739bc2c3f9", "score": "0.5239941", "text": "def get_threshold(scores, n_std=0.0):\n mean = np.mean(scores)\n std = np.std(scores)\n return mean - (std * n_std)", "title": "" }, { "docid": "733cc25f0d770c0b036479001685915e", "score": "0.5211697", "text": "def outliers(arr, zthresh, removal=False):\n\n # importing\n import numpy as np\n from scipy import stats\n\n # z scores\n z = np.abs(stats.zscore(arr))\n outs = (z > zthresh)\n\n if removal:\n arr = arr[~outs]\n return arr\n else:\n subset = arr[outs]\n return subset", "title": "" }, { "docid": "d76350a20dfbde0c96e7a937ad74f28b", "score": "0.5203149", "text": "def non_max_suppression(inputs, n_classes, max_output_size, iou_threshold,\n confidence_threshold):\n\n #acquiring classes from inputs\n classes = tf.reshape(inputs[:, 5:] , (-1))\n classes = tf.expand_dims(tf.cast(classes, dtype=tf.float32), axis=-1) #casting to float32 for tf.image.non_max_suppression\n\n #builidng boxes tensor from inputs and classes\n boxes = tf.concat([inputs[:, :5], classes], axis=-1)\n\n #support array to be returned with final bboxes\n array_bboxes = []\n\n #iterate over all possible classes of COCO Dataset\n for cls in range(n_classes):\n \n #Check if given cls is within boxes\n mask = tf.equal(boxes[:, 5], cls)\n mask_shape = mask.get_shape()\n\n #If no classes are present skip, else -->\n if mask_shape.ndims != 0:\n\n #save rows of boxes based on mask \n class_boxes = tf.boolean_mask(boxes, mask)\n \n #prepare variables for tf.image.non_max_suppression\n boxes_coords, boxes_conf_scores, _ = tf.split(class_boxes, [4, 1, -1], axis=-1)\n #transform in 1D \n boxes_conf_scores = tf.reshape(boxes_conf_scores, [-1])\n\n indices = tf.image.non_max_suppression(boxes_coords, boxes_conf_scores, max_output_size, iou_threshold)\n \n #using indices access selected bbox from class_boxes\n class_boxes = tf.gather(class_boxes, indices)\n\n #if tf.gather returnes 0 skip, else -->\n if tf.shape(class_boxes)[0] != 0 :\n #saving to support array\n array_bboxes.append(class_boxes)\n \n #concat into single tensor\n best_bboxes = tf.concat(array_bboxes, axis=0)\n\n return best_bboxes", "title": "" }, { "docid": "a42b7f62fa57143d3151a1e64dc561dc", "score": "0.5168097", "text": "def apply_nms(bbox_preds, scores_preds, iou_thresh=0.5):\n # torchvision returns the indices of the bboxes to keep\n keep = nms(bbox_preds, scores_preds, iou_thresh)\n # filter existing boxes and scores and return\n bbox_preds_kept = bbox_preds[keep]\n scores_preds = scores_preds[keep]\n 
return bbox_preds_kept, scores_preds", "title": "" }, { "docid": "e33b7c0db03b2ac59df25904181a249d", "score": "0.5167948", "text": "def top_filtering(logits, top_k=0, top_p=0.0, threshold=-float('Inf'), filter_value=-float('Inf')):\n assert logits.dim() == 2\n top_k = min(top_k, logits.size(-1))\n if top_k > 0:\n # Remove all tokens with a probability less than the last token in the top-k tokens\n indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]\n logits[indices_to_remove] = filter_value\n\n if top_p > 0.0:\n # Compute cumulative probabilities of sorted tokens\n sorted_logits, sorted_indices = torch.sort(logits, descending=True)\n cumulative_probabilities = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)\n\n # Remove tokens with cumulative probability above the threshold\n sorted_indices_to_remove = cumulative_probabilities > top_p\n # Shift the indices to the right to keep also the first token above the threshold\n sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()\n sorted_indices_to_remove[..., 0] = 0\n\n # sorted_indices_to_remove is shape (batch_size, vocab_size) containing bools corresponding to sorted_indices.\n # Each row has Falses, then Trues.\n # For each row, get the index (in sorted_indices_to_remove) of the last False\n num_falses = sorted_indices_to_remove.size(1) - sorted_indices_to_remove.sum(dim=1) # num false per row\n last_false = num_falses - 1 # idx of last false per row. shape (batch_size)\n\n # For each row, get the vocab-index of the last \"False\" token (i.e. least prob token that won't be masked)\n least_prob_index = sorted_indices[range(sorted_indices.size(0)), last_false] # shape (batch_size)\n\n # For each row, get the logit for the least probable unmasked token\n cutoff_logits = logits[range(sorted_indices.size(0)), least_prob_index] # shape (batch_size)\n\n # For each row, set everything lower than cutoff_logits to filter_value\n indices_to_remove = logits < cutoff_logits.unsqueeze(1)\n logits[indices_to_remove] = filter_value\n\n indices_to_remove = logits < threshold\n logits[indices_to_remove] = filter_value\n\n return logits", "title": "" }, { "docid": "a4dfcf51165585099bac20f3424001be", "score": "0.51606655", "text": "def score_cut(self, ffmpeg_video_path):\n cuts = extract_shots_with_ffprobe(ffmpeg_video_path, 0.0)\n scores = [score for _, score in cuts]\n\n return max(scores)", "title": "" }, { "docid": "1ecfea5acdc6c719891aea6a4dd4214d", "score": "0.51561296", "text": "def nms(boxes, scores, iou_threshold=0.5, topk=None):\n\n if (not boxes.numel()) or (not scores.numel()):\n return torch.zeros(0, dtype=torch.long)\n\n keep = None\n #############################################################################\n # TODO: Implement non-maximum suppression which iterates the following: #\n # 1. Select the highest-scoring box among the remaining ones, #\n # which has not been chosen in this step before #\n # 2. Eliminate boxes with IoU > threshold #\n # 3. If any boxes remain, GOTO 1 #\n # Your implementation should not depend on a specific device type; #\n # you can use the device of the input if necessary. 
#\n # HINT: You can refer to the torchvision library code: #\n # github.com/pytorch/vision/blob/master/torchvision/csrc/cpu/nms_cpu.cpp #\n #############################################################################\n # Replace \"pass\" statement with your code\n highest_scores = torch.argsort(scores, descending=True)\n best_boxes = boxes[highest_scores,:] #highest scoring boxes\n box_areas = torch.prod(boxes[:, 2:] - boxes[:, :2], dim=1)\n keep = []\n while len(highest_scores)>0:\n highest_score_idx = highest_scores[0]\n keep.append(highest_score_idx)\n if topk and len(keep)==topk:\n return torch.tensor(keep, device=boxes.device,dtype=torch.int64)\n\n curr_box = boxes[highest_score_idx]\n # x1, y1, x2, y2 = curr_box[0], curr_box[1], curr_box[2], curr_box[3]\n best_boxes = boxes[highest_scores,:]\n top_lefts = torch.max(curr_box[:2],best_boxes[:,:2])\n bottom_rights = torch.min(curr_box[2:],best_boxes[:,2:])\n\n intersection = torch.prod(bottom_rights-top_lefts, dim=1)\n intersection *= (top_lefts < bottom_rights).all(dim=1) #asserts that intersection only holds when they truly overlap..\n\n curr_area = box_areas[highest_score_idx]\n union = curr_area + box_areas[highest_scores] - intersection\n iou_mat = torch.div(intersection,union).squeeze()\n highest_scores = highest_scores[torch.where(iou_mat <= iou_threshold)]###\n\n keep = torch.tensor(keep).to(torch.int64).to(boxes.device)\n\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n return keep", "title": "" }, { "docid": "92fb23d79bb63ef35a0fbc3b6160631f", "score": "0.5155447", "text": "def prune_values(self, threshold):\n if self._distrib.prune_values(threshold):\n self._cached_values = None", "title": "" }, { "docid": "c5f6b91f4612bcba339ec73fd76474d1", "score": "0.51520663", "text": "def non_max_suppression(data, valid_count, max_output_size=-1,\n iou_threshold=0.5, force_suppress=False, top_k=-1,\n coord_start=2, score_index=1, id_index=0,\n return_indices=True, invalid_to_bottom=False):\n batch_size = data.shape[0]\n num_anchors = data.shape[1]\n score_axis = score_index\n score_shape = (batch_size, num_anchors)\n score_tensor = tvm.compute(score_shape, lambda i, j: data[i, j, score_axis])\n sort_tensor = argsort(score_tensor, valid_count=valid_count, axis=1, is_ascend=False)\n out, box_indices = hybrid_nms(data, sort_tensor, valid_count,\n tvm.const(max_output_size, dtype=\"int32\"),\n tvm.const(iou_threshold, dtype=data.dtype),\n tvm.const(force_suppress, dtype=\"bool\"),\n tvm.const(top_k, dtype=\"int32\"),\n tvm.const(coord_start, dtype=\"int32\"),\n tvm.const(id_index, dtype=\"int32\"),\n tvm.const(score_index, dtype=\"int32\"),\n zero=tvm.const(0, dtype=data.dtype),\n one=tvm.const(1, dtype=data.dtype))\n if not return_indices and invalid_to_bottom:\n out = hybrid_rearrange_out(out, one=tvm.const(1, dtype=data.dtype))\n\n return box_indices if return_indices else out", "title": "" }, { "docid": "061c622f64d3bd06d5a831c46d2dd4ba", "score": "0.5146496", "text": "def segmentize(self, imgs):\n results = self.detect(imgs)\n seg = []\n for k, result in enumerate(results):\n img = imgs[k]\n ears = []\n for i in range(len(result['scores'])):\n if result['scores'][i] > self.treshold:\n m = np.array(result['masks'])[:, :, i].astype(np.int)\n masked = img * np.stack([m, m, m], axis=2)\n a = np.where(m != 0)\n y1, y2, x1, x2 = np.min(a[0]), np.max(a[0]) + 1, np.min(a[1]), np.max(a[1]) + 1\n out = 
masked[y1:y2, x1:x2]\n ears.append(out)\n seg.append(ears)\n return seg", "title": "" }, { "docid": "f866621122223509a8d46da3e13fa11d", "score": "0.51462513", "text": "def nms(detections, thresh):\n x1 = detections[:, 0]\n y1 = detections[:, 1]\n x2 = detections[:, 2]\n y2 = detections[:, 3]\n scores = detections[:, 4]\n ## 单个框面积大小\n areas = (x2 - x1 + 1) * (y2 - y1 + 1)\n ## 按照得分值从大到小将序号排列\n order = scores.argsort()[::-1] ## [::-1]倒序\n\n keep = []\n while order.size > 0:\n i = order[0]\n keep.append(i) ## 得分最大的保留,保留值为序号\n\n xx1 = np.maximum(x1[i], x1[order[1:]])\n yy1 = np.maximum(y1[i], y1[order[1:]])\n xx2 = np.minimum(x2[i], x2[order[1:]])\n yy2 = np.minimum(y2[i], y2[order[1:]])\n\n w = np.maximum(0.0, xx2 - xx1 + 1)\n h = np.maximum(0.0, yy2 - yy1 + 1)\n inter = w * h\n ovr = inter / (areas[i] + areas[order[1:]] - inter)\n\n inds = np.where(ovr <= thresh)[0]\n order = order[inds + 1]\n\n return keep", "title": "" }, { "docid": "5abf861cd4b7f82e2acacbb998a72698", "score": "0.5141607", "text": "def filter_fused_masks(fused_results,\n bb_threshold=0.5,\n mask_threshold=0.9,\n priority_table=None,\n verbose=0):\n rois = fused_results['rois']\n masks = fused_results['masks']\n scores = fused_results['scores']\n class_ids = fused_results['class_ids']\n bbAreas = np.zeros(len(class_ids))\n maskAreas = np.zeros(len(class_ids))\n toDelete = []\n for i, r1 in enumerate(rois):\n # If this RoI has already been selected for deletion, we skip it\n if i in toDelete:\n continue\n\n # If the area of this RoI has not been computed\n if bbAreas[i] == 0:\n r1Width = r1[3] - r1[1]\n r1Height = r1[2] - r1[0]\n bbAreas[i] = r1Width * r1Height\n\n # Then we check for each RoI that has not already been checked\n for j in range(i + 1, len(rois)):\n if j in toDelete:\n continue\n r2 = rois[j]\n\n # We want only one prediction class to be vessel\n priority = comparePriority(class_ids[i] - 1, class_ids[j] - 1, priority_table)\n if priority == 0:\n continue\n\n # If the area of the 2nd RoI has not been computed\n if bbAreas[j] == 0:\n r2Width = r2[3] - r2[1]\n r2Height = r2[2] - r2[0]\n bbAreas[j] = r2Width * r2Height\n\n # Computation of the bb intersection\n y1 = np.maximum(r1[0], r2[0])\n y2 = np.minimum(r1[2], r2[2])\n x1 = np.maximum(r1[1], r2[1])\n x2 = np.minimum(r1[3], r2[3])\n xInter = np.maximum(x2 - x1, 0)\n yInter = np.maximum(y2 - y1, 0)\n intersection = xInter * yInter\n\n # We skip next part if bb intersection not representative enough\n partOfR1 = intersection / bbAreas[i]\n partOfR2 = intersection / bbAreas[j]\n if partOfR1 > bb_threshold or partOfR2 > bb_threshold:\n # Getting first mask and computing its area if not done yet\n mask1 = masks[:, :, i]\n if maskAreas[i] == -1:\n mask1Histogram = div.getBWCount(mask1, using=\"numpy\")\n maskAreas[i] = mask1Histogram[1]\n if maskAreas[i] == 0:\n print(i, mask1Histogram[1])\n\n # Getting second mask and computing its area if not done yet\n mask2 = masks[:, :, j]\n if maskAreas[j] == -1:\n mask2Histogram = div.getBWCount(mask2, using=\"numpy\")\n maskAreas[j] = mask2Histogram[1]\n if maskAreas[j] == 0:\n print(j, mask2Histogram[1])\n\n # Computing intersection of mask 1 and 2 and computing its area\n mask1AND2 = np.logical_and(mask1, mask2)\n mask1AND2Histogram = div.getBWCount(mask1AND2, using=\"numpy\")\n partOfMask1 = mask1AND2Histogram[1] / maskAreas[i]\n partOfMask2 = mask1AND2Histogram[1] / maskAreas[j]\n\n # We check if the common area represents more than the vessel_threshold of the non-vessel mask\n if priority == -1 and partOfMask1 > 
mask_threshold:\n if verbose > 0:\n print(\"[{:03d}/{:03d}] Kept class = {}\\tRemoved Class = {}\".format(i, j, class_ids[i],\n class_ids[j]))\n toDelete.append(i)\n elif priority == 1 and partOfMask2 > mask_threshold:\n if verbose > 0:\n print(\"[{:03d}/{:03d}] Kept class = {}\\tRemoved Class = {}\".format(i, j, class_ids[i],\n class_ids[j]))\n toDelete.append(j)\n\n # Deletion of unwanted results\n scores = np.delete(scores, toDelete)\n class_ids = np.delete(class_ids, toDelete)\n masks = np.delete(masks, toDelete, axis=2)\n rois = np.delete(rois, toDelete, axis=0)\n return {\"rois\": rois, \"class_ids\": class_ids, \"scores\": scores, \"masks\": masks}", "title": "" }, { "docid": "180da288ee175378c284b510eb691308", "score": "0.5130966", "text": "def filter_detections(\n boxes,\n classification,\n class_specific_filter=True,\n nms=True,\n score_threshold=0.01,\n max_detections=300,\n nms_threshold=0.5\n):\n\n def _filter_detections(boxes, scores, labels):\n # threshold based on score\n indices = torch.gt(scores, score_threshold).nonzero()\n if indices.shape[0] == 0:\n return torch.tensor([], dtype=torch.int64, device=device)\n indices = indices[:, 0]\n\n if nms:\n filtered_boxes = torch.index_select(boxes, 0, indices)\n filtered_scores = torch.index_select(scores, 0, indices)\n\n # perform NMS\n nms_indices = non_max_suppression(filtered_boxes, filtered_scores, max_output_size=max_detections,\n iou_threshold=nms_threshold)\n\n # filter indices based on NMS\n indices = torch.index_select(indices, 0, nms_indices)\n\n # add indices to list of all indices\n labels = torch.index_select(labels, 0, indices)\n indices = torch.stack([indices, labels], dim=1)\n\n return indices\n\n results = []\n for box_cur, classification_cur in zip(boxes, classification):\n if class_specific_filter:\n all_indices = []\n # perform per class filtering\n for c in range(int(classification_cur.shape[1])):\n scores = classification_cur[:, c]\n labels = torch.full_like(scores, c, dtype=torch.int64)\n all_indices.append(_filter_detections(box_cur, scores, labels))\n\n # concatenate indices to single tensor\n indices = torch.cat(all_indices, dim=0)\n else:\n scores, labels = torch.max(classification_cur, dim=1)\n indices = _filter_detections(box_cur, scores, labels)\n\n if indices.shape[0] == 0:\n results.append({'bboxes':np.zeros((0, 4)), 'scores': np.full((0, ), -1, dtype=np.float32),'category_id': np.full((0, ), -1, dtype=np.int64)})\n continue\n # select top k\n scores = classification_cur[indices[:, 0], indices[:, 1]]\n labels = indices[:, 1]\n indices = indices[:, 0]\n\n scores, top_indices = torch.topk(scores, k=min(max_detections, scores.shape[0]))\n # filter input using the final set of indices\n indices = indices[top_indices]\n box_cur = box_cur[indices]\n labels = labels[top_indices]\n results.append({'bboxes':box_cur.cpu().detach().numpy(),'scores': scores.cpu().detach().numpy(), 'category_id': labels.cpu().detach().numpy()})\n\n return results", "title": "" }, { "docid": "0b80eb7d63db8b4bea10396cbaede314", "score": "0.51293033", "text": "def score_func(scores, predictions, inaction_score, perfect_score, thresh=0):\n # Apply the threshold\n predictions = (predictions > thresh).astype(int)\n\n # Get the actual score\n actual_score = scores[:, 1][predictions == 1].sum() + scores[:, 0][predictions == 0].sum()\n\n # Get the normalized score\n normalized_score = (actual_score - inaction_score) / (perfect_score - inaction_score)\n\n return normalized_score", "title": "" }, { "docid": 
"3094bbd8da01347ba880423f953478b8", "score": "0.5114042", "text": "def filter_seen(self, user_id, scores):\n\n start_pos = self.URM_train.indptr[user_id]\n end_pos = self.URM_train.indptr[user_id + 1]\n\n user_profile = self.URM_train.indices[start_pos:end_pos]\n\n scores[user_profile] = -np.inf\n\n return scores", "title": "" }, { "docid": "3094bbd8da01347ba880423f953478b8", "score": "0.5114042", "text": "def filter_seen(self, user_id, scores):\n\n start_pos = self.URM_train.indptr[user_id]\n end_pos = self.URM_train.indptr[user_id + 1]\n\n user_profile = self.URM_train.indices[start_pos:end_pos]\n\n scores[user_profile] = -np.inf\n\n return scores", "title": "" }, { "docid": "c662b61c991502a7db81dbb20d17b0f0", "score": "0.51126647", "text": "def nms(\n boxes,\n scores,\n score_threshold,\n nms_threshold,\n top_k=200,\n normalized=True,\n eta=1.0,\n):\n index = -1\n for i in range(boxes.shape[0]):\n if (\n index > -1\n and iou(boxes[i], boxes[index], normalized) > nms_threshold\n ):\n weight_merge(boxes[i], boxes[index], scores[i], scores[index])\n scores[index] += scores[i]\n scores[i] = score_threshold - 1.0\n else:\n index = i\n\n all_scores = copy.deepcopy(scores)\n all_scores = all_scores.flatten()\n\n selected_indices = np.argwhere(all_scores > score_threshold)\n selected_indices = selected_indices.flatten()\n all_scores = all_scores[selected_indices]\n\n sorted_indices = np.argsort(-all_scores, axis=0, kind='mergesort')\n sorted_scores = all_scores[sorted_indices]\n sorted_indices = selected_indices[sorted_indices]\n\n if top_k > -1 and top_k < sorted_indices.shape[0]:\n sorted_indices = sorted_indices[:top_k]\n sorted_scores = sorted_scores[:top_k]\n\n selected_indices = []\n adaptive_threshold = nms_threshold\n for i in range(sorted_scores.shape[0]):\n idx = sorted_indices[i]\n keep = True\n for k in range(len(selected_indices)):\n if keep:\n kept_idx = selected_indices[k]\n overlap = iou(boxes[idx], boxes[kept_idx], normalized)\n keep = True if overlap <= adaptive_threshold else False\n else:\n break\n if keep:\n selected_indices.append(idx)\n if keep and eta < 1 and adaptive_threshold > 0.5:\n adaptive_threshold *= eta\n return selected_indices", "title": "" }, { "docid": "20a0853eecce7738de571a2b74694c9a", "score": "0.51064366", "text": "def _filterScore(self, phase, submission, user):\n if (phase.get('hideScores')\n and not Phase().hasAccess(phase, user, level=AccessType.WRITE)):\n submission.pop('score', None)\n submission.pop('overallScore', None)\n else:\n # coerce any nans or infs to strings\n for dataset in (submission.get('score') or ()):\n for metric in dataset['metrics']:\n if metric['value'] is not None:\n v = float(metric['value'])\n if math.isnan(v) or math.isinf(v):\n metric['value'] = str(v)\n v = submission.get('overallScore') or 0\n if math.isnan(v) or math.isinf(v):\n submission['overallScore'] = str(v)\n\n return submission", "title": "" }, { "docid": "cdefa9b9d4cca8f729ed7fd5afd2d0b5", "score": "0.5092048", "text": "def filter_duplicated_objects(detected_objects, treshold=0.8):\n r = detected_objects['rois']\n overlaps = car_detection.mrcnn.utils.compute_overlaps(r, r)\n\n mask = [True]*len(r)\n for i, v in enumerate(overlaps):\n v = v.copy()\n np.put(v, i, 0)\n if v.max() < treshold:\n continue\n j = np.argmax(v)\n score_i = detected_objects['scores'][i]\n score_j = detected_objects['scores'][j]\n mask[j if score_i > score_j else i] = False\n\n mask = np.array(mask, dtype=bool)\n results = {\n 'rois': detected_objects['rois'][mask],\n 'class_ids': 
detected_objects['class_ids'][mask],\n 'scores': detected_objects['scores'][mask],\n 'masks': detected_objects['masks'][:,:,mask]\n }\n\n return results", "title": "" }, { "docid": "f32eb07075fc8f40537ab91011245934", "score": "0.5083327", "text": "def replace_outlier(self, data, thresh=3.5):\n nrows = len(data)\n median = np.median(data)\n diff = np.abs(data - median)\n mdev = np.median(diff)\n modified_z_score = 0.6745 * diff/mdev if mdev else [0 for i in \\\n range (diff.shape[0])]\n for j in range(nrows):\n #replaace values that z score is larger than thresh\n if modified_z_score[j] > thresh: \n data[j] = median\n return data", "title": "" }, { "docid": "9643835fb2ea5f790c2035a7af9bdc51", "score": "0.5081157", "text": "def _build_non_max_suppressor(nms_config):\n if nms_config.iou_threshold < 0 or nms_config.iou_threshold > 1.0:\n raise ValueError('iou_threshold not in [0, 1.0].')\n if nms_config.max_detections_per_class > nms_config.max_total_detections:\n raise ValueError('max_detections_per_class should be no greater than '\n 'max_total_detections.')\n\n non_max_suppressor_fn = functools.partial(\n post_processing.batch_multiclass_non_max_suppression,\n score_thresh=nms_config.score_threshold,\n iou_thresh=nms_config.iou_threshold,\n max_size_per_class=nms_config.max_detections_per_class,\n max_total_size=nms_config.max_total_detections)\n return non_max_suppressor_fn", "title": "" }, { "docid": "91e889762bb2de59a7da790c761b316d", "score": "0.5064485", "text": "def filer_roi(self, pred_loc, pred_fg_softmax_score, image_anchors, img_size):\n \n roi = self.anchors.decode_bbox(image_anchors, pred_loc)\n\n # Clip those boxes which lie outside image dimensions\n roi[:, 0] = np.clip(roi[:, 0], 0, img_size[0])\n roi[:, 2] = np.clip(roi[:, 2], 0, img_size[0])\n roi[:, 1] = np.clip(roi[:, 1], 0, img_size[1])\n roi[:, 3] = np.clip(roi[:, 3], 0, img_size[1])\n roi_height = roi[:, 2] - roi[:, 0]\n roi_width = roi[:, 3] - roi[:, 1]\n\n # 1. Min Size Filteration \n min_size = self.min_size * self.scale\n keep = np.where((roi_height >= min_size) & (roi_width >= min_size))[0]\n roi = roi[keep, :]\n pred_fg_softmax_score = pred_fg_softmax_score[keep]\n order = pred_fg_softmax_score.ravel().argsort()[::-1]\n \n # 2. Pre NMS Filteration \n order = order[:self.n_pre_nms ]\n roi = roi[order, :]\n pred_fg_softmax_score = pred_fg_softmax_score[order]\n\n # 3. NMS Filteration \n keep = nms(torch.from_numpy(roi), torch.from_numpy(pred_fg_softmax_score), self.nms_thresh)\n \n # 4. 
Post NMS Filteration \n keep = keep[:self.n_post_nms]\n \n roi = roi[keep.cpu().numpy()]\n return roi", "title": "" }, { "docid": "3eb4a109747089aa54cfac5969d1f8e2", "score": "0.5057653", "text": "def _additional_score_checks(self, scores):", "title": "" }, { "docid": "75a90d4b3d7c06a781729f5d22582a5c", "score": "0.5046759", "text": "def signal_noise_stats(scores, x_model, top_k=10, threshold=0.01):\n\n signal = []\n noise_mean = []\n noise_max = []\n noise_topk = []\n for j, score in enumerate(scores):\n\n # calculate information of ground truth\n gt_info = np.log2(4) + np.sum(x_model[j]*np.log2(x_model[j]+1e-10), axis=1)\n\n # (don't evaluate over low info content motif positions) \n index = np.where(gt_info > threshold)[0]\n\n # evaluate noise levels\n index2 = np.where((score > 0) & (gt_info == np.min(gt_info)))[0]\n\n if len(index2) < top_k:\n signal.append(0)\n noise_max.append(0)\n noise_mean.append(0)\n noise_topk.append(0)\n else:\n signal.append(np.mean(score[index]))\n noise_max.append(np.max(score[index2]))\n noise_mean.append(np.mean(score[index2]))\n sort_score = np.sort(score[index2])[::-1]\n noise_topk.append(np.mean(sort_score[:top_k]))\n \n return (\n np.array(signal),\n np.array(noise_max),\n np.array(noise_mean),\n np.array(noise_topk),\n )", "title": "" }, { "docid": "538f74f6577143511f8f9cd796c66c43", "score": "0.5019669", "text": "def super_nms(prob_predictions, dist_thresh, prob_thresh=0.01, top_k=0):\n # Iterate through batch dimension\n im_h = prob_predictions.shape[1]\n im_w = prob_predictions.shape[2]\n output_lst = []\n for i in range(prob_predictions.shape[0]):\n # print(i)\n prob_pred = prob_predictions[i, ...]\n # Filter the points using prob_thresh\n coord = np.where(prob_pred >= prob_thresh) # HW format\n points = np.concatenate((coord[0][..., None], coord[1][..., None]),\n axis=1) # HW format\n\n # Get the probability score\n prob_score = prob_pred[points[:, 0], points[:, 1]]\n\n # Perform super nms\n # Modify the in_points to xy format (instead of HW format)\n in_points = np.concatenate((coord[1][..., None], coord[0][..., None],\n prob_score), axis=1).T\n keep_points_, keep_inds = nms_fast(in_points, im_h, im_w, dist_thresh)\n # Remember to flip outputs back to HW format\n keep_points = np.round(np.flip(keep_points_[:2, :], axis=0).T)\n keep_score = keep_points_[-1, :].T\n\n # Whether we only keep the topk value\n if (top_k > 0) or (top_k is None):\n k = min([keep_points.shape[0], top_k])\n keep_points = keep_points[:k, :]\n keep_score = keep_score[:k]\n\n # Re-compose the probability map\n output_map = np.zeros([im_h, im_w])\n output_map[keep_points[:, 0].astype(np.int),\n keep_points[:, 1].astype(np.int)] = keep_score.squeeze()\n\n output_lst.append(output_map[None, ...])\n\n return np.concatenate(output_lst, axis=0)", "title": "" }, { "docid": "706cd800ddf7c13771939fdd0d77feed", "score": "0.500985", "text": "def remove_outlier(self, data, sd_val):\n data = data.dropna()\n data = data[(np.abs(stats.zscore(data)) < float(sd_val)).all(axis=1)]\n return data", "title": "" }, { "docid": "96206d097283f1746c57cdfc1d1a9911", "score": "0.5008839", "text": "def filter_boxes(boxes_coords, objectness_scores, classes_pred, threshold=0.6):\n box_scores = objectness_scores.unsqueeze(-1) * classes_pred\n \n best_boxes, idx_best_boxes = torch.max(box_scores, dim=4)\n\n # Box score low because objcteness score is low \n # ind = idx_best_boxes[0,0,0,0]\n # print(\"Obj: \", objectness_scores[0,0,0,0])\n # print(\"Class pred: \", classes_pred[0,0,0,0,ind])\n\n filter_mask 
= (best_boxes >= threshold)\n \n return [boxes_coords[filter_mask], best_boxes[filter_mask], idx_best_boxes[filter_mask]]", "title": "" }, { "docid": "fc03e979a02fc568adc71b311a640141", "score": "0.49966657", "text": "def spicke_remover(data, nstd=20.0, spreed=500.0, max_loops=10.0 , verbose=False):\n datastd=data.std()\n data2=np.copy(data)\n\n peak_remove=True\n looper_count=0\n act_flag=False\n while peak_remove is True:\n if nstd* data.std() < np.max(np.abs(data2)):\n act_flag=True\n if verbose:\n print('true: '+ str(nstd* datastd) +' < '+str( np.max(np.abs(data)) ) )\n data2=M.spickes_to_mean(data2, nloop=0, spreed=spreed, gaussian=False)\n looper_count+=1\n else:\n if verbose:\n print('False: '+ str(nstd* datastd) +' > '+str( np.max(np.abs(data)) ) )\n peak_remove=False\n\n if looper_count > max_loops:\n peak_remove=False\n if verbose:\n print('stoped by max#')\n\n\n if verbose:\n plt.plot(G.timestamp, data, 'r')\n plt.plot(G.timestamp,data2, 'b')\n\n return data2 , act_flag", "title": "" }, { "docid": "a161f2dbfb4e6ad588abc0ade576252b", "score": "0.49927843", "text": "def apply_threshold(im,params):\n x, y = im.shape\n ws = oddNum(x / 10)\n \n sauv = filters.threshold_sauvola(im, window_size = ws)\n sauv = np.nan_to_num(sauv)\n if(params[\"TEST_MODE\"][\"im_treatement\"]): \n plt_i(sauv, \"sauvola\") \n \n res = np.greater(im,sauv)\n #kernel = np.ones((6,1),np.uint8)\n \n return res", "title": "" }, { "docid": "ac469f9bcc34bf762c22215c80a71c88", "score": "0.4988257", "text": "def remove_outlier(arr, std_thres = 2):\n\tstd = np.std(arr)\n\tmu = np.median(arr)\n\t\n\treturn arr[(arr - mu) < (std_thres * std)]", "title": "" }, { "docid": "7d052961a9b819e64ccacf966bfb1412", "score": "0.49880686", "text": "def calculate_score(self, detections, ground_truth, image):\n tp, fp, fn = 0, 0, 0\n detections_copy = detections.copy()\n match_threshold = 0.7\n correct_classifications_yolo = 0\n correct_classifications_svm = 0\n for i, row in ground_truth.iterrows():\n gt = [ row['gt_x1'], row['gt_y1'], row['gt_x2'], row['gt_y2'] ]\n # find maximum overlap\n max_overlap = [0.0, []]\n for d in detections_copy:\n pred = d[2:6]\n overlap = self.calculate_overlap(pred, gt)\n if overlap > max_overlap[0]:\n max_overlap = [overlap, d]\n # if max overlap exceed threshold, then mark as true positive\n if max_overlap[0] > match_threshold:\n tp += 1\n # score yolo classification\n if max_overlap[1][1] == row['label']:\n self.metrics['yolo'] += 1\n correct_classifications_yolo += 1\n # score svm classification\n svm_classification = self.classify_svm(max_overlap[1], image)\n self.metrics['predictions'].append([ svm_classification, max_overlap[1][1], row['label'] ])\n if svm_classification == row['label']:\n self.metrics['svm'] += 1\n correct_classifications_svm += 1\n detections_copy.remove(max_overlap[1])\n else:\n fn += 1\n # mark any other detections as false positive\n fp = len(detections) - tp\n\n # update global metrics\n self.metrics['tp'] += tp\n self.metrics['fp'] += fp\n self.metrics['fn'] += fn\n print('tp: {}, fp: {}, fn: {}'.format(tp,fp,fn))\n print('svm: {}/{}'.format(correct_classifications_svm, tp))\n print('yolo: {}/{} \\n'.format(correct_classifications_yolo, tp))\n\n # print latest total metrics\n print('\\nTotal Metrics - tp: {}, fp: {}, fn: {} '.format(self.metrics['tp'],self.metrics['fp'],self.metrics['fn']))\n print('Correct Classifications (YOLO): {}/{}'.format(self.metrics['yolo'], self.metrics['tp']))\n print('Correct Classifications (SVM): {}/{} \\n'.format(self.metrics['svm'], 
self.metrics['tp']))", "title": "" }, { "docid": "34ec71f70f69ed4066bca958f6d9c92d", "score": "0.4985047", "text": "def _generate_detections_batched(boxes,\n scores,\n pre_nms_score_threshold,\n nms_iou_threshold,\n max_num_detections):\n with tf.name_scope('generate_detections'):\n nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections = (\n tf.image.combined_non_max_suppression(\n boxes,\n scores,\n max_output_size_per_class=max_num_detections,\n max_total_size=max_num_detections,\n iou_threshold=nms_iou_threshold,\n score_threshold=pre_nms_score_threshold,\n pad_per_class=False,\n clip_boxes=False))\n return nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections", "title": "" }, { "docid": "be81a84feaa1b9f03ac318e5276e7825", "score": "0.49805504", "text": "def _get_detections(generator, model, word_seen, word_unseen, top, score_threshold=0.05, max_detections=100, save_path=None):\n all_detections = [[None for i in range(generator.num_classes())] for j in range(generator.size())]\n num_seen = 65\n for i in range(generator.size()):\n raw_image = generator.load_image(i)\n image = generator.preprocess_image(raw_image.copy())\n image, scale = generator.resize_image(image)\n\n # run network\n _, _, detections = model.predict_on_batch(np.expand_dims(image, axis=0))\n\n # clip to image shape\n detections[:, :, 0] = np.maximum(0, detections[:, :, 0])\n detections[:, :, 1] = np.maximum(0, detections[:, :, 1])\n detections[:, :, 2] = np.minimum(image.shape[1], detections[:, :, 2])\n detections[:, :, 3] = np.minimum(image.shape[0], detections[:, :, 3])\n\n # correct boxes for image scale\n detections[0, :, :4] /= scale\n\n # select scores from detections\n scores = detections[0, :, 4:]\n\n # select indices which have a score above the threshold\n indices_seen = np.where(scores > .3)\n\n T = top\n mask = np.ones_like(scores, dtype='float32')\n mask[:, T:] = 0.0\n sorted_score = -np.sort(-scores, axis=1)\n sorted_score_arg = np.argsort(-scores, axis=1)\n sorted_score = np.multiply(sorted_score, mask)\n\n restroed_score = mask\n\n for idx in range(scores.shape[0]):\n restroed_score[idx, sorted_score_arg[idx, :]] = sorted_score[idx, :]\n\n unseen_pd = np.dot(restroed_score, np.transpose(word_seen))\n\n unseen_scores = np.dot(unseen_pd, word_unseen)\n val = np.max(unseen_scores, axis=1)\n val_arg = np.argmax(unseen_scores, axis=1)\n pos = np.where(val > .1)\n\n indices_unseen = []\n indices_unseen.append(pos[0])\n indices_unseen.append(num_seen + val_arg[pos[0]])\n indices_unseen = tuple(indices_unseen)\n scores = np.concatenate((scores, unseen_scores), axis=1)\n\n indices = []\n indices.append(np.concatenate((indices_seen[0], indices_unseen[0])))\n indices.append(np.concatenate((indices_seen[1], indices_unseen[1])))\n indices = tuple(indices)\n\n # For ZSL\n # indices = indices_unseen\n\n # Only seen\n # indices = indices_seen\n\n\n # select those scores\n scores_ = scores[indices]\n\n # find the order with which to sort the scores\n scores_sort = np.argsort(-scores_)[:max_detections]\n\n # select detections\n image_boxes = detections[0, indices[0][scores_sort], :4]\n image_scores = np.expand_dims(scores[indices[0][scores_sort], indices[1][scores_sort]], axis=1)\n image_detections = np.append(image_boxes, image_scores, axis=1)\n image_predicted_labels = indices[1][scores_sort]\n\n if save_path is not None:\n draw_annotations(raw_image, generator.load_annotations(i), generator=generator)\n draw_detections(raw_image, detections[0, indices[0][scores_sort], :], generator=generator)\n\n 
cv2.imwrite(os.path.join(save_path, '{}.png'.format(i)), raw_image)\n\n # copy detections to all_detections\n for label in range(generator.num_classes()):\n all_detections[i][label] = image_detections[image_predicted_labels == label, :]\n\n # print('{}/{}'.format(i, generator.size()), end='\\r')\n return all_detections", "title": "" }, { "docid": "c0b4f92a6f3d6d9b76b5d139cdb9eab2", "score": "0.4967298", "text": "def filter_flickers(past, current, treshold=0.6):\n overlaps = car_detection.mrcnn.utils.compute_overlaps(current['rois'], past['rois'])\n\n mask = [True]*len(current['rois'])\n for i, row in enumerate(overlaps):\n if row.size == 0:\n mask[i] = False\n continue\n \n max_index = row.argmax(axis=0)\n if row[max_index] < treshold:\n mask[i] = False\n\n mask = np.array(mask, dtype=bool)\n results = {\n 'rois': current['rois'][mask],\n 'class_ids': current['class_ids'][mask],\n 'scores': current['scores'][mask],\n 'masks': current['masks'][:,:,mask]\n }\n\n return results", "title": "" }, { "docid": "eb9b6724fea44f950b407271fb29ebca", "score": "0.49484023", "text": "def py_cpu_nms(dets, thresh):\n x1 = dets[:, 0]\n y1 = dets[:, 1]\n x2 = dets[:, 2]\n y2 = dets[:, 3]\n scores = dets[:, 4]\n\n areas = (x2 - x1 + 1) * (y2 - y1 + 1)\n order = scores.argsort()[::-1]\n\n keep = []\n while order.size > 0:\n i = order[0]\n keep.append(i)\n xx1 = np.maximum(x1[i], x1[order[1:]])\n yy1 = np.maximum(y1[i], y1[order[1:]])\n xx2 = np.minimum(x2[i], x2[order[1:]])\n yy2 = np.minimum(y2[i], y2[order[1:]])\n\n w = np.maximum(0.0, xx2 - xx1 + 1)\n h = np.maximum(0.0, yy2 - yy1 + 1)\n inter = w * h\n ovr = inter / (areas[i] + areas[order[1:]] - inter)\n\n inds = np.where(ovr <= thresh)[0]\n order = order[inds + 1]\n\n return keep", "title": "" }, { "docid": "eb9b6724fea44f950b407271fb29ebca", "score": "0.49484023", "text": "def py_cpu_nms(dets, thresh):\n x1 = dets[:, 0]\n y1 = dets[:, 1]\n x2 = dets[:, 2]\n y2 = dets[:, 3]\n scores = dets[:, 4]\n\n areas = (x2 - x1 + 1) * (y2 - y1 + 1)\n order = scores.argsort()[::-1]\n\n keep = []\n while order.size > 0:\n i = order[0]\n keep.append(i)\n xx1 = np.maximum(x1[i], x1[order[1:]])\n yy1 = np.maximum(y1[i], y1[order[1:]])\n xx2 = np.minimum(x2[i], x2[order[1:]])\n yy2 = np.minimum(y2[i], y2[order[1:]])\n\n w = np.maximum(0.0, xx2 - xx1 + 1)\n h = np.maximum(0.0, yy2 - yy1 + 1)\n inter = w * h\n ovr = inter / (areas[i] + areas[order[1:]] - inter)\n\n inds = np.where(ovr <= thresh)[0]\n order = order[inds + 1]\n\n return keep", "title": "" }, { "docid": "eb9b6724fea44f950b407271fb29ebca", "score": "0.49484023", "text": "def py_cpu_nms(dets, thresh):\n x1 = dets[:, 0]\n y1 = dets[:, 1]\n x2 = dets[:, 2]\n y2 = dets[:, 3]\n scores = dets[:, 4]\n\n areas = (x2 - x1 + 1) * (y2 - y1 + 1)\n order = scores.argsort()[::-1]\n\n keep = []\n while order.size > 0:\n i = order[0]\n keep.append(i)\n xx1 = np.maximum(x1[i], x1[order[1:]])\n yy1 = np.maximum(y1[i], y1[order[1:]])\n xx2 = np.minimum(x2[i], x2[order[1:]])\n yy2 = np.minimum(y2[i], y2[order[1:]])\n\n w = np.maximum(0.0, xx2 - xx1 + 1)\n h = np.maximum(0.0, yy2 - yy1 + 1)\n inter = w * h\n ovr = inter / (areas[i] + areas[order[1:]] - inter)\n\n inds = np.where(ovr <= thresh)[0]\n order = order[inds + 1]\n\n return keep", "title": "" }, { "docid": "eb9b6724fea44f950b407271fb29ebca", "score": "0.49484023", "text": "def py_cpu_nms(dets, thresh):\n x1 = dets[:, 0]\n y1 = dets[:, 1]\n x2 = dets[:, 2]\n y2 = dets[:, 3]\n scores = dets[:, 4]\n\n areas = (x2 - x1 + 1) * (y2 - y1 + 1)\n order = scores.argsort()[::-1]\n\n 
keep = []\n while order.size > 0:\n i = order[0]\n keep.append(i)\n xx1 = np.maximum(x1[i], x1[order[1:]])\n yy1 = np.maximum(y1[i], y1[order[1:]])\n xx2 = np.minimum(x2[i], x2[order[1:]])\n yy2 = np.minimum(y2[i], y2[order[1:]])\n\n w = np.maximum(0.0, xx2 - xx1 + 1)\n h = np.maximum(0.0, yy2 - yy1 + 1)\n inter = w * h\n ovr = inter / (areas[i] + areas[order[1:]] - inter)\n\n inds = np.where(ovr <= thresh)[0]\n order = order[inds + 1]\n\n return keep", "title": "" }, { "docid": "edcce91f891a3758f788fa3aaa89102b", "score": "0.4942723", "text": "def _global_mask(self, sparsity):\n global_scores = torch.cat([torch.flatten(v) for v in self.scores.values()])\n k = int((1.0 - sparsity) * global_scores.numel())\n if not k < 1:\n threshold, _ = torch.kthvalue(global_scores, k)\n for mask, param in self.masked_parameters:\n score = self.scores[id(param)] \n zero = torch.tensor([0.]).to(mask.device)\n one = torch.tensor([1.]).to(mask.device)\n mask.copy_(torch.where(score <= threshold, zero, one))", "title": "" }, { "docid": "746a5aa502c8da31065f9f4cd0db8ea5", "score": "0.49364847", "text": "def postprocess(self, ypred, maxiou=0.1, minscore=0.5, flip=0):\n # copy so can rerun without losing the original\n ypred = deepcopy(ypred)\n for i, y in enumerate(ypred):\n y = {\n k: (v.cpu() if isinstance(v, torch.Tensor) else v) for k, v in y.items()\n }\n\n # only accept items above cutoff score\n sel = torch.nonzero(y[\"scores\"] > minscore)[:, 0]\n y = {k: v[sel] for k, v in y.items()}\n\n # boost malignant to be more important\n y[\"scores2\"] = torch.Tensor(\n [\n score + 1 if label == 2 else score\n for label, score in zip(y[\"labels\"], y[\"scores\"])\n ]\n )\n # nms on all classes at once with priority to malignant\n sel = nms(y[\"boxes\"], y[\"scores2\"], maxiou)\n y = {k: v[sel] for k, v in y.items()}\n del y[\"scores2\"]\n\n # convert benign with p < flip to malignant\n for i2, label in enumerate(y[\"labels\"]):\n if label == 1 and y[\"scores\"][i2] < flip:\n y[\"labels\"][i2] = 2\n y[\"scores\"][i2] = 1 - y[\"scores\"][i2]\n\n # move to numpy after nms complete\n y = {\n k: (v.numpy() if isinstance(v, torch.Tensor) else v)\n for k, v in y.items()\n }\n # image level consolidate\n y[\"class_\"] = 0 if len(y[\"labels\"]) == 0 else y[\"labels\"].max()\n y[\"class_\"] = 1 if y[\"class_\"] == 2 else 0\n\n ypred[i] = y\n return ypred", "title": "" }, { "docid": "cc1fa8bc991d3d9173ca9d159ce87418", "score": "0.49326184", "text": "def prune(self, minimum_word_frequency_percentage=1):\n pruned_resulting_documents = []\n\n for document in self.resulting_documents:\n new_document = []\n for word in document:\n if self.word_in_how_many_documents[word] >= minimum_word_frequency_percentage / 100. 
* len(\n self.resulting_documents):\n new_document.append(word)\n pruned_resulting_documents.append(new_document)\n self.resulting_documents = pruned_resulting_documents", "title": "" }, { "docid": "1b366667309648b5649b6f253628a4f7", "score": "0.49265677", "text": "def do_preprocessing(self, scores):\n # Preprocessing\n print(\"--------------------- BEGIN PREPROCESSING ---------------------\")\n new_scores = []\n for piece in scores:\n piece = self.erase_double_notes(piece)\n piece = self.flatten(piece) #TODO find mistake in flatten that divisions isn't anymore in the piece\n new_scores.append(piece)\n\n return new_scores", "title": "" }, { "docid": "eb91a6cbdae7dc5d08f445b5afb6c39f", "score": "0.49264088", "text": "def RemoveHighPval(X,y,threshold=0.05):\r\n changed=True\r\n included=list(set(X.columns)) #currently included feature list \r\n while changed:\r\n changed=False\r\n new_pval=pd.Series(index=included) #create the p_value list for currently included feature\r\n #fit the Logistics Regression Model with currently includede feature and record the p-value\r\n for column in included:\r\n model=sm.Logit(y,sm.add_constant(pd.DataFrame(X[included]))).fit()\r\n new_pval[column]=model.pvalues[column]\r\n #get the maximum p-value between all currently excluded feature\r\n worst_pval=new_pval.max()\r\n print (model.summary2())\r\n print (new_pval)\r\n #add the feature to included list if the p_value is smaller than the threshold\r\n if worst_pval>threshold:\r\n worst_feature=new_pval.argmax()\r\n included.remove(worst_feature)\r\n changed=True\r\n \r\n return included", "title": "" }, { "docid": "d4f908f9f557551ec9996512063cedea", "score": "0.49170107", "text": "def filter_extremes(self, no_below=5, no_above=0.5, keep_n=100000):\n no_above_abs = int(no_above * self.num_docs) # convert fractional threshold to absolute threshold\n ok = [item for item in iteritems(self.dfs_debug) if no_below <= item[1] <= no_above_abs]\n ok = frozenset(word for word, freq in sorted(ok, key=lambda item: -item[1])[:keep_n])\n\n self.dfs_debug = dict((word, freq)\n for word, freq in iteritems(self.dfs_debug)\n if word in ok)\n self.token2id = dict((token, tokenid)\n for token, tokenid in iteritems(self.token2id)\n if token in self.dfs_debug)\n self.id2token = dict((tokenid, set(token for token in tokens if token in self.dfs_debug))\n for tokenid, tokens in iteritems(self.id2token))\n self.dfs = dict((tokenid, freq)\n for tokenid, freq in iteritems(self.dfs)\n if self.id2token.get(tokenid, set()))\n\n # for word->document frequency\n logger.info(\n \"kept statistics for which were in no less than %i and no more than %i (=%.1f%%) documents\",\n no_below, no_above_abs, 100.0 * no_above)", "title": "" }, { "docid": "49efb590f3f9adba4af93dce205bc030", "score": "0.4909429", "text": "def filter_dataset(dataset, min_num_img_per_class):", "title": "" }, { "docid": "5476333c71c4c6e90d6a9a862ac18667", "score": "0.49024156", "text": "def statistical_outilier_removal(self, kdtree, k=8, z_max=2 ):\n\n\t\tdistances, i = kdtree.query(kdtree.data, k=k, n_jobs=-1) \n\n\t\tz_distances = stats.zscore(np.mean(distances, axis=1))\n\n\t\tsor_filter = abs(z_distances) < z_max\n\n\t\treturn sor_filter", "title": "" }, { "docid": "a89f581d8c2abe9ef8abc2f0db537ffa", "score": "0.489656", "text": "def top_filtering(logits, top_k=0, top_p=0.0, threshold=-float('Inf'), filter_value=-float('Inf')):\n assert logits.dim() == 1 # Only work for batch size 1 for now - could update but it would obfuscate a bit the code\n top_k = min(top_k, 
logits.size(-1))\n if top_k > 0:\n # Remove all tokens with a probability less than the last token in the top-k tokens\n indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]\n logits[indices_to_remove] = filter_value\n\n if top_p > 0.0:\n # Compute cumulative probabilities of sorted tokens\n sorted_logits, sorted_indices = torch.sort(logits, descending=True)\n cumulative_probabilities = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)\n\n # Remove tokens with cumulative probability above the threshold\n sorted_indices_to_remove = cumulative_probabilities > top_p\n # Shift the indices to the right to keep also the first token above the threshold\n sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()\n sorted_indices_to_remove[..., 0] = 0\n\n # Back to unsorted indices and set them to -infinity\n indices_to_remove = sorted_indices[sorted_indices_to_remove]\n logits[indices_to_remove] = filter_value\n\n indices_to_remove = logits < threshold\n logits[indices_to_remove] = filter_value\n return logits", "title": "" } ]
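The detection-filtering negatives above all reduce to some form of score thresholding plus greedy IoU suppression, and the py_cpu_nms entries spell the suppression loop out in full. The toy boxes and threshold below are invented for illustration and assume the quoted py_cpu_nms definition is in scope; they are not part of the dataset row.

import numpy as np

# Toy detections as [x1, y1, x2, y2, score]; values invented for this sketch.
dets = np.array([
    [10.0, 10.0, 50.0, 50.0, 0.9],      # kept: highest score
    [12.0, 12.0, 52.0, 52.0, 0.8],      # suppressed: IoU with the first box is ~0.83 > 0.5
    [100.0, 100.0, 140.0, 140.0, 0.7],  # kept: no overlap with the first box
])
keep = py_cpu_nms(dets, thresh=0.5)     # returns the indices [0, 2]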
f49af8f74c165029b6ecd7c9bd35a5ef
Compute the limit of the expression at the infinity. Examples ======== >>> limitinf(exp(x)*(exp(1/x - exp(-x)) - exp(1/x)), x) -1
[ { "docid": "2140363f8cc9d011cf824d6cdb5a0520", "score": "0.70066345", "text": "def limitinf(e, x):\n # Rewrite e in terms of tractable functions only:\n e = e.rewrite('tractable', wrt=x)\n\n if not e.has(x):\n # This is a bit of a heuristic for nice results. We always rewrite\n # tractable functions in terms of familiar intractable ones.\n # TODO: It might be nicer to rewrite the exactly to what they were\n # initially, but that would take some work to implement.\n return e.rewrite('intractable')\n\n c0, e0 = leadterm(e, x)\n sig = signinf(e0, x)\n if sig == 1:\n return Integer(0)\n if sig == -1:\n return signinf(c0, x)*oo\n if sig == 0:\n return limitinf(c0, x)\n raise NotImplementedError(f'Result depends on the sign of {sig}.')", "title": "" } ]
[ { "docid": "5f7b5f373758238dd0dc4b1e16ef8479", "score": "0.6337069", "text": "def inf(self):\n return self._inf", "title": "" }, { "docid": "b3c056338e95bc7f89d42d99169dc947", "score": "0.62408656", "text": "def inf(self):\r\n\t\treturn float('inf')", "title": "" }, { "docid": "f0ae912510266c042e34edba93504cdd", "score": "0.6229042", "text": "def _infimum_key(expr):\n try:\n infimum = expr.inf\n assert infimum.is_comparable\n except (NotImplementedError,\n AttributeError, AssertionError, ValueError):\n infimum = S.Infinity\n return infimum", "title": "" }, { "docid": "3a590bd1828ab8b0cdde1de0070c65cb", "score": "0.6221621", "text": "def test_infinite_value(self):\n test_m = [124, -678, float('inf'), 24, 999]\n self.assertEqual(max_integer(test_m), float('inf'))", "title": "" }, { "docid": "bb60877b7ae78f47959785be1f8c336b", "score": "0.60393906", "text": "def signinf(e, x):\n from ..functions import sign\n\n if not e.has(x):\n return sign(e).simplify()\n if e == x:\n return Integer(1)\n if e.is_Mul:\n a, b = e.as_two_terms()\n return signinf(a, x)*signinf(b, x)\n if e.is_Pow and signinf(e.base, x) == 1:\n return Integer(1)\n\n c0, _ = leadterm(e, x)\n return signinf(c0, x)", "title": "" }, { "docid": "df03d138c6a3cab2014e9725fd1233d9", "score": "0.6001559", "text": "def _isinfinity(self):\r\n if self._exp == 'F':\r\n if self._sign:\r\n return -1\r\n return 1\r\n return 0", "title": "" }, { "docid": "73c45be947831d5638ee77a189164f3c", "score": "0.5941031", "text": "def default_inf(func):\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n try:\n return func(*args, **kwargs)\n except exc.HistoryException:\n return float(\"inf\")\n\n return wrapper", "title": "" }, { "docid": "4aaf3519eac146b2cfee2aed30098720", "score": "0.59017694", "text": "def limit(x):\n return max([-1.0, min([1.0, x])])", "title": "" }, { "docid": "40a828b55e219d7d5e65340beba0e106", "score": "0.5894332", "text": "def test_inf(self):\n value = float(\"inf\")\n self.assertEqual(substitute_inf(value), \"Infinity\")", "title": "" }, { "docid": "5ea6b9f61e403c72592a8a4c7c43b9f7", "score": "0.5854853", "text": "def _put01_inf(v):\n # Arctan alternative\n # return tan(pi*(v-0.5))\n\n v = (v - 0.5) * 4 * _E_MAX\n s = math.copysign(1., v)\n v *= s\n e = int(v)\n m = v - e\n x = math.ldexp(s * m, e + _E_MIN)\n # print \"< x,e,m,s,v\",x,e+_e_min,s*m,s,v\n return x", "title": "" }, { "docid": "cc992a557503502de5ca4f7ea0ea0b2a", "score": "0.578106", "text": "def poisson_limit(self, level=0.9):\n limit = signal_upper_limit(self.background, self.observed, level)\n try:\n return self.sigma * limit / self.signal(self.sigma)\n except ZeroDivisionError:\n warn(\"signal was zero; assume limit inf\")\n return np.inf", "title": "" }, { "docid": "453ab212907ad417444638ee3c98e853", "score": "0.5763142", "text": "def inf_like(x):\n return inf if np.isscalar(x) else np.full_like(x, inf, dtype=float)", "title": "" }, { "docid": "ebe4646beae7e8ef560c55eafccb056a", "score": "0.5740616", "text": "def is_infinite(self):\r\n return self._exp == 'F'", "title": "" }, { "docid": "2916e405d07b192f37f4b8fc094894b3", "score": "0.57167184", "text": "def _get01_inf(x):\n # Arctan alternative\n # Arctan is approximately linear in (-0.5, 0.5), but the\n # transform is only useful up to (-10**15,10**15).\n # return atan(x)/pi + 0.5\n m, e = math.frexp(x)\n s = math.copysign(1.0, m)\n v = (e - _E_MIN + m * s) * s\n v = v / (4 * _E_MAX) + 0.5\n v = 0 if _E_MIN > e else (1 if _E_MAX < e else v)\n return v", "title": "" }, { "docid": 
"fae0ee1fb0fb762d7be034020668beb8", "score": "0.57029104", "text": "def solr_inf(x):\n if x == -1:\n return '*'\n return x", "title": "" }, { "docid": "8381418dbe2e406e9a297956e5c18e4a", "score": "0.5698584", "text": "def safe_exp(value):\n try:\n ans = math.exp(value)\n except OverflowError:\n ans = float(\"inf\")\n return ans", "title": "" }, { "docid": "4fb9105f0ad8c5b171cbf86b818ad822", "score": "0.56789726", "text": "def norml_inf(self) -> float:\n ...", "title": "" }, { "docid": "7a9ede36dd686dc8cb3e0cf48937e289", "score": "0.55125654", "text": "def _detect_nan_inf(tensor):\n\n if tensor.dtype.is_floating:\n mask = math_ops.reduce_any(\n gen_math_ops.logical_or(\n gen_math_ops.is_nan(tensor), gen_math_ops.is_inf(tensor)))\n output_tensor = control_flow_ops.cond(mask,\n lambda: constant_op.constant(1.0),\n lambda: constant_op.constant(0.0))\n else:\n output_tensor = constant_op.constant(0.0)\n # The shape has to be 1. Set it if it does not have the information.\n output_tensor = array_ops.reshape(output_tensor, [1])\n return output_tensor", "title": "" }, { "docid": "b0ce07c5a4bed7ace20d4d429134f365", "score": "0.5498537", "text": "def ferm(x):\n if (x>100): return 0\n if (x<-100): return 1\n return 1/(exp(x)+1)", "title": "" }, { "docid": "91de65e1d319db1c06b4c0afb4e440ac", "score": "0.54954135", "text": "def test_inf(self):\r\n # Ability to convert to infinity depends on your C\r\n # Float library as well as the python version.\r\n try:\r\n inf = float('infinity')\r\n except ValueError:\r\n raise SkipTest\r\n self.assertEqual(self.validator.to_python('infinity'), inf)", "title": "" }, { "docid": "c89d4ec2f8e7de9d83e5fd0c30652a14", "score": "0.54902464", "text": "def l_inf_norm(x):\n max = -1 # (since at least 1 x is positive (should be))\n for i in x:\n if i > max:\n max = i\n return max", "title": "" }, { "docid": "08958e63572454625f57585a31e55847", "score": "0.54776454", "text": "def lp_norm_inf_compute(abs_x, y, p, axes, keepdim, kernel_name):\n if (p == \"inf\") or (p == _CONST_INF):\n res = te.lang.cce.reduce_max(abs_x, axis=axes, keepdims=keepdim)\n else:\n # p is \"-inf\"\n res = te.lang.cce.reduce_min(abs_x, axis=axes, keepdims=keepdim)\n return res", "title": "" }, { "docid": "ad7c554c36f780e83ab3e91d38c2ddfa", "score": "0.5449924", "text": "def isinf(x,out):\n \n \n return bool()", "title": "" }, { "docid": "a23a6afbbc105967f1ef067e24ea2762", "score": "0.5421496", "text": "def is_Infinite(x):\n return isinstance(x, InfinityElement)", "title": "" }, { "docid": "35f9c0bc65ad35e9952424f88317549c", "score": "0.53843963", "text": "def test_get_exceed_limit_count(self):\n self.get_fibonacci(app.config[\"MAXIMUM_FIBONACCI_COUNT\"] + 1, 400)", "title": "" }, { "docid": "ae320db0c3d59d7971c35567db6320cd", "score": "0.53633136", "text": "def infinity_norm(x: Array) -> float:\n return np.abs(x).max()", "title": "" }, { "docid": "63acecff1da0a9583bc81215d1e4184b", "score": "0.53312016", "text": "def exp_n_x(n, x):\n if n == 0:\n return 1\n else:\n return exp_n_x(n - 1, x) + (x ** n) / factorial(n)", "title": "" }, { "docid": "f21a775a301385d4978e162c5a4061dc", "score": "0.5273511", "text": "def replinf(x, repl):\n for i,_ in enumerate(x):\n if x[i] == np.inf:\n x[i] = repl\n return x", "title": "" }, { "docid": "3ad2f04812766bff8caf36e05f812675", "score": "0.52212906", "text": "async def xplimit(self, ctx, *, limit = None):\r\n\r\n\t\tif not await Utils.is_admin_reply(ctx): return\r\n\t\t\t\r\n\t\tif limit == None:\r\n\t\t\t# print the current limit\r\n\t\t\tserver_lim = 
self.settings.getServerStat(ctx.guild, \"XPLimit\")\r\n\t\t\tif server_lim == None:\r\n\t\t\t\tawait ctx.send(\"There is no xp limit.\")\r\n\t\t\t\treturn\r\n\t\t\telse:\r\n\t\t\t\tawait ctx.send(\"The current xp limit is *{:,}*.\".format(server_lim))\r\n\r\n\t\ttry:\r\n\t\t\tlimit = int(limit)\r\n\t\texcept Exception:\r\n\t\t\treturn await ctx.send(\"Limit must be an integer.\")\r\n\r\n\t\tif limit < 0:\r\n\t\t\tself.settings.setServerStat(ctx.guild, \"XPLimit\", None)\r\n\t\t\tawait ctx.send(\"Xp limit removed!\")\r\n\t\telse:\r\n\t\t\tself.settings.setServerStat(ctx.guild, \"XPLimit\", limit)\r\n\t\t\tawait ctx.send(\"Xp limit set to *{:,}*.\".format(limit))", "title": "" }, { "docid": "fea72ab0265926c8fa7e3fdd475e8c9e", "score": "0.52129704", "text": "def int_or_none(x, limit):\n try:\n value = int(x)\n if 1 <= value <= limit:\n return value\n else:\n return None\n except ValueError:\n return None", "title": "" }, { "docid": "e57a8273a6f06423c0a0ca74659bb701", "score": "0.5203826", "text": "def is_infinite(self, a):\r\n return a.is_infinite()", "title": "" }, { "docid": "af64af9996e9b5a437c7c0797cb2b111", "score": "0.51997566", "text": "def logsumexp(*args):\n if all(a == NEG_INF for a in args):\n return NEG_INF\n a_max = max(args)\n lsp = math.log(sum(math.exp(a - a_max) for a in args))\n return a_max + lsp", "title": "" }, { "docid": "1d6a6f50abd4fb9d0999b63fd81d9900", "score": "0.5179995", "text": "def chi_squared_limit(self, level=0.9):\n if np.isinf(self.best_fit_sigma):\n warn(\"best-fit inf; assume limit inf\")\n return np.inf\n\n critical = chi2.isf(2. * (1. - level), 1) # 1/2 chi-squared with 1 dof\n goal = critical + self.minimum_chi_squared\n\n def crossing(log_sigma):\n \"\"\"\n @returns Function that we desire zero-crossing\n \"\"\"\n return self.chi_squared(exp(log_sigma)) - goal\n\n try:\n log_limit = safe_bisect(crossing, *self.BOUNDS, rtol=1E-6, xtol=1E-6)\n except RuntimeError:\n warn(\"could not find upper limit; assume inf\")\n return np.inf\n\n return exp(log_limit)", "title": "" }, { "docid": "d960085beb24fe9b068755ecef6585d4", "score": "0.51486254", "text": "def process_max_frames_arg(max_frames_arg):\n if max_frames_arg > 0:\n return max_frames_arg\n return float('inf')", "title": "" }, { "docid": "d0b175e0a9054cbe1e6e8e0fe1e44e89", "score": "0.51318175", "text": "def is_infinite(self):\n # TODO\n return None", "title": "" }, { "docid": "28d45c533e4554203bd4b48cb05bd9e1", "score": "0.5131364", "text": "def minFunc(param):\n\n logLikelihood = GeneralizedExtremeValueDistribution\\\n .logLikelihood(param[0], param[1], param[2], data)\n\n return -logLikelihood if logLikelihood is not None else np.inf", "title": "" }, { "docid": "19752fe845e2beb7474f650ad0c1df06", "score": "0.5127281", "text": "def test_max_inflows_pos(self, *args, **kwargs):\n # This method should always return Money('Infinity')\n account = self.AccountType(\n self.owner, *args, balance=100, **kwargs)\n result = account.max_inflows(self.timing)\n for value in result.values():\n self.assertEqual(value, Money('Infinity'))", "title": "" }, { "docid": "15cb95f94c94533dc048e9f8323d3f74", "score": "0.51271427", "text": "def cflmax(self):\n def _gain_imag(sigma):\n return abs(self.propagator(1j*sigma))-1.\n return newton(_gain_imag, 10.)", "title": "" }, { "docid": "910055874af59edc72ff98a7f26ed187", "score": "0.5111549", "text": "def f(x):\n\n machine_eps = np.finfo(float).eps\n \n return fd.exp(-new_max(x,machine_eps)**(-1.0)) * heaviside(x)", "title": "" }, { "docid": "81382e33cc74fa71c4cf6361ebbe8596", 
"score": "0.5080417", "text": "def norm_inf(u):\n return abs(u).max()", "title": "" }, { "docid": "2b88e4d6492101a3e48ec48587a4df0a", "score": "0.50673544", "text": "def checkForImmediateExpression(self, op: str, \\\n lowerLimit=-sys.maxsize, upperLimit=sys.maxsize, targetBits=16, orgpos=0):\n \n # this mainly creates one context, in which the raise of LabelParseException\n # will immediately stop recursion\n try: \n res = self.__checkForImmediateExpression(op, lowerLimit=lowerLimit, upperLimit=upperLimit, \\\n targetBits=targetBits, orgpos=orgpos)\n return res\n except LabelParseException as err: \n OPTIONS.debug (2, \".. parsing expression %s gave: %s\" % (op, str(err)))\n \n return None", "title": "" }, { "docid": "4520f29947b1f447d317c848d5a9e0f8", "score": "0.50592524", "text": "def isinfinite(self):\r\n return self.start == -Inf or self.end == Inf", "title": "" }, { "docid": "f4ff6b683348976f4226369eac8db087", "score": "0.50566554", "text": "def is_finite(self, x: float) -> bool:\n return not math.isnan(x) and not math.isinf(x)", "title": "" }, { "docid": "4c147be69b5357d0d46f1cf76708759b", "score": "0.50500506", "text": "def _ln_exp_bound(self):\r\n\r\n # for 0.1 <= x <= 10 we use the inequalities 1-1/x <= ln(x) <= x-1\r\n adj = self._exp + len(self._int) - 1\r\n if adj >= 1:\r\n # argument >= 10; we use 23/10 = 2.3 as a lower bound for ln(10)\r\n return len(str(adj*23//10)) - 1\r\n if adj <= -2:\r\n # argument <= 0.1\r\n return len(str((-1-adj)*23//10)) - 1\r\n op = _WorkRep(self)\r\n c, e = op.int, op.exp\r\n if adj == 0:\r\n # 1 < self < 10\r\n num = str(c-10**-e)\r\n den = str(c)\r\n return len(num) - len(den) - (num < den)\r\n # adj == -1, 0.1 <= self < 1\r\n return e + len(str(10**-e - c)) - 1", "title": "" }, { "docid": "6234e34602e542b56a3841c429cbfa7f", "score": "0.5048374", "text": "def confine_infinite(dataframe):\r\n# number_features = [f_ for f_ in dataframe.columns \\\r\n# if dataframe[f_].dtype != 'object']\r\n number_features = dataframe.select_dtypes('number').columns.tolist()\r\n for f in number_features:\r\n col = dataframe[f]\r\n col_inf_n = np.isneginf(col)\r\n col_inf_p = np.isposinf(col)\r\n col[col_inf_n]=np.nanmin(col) \r\n col[col_inf_p]=np.nanmax(col)\r\n \r\n debug('confine_infinite: '+f)\r\n debug(np.sum(col_inf_n))\r\n debug(np.sum(col_inf_p))\r\n debug(np.nanmin(col))\r\n debug(np.nanmax(col))\r\n return dataframe", "title": "" }, { "docid": "d5554360bcd171699cd35ab9f923a78c", "score": "0.5048304", "text": "def exp_sigmoid(x, exponent=10.0, max_value=2.0, threshold=1e-7):\n x = tf_float32(x)\n return max_value * tf.nn.sigmoid(x)**tf.math.log(exponent) + threshold", "title": "" }, { "docid": "a26ee9f8065b7679c3e93aef6680ae16", "score": "0.5043954", "text": "def dinf(self):\n if not self.hyperbolic:\n raise ValueError(\"dinf undefined : orbit not hyperbolic\")\n\n return abs(self.kep.a * self.kep.e) * np.sqrt(1 - (1 / self.kep.e) ** 2)", "title": "" }, { "docid": "0fca6edea27b2b7d3e2d512fcd80c404", "score": "0.50420916", "text": "def GN_max(xi, epsilon):\n u, v = xi.real, xi.imag\n y = v - epsilon\n gamma = tail[0]\n Ei = -E1(1, y*gamma)\n return (1/y + 4*log(gamma*(1-1/2/pi)))*exp(-y*gamma) + 2*Ei/log(gamma/2/pi)", "title": "" }, { "docid": "69eba3ed57ce61ab0aad68e4f421f80e", "score": "0.5022108", "text": "def replace_nan_and_inf(a, replacement=0):\n a[torch.isnan(a)] = replacement\n a[a == float('inf')] = replacement\n a[a == -float('inf')] = replacement\n return a", "title": "" }, { "docid": "6b3a5dfd790e04976b8b202361bc4d2a", "score": "0.4979272", 
"text": "def limitValue(*args, **kwargs):\n \n pass", "title": "" }, { "docid": "03b90794b24ccdcc43a0d791229ce12f", "score": "0.49624068", "text": "def distanciaInf(x, y):\n res = 0\n if x[1] == float('inf') and y[1] == float('inf'):\n res = abs(x[0]-y[0])\n elif x[1] == float('inf') and y[1] != float('inf'):\n res = float('inf')\n elif x[1] != float('inf') and y[1] == float('inf'):\n res = float('inf')\n else:\n res = max(abs(x[0]-y[0]), abs(x[1]-y[1]))\n\n return res", "title": "" }, { "docid": "29f95ba5ea79d7a9d364c597e5ee24e4", "score": "0.49611208", "text": "def fibonacci_recursive(limit):\n\n if limit <= 1:\n return limit\n\n return fibonacci_recursive(limit - 1) + fibonacci_recursive(limit - 2)", "title": "" }, { "docid": "5f435b74418d686a4961c133ea088d93", "score": "0.49575996", "text": "def fibonacci(limit):\n \n if limit <= 2:\n return 1\n else:\n return fibonacci(limit - 1) + fibonacci(limit - 2)", "title": "" }, { "docid": "ba217188f484ebcb2c42c1e98e257e3c", "score": "0.49447665", "text": "def isfinite(value):\n return (isinstance(value, int) or\n (isinstance(value, float) and not math.isinf(value)\n and not math.isnan(value)) or\n (isinstance(value, decimal.Decimal) and value.is_finite()))", "title": "" }, { "docid": "909ef04c223847d52de588954ac6b09f", "score": "0.49386385", "text": "def fix_infs(df, col):\n df[col] = df[col].replace([np.inf, -np.inf], np.nan)\n df[col] = df[col].fillna(value=0)", "title": "" }, { "docid": "4e491d6c6f894aec2c9f78a1f829c9bf", "score": "0.49324352", "text": "def is_infinite(x) -> ProcessBuilder:\n return _process('is_infinite', x=x)", "title": "" }, { "docid": "7a4dc21d7411d826d5093a05ebe670ed", "score": "0.49322984", "text": "def replace_inf(t):\n return torch.where(torch.isinf(t), torch.zeros_like(t), t)", "title": "" }, { "docid": "c63e05a62bf2f3e4f77189acfed1bce8", "score": "0.49280274", "text": "def test_signed_infinity(pos_inf):\n msg = 'testing {} ({})'.format(pos_inf, type(pos_inf))\n assert InfinityRing(pos_inf) is infinity, msg\n assert InfinityRing(-pos_inf) is minus_infinity, msg\n assert infinity == pos_inf, msg\n assert not(infinity > pos_inf), msg\n assert not(infinity < pos_inf), msg\n assert minus_infinity == -pos_inf, msg\n assert not(minus_infinity > -pos_inf), msg\n assert not(minus_infinity < -pos_inf), msg\n assert pos_inf > -pos_inf, msg\n assert infinity > -pos_inf, msg\n assert pos_inf > minus_infinity, msg", "title": "" }, { "docid": "aadab378dc21978c30fe53fedbcac398", "score": "0.49189115", "text": "def log_sum_exp(values):\n \n if len(values) == 0:\n # No values\n # Return the log of the additive identity\n return float(\"-inf\")\n \n # Make it be an array\n values = numpy.asarray(values)\n \n # What value should we pull out?\n # First, what values are finite?\n # Not the +inf ones!\n finite_values = values[values != float(\"inf\")]\n if len(finite_values) != len(values):\n # We had a +inf in there. That's log(+inf).\n # So we can cheat and return +inf\n return float(\"+inf\")\n \n # We don't care about any -infs\n finite_values = finite_values[finite_values != float(\"-inf\")]\n\n if len(finite_values) == 0:\n # All our values are infinities. 
Return one arbitrarily.\n return values[0]\n \n # Find the smallest and biggest finite values\n biggest = numpy.max(finite_values)\n smallest = numpy.min(finite_values)\n\n # Which ever has greater magnitude is the log of the factor we factor out.\n if abs(smallest) > biggest:\n log_factor = smallest\n else:\n log_factor = biggest\n \n \n # Enable errors on dubious math, so we don't get nans\n old_settings = numpy.seterr(all=\"raise\")\n \n try:\n # Divide everything by factor, sum, and then multiply by factor. Save \n # the value. \n to_return = log(numpy.sum(exp(values - log_factor))) + log_factor\n except FloatingPointError:\n # This can fail if we try to add an absurdly tiny number to an absurdly\n # tinier number. If one is e^-800 and the other e^-3000, we try to add \n # e^0 and e^+2200 when we do the trick, and e^+2200 is far too big to \n # exist.\n # The solution: throw away the min value (since it's too small to \n # matter) and recurse.\n # Yes, we recurse on exception.\n\n # What's left?\n remaining = numpy.delete(finite_values, numpy.argmin(finite_values))\n to_return = log_sum_exp(remaining)\n\n # Put back Numpy error settings\n numpy.seterr(**old_settings)\n \n # Return it\n return to_return", "title": "" }, { "docid": "8621946b1243fff166f850fb2368eb9c", "score": "0.49153635", "text": "def exp_softmax(x):\n rebase_x = x - np.max(x)\n return np.exp(x) / np.sum(np.exp(x))", "title": "" }, { "docid": "68b9cb30f87de9c3f41c37b0be8dfac2", "score": "0.4909963", "text": "def clamp(val, limit):\n return max(min(limit, val), -limit)", "title": "" }, { "docid": "6b300fac064969bff711e8dd417e0758", "score": "0.49075326", "text": "def is_infinite(self) -> Series:", "title": "" }, { "docid": "4751f88aeb6eaec271248ee4b67f022c", "score": "0.48994896", "text": "def _calculate_limit(self, default_limit, max_limit):\n if self._limit is None:\n return default_limit\n\n return min(self._limit, max_limit)", "title": "" }, { "docid": "92dddba3d473e88ef7ab092b8e8c2575", "score": "0.48900223", "text": "def upper_limit():\n return 0.3", "title": "" }, { "docid": "14153a60fc145b891f2f76242f6ee18e", "score": "0.48832956", "text": "def perfect_power_generator(exponent: int, upper_limit: typing.Optional[int]=None) -> typing.Generator[int, None, None]:\n current = 1\n while (result := current ** exponent) < (upper_limit or result + 1):\n current += 1\n yield result\n return None", "title": "" }, { "docid": "e8701eea69d6f0589d63bfa6337c04ad", "score": "0.48808536", "text": "def test_max_inflows_neg(self, *args, **kwargs):\n # This method should always return Money('Infinity')\n account = self.AccountType(\n self.owner, *args, balance=-100, **kwargs)\n result = account.max_inflows(self.timing)\n for value in result.values():\n self.assertEqual(value, Money('Infinity'))", "title": "" }, { "docid": "a40d8a028c0d5d529c919cf04b4e5cf3", "score": "0.48769423", "text": "def ceilout(x):\n if (x > 0):\n return math.ceil(x)\n else:\n return(math.ceil(x-1 + 1e-6)) # Avoid a bug that ceilout(1) = 1, but ceilout(-1) = 2", "title": "" }, { "docid": "2978cf996289191cf7f0c762bf4959b0", "score": "0.48691636", "text": "def less_than_infinity(self):\n try:\n return self._less_than_infinity\n except AttributeError:\n self._less_than_infinity = LessThanInfinity(self)\n return self._less_than_infinity", "title": "" }, { "docid": "5294e23cca72a44089765c8154769d93", "score": "0.48655137", "text": "def visitLimitClause(self, ctx: MySqlParser.LimitClauseContext) -> SQLToken:\n if ctx.OFFSET():\n offset, limit = 
CONST(int(ctx.DECIMAL_LITERAL(1).getText())), CONST(int(ctx.DECIMAL_LITERAL(0).getText()))\n elif ctx.COMMA():\n offset, limit = CONST(int(ctx.DECIMAL_LITERAL(0).getText())), CONST(int(ctx.DECIMAL_LITERAL(1).getText()))\n else:\n offset, limit = CONST(0), CONST(int(ctx.DECIMAL_LITERAL(0).getText()))\n return SQLToken(LIMIT, (offset, limit))", "title": "" }, { "docid": "3250080a1e6869ba9fced551115ae91c", "score": "0.48651958", "text": "def isinfinite(self):\r\n return self._ranges.first.value.start == -Inf or self._ranges.last.value.end == Inf", "title": "" }, { "docid": "62dee089ba1e927cad34045cfdbc9c87", "score": "0.4858119", "text": "def change_nan_inf(data):\n\n data_array = np.nan_to_num(\n data * 1,\n nan=np.nanmean(data[np.isfinite(data)], axis=0),\n posinf=np.nanmean(data[np.isfinite(data)], axis=0),\n neginf=np.nanmean(data[np.isfinite(data)], axis=0),\n )\n\n return data_array", "title": "" }, { "docid": "d4912f3337ff6a8d41e06e6d75fbe54c", "score": "0.4853928", "text": "def bounded(val, floor, ceiling):\n if val is None or np.ma.is_masked(val) or math.isnan(val):\n return None\n val = float(val)\n # belt and suspenders check here\n if math.isnan(val) or val < floor or val > ceiling:\n return None\n return val", "title": "" }, { "docid": "59534eb484f197bb5af6ac4ab9a683ff", "score": "0.48448563", "text": "def mean_finite_(x, min_finite=1):\n isfin = np.isfinite(x)\n if np.count_nonzero(isfin) > min_finite:\n return np.mean(x[isfin])\n else:\n return np.nan", "title": "" }, { "docid": "1444448f81dec1e6b2482111ced2c25e", "score": "0.483708", "text": "def power_limit(self):\n v = self.entry['enforced.power.limit']\n return int(v) if v is not None else None", "title": "" }, { "docid": "759a065a1ab4c190733acfe350cb25c0", "score": "0.48320174", "text": "def fc_find_limit(x_value, x_values, y_values):\n if x_value > max(x_values):\n raise ValueError(\"Measured x outside of confidence belt!\")\n\n # Loop through the x-values in reverse order\n for i in reversed(range(len(x_values))):\n current_x = x_values[i]\n # The measured value sits on a bin edge. 
In this case we want the upper\n # most point to be conservative, so it's the first point where this\n # condition is true.\n if x_value == current_x:\n return y_values[i]\n # If the current value lies between two bins, take the higher y-value\n # in order to be conservative.\n if x_value > current_x:\n return y_values[i + 1]", "title": "" }, { "docid": "67031d373c005b05f17521a345c41de5", "score": "0.48298088", "text": "def Imaxima(self):\r\n try:\r\n return 4.65 * pow(self.Qfinal, -0.67)\r\n except Exception as e:\r\n try_Except(e)", "title": "" }, { "docid": "12f01d051ea9dd85dc12effe9927b49b", "score": "0.48212615", "text": "def lnprior(pi):\n if np.all(pi >= 0) and np.sum(pi) < 1:\n #return np.sum(pi*np.log(pi))\n return 0.0\n else:\n return -np.inf", "title": "" }, { "docid": "2deac1eae4c54b865d2c61b6cd3bd861", "score": "0.48022377", "text": "def truncate(x):\n return max(min(x, 1.0), 0.0)", "title": "" }, { "docid": "8525fa5ec44c54edd02466262f807d74", "score": "0.48016217", "text": "def _repr_(self):\n return \"A number less than infinity\"", "title": "" }, { "docid": "a0d99a0cb35359a4dd3e3cf867fb5c8a", "score": "0.479593", "text": "def log1mexp_numpy(x: float) -> float:\n return np.where(x < 0.683, np.log(-np.expm1(-x)), np.log1p(-np.exp(-x)))", "title": "" }, { "docid": "fa67466801dff51979fe2def1ec1c9b8", "score": "0.47893766", "text": "def single_exp_hetero(x,f, n = 0.):\n return n + 2 * exp( - (f * x))", "title": "" }, { "docid": "5d2c82b7ab9803089ca6d2b7e3b23fc5", "score": "0.4788153", "text": "def test_finite_field(self):\n p1 = sympy.Poly([1, 5, 3, 7, 3], sympy.abc.x, domain=sympy.GF(11))\n p2 = sympy.Poly([0], sympy.abc.x, domain=sympy.GF(11))\n p3 = sympy.Poly([10], sympy.abc.x, domain=sympy.GF(11))\n self.assertEqual(symbaudio.utils.poly.max_coeff(p1), 7)\n self.assertEqual(symbaudio.utils.poly.max_coeff(p2), 0)\n self.assertEqual(symbaudio.utils.poly.max_coeff(p3), 10)", "title": "" }, { "docid": "53abd3ad7657834d6f008b379f04efcf", "score": "0.47805253", "text": "def upper_limit():\n limit = math.factorial(9) * num_digits(math.factorial(9))\n return limit", "title": "" }, { "docid": "95b0c62fe3fbc554a04e435633c276ac", "score": "0.47796988", "text": "def ceil(value: float):\n return -int(-value//1)", "title": "" }, { "docid": "8273995ffc727fef40eb7cc8b9107067", "score": "0.4778565", "text": "def _validate_limit(self, limit):\n\n if limit is not None and ((limit < 1) or (limit > 50)):\n raise InvalidParameterError(\"Limit must be between 1 and 50\")\n\n return limit", "title": "" }, { "docid": "69549e9eefd3c25eb85dd019a44bd030", "score": "0.47782227", "text": "def sp_maximum_1D ( fun , xmin , xmax , x0 = None , *args ) :\n funmin = lambda x , *a : -1.0 * ( float ( fun ( x , *a ) ) )\n return sp_minimum_1D ( funmin , xmin , xmax , x0 , *args )", "title": "" }, { "docid": "7df8d285812dffb0c42bc7d19ef25294", "score": "0.47766235", "text": "def test_isfinite5():\n x = np.array([float(\"-inf\"), float(\"-inf\"), float(\"inf\")])\n res = np.isfinite(x)\n obj.run(res=res, x=x)", "title": "" }, { "docid": "17e5df3bc1f80d353dbb7e4201026ba0", "score": "0.4776557", "text": "def one_d_cutoff(x,y,w,eps):\n\n #return Iab(x,0.0,1.0)/(1.0 + fd.exp(1.0/x + 1.0/(1.0-x))) + Iab(x,1.0,100.0) # Need to stick a warning in if this is used\n \n return one_d_transition(x-y+w/2.0+eps,eps)*one_d_transition(y+w/2.0+eps-x,eps)", "title": "" }, { "docid": "4ec2b00b1decaee5113eefa5979e5f7e", "score": "0.47739488", "text": "def fb(x):\n return np.exp(-100 * (x**2))", "title": "" }, { "docid": 
"8a7feacc42c4f44ab41eeeb19c14a457", "score": "0.47678283", "text": "def inf_norm(self):\n return _vnl_vectorPython.vnl_vectorUL_inf_norm(self)", "title": "" }, { "docid": "66145ad48cda49fade18d5f620ae0d37", "score": "0.476507", "text": "def softplusinv(y):\n #return np.log(np.expm1(y))\n s = np.sign(y)\n with np.errstate(divide='ignore'):\n return np.log(-s*np.expm1(-np.abs(y))) + np.maximum(y,0)", "title": "" }, { "docid": "880bd8ccac21387cc83bee8e9d9c9c93", "score": "0.4764338", "text": "def _get_limit(self, req):\n try:\n limit = int(req.params.get('limit', CONF.limit_param_default))\n except ValueError:\n raise exc.HTTPBadRequest(_(\"limit param must be an integer\"))\n\n if limit < 0:\n raise exc.HTTPBadRequest(_(\"limit param must be positive\"))\n\n return min(CONF.api_limit_max, limit)", "title": "" }, { "docid": "880bd8ccac21387cc83bee8e9d9c9c93", "score": "0.4764338", "text": "def _get_limit(self, req):\n try:\n limit = int(req.params.get('limit', CONF.limit_param_default))\n except ValueError:\n raise exc.HTTPBadRequest(_(\"limit param must be an integer\"))\n\n if limit < 0:\n raise exc.HTTPBadRequest(_(\"limit param must be positive\"))\n\n return min(CONF.api_limit_max, limit)", "title": "" }, { "docid": "2de8efbe1898fbb5a750de5b6df3c5dd", "score": "0.47577786", "text": "def FN_min(xi, epsilon):\n def f(t):\n return abs(FN(xi + epsilon*exp(t*1j)))\n return minimize(f, 0).fun", "title": "" }, { "docid": "f68f41d20a8f637798a6e34c9a29fe7f", "score": "0.47521636", "text": "def MAX_VALUE(gameState, depth):\n winvar = gameState.isWin()\n losevar = gameState.isLose()\n zerovar = 0\n onevar = 1\n infvar = '-inf'\n ifvar = winvar or losevar or depth == (zerovar + 0)\n if ifvar:\n print \"Zero depth? Calculating evaluation function\"\n evalfunc = self.evaluationFunction(gameState)\n return evalfunc #return evaluation function if the gamestate is win or lose or the depth is zero\n value = float(infvar) #this is float(-inf)\n for a in gameState.getLegalActions(zerovar + 0): # for each action in legal actions\n \"\"\"Calculating expected value\"\"\"\n expvalvar = EXP_VALUE(gameState.generateSuccessor(zerovar, a), onevar + (1-onevar), depth + 0)\n #In the above variable, we calculate the expected value by generating successors\n value = max(value, expvalvar) #we calculate the max of the float(-inf) and the expected value\n return value", "title": "" }, { "docid": "91a9e8f4699a00ea7334364cf4f526e1", "score": "0.47517887", "text": "def __float__(self):\n if self._sign == 0:\n raise ValueError('unsigned infinity cannot be represented in a float')\n return float(self._sign_char + 'inf')", "title": "" }, { "docid": "77ec581d9ea6043d7fcd03d4705248cf", "score": "0.47493616", "text": "def expm1(x):\n if (x < -0.5) or (x > 0.5):\n return (np.exp(x) - 1.0)\n xx = x * x\n r = x * polevl(xx, EP)\n r /= polevl(xx, EQ) - r\n return r + r", "title": "" }, { "docid": "596aeaeecb80bda9df1d15441f8a7541", "score": "0.47491345", "text": "def softmax(x):\n x = x - x.max() # numeric stability\n dominator = np.sum(np.e ** x)\n x = np.exp(x) / dominator\n return x", "title": "" }, { "docid": "1f9d235efb5ac546389332d593760a9d", "score": "0.47428957", "text": "def trunc(x: float) -> float:\n return x if abs(x) >= epsilon else 0", "title": "" }, { "docid": "9dbd49d245db50334409e4372a9399bf", "score": "0.47389758", "text": "def safeInverse(self, value):\n if value < 1e-20 and value > -1e-20:\n return float('inf')\n else: \n return (1.0/value)", "title": "" } ]
5e7513caf8e5285320509fdb64e9dc85
Run Dragonfly Bayesian optimization to minimize function f.
[ { "docid": "aebc2f1871e7dadc85272c82d92c67c9", "score": "0.64840174", "text": "def minimize_function(self, f, n_iter=10, verbose=True, seed=None):\n if seed is not None:\n np.random.seed(seed)\n\n domain = self._get_domain()\n opt_method = 'bo'\n parsed_config = self._get_parsed_config()\n options = self._get_options()\n\n opt_val, opt_pt, history = dragonfly.minimise_function(\n func=f,\n domain=domain,\n max_capital=n_iter,\n opt_method=opt_method,\n config=parsed_config,\n options=options,\n )\n results = Namespace(opt_val=opt_val, opt_pt=opt_pt, history=history)\n\n if verbose:\n vals = results.history.query_vals\n min_vals_idx = min(range(len(vals)), key=lambda x: vals[x])\n\n print('Minimum y = {}'.format(results.opt_val))\n print('Minimizer x = {}'.format(results.opt_pt))\n print('Found at iter = {}'.format(min_vals_idx + 1))\n\n return results", "title": "" } ]
[ { "docid": "2e2b69b3ef096ecfff7938346f50cb3d", "score": "0.6806004", "text": "def minimize(f,df, x0):\n\n return scipy.optimize.minimize(\n f, x0, method='BFGS', jac=df, options={'maxiter':100})", "title": "" }, { "docid": "e3bfd706e2203a2c42a9b09bac6e85c1", "score": "0.63456947", "text": "def bayesian_optimize(self): \n if self.algorithm in ['GNB','Perceptron']:\n return self\n else:\n cs = ConfigurationSpace()\n cs.add_hyperparameters(list(getattr(util, self.algorithm + '_range')(self.hyperparameters).values()))\n #set runcount-limit in Bayesian optimization\n if self.algorithm == 'kNN':\n if self.hyperparameters['k'] == 1: num = 3\n else: num = 5\n else: num = self.num_bayesian_optimize\n scenario = Scenario({'run_obj': 'quality', 'runcount-limit': num, 'cs': cs, 'deterministic': 'true', 'memory_limit': None})\n smac = SMAC(scenario=scenario, rng=np.random.RandomState(100), tae_runner=self.error_function)\n try:\n incumbent = smac.optimize()\n finally:\n incumbent = smac.solver.incumbent\n self.error = smac.get_tae_runner().run(incumbent, 1)[1]\n self.hyperparameters = incumbent.get_dictionary()\n self.bayesian_optimized = True\n return self", "title": "" }, { "docid": "a97919618d35cb81677c530bc00a1ecd", "score": "0.62703973", "text": "def bayes_opt(f, initial_x, all_x, acquisition, max_iter=100, debug=False,\n random_state=None):\n\n X, y = list(), list()\n for x in initial_x:\n if not np.isinf(f(x)):\n y.append(f(x))\n X.append(x)\n\n best_x = X[np.argmin(y)]\n best_f = y[np.argmin(y)]\n gp = gaussian_process.GaussianProcessRegressor(random_state=random_state)\n\n if debug: # pragma: no cover\n print(\"iter\", -1, \"best_x\", best_x, best_f)\n\n for i in range(max_iter):\n gp.fit(np.array(X)[:, None], np.array(y))\n new_x = all_x[acquisition(gp, best_f, all_x).argmin()]\n new_f = f(new_x)\n if not np.isinf(new_f):\n X.append(new_x)\n y.append(new_f)\n if new_f < best_f:\n best_f = new_f\n best_x = new_x\n\n if debug: # pragma: no cover\n print(\"iter\", i, \"best_x\", best_x, best_f)\n\n if debug: # pragma: no cover\n import matplotlib.pyplot as plt\n scale = 1e6\n sort_idx = np.argsort(X)\n plt.plot(np.array(X)[sort_idx] * scale,\n np.array(y)[sort_idx] * scale, 'bo-')\n plt.axvline(best_x * scale, linestyle='--')\n plt.show()\n\n return best_x, best_f", "title": "" }, { "docid": "2d6bd43df8ffde8f4138b43bcd6f66a3", "score": "0.61310077", "text": "def test_optim():\n def f(L): return (L[0]-3.0)**2+(L[1]-4.0)**2\n x , y = optim(f,2,1000)\n print x,y", "title": "" }, { "docid": "0c00daf5b87a47f2df3ecbcfde07c34b", "score": "0.61294436", "text": "def minimise(function):\n if function.split()[0] in _workspace:\n # Find the min BF, and add it to the workspace\n rv = _workspace[function.split()[0]].min_sop()\n create_BF(rv._print())\n\n else:\n printc(\"%s is not a BF in workspace\" %function.strip(), fail)", "title": "" }, { "docid": "ce4a24b8c2a2da93783bc22d2d56925b", "score": "0.6076602", "text": "def bayesian_optimization(self):\n def optimize(**params):\n \"\"\"Optimization function for the BO.\n\n Parameters\n ----------\n params: dict\n Model's hyperparameters used in this call of the BO.\n\n Returns\n -------\n score: float\n Score achieved by the model.\n\n \"\"\"\n def fit_model(train_idx, val_idx):\n \"\"\"Fit the model. Function for parallelization.\n\n Divide the training set in a (sub)train and validation\n set for this fit. Fit the model on custom_fit if exists,\n else normally. 
Return the score on the validation set.\n\n Parameters\n ----------\n train_idx: list\n Indices for the subtrain set.\n\n val_idx: list\n Indices for the validation set.\n\n Returns\n -------\n score: float\n Score of the fitted model on the validation set.\n\n \"\"\"\n X_subtrain = self.X_train.loc[train_idx]\n y_subtrain = self.y_train.loc[train_idx]\n X_val = self.X_train.loc[val_idx]\n y_val = self.y_train.loc[val_idx]\n\n # Match the sample_weights with the length of the subtrain set\n # Make copy of est_params to not alter the mutable variable\n est_copy = self._est_params_fit.copy()\n if \"sample_weight\" in est_copy:\n est_copy[\"sample_weight\"] = [\n self._est_params_fit[\"sample_weight\"][i] for i in train_idx\n ]\n\n if hasattr(self, \"custom_fit\"):\n self.custom_fit(\n est=est,\n train=(X_subtrain, y_subtrain),\n validation=(X_val, y_val),\n params=est_copy\n )\n\n # Alert if early stopping was applied (only for cv=1)\n if self.T._cv == 1 and self._stopped:\n self.T.log(\n f\"Early stop at iteration {self._stopped[0]} \"\n f\"of {self._stopped[1]}.\", 2\n )\n else:\n est.fit(arr(X_subtrain), y_subtrain, **est_copy)\n\n # Calculate metrics on the validation set\n return [metric(est, arr(X_val), y_val) for metric in self.T._metric]\n\n t_iter = datetime.now() # Get current time for start of the iteration\n\n # Print iteration and time\n self._iter += 1\n if self._iter > self._n_initial_points:\n call = f\"Iteration {self._iter}\"\n else:\n call = f\"Initial point {self._iter}\"\n\n if self._pbar:\n self._pbar.set_description(call)\n len_ = \"-\" * (48 - len(call))\n self.T.log(f\"{call} {len_}\", 2)\n self.T.log(f\"Parameters --> {params}\", 2)\n\n est = self.get_estimator({**self._est_params, **params})\n\n # Same splits per model, but different for every iteration of the BO\n rs = self.T.random_state + self._iter if self.T.random_state else None\n\n if self.T._cv == 1:\n # Select test_size from ATOM or use default of 0.2\n t_size = self.T._test_size if hasattr(self.T, \"_test_size\") else 0.2\n kwargs = dict(test_size=t_size, random_state=rs)\n if self.T.goal.startswith(\"class\"):\n # Folds are made preserving the % of samples for each class\n split = StratifiedShuffleSplit(1, **kwargs)\n else:\n split = ShuffleSplit(1, **kwargs)\n\n scores = fit_model(*next(split.split(self.X_train, self.y_train)))\n\n else: # Use cross validation to get the score\n kwargs = dict(n_splits=self.T._cv, shuffle=True, random_state=rs)\n if self.T.goal.startswith(\"class\"):\n # Folds are made preserving the % of samples for each class\n k_fold = StratifiedKFold(**kwargs)\n else:\n k_fold = KFold(**kwargs)\n\n # Parallel loop over fit_model\n jobs = Parallel(self.T.n_jobs)(\n delayed(fit_model)(i, j)\n for i, j in k_fold.split(self.X_train, self.y_train)\n )\n scores = list(np.mean(jobs, axis=0))\n\n # Append row to the bo attribute\n t = time_to_str(t_iter)\n t_tot = time_to_str(self._init_bo)\n self.bo.loc[call] = {\n \"params\": params,\n \"estimator\": est,\n \"score\": flt(scores),\n \"time_iteration\": t,\n \"time\": t_tot,\n }\n\n # Update the progress bar\n if self._pbar:\n self._pbar.update(1)\n\n # Print output of the BO\n out = [\n f\"{m.name}: {scores[i]:.4f} Best {m.name}: \"\n f\"{max([lst(s)[i] for s in self.bo.score]):.4f}\"\n for i, m in enumerate(self.T._metric)\n ]\n self.T.log(f\"Evaluation --> {' '.join(out)}\", 2)\n self.T.log(f\"Time iteration: {t} Total time: {t_tot}\", 2)\n\n return -scores[0] # Negative since skopt tries to minimize\n\n # Running optimization 
===================================== >>\n\n if self._n_calls < self._n_initial_points:\n raise ValueError(\n \"Invalid value for the n_calls parameter. Value \"\n f\"should be >n_initial_points, got {self._n_calls}.\"\n )\n\n self.T.log(f\"\\n\\nRunning BO for {self.fullname}...\", 1)\n\n self._init_bo = datetime.now()\n if self.T.verbose == 1:\n self._pbar = tqdm(total=self._n_calls, desc=\"Random start 1\")\n\n self._check_est_params() # Check validity of parameters\n\n # Drop dimensions from BO if already in est_params\n for param in self._est_params:\n if param in self.params:\n self.params.pop(param)\n\n # Specify model dimensions\n def pre_defined_hyperparameters(x):\n return optimize(**self.get_params(x))\n\n # Get custom dimensions (if provided)\n if self._dimensions:\n @use_named_args(self._dimensions)\n def custom_hyperparameters(**x):\n return optimize(**x)\n\n dimensions = self._dimensions\n func = custom_hyperparameters # Use custom hyperparameters\n\n else: # If there were no custom dimensions, use the default\n dimensions = self.get_dimensions()\n func = pre_defined_hyperparameters # Default optimization func\n\n # If only 1 initial point, use the model's default parameters\n x0 = None\n if self._n_initial_points == 1 and hasattr(self, \"get_init_values\"):\n x0 = self.get_init_values()\n\n # Prepare keyword arguments for the optimizer\n kwargs = dict(\n func=func,\n dimensions=dimensions,\n n_calls=self._n_calls,\n n_initial_points=self._n_initial_points,\n x0=x0,\n callback=self.T._callbacks,\n n_jobs=self.T.n_jobs,\n random_state=self.T.random_state,\n **self.T._bo_kwargs,\n )\n\n if str(self.T._base_estimator).lower() == \"gp\":\n optimizer = gp_minimize(**kwargs)\n elif str(self.T._base_estimator).lower() == \"et\":\n optimizer = forest_minimize(base_estimator=\"ET\", **kwargs)\n elif str(self.T._base_estimator).lower() == \"rf\":\n optimizer = forest_minimize(base_estimator=\"RF\", **kwargs)\n elif str(self.T._base_estimator).lower() == \"gbrt\":\n optimizer = gbrt_minimize(**kwargs)\n else:\n optimizer = base_minimize(base_estimator=self.T._base_estimator, **kwargs)\n\n if self._pbar:\n self._pbar.close()\n\n # Optimal parameters found by the BO\n # Return from skopt wrapper to get dict of custom hyperparameter space\n if func is pre_defined_hyperparameters:\n self.best_params = self.get_params(optimizer.x)\n else:\n @use_named_args(dimensions)\n def get_custom_params(**x):\n return x\n\n self.best_params = get_custom_params(optimizer.x)\n\n # Optimal score found by the BO\n self.metric_bo = self.bo.score.max(axis=0)\n\n # Save best model (not yet fitted)\n self.estimator = self.get_estimator({**self._est_params, **self.best_params})\n\n # Get the BO duration\n self.time_bo = time_to_str(self._init_bo)\n\n # Print results\n self.T.log(f\"\\nResults for {self.fullname}:{' ':9s}\", 1)\n self.T.log(\"Bayesian Optimization ---------------------------\", 1)\n self.T.log(f\"Best parameters --> {self.best_params}\", 1)\n out = [\n f\"{m.name}: {round(lst(self.metric_bo)[i], 4)}\"\n for i, m in enumerate(self.T._metric)\n ]\n self.T.log(f\"Best evaluation --> {' '.join(out)}\", 1)\n self.T.log(f\"Time elapsed: {self.time_bo}\", 1)", "title": "" }, { "docid": "acf56c9fd37918b96f159513b047e94e", "score": "0.60053533", "text": "def alternating_update(self, f, g, f_optim, g_optim,f_ratio=1):\n noise = self.noise.sample([self.batch_size])\n real = self.data.sample([self.batch_size])\n if self.step % f_ratio==0:\n \n f.eval()\n g.train()\n \n g_optim.zero_grad() \n obj_g = 
self.objective( f, g, real, noise )\n obj_g.backward() \n g_optim.step()\n \n else:\n \n f.train()\n g.eval()\n \n f_optim.zero_grad() \n obj_f = -self.objective( f, g, real, noise ) #default objective is to minimize so we add the minus\n obj_f.backward() \n f_optim.step()\n f.enforce_lipschitz()", "title": "" }, { "docid": "9ebe1ccddb52420fbcf5bec2325e7fae", "score": "0.59521717", "text": "def cost_function(x):\n return f(x)", "title": "" }, { "docid": "e66abf02aef3e4517ff5427246b79de4", "score": "0.5941664", "text": "def _optimize(rng, x):\n def body_fn(_, inputs):\n opt_state, current_x = inputs\n current_x, _, opt_state = self._optimizer.minimize(current_x, opt_state)\n current_x = self._project_fn(current_x, x)\n return opt_state, current_x\n opt_state = self._optimizer.init(loss_fn, x)\n current_x = self._project_fn(self._initialize_fn(rng, x), x)\n _, current_x = jax.lax.fori_loop(0, self._num_steps, body_fn,\n (opt_state, current_x))\n return current_x", "title": "" }, { "docid": "a3c2949813740ebc6b561f147f311308", "score": "0.5908136", "text": "def optimize(\n # trials,\n random_state=2017):\n # To learn more about XGBoost parameters, head to this page:\n # https://github.com/dmlc/xgboost/blob/master/doc/parameter.md\n space = {\n # 'boosting_type': hp.choice( 'boosting_type', ['gbdt', 'dart' ] ),\n 'max_depth': hp.quniform(\"max_depth\", 4, 9, 1),\n 'num_leaves': hp.quniform('num_leaves', 10, 100, 1),\n 'min_data_in_leaf': hp.quniform('min_data_in_leaf', 10, 150, 1),\n 'feature_fraction': hp.uniform('feature_fraction', 0.4, 1.0),\n 'bagging_fraction': hp.uniform('bagging_fraction', 0.6, 1.0),\n 'learning_rate': hp.loguniform('learning_rate', -2.5, -1.6),\n 'min_sum_hessian_in_leaf': hp.loguniform('min_sum_hessian_in_leaf', 0, 4),\n # \"scale_pos_weight\": hp.loguniform('scale_pos_weight', 3, 10),\n # 'lambda_l1': hp.uniform('lambda_l1', 1e-4, 1e-6 ),\n # 'lambda_l2': hp.uniform('lambda_l2', 1e-4, 1e-6 ),\n 'seed': hp.randint('seed', 2000000)\n }\n trials = Trials()\n best = hyperopt.fmin(fn=score,\n space=space,\n algo=tpe.suggest,\n max_evals=500,\n trials=trials,\n verbose=1)\n return best", "title": "" }, { "docid": "3952c60c80938462023dff555e59fb43", "score": "0.5878123", "text": "def Bayes_Optimizer(init_sample,budget,method,test_f_num=1,SEEDS=1):\n\n def Optimisation(gp,thts):\n def Objective_Function(X ,Y_sample, gpr,xi=0):\n ''' Computes the EI at points X based on existing samples X_sample and Y_sample using a Gaussian process surrogate model.\n Args: X: Points at which EI shall be computed (m x d). 
X_sample: Sample locations (n x d).\n Y_sample: Sample values (n x 1).\n gpr: A GaussianProcessRegressor fitted to samples.\n xi: Exploitation-exploration trade-off parameter.\n Returns: Expected improvements at points X.\n '''\n #print('X',X)\n mu, sigma = gpr.predict(np.vstack(X), include_likelihood=False)\n mu_sample_opt = np.min(Y_sample)\n imp = mu_sample_opt - mu\n Z = imp / sigma\n ei = imp * scipy.stats.norm.cdf(Z) + sigma * scipy.stats.norm.pdf(Z)\n return -1*ei\n\n def acc(x):\n if x>f.u_limit or x<f.l_limit:\n return 10000\n else:\n# print('x',x,'E_EI(x)',E_EI(x))\n# print('Optimisation.x_val',Optimisation.x_val)\n if E_EI(x)<Optimisation.x_val:\n Optimisation.x_val = E_EI(x)\n Optimisation.x_init = x\n return E_EI(x)\n\n x_plot = np.linspace(0,10,1000)[:,None]\n Ns = 5\n \n Y_sample = gp.Y\n\n Optimisation.x_init = -10\n Optimisation.x_val = -10000\n \n if method == 'BAYES':\n Optimisation.x_init = -10\n Optimisation.x_val = 10000\n \n XAs = np.random.random(Ns)*10\n EI_function = []\n for i in np.exp(thts):\n m.rbf.lengthscale = i[0]\n m.rbf.variance = i[1]\n m.Gaussian_noise.variance = alpha\n EI_function.append(Objective_Function(x_plot, Y_sample, m,xi=0))\n \n mean_EI_function = np.mean(EI_function,axis=0)\n\n E_EI = scipy.interpolate.interp1d(x_plot.T[0], mean_EI_function.T[0])\n \n# plt.plot(x_plot,E_EI(x_plot),label='acc')\n# plt.legend()\n# plt.show()\n \n A = [scipy.optimize.minimize(acc, np.array([[i]]), method='nelder-mead', tol= 1e-8).x for i in XAs]\n else:\n mean_EI_function = np.vstack(Objective_Function(x_plot, Y_sample, m,xi=0))\n Optimisation.x_init = -50\n Optimisation.x_val = 10000\n XAs = np.random.random(Ns) + (x_plot[np.argmin(mean_EI_function)]-0.5)\n for i in XAs:\n x_opt = scipy.optimize.minimize(Objective_Function, np.array([[i]]), args =(Y_sample, m) ,method='nelder-mead', tol= 1e-12).x\n if x_opt < f.l_limit or x_opt > f.u_limit:\n EI_eval = 10000\n elif x_opt > f.l_limit or x_opt < f.u_limit:\n EI_eval = Objective_Function(np.array([[i]]),Y_sample, m)\n\n if EI_eval < Optimisation.x_val:\n Optimisation.x_val = EI_eval\n Optimisation.x_init = x_opt\n\n return np.array([Optimisation.x_init])\n \n DATA = np.zeros((budget+N,2))\n \n f.init(test_f_num,SEEDS,init_sample)\n\n DATA[0:N,0] = f.generate_data()\n DATA[0:N,1] = f.evl(DATA[0:N,0])\n f.data(DATA[0:N,0],DATA[0:N,1])\n #f.verbose()\n #MAXIMUM LIKELIHOOD ESTIMATION\n m = GPy.models.GPRegression(DATA[0:N,0][:,None],DATA[0:N,1][:,None])\n \n if method == 'BAYES':\n\n sample_theta = SLICE_SAMPLER(DATA[0:N,:],burn_in=10, Ns=10, verbose=False)[0]\n m.optimize_restarts(5, robust=True, verbose=False)\n alpha = m.Gaussian_noise.variance\n \n\n else:\n m.optimize_restarts(5, robust=True, verbose=False)\n \n CC = np.zeros(budget)\n for bb in range(budget):\n m = GPy.models.GPRegression(DATA[0:N+bb,0][:,None],DATA[0:N+bb,1][:,None])\n if method == 'BAYES':\n dsgn = Optimisation(gp = m ,thts=sample_theta)\n else:\n dsgn = Optimisation(gp = m ,thts=0)\n# print('dgn',dsgn)\n DATA[N+bb,0] = dsgn\n DATA[N+bb,1] = f.evl(dsgn)\n f.data(np.array([DATA[N+bb,0]]),np.array([DATA[N+bb,1]]))\n return", "title": "" }, { "docid": "63d4f42fe3c407c43ca018531bc44843", "score": "0.58705264", "text": "def f(inicio,obj):\n \"\"\" f function computes the weight given by the taxicab distance from intial postion to current position,\n given by g and the heuristic cost from current position to objective.\n \"\"\"\n return g(inicio,obj)+h(inicio,obj)", "title": "" }, { "docid": "de9653e625e6ed03b24bda13542d5cc9", "score": "0.5782633", 
"text": "def optimizer(self, timestamp, x0, WKBAR,**kwargs):\n\n bounds = [(self.xmin, self.xmax)]\n take_step = RandomDisplacementBounds(self.xmin, self.xmax)\n\n (niter, niter_success) = (250,25)\n minimizer_kwargs = dict(args=(timestamp,WKBAR), method='L-BFGS-B', bounds=bounds)\n\n prnt = kwargs.get('prnt',True)\n if prnt:\n ret = spo.basinhopping(self.fcn, x0, T=1.0, minimizer_kwargs=minimizer_kwargs, niter=niter, \\\n niter_success=niter_success,take_step=take_step,callback=self.print_fun)\n # ret = spo.basinhopping(self.fcn, x0, T=1.0, minimizer_kwargs=minimizer_kwargs, niter=niter, \\\n # niter_success=niter_success,take_step=take_step,callback=self.print_fun)\n else:\n ret = spo.basinhopping(slef.fcn, x0, T=1.0, minimizer_kwargs=minimizer_kwargs, niter=niter, \\\n niter_success=niter_success,take_step=take_step,callback=None)\n print('finish once:\\n', ret)\n res = (ret.x, ret.fun)\n return res", "title": "" }, { "docid": "e7d00556f3cb9051bb3b46129372597f", "score": "0.5763634", "text": "def zipf_solver(self, ab):\n #ab = self.obs\n par0 = 1 + len(ab) / (sum(np.log(2 * np.array(ab))))\n def zipf_func(x):\n return -self.zipf_ll(ab, x)\n #par = optimize.fmin(zipf_func, x0 = par0, disp=False)[0]\n estimator = str(self.estimator)\n par = getattr(optimize, estimator)(zipf_func, x0 = par0, disp=False)[0]\n return par", "title": "" }, { "docid": "27d9670ad63528c3dbd722dc30dc6ada", "score": "0.5748948", "text": "def get_problem():\n\n # Rename this and/or move to optim package?\n problem = beluga.optim.Problem('brysonDenhamConstrained')\n\n # Define independent variables\n problem.independent('t', 's')\n\n # Define equations of motion\n problem.state('x', 'v','m') \\\n .state('v', 'u','m/s') \\\n # Define controls\n problem.control('u','m/s')\n\n # Define costs\n problem.cost['path'] = Expression('u^2','m^2/s')\n\n # Define constraints\n problem.constraints('default',0) \\\n .initial('x-x_0','m') \\\n .initial('v-v_0','m/s') \\\n .terminal('x-x_f','m') \\\n .terminal('v-v_f','m/s') \\\n .path('xlim','x','<',0.18,'m') \\\n .independent('tf - 1','s') # Fixed final time\n\n problem.scale.unit('m',1) \\\n .unit('s',1)\\\n .unit('kg',1) \\\n .unit('rad',1) \\\n .unit('nd',1)\n\n # problem.bvp_solver = algorithms.SingleShooting(derivative_method='fd',tolerance=1e-4, max_iterations=30, verbose = True, cached=False)\n problem.bvp_solver = algorithms.MultipleShooting(derivative_method='fd',tolerance=1e-4, max_iterations=40, verbose = True, cached=False, number_arcs=2)\n\n\n # Smoothed path constraint\n # c1 = '( x )' # Constraint\n # c1_1 = '( v )' # First derivative\n # c1_2 = '( u )' # Second derivative\n # h1_1 = '(psi1)';\n # h1_2 = '(psi11*xi12)'; # xi11dot = xi12\n # h1_3 = '(psi12*xi12^2 + psi11*ue1)'; # xi12dot = ue1\n\n # problem.constant('eps1',1e-1,'nd') # The smoothing 'penalty' factor\n # problem.control('ue1','m/s') # The extra control\n # problem.constant('lim',0.14,'m') # The constraint limit\n\n # problem.quantity ('psi1','(lim - (2*lim/(1+exp((2/lim)*xi11))))') \\\n # .quantity('psi11','((4*exp((2*xi11)/lim))/(exp((2*xi11)/lim) + 1)**2)') \\\n # .quantity('psi12','( 8*exp(2*xi11/lim)/(lim*(exp(2*xi11/lim) + 1)**2) - 16*exp(4*xi11/lim)/(lim*(exp(2*xi11/lim) + 1)**3))') # \\\n # # .quantity('psi13','(16*exp(2*xi11/lim)/(lim**2*(exp(2*xi11/lim) + 1)**2) - 96*exp(4*xi11/lim)/(lim**2*(exp(2*xi11/lim) + 1)**3) + 96*exp(6*xi11/lim)/(lim**2*(exp(2*xi11/lim) + 1)**4))')\n\n # problem.state('xi11','xi12','m')\n # problem.state('xi12','ue1','m/s')\n # # solve psi1 - c = 0 -> xi11_0\n # # 
solve psi11*xi12 - c1 = 0 -> xi12_0\n # problem.constraints('default',0).initial('xi11 - x_0','m') \\\n # .initial('xi12 - v_0','m/s') \\\n # .equality(c1_2+' - '+h1_3,'m/s')\n\n # problem.cost['path'] = Expression('u^2 + eps1*(ue1^2)','m^2/s')\n\n problem.guess.setup('auto',\n start=[0,.01], # Starting values for states in order\n direction='forward',\n costate_guess = 0.1,\n time_integrate = 1 ## REQUIRED BECAUSE OF FIXED FINAL TIME\n )\n\n # problem.steps.add_step().num_cases(11) \\\n problem.steps.add_step('bisection', max_divisions=30).num_cases(51) \\\n .terminal('x',0) \\\n .initial('v',1) \\\n .terminal('v', -1)\n # .const('lim',0.20)\n\n problem.steps.add_step('bisection').num_cases(11) \\\n .const('_xlim',0.14)\n # #\n problem.steps.add_step('bisection').num_cases(11,spacing='log') \\\n .const('eps_lim1',1e-4)\n return problem", "title": "" }, { "docid": "ed3d6058b9ea155b5b290c239e1d8e84", "score": "0.5747688", "text": "def run_optim(self, x, maxiter=10000, debug=False):\n x0 = np.concatenate([self.__init_mu_theta(x), self.__init_disp_theta(x)])\n\n if debug:\n minimize_out = minimize(\n fun=objective,\n x0=x0,\n args=(x, self.design_loc, self.design_scale, self.lib_size, self.batch_size),\n method='BFGS',\n options={'maxiter': maxiter, 'gtol': 1e-05}\n )\n else:\n try:\n minimize_out = minimize(\n fun=objective,\n x0=x0,\n args=(x, self.design_loc, self.design_scale, self.lib_size, self.batch_size),\n method='BFGS',\n options={'maxiter': maxiter, 'gtol': 1e-05}\n )\n err = ''\n except Exception as e:\n minimize_out = None\n err = e\n print(e)\n\n return minimize_out", "title": "" }, { "docid": "dc757de4c16892e9fcf099d2114707e8", "score": "0.5722914", "text": "def fmincg(f, jac, x0, args=(), alpha=0.25, beta=0.5, maxiter=50, tol=1e-9):\n warnings.filterwarnings(\"ignore\")\n\n # Initialization.\n dx_prev = -jac(x0, *args)\n s_prev = dx_prev\n x = x0\n\n print(\"Minimizing function...\")\n\n for i in range(maxiter):\n dx = -jac(x, *args)\n if dx.T@dx < tol:\n print(\"Terminated because tolerance criterion is reached...\")\n return x\n\n # Polak-Ribiere formula\n beta_pr = dx.T @ (dx-dx_prev) / (dx_prev.T@dx_prev)\n # Positive beta_pr indicates moving to min.\n beta_pr = np.max((0, beta_pr))\n # Search direction.\n s = dx + beta_pr*s_prev\n\n # Backtracking (https://en.wikipedia.org/wiki/Backtracking).\n t = 1.0\n cost = f(x, *args)\n grad = jac(x, *args)\n cost_new = f(x+t*s, *args)\n alpha_grad = alpha * grad.T @ s\n # f(x+t*s) - f(x) = t*s*jac(x) where s*jac(x) is alpha_grad.\n while cost_new > cost + t*alpha_grad:\n t = beta * t\n cost_new = f(x+t*s, *args)\n # Throw out all big values of t*s.\n\n # Search right side.\n t_right = 2 * t\n t_temp = t\n while t_right - t > 1e-4:\n cost_right = f(x+t_right*s, *args)\n if cost_right > cost_new:\n t_right = (t+t_right) / 2\n else:\n t = t_right\n t_right = 2 * t\n cost_new = cost_right\n\n # Search left side.\n if t == t_temp:\n t_left = t / 2\n while t - t_left > 1e-4:\n cost_left = f(x+t_left*s, *args)\n if cost_left > cost_new:\n t_left = (t+t_left) / 2\n else:\n t = t_left\n t_left = t / 2\n cost_new = cost_left\n\n x = x + t*s\n s_prev = s\n dx_prev = dx\n\n # To replace old line with new lines, add end=\"\\r\".\n print(\"Iteration {:d} | Cost: {:f}\".format(i+1, cost_new), end=\"\\r\")\n\n print(\"\\nProgram completed.\")\n\n return x", "title": "" }, { "docid": "7a7244500cd80f5841782dc6c0a22078", "score": "0.57020503", "text": "def maximize(self, budget, optimizer):\n\n\t\tpass", "title": "" }, { "docid": 
"9669c5300c9310b39d48ad21efd90f70", "score": "0.5689034", "text": "def optimizer():\n raise ValueError(\"litepresence needs funding\")", "title": "" }, { "docid": "32f9f09021936594c035a479934a9622", "score": "0.56831855", "text": "def sgd(\n fun,\n x0,\n jac,\n args=(),\n learning_rate=0.001,\n mass=0.9,\n startiter=0,\n maxiter=1000,\n callback=None,\n **kwargs\n):\n x = x0\n velocity = np.zeros_like(x)\n\n for i in range(startiter, startiter + maxiter):\n g = jac(x)\n\n if callback and callback(x):\n break\n\n velocity = mass * velocity - (1.0 - mass) * g\n x = x + learning_rate * velocity\n\n i += 1\n return OptimizeResult(x=x, fun=fun(x), jac=g, nit=i, nfev=i, success=True)", "title": "" }, { "docid": "2aad89b6dec814a24dc039d913d7d45e", "score": "0.5660993", "text": "def optimize(self, f, iters, print_step=1, verbose=1):\n raise NotImplementedError(\"SwarmBase::optimize()\")", "title": "" }, { "docid": "b31e263ee37e19b4dc19e60c1c9ed397", "score": "0.5655273", "text": "def optimizer():\n xdat = get_xdat()\n FFCache.xdat = xdat\n\n p0 = [10, .5, 0.5 * np.pi, -10, 1, -0.01, .01, 0.55, 0.15, 56000, 0.1, 0.1]\n p0 = [\n 18.01, 0.22, 0.1, -17.18, 1.14, 0.0013, 0.019, 0.55, 0.15,\n 56075.83169385737, 0.17, 0.01, 0.01\n ]\n\n p0 = [\n 18.400779846117267, 0.22971787544099842, 0.10155253120188,\n -17.419509553338237, 1.10935718731397, 0.001337832080287821,\n 0.02101724622679121, 0.5999999, 0.17088848513491867, 56152.7311204481,\n 0.19911041030960425, 0.004994250357441403, 0.012459448104491906\n ]\n p0 = [\n 1.87415482e+01,\n 2.10336313e-01,\n 2.88965103e-02,\n -1.80146378e+01,\n 1.46305331e+00,\n 1.49149247e-03,\n 1.83705929e-02,\n 5.93203255e-01,\n 1.95010188e-01,\n 5.61257260e+04,\n 3.63695628e-01,\n 1.63649945e-05,\n ] # 1.23094872e-02]\n for i in range(10):\n\n ret = scipy.optimize.minimize(edge_wrap(like),\n p0,\n args=(xdat, 1),\n method='Nelder-Mead')\n p0 = ret['x']\n ret = scipy.optimize.minimize(\n edge_wrap(like),\n p0,\n args=(xdat, 1),\n )\n p0 = ret['x']", "title": "" }, { "docid": "9e5493397f260378b23a927082430246", "score": "0.5654936", "text": "def objective(f, g, data_sample, noise_sample): \n # method signature\n \n \n W1= (f(data_sample ) - f(g(noise_sample) ) ).mean()\n \n \n #raise NotImplementedError(\"Your W1 estimate here\")\n return W1", "title": "" }, { "docid": "2f7e58b2b96fe2f018fdd61398c3f16a", "score": "0.56538814", "text": "def minimize_example(args, f, opt):\n result = process_optimize(\n f(), opt\n )\n step = 1\n res = []\n with open(args.ans, \"w\") as file:\n print(result, file=file)\n with open(args.short_ans, \"w\") as file:\n for x in result.values():\n x[\"result\"].pop(\"history\")\n print(\n f'X value: {x[\"result\"][\"x_min\"]}, ' +\n f'distance to min: {x[\"result\"][\"min_delta_result\"]}, ' +\n f'N steps: {x[\"result\"][\"steps_num\"]}'\n )\n res.append(f'X value: {x[\"result\"][\"x_min\"]}, ' + f'distance to min: {x[\"result\"][\"min_delta_result\"]}, ' + f'N steps: {x[\"result\"][\"steps_num\"]}')\n # if step % 21 == 0:\n # print(\"##################################\")\n step += 1\n print(result, file=file)\n return res", "title": "" }, { "docid": "99a47252dae4bba982acd860e8ed0751", "score": "0.56482846", "text": "def optimize(function, opt_params, iterations, random_seed=None, verb_model=False, verb=True):\n start_time = time.time()\n \n # global b/c I couldn't find a better way to directly pass these to the objective function\n global opt_params_, seed_, verb_model_\n opt_params_ = opt_params\n seed_ = None\n random.seed(random_seed)\n 
verb_model_ = verb_model\n \n # kwargs b/c dummy_minimize can not take n_jobs\n optfunc_params = {'n_calls':iterations, 'random_state':random_seed, 'verbose':verb, 'callback':callback}\n if(function != dummy_minimize):\n optfunc_params['n_random_starts'] = opt_params['n_rand']\n optfunc_params['n_jobs'] = -1\n result = function(objective, opt_params['space'], **optfunc_params)\n del opt_params_, seed_, verb_model_\n if(verb):\n res_plot(result)\n print()\n res_stats(result, start_time)\n return result", "title": "" }, { "docid": "fe2dd48ddc8b7df389db74a9baf37baf", "score": "0.56406885", "text": "def opt_func(self, frog, ffnc='exp'):\n # Find the distances between the frog and registered obstacles\n distances = np.array(list(map(np.linalg.norm, self.obstacles - frog)))\n norm = np.amin(distances) if distances.size else 0\n # Fitness function\n if ffnc == 'exp':\n output = self.w1 * np.exp(-norm) + self.w2 * \\\n np.linalg.norm(self.target - frog)\n elif ffnc == 'rtnl':\n output = self.w1 * (1 / norm) + self.w2 * \\\n np.linalg.norm(self.target - frog)\n else:\n output = -1\n print(\"Unknown fitness function\")\n\n return output", "title": "" }, { "docid": "e7bc720f69183e306c749804258ec01b", "score": "0.5610292", "text": "def _find_value_bdd(f, y, x_min, x_max, args = (), maxiter=100, algorithm=None,\n function_type=None, xtol=10**-12, full_output=False, disp=True):\n if y != 0:\n g = lambda x : f(x, *args) - y\n args = ()\n else:\n g = f\n \n return scipy.optimize.brentq(g, a=x_min, b=x_max, args=args, xtol=xtol, maxiter=maxiter, \n full_output=full_output, disp=disp)", "title": "" }, { "docid": "20c7800ed6d115fd434b197ca4bcaa81", "score": "0.5588473", "text": "def run_closure(f):\n\n tmp = f + \".tmp.js\"\n f.move(tmp)\n\n try:\n sh(\"java -jar '%s' --js='%s' > '%s'\" % (options.closure_bin, tmp, f))\n except BuildFailure, e:\n paver.tasks.environment.error(\n \"%s minimization failed, copying plain file\", f)\n tmp.copy(f)\n\n tmp.remove()", "title": "" }, { "docid": "0f985fbf3b5bf1e19a56b935ada7f91d", "score": "0.55715525", "text": "def bayesian_optimization(predictor_cls, train_x, train_ys, params, max_iter, max_time=600, model_type='GP', acquisition_type='EI',\n acquisition_weight=2, eps=1e-6, batch_method='local_penalization', batch_size=1, method='split', nfolds=3,\n silent=True, persist=True, write_to=TUNING_OUTPUT_DEFAULT):\n\n print(\"Applying Bayesian Optimization to configure {} in at most {} iterations and {} seconds.\"\n .format(predictor_cls, max_iter, max_time))\n\n def create_mapping(p_array):\n \"\"\" Changes the 2d np.array from GPyOpt to a dictionary. 
\"\"\"\n mapping = dict()\n for i in range(len(params)):\n mapping[params[i][\"name\"]] = p_array[0, i]\n\n return mapping\n\n # define the optimization function\n def f(parameter_array):\n param_dict = create_mapping(parameter_array)\n score = eval_permutation(params=param_dict,\n predictor_cls=predictor_cls,\n train_x=train_x,\n train_ys=train_ys,\n method=method,\n nfolds=nfolds,\n silent=silent)\n\n scores.append(score)\n # only return score to optimizer\n return score[1]\n\n # scores are added to this list in the optimization function f\n scores = []\n\n # run optimization in parallel\n num_cores = max(1, multiprocessing.cpu_count() - 1)\n\n # set batch_size equal to num_cores if no batch_size is provided\n if not batch_size:\n batch_size = num_cores\n\n if not silent:\n print(\"Running Bayesian Optimization in batches of {} on {} cores using {}.\".format(batch_size, num_cores, batch_method))\n\n # define optimization problem\n opt = BayesianOptimization(f, domain=params, model_type=model_type, acquisition_type=acquisition_type,\n normalize_Y=False, acquisition_weight=acquisition_weight, num_cores=num_cores, batch_size=batch_size)\n\n # run optimization\n opt.run_optimization(max_iter=max_iter, max_time=max_time, eps=eps, verbosity=False)\n\n # report results\n if persist:\n write_results(write_to, scores, predictor_cls)\n\n best_params, best_score = max(scores, key=lambda t: t[1])\n\n return dict(best_params), best_score", "title": "" }, { "docid": "9a09e44a2b4935023c68b18d7fe9c7bb", "score": "0.5568485", "text": "def test_bayesian_optimizer_on_simple_2d_quadratic_function_cold_start(self):\r\n objective_function_config = objective_function_config_store.get_config_by_name('2d_quadratic_concave_up')\r\n objective_function = ObjectiveFunctionFactory.create_objective_function(objective_function_config)\r\n\r\n optimization_problem = OptimizationProblem(\r\n parameter_space=objective_function.parameter_space,\r\n objective_space=objective_function.output_space,\r\n objectives=[Objective(name='y', minimize=True)]\r\n )\r\n\r\n bayesian_optimizer = BayesianOptimizer(\r\n optimization_problem=optimization_problem,\r\n optimizer_config=bayesian_optimizer_config_store.default,\r\n logger=self.logger\r\n )\r\n\r\n num_guided_samples = 1000\r\n for i in range(num_guided_samples):\r\n suggested_params = bayesian_optimizer.suggest()\r\n target_value = objective_function.evaluate_point(suggested_params)\r\n self.logger.info(f\"[{i}/{num_guided_samples}] suggested params: {suggested_params}, target: {target_value}\")\r\n\r\n\r\n bayesian_optimizer.register(suggested_params.to_dataframe(), target_value.to_dataframe())\r\n if i > 20 and i % 20 == 0:\r\n best_config_point, best_objective = bayesian_optimizer.optimum()\r\n self.logger.info(f\"[{i}/{num_guided_samples}] Optimum config: {best_config_point}, optimum objective: {best_objective}\")\r\n\r\n self.validate_optima(bayesian_optimizer)\r\n best_config, optimum = bayesian_optimizer.optimum()\r\n assert objective_function.parameter_space.contains_point(best_config)\r\n assert objective_function.output_space.contains_point(optimum)\r\n _, all_targets, _ = bayesian_optimizer.get_all_observations()\r\n assert optimum.y == all_targets.min()[0]\r\n self.logger.info(f\"Optimum: {optimum} best configuration: {best_config}\")", "title": "" }, { "docid": "9138ca8e4edae8a1ed6121f488e77652", "score": "0.554557", "text": "def optimize(self, log_par_0, fcn_obs, x_obs, method='BFGS', **kwargs):\n obj_func = self.neg_log_marginal_likelihood\n jac = True\n jitter 
= 1e-8 * np.eye(x_obs.shape[1])\n return minimize(obj_func, log_par_0, args=(fcn_obs, x_obs, jitter), method=method, jac=jac, **kwargs)", "title": "" }, { "docid": "ad3ae1a151ae840c801fa31d5c7eb5b0", "score": "0.5545351", "text": "def optimize(self) -> None:\n raise ValueError(\"RBF policies cannot be optimized individually.\"\n \"Please use the optimization in the pilco class.\")", "title": "" }, { "docid": "726b96d72efe33790eb1a35d1dcdc5ee", "score": "0.5543987", "text": "def fmin_bfgs(func, x0, args=(), options=None):\n\n if options is None:\n options = dict()\n maxiter: Optional[int] = options.get('maxiter', None)\n norm: float = options.get('norm', jnp.inf)\n gtol: float = options.get('gtol', 1e-5)\n ls_maxiter: int = options.get('ls_maxiter', 10)\n\n state = BFGSResults(converged=False,\n failed=False,\n k=0,\n nfev=0,\n ngev=0,\n nhev=0,\n x_k=x0,\n f_k=None,\n g_k=None,\n H_k=None,\n status=None,\n ls_status=jnp.array(0))\n\n if maxiter is None:\n maxiter = jnp.size(x0) * 200\n\n d = x0.shape[0]\n\n initial_H = jnp.eye(d)\n initial_H = options.get('hess_inv', initial_H)\n\n def func_with_args(x):\n return func(x, *args)\n\n value_and_grad = jax.value_and_grad(func_with_args)\n\n f_0, g_0 = value_and_grad(x0)\n state = state._replace(f_k=f_0, g_k=g_0, H_k=initial_H, nfev=state.nfev + 1, ngev=state.ngev + 1,\n converged=jnp.linalg.norm(g_0, ord=norm) < gtol)\n\n def body(state):\n p_k = -(state.H_k @ state.g_k)\n line_search_results = line_search(value_and_grad, state.x_k, p_k, old_fval=state.f_k, gfk=state.g_k,\n maxiter=ls_maxiter)\n state = state._replace(nfev=state.nfev + line_search_results.nfev,\n ngev=state.ngev + line_search_results.ngev,\n failed=line_search_results.failed,\n ls_status=line_search_results.status)\n s_k = line_search_results.a_k * p_k\n x_kp1 = state.x_k + s_k\n f_kp1 = line_search_results.f_k\n g_kp1 = line_search_results.g_k\n # print(g_kp1)\n y_k = g_kp1 - state.g_k\n rho_k = jnp.reciprocal(y_k @ s_k)\n\n sy_k = s_k[:, None] * y_k[None, :]\n w = jnp.eye(d) - rho_k * sy_k\n H_kp1 = jnp.where(jnp.isfinite(rho_k),\n jnp.linalg.multi_dot([w, state.H_k, w.T]) + rho_k * s_k[:, None] * s_k[None, :], state.H_k)\n\n converged = jnp.linalg.norm(g_kp1, ord=norm) < gtol\n\n state = state._replace(converged=converged,\n k=state.k + 1,\n x_k=x_kp1,\n f_k=f_kp1,\n g_k=g_kp1,\n H_k=H_kp1\n )\n\n return state\n\n state = while_loop(\n lambda state: (~ state.converged) & (~state.failed) & (state.k < maxiter),\n body,\n state)\n\n state = state._replace(status=jnp.where(state.converged, jnp.array(0),#converged\n jnp.where(state.k == maxiter, jnp.array(1),#max iters reached\n jnp.where(state.failed, jnp.array(2)+state.ls_status,#ls failed (+ reason)\n jnp.array(-1)))))#undefined\n\n return state", "title": "" }, { "docid": "d2f24551ec8dd3b93826c1980ee3ef47", "score": "0.55366325", "text": "def optimize(self, state):\n\n raise NotImplementedError()", "title": "" }, { "docid": "9a7e0cc0c1670bba817b52fa2ee620f3", "score": "0.5522172", "text": "def _optimize(self):\n self.stopCriteria.startTime()\n\n up = self.task.bounds.get_max().tolist()\n lb = self.task.bounds.get_min().tolist()\n if self.x0 is None:\n self.x0 = self.task.bounds.sample_uniform((1,)) # Randomly sample mean distribution\n\n def objfunc(parameters):\n return np.array(self._evaluate(np.matrix(parameters)))[:, 0][0] # Deal with transformations from/to np.matrix\n\n res = cma.fmin(objfunc, self.x0.tolist(), self.sigma,\n options={\"bounds\": [lb, up], \"verbose\": -1, \"verb_disp\": False,\n \"maxfevals\": 
self.stopCriteria.get_n_maxEvals(), \"popsize\": self.popsize})\n\n # Delete log file optimizer (pretty much useless)\n try:\n os.remove('outcmaesaxlen.dat')\n os.remove('outcmaesaxlencorr.dat')\n os.remove('outcmaesfit.dat')\n os.remove('outcmaesstddev.dat')\n os.remove('outcmaesxmean.dat')\n os.remove('outcmaesxrecentbest.dat')\n except:\n # Something went wrong\n pass\n\n # Logs\n # self._logs.data.n_evals = res[3]\n self._logs.data.xOpt = res[0]\n self._logs.data.fOpt = np.array(res[1])\n self._logs.data.time = np.matrix(self.stopCriteria.get_time())\n\n # self._logs.add_evals(x=np.matrix(res[0]).T, fx=np.matrix(res[1]),\n # opt_x=np.matrix(res[0]).T, opt_fx=np.matrix(res[1]),\n # time=np.matrix(self.stopCriteria.get_time())\n # )\n # self._logs.n_evals = res[3]\n\n out = np.array(res[0])\n\n return out", "title": "" }, { "docid": "81ea0156f332a9adc297f68990e5f2f6", "score": "0.5515792", "text": "def Minimize_PSF(optimization_round):\n\n if Set.decon_type in ('classical', 'myopic', 'npsfs'):\n\n Set.costfunction_type = 0 ## minimize PSF\n elif Set.decon_type == 'nobjects':\n\n Set.costfunction_type = 2 ## minimize PSF in the presence of nobjects\n elif Set.decon_type == 'si':\n \n Set.costfunction_type = 5 ## NEEDS INTEGRATION with SI code\n\n# if optimization_round > 1: # for optimization round 1, this is taken\n# # care of by the initialization in function\n# # CalculatePSTOTFstats for PSF=mPSF\n# # otherwise, normalization is assumed to be\n# # okay for user inputted initial guess (e.g., 0)\n# ## first normalize OTF and regenerate PSF (do per optimization round)\n# fftw.rfft(a=Set.PSF, af=Set.OTF, inplace=False)\n# \n# print \"HEY!\", Set.OTF.flat[0]\n# \n# Set.OTF.flat[0] = AGF.FitOTFzero(OTF=Set.OTF, npoints=20)\n# Set.OTF -= N.abs(Set.OTF).min()\n# Set.OTF /= Set.OTF.flat[0] # scale OTF to start at 1.0\n# fftw.irfft(af=Set.OTF*Set.inv_Nd, a=Set.PSF, inplace=False) \n# print \"YO!\", Set.OTF.flat[0]\n\n normalization = float(N.sum(Set.PSF.flat))\n\n if normalization > 0:\n \n Set.PSF /= normalization # renormalize everytime Minimize_PSF is called\n\n ### Initialization of CCG Variables ###\n Set.old_estimate = Set.PSF.copy()\t# use .copy() instead of [:] = \n Set.old_estimate_difference[:] = 0.\n itn = 0; ifn = 0; fn = 0.; fmin = 0.\n Set.ivec[:] = 0; df0 = 0. # df0, scalar float; input and output\n Nclsrch = 0; istop = 100\n rising_test_count = 0\n old_test = 100000. 
## to determine if solution steps go \"uphill\"\n PSF_stops = 0\n\n ### Minimization Loop Over PSF ###\n for i in range(Set.PSF_PCG_iter_array[optimization_round-1]):\n# PIh iterations here see Fig1B\n startCGtime = time.time()\n\n if i == 0:\n \n print('[PSF] PCG iter:', i+1, 'of', end=' ') \n print(Set.PSF_PCG_iter_array[optimization_round-1], end=' ')\n print(' in optimization round ', end=' ') \n print(optimization_round, ' out of ', end=' ') \n print(Set.max_total_PCG_blocks, '(max)')\n else:\n\n print('[PSF] PCG iter:', i+1, ' of ', end=' ') \n print(Set.PSF_PCG_iter_array[optimization_round-1])\n\n#seb (itn, ifn, istop, fn, df0, Nclsrch) = CCG.doCCG(Set.PSF, Set.xmin,\n#seb Set.xmax_PSF, Set.ivec, Set.max_PSF_CCG_iter, fmin, df0,\n#seb Set.PSF_CCG_tolerance, Nclsrch) #@ CCG\n# (istop, itn, ifn, Nclsrch, fn, df0) = \\\n# ccg.getsol( Set.PSF, Set.xmin,\n# Set.xmax_PSF, Set.ivec, Set.max_PSF_CCG_iter, fmin, df0,\n# Set.PSF_CCG_tolerance,\n# AIDA_CostGradFunctions.CostFunction)\n \n import scipy.optimize as opt\n\n \n def function_cost(X):\n temp_grad = X.copy()\n (res,dummy) = AIDA_CostGradFunctions.CostFunction(X.copy(),temp_grad)\n return(res,temp_grad.flatten())\n myPSF = Set.PSF.copy()\n \n if not hasattr(Set, 'evaluationTime'):\n Set.evaluationTime = 0\n Set.nb_Evaluations = 0\n import time as tt\n start_t = tt.time()\n res_osef = function_cost(myPSF.copy())\n end_t = tt.time()\n Set.evaluationTime = (Set.nb_Evaluations * Set.evaluationTime + 1000*(end_t - start_t))/(1+Set.nb_Evaluations)\n Set.nb_Evaluations += 1\n \n\n print(\"Duration of last evaluation: \",1000*(end_t - start_t),\"ms\")\n print(\"Mean: \",Set.evaluationTime,\"ms\")\n print(\"nb_evals: \", Set.nb_Evaluations)\n \n resOptim2 = opt.minimize(function_cost,myPSF.flatten(),jac = True,method='L-BFGS-B',\n bounds=N.asarray([(Set.xmin,Set.xmax_PSF)]*len(myPSF.flatten())),\n options={'disp': False,'maxiter': Set.max_PSF_CCG_iter/2\n })\n \n itn = resOptim2.nit\n ifn = resOptim2.nfev\n istop = 0\n fn = resOptim2.fun\n df0 = 0\n Nclsrch = 0\n Set.PSF[:]= resOptim2.x.reshape(Set.PSF.shape)\n\n if Set.info_level >= 1:\n\n Set.cum_CG_time[1:] += time.time() - startCGtime\n Set.decon_total_CG_itns[1:] += itn\n Set.decon_total_CostFunction_itns[1:] += ifn\n\n if Set.info_level >= 2:\n\n if i == 0 :\n try:\n print('<lambda_OTF>: %.7g' %Set.lambda_OTF.mean(), end=' ')\n except:\n \n print('lambda_OTF: %.7g' %Set.lambda_OTF, end=' ')\n\n try:\n print(' <lambda_PSF>: %.7g' %Set.lambda_PSF.mean())\n except:\n \n print(' lambda_PSF: %.7g' %Set.lambda_PSF)\n\n print('\\tCG itns', itn, ' istop', istop, ' ifn', end=' ')\n print(ifn, ' df0 %.6g' %df0, ' Nclsrch', Nclsrch, end=' ')\n print(' CGtime %.6g' %(time.time() - startCGtime))\n\n Nclsrch = 0\n\n ### Check Global Solution Convergence of PSF Estimate ###\n test = (N.abs(N.abs(Set.PSF - Set.old_estimate) - \\\n Set.old_estimate_difference)).mean()\n\n if test <= Set.PSF_PCG_tolerance:\n\n old_test = 0.\n PSF_stops += 1\n\n if Set.info_level >= 2:\n\n print('\\t PSF diff: ', test, '\\tcheck_PSF = stop')\n\n if PSF_stops == Set.max_sequential_PCG_stops:\n\n if Set.info_level >= 2:\n\n print('\\t\\t*** max PSF_stops reached ***')\n\n break\n else:\n\n PSF_stops = 0\n \n if itn < (Set.max_PSF_CCG_iter+1) and \\\n i > Set.PSF_PCG_iter_array[optimization_round-1]+1 and \\\n Set.decon_type != 'classical':\n\n if Set.info_level >= 2:\n\n print('\\t PSF diff: ', test, '\\tcheck_PSF = go')\n print('\\t >> max specified PSF iterations reached <<')\n\n break\n\n if i != 0 and test >= 
Set.rising_tol_ratio * old_test:\n\n rising_test_count += 1\n\n if rising_test_count == Set.max_uphill_PSF_PCG_steps:\n\n if Set.info_level >= 2:\n \n print('\\t PSF diff: ', test, '\\tcheck_PSF = go')\n print('\\t >> max rising test count encountered <<')\n \n break ## break out if test > old_test occurs\n ## more than max_uphill_steps, consecutively\n else:\n\n if Set.info_level >= 2:\n\n print('\\t PSF diff: ', test, '\\tcheck_PSF = go')\n else:\n\n Set.old_estimate_difference = N.abs(Set.PSF - Set.old_estimate).copy()\n # use .copy() instead of [:] = \n old_test = test\n \n if Set.info_level >= 2:\n\n print('\\t PSF diff: ', test, '\\tcheck_PSF = go')\n\n ### Swap Old PSF With Current PSF Estimate 'xo' ###\n Set.old_estimate = Set.PSF.copy()\t# use .copy() (deep copy) instead of [:] (view)\n fmin=0.; df0=0.\n \n return (PSF_stops, rising_test_count, fn)", "title": "" }, { "docid": "e030aa46fe417edbd67d5b10fb720792", "score": "0.54982316", "text": "def bisection(f, a, b, TOL=0.001, NMAX=100):", "title": "" }, { "docid": "fa57c37cf79538f5f0a81374d5643ced", "score": "0.5481595", "text": "def fit(self,\n X, noisy_X,\n optimization_args):\n\n def U(q):\n # because theano_loss is a vectorial function, we have to sum()\n return self.q_loss(q, X, noisy_X).sum()\n\n def grad_U(q):\n # because theano_gradients is NOT a vectorial function, no need so sum()\n return self.q_grad(q, X, noisy_X)\n\n # Read the initial state from q.\n # This means that we can call the \"fit\" function\n # repeatedly with various (X, noisy_X) and we will\n # always be building on the current solution.\n q0 = self.q_read_params()\n\n global callback_counter\n callback_counter = 0\n def logging_callback(current_q):\n global callback_counter\n if callback_counter % 10 == 0:\n print \"Iteration %d. 
Loss : %f\" % (callback_counter, U(current_q))\n callback_counter = callback_counter + 1\n\n # With everything set up, perform the optimization.\n if optimization_args['method'] == 'fmin_cg':\n best_q = scipy.optimize.fmin_cg(U, q0, grad_U,\n callback = logging_callback,\n gtol = optimization_args['gtol'],\n maxiter = optimization_args['maxiter'])\n elif optimization_args['method'] == 'fmin_ncg':\n best_q = scipy.optimize.fmin_ncg(U, q0, grad_U,\n callback = logging_callback,\n avextol = optimization_args['avextol'],\n maxiter = optimization_args['maxiter'])\n elif optimization_args['method'] == 'fmin_bfgs':\n best_q = scipy.optimize.fmin_bfgs(U, q0, grad_U,\n callback = logging_callback,\n maxiter = optimization_args['maxiter'])\n #elif optimization_args['method'] == 'fmin_l_bfgs_b':\n # # Cannot perform the logging.\n # best_q = scipy.optimize.fmin_l_bfgs_b(f, q0, fprime,\n # # m = optimization_args['m'],\n # maxfun = optimization_args['maxiter'])\n else:\n error(\"Unrecognized method name : \" + optimization_args['method'])\n\n # Don't forget to set the params after you optimized them !\n self.q_set_params(best_q)", "title": "" }, { "docid": "abd48ca0cd177c4e0995b753768f8dec", "score": "0.5475645", "text": "def test_bayesian_optimizer_on_simple_2d_quadratic_function_pre_heated(self):\r\n\r\n objective_function_config = objective_function_config_store.get_config_by_name('2d_quadratic_concave_up')\r\n objective_function = ObjectiveFunctionFactory.create_objective_function(objective_function_config)\r\n random_params_df = objective_function.parameter_space.random_dataframe(num_samples=10000)\r\n\r\n y_df = objective_function.evaluate_dataframe(random_params_df)\r\n\r\n\r\n optimization_problem = OptimizationProblem(\r\n parameter_space=objective_function.parameter_space,\r\n objective_space=objective_function.output_space,\r\n objectives=[Objective(name='y', minimize=True)]\r\n )\r\n\r\n bayesian_optimizer = BayesianOptimizer(\r\n optimization_problem=optimization_problem,\r\n optimizer_config=bayesian_optimizer_config_store.default,\r\n logger=self.logger\r\n )\r\n bayesian_optimizer.register(random_params_df, y_df)\r\n\r\n num_guided_samples = 20\r\n for i in range(num_guided_samples):\r\n # Suggest the parameters\r\n suggested_params = bayesian_optimizer.suggest()\r\n target_value = objective_function.evaluate_point(suggested_params)\r\n\r\n self.logger.info(f\"[{i}/{num_guided_samples}] suggested params: {suggested_params}, target: {target_value}\")\r\n\r\n # Register the observation with the optimizer\r\n bayesian_optimizer.register(suggested_params.to_dataframe(), target_value.to_dataframe())\r\n\r\n self.validate_optima(bayesian_optimizer)\r\n best_config_point, best_objective = bayesian_optimizer.optimum()\r\n self.logger.info(f\"Optimum: {best_objective} Best Configuration: {best_config_point}\")\r\n trace_output_path = os.path.join(self.temp_dir, \"PreHeatedTrace.json\")\r\n self.logger.info(f\"Writing trace to {trace_output_path}\")\r\n global_values.tracer.dump_trace_to_file(output_file_path=trace_output_path)\r\n global_values.tracer.clear_events()", "title": "" }, { "docid": "65bbc4bc5fe087783d7ae23344d1144f", "score": "0.54648703", "text": "def minimize(function, runs):\n elite_fitness_runs = []\n for run in range(runs):\n print \"Run: \", run\n \n my_es = OnePlusLambdaES()\n my_es.evolve(max_gen=200, \n genome_size=function.dim_, \n pop_size=100, \n fitness_func=function, \n domain_upper_bound=function.upper_bound_,\n domain_lower_bound=function.lower_bound_,\n 
verbose=True)\n \n elite_fitness_runs.append(np.array(my_es.staged_best_fitness_))\n \n all_mins = []\n all_maxs = []\n for run in range(runs): \n plt.plot(elite_fitness_runs[run], marker='', label='Fitness')\n all_mins.append(elite_fitness_runs[run].min())\n all_maxs.append(elite_fitness_runs[run].max()) \n \n a = min(all_mins)\n b = max(all_maxs)\n plt.ylim([a-(b-a)*0.05, b+(b-a)*0.05])\n plt.xlabel('Iteration')\n plt.ylabel('Fitness')\n plt.title('(1+lambda)ES')\n plt.show()", "title": "" }, { "docid": "26768c7b2c2c3ba960585c03754b449f", "score": "0.54628396", "text": "def run(self, x):\n \"*** YOUR CODE HERE ***\"\n node = nn.Linear(self.computePolyFeatures(x), self.w)\n return nn.AddBias(node, self.b)", "title": "" }, { "docid": "b6b102978737c169b8f0009690b766bc", "score": "0.5396851", "text": "def FoBaGreedy(X, y, epsilon = 0.1, maxit_f = 100, maxit_b = 5, backwards_freq = 5):\n\n n,d = X.shape\n F = {}\n F[0] = set()\n w = {}\n w[0] = np.zeros((d,1))\n k = 0\n delta = {}\n\n for forward_iter in range(maxit_f):\n\n k = k+1\n\n # forward step\n zero_coeffs = np.where(w[k-1] == 0)[0]\n err_after_addition = []\n residual = y - X.dot(w[k-1])\n for i in zero_coeffs:\n\n # Per figure 3 line 8 in paper, do not retrain old variables.\n # Only look for optimal alpha, which is solving for new w iff X is unitary\n alpha = X[:,i].T.dot(residual)/np.linalg.norm(X[:,i])**2\n\n w_added = np.copy(w[k-1])\n w_added[i] = alpha\n err_after_addition.append(np.linalg.norm(X.dot(w_added)-y))\n i = zero_coeffs[np.argmin(err_after_addition)]\n \n F[k] = F[k-1].union({i})\n w[k] = np.zeros((d,1), dtype=np.float64)\n w[k][list(F[k])] = np.linalg.lstsq(X[:, list(F[k])], y)[0]\n\n # check for break condition\n delta[k] = np.linalg.norm(X.dot(w[k-1]) - y) - np.linalg.norm(X.dot(w[k]) - y)\n if delta[k] < epsilon: return w[k-1]\n\n # backward step, do once every few forward steps\n if forward_iter % backwards_freq == 0 and forward_iter > 0:\n\n for backward_iter in range(maxit_b):\n\n non_zeros = np.where(w[k] != 0)[0]\n err_after_simplification = []\n for j in non_zeros:\n w_simple = np.copy(w[k])\n w_simple[j] = 0\n err_after_simplification.append(np.linalg.norm(X.dot(w_simple) - y))\n j = np.argmin(err_after_simplification)\n w_simple = np.copy(w[k])\n w_simple[non_zeros[j]] = 0\n\n # check for break condition on backward step\n delta_p = err_after_simplification[j] - np.linalg.norm(X.dot(w[k]) - y)\n if delta_p > 0.5*delta[k]: break\n\n k = k-1;\n F[k] = F[k+1].difference({j})\n w[k] = np.zeros((d,1))\n w[k][list(F[k])] = np.linalg.lstsq(X[:, list(F[k])], y)[0]\n\n return w[k]", "title": "" }, { "docid": "673438880fd4187157c92231503ad546", "score": "0.5390924", "text": "def f(x):\n\tvalue = x/D_OPT * exp(1-x/D_OPT)\n\treturn value", "title": "" }, { "docid": "becfe26dafe084a46e131c88c3b65556", "score": "0.5384861", "text": "def minimize(self, state, **kwargs):\n return self.alg.minimize(state, **kwargs)", "title": "" }, { "docid": "2e03f7b15264ef30d61263ee44b99edd", "score": "0.5361728", "text": "def minimize(\n self,\n x: chex.Array,\n state: optax.OptState) -> Tuple[chex.Array, chex.Array, optax.OptState]:\n g, loss = gradients_fn(self._loss_fn, x)\n if g is None:\n raise ValueError('loss_fn does not depend on input.')\n updates, state = self._gradient_transformation.update(g, state, x)\n return optax.apply_updates(x, updates), loss, state", "title": "" }, { "docid": "973e531377c972bdbfd76f77a62b53e6", "score": "0.5340884", "text": "def optimizer(x):\n \n return 1 * (portfolioVariance(x))", "title": "" }, { 
"docid": "c37333b3387e5164de7fcff5a0b304e9", "score": "0.5323821", "text": "def Optimism_VF(self, valuefunctions, posterior_transition_points, num_update, sa_confidence):\n horizon = 1\n #s = 0\n valuefunctions = [valuefunctions]\n \n #Store the nominal points for each state-action pairs\n nomianl_points = {}\n \n #Store the latest nominal of nominal point & threshold\n nominal_threshold = {}\n under_estimate, real_regret = 0.0, 0.0\n i=0\n while i <= num_update:\n #try:\n #keep track whether the current iteration keeps the mdp unchanged\n is_mdp_unchanged = True\n threshold = [[] for _ in range(3)]\n rmdp = crobust.MDP(0, self.discount_factor)\n \n for s in self.all_states:\n #hashable_state_index = totuple(s)\n p,v = obs_to_index(s, self.env_low, self.env_dx)\n state_index = index_to_single_index(p, v, self.resolution)\n for a in range(self.num_actions):\n \n bayes_points = np.asarray(posterior_transition_points[state_index,a][0])\n next_states = np.asarray(posterior_transition_points[state_index,a][1])\n \n RSVF_nomianlPoints = []\n \n #for bayes_points in trans:\n #print(\"bayes_points\", bayes_points, \"next_states\", next_states)\n ivf = construct_uset_known_value_function(bayes_points, valuefunctions[-1], sa_confidence, next_states)\n RSVF_nomianlPoints.append(ivf[2])\n new_trp = np.mean(RSVF_nomianlPoints, axis=0)\n \n if (state_index,a) not in nomianl_points:\n nomianl_points[(state_index,a)] = []\n \n trp, th = None, 0\n #If there's a previously constructed L1 ball. Check whether the new nominal point\n #resides outside of the current L1 ball & needs to be considered.\n if (state_index,a) in nominal_threshold:\n old_trp, old_th = nominal_threshold[(state_index,a)][0], nominal_threshold[(state_index,a)][1]\n \n #Compute the L1 distance between the newly computed nominal point & the previous \n #nominal of nominal points\n new_th = np.linalg.norm(new_trp - old_trp, ord = 1)\n \n #If the new point is inside the previous L1 ball, don't consider it & proceed with\n #the previous trp & threshold\n if (new_th - old_th) < 0.0001:\n trp, th = old_trp, old_th\n \n #Consider the new nominal point to construct a new uncertainty set. 
This block will\n #execute if there's no previous nominal_threshold entry or the new nominal point\n #resides outside of the existing L1 ball\n if trp is None:\n is_mdp_unchanged = False\n nomianl_points[(state_index,a)].append(new_trp)\n \n #Find the center of the L1 ball for the nominal points with different \n #value functions\n trp, th = find_nominal_point(np.asarray(nomianl_points[(state_index,a)]))\n nominal_threshold[(state_index,a)] = (trp, th)\n \n threshold[0].append(state_index)\n threshold[1].append(a)\n threshold[2].append(th)\n \n trp /= np.sum(trp)\n \n for s_index, s_next in enumerate(next_states):\n rmdp.add_transition(state_index, a, s_next, trp[s_index], get_reward(s_next, self.resolution, self.grid_x, self.grid_y))\n \n #Add the current transition to the RMDP\n #for next_st in range():\n # rmdp.add_transition(s, a, next_st, trp[int(next_st)], rewards[next_st])\n \n #Solve the current RMDP\n rsol = rmdp.rsolve_mpi(b\"optimistic_l1\",threshold)\n \n #If the whole MDP is unchanged, meaning the new value function didn't change the uncertanty\n #set for any state-action, no need to iterate more!\n if is_mdp_unchanged or i==num_update-1:\n #print(\"**** Add Values *****\")\n #print(\"MDP remains unchanged after number of iteration:\",i)\n #print(\"rsol.valuefunction\",rsol.valuefunction)\n return rsol\n \n valuefunction = rsol.valuefunction\n valuefunctions.append(valuefunction)\n i+=1\n #except Exception as e:\n # print(\"!!! Unexpected Error in RSVF !!!\", sys.exc_info()[0])\n # print(e)\n # continue\n \n #return under_estimate, real_regret, violation", "title": "" }, { "docid": "70264f400d9a38c20d60e7d7e525b59d", "score": "0.531335", "text": "def optimize(self) -> None:\n self.model.optimize()", "title": "" }, { "docid": "3d190c0b91cbdb17cd5da317c5405808", "score": "0.5310657", "text": "def _bidding_function(self, utility=None, cost=None):\n # write fwd step of nn\n pass", "title": "" }, { "docid": "ea0343ee80f1c8058be61b319649e27d", "score": "0.53088325", "text": "def optimize(self, duplicate_manager=None):\n if not self.analytical_gradient_acq:\n out = self.optimizer.optimize(f=self.acquisition_function, duplicate_manager=duplicate_manager)\n else:\n out = self.optimizer.optimize(f=self.acquisition_function, f_df=self.acquisition_function_withGradients, duplicate_manager=duplicate_manager)\n return out", "title": "" }, { "docid": "70589f41531b3e7cc4146b4cb659b791", "score": "0.53069746", "text": "def sim(self, max_iter=1000):\n super().sim()\n if self.rho_initial != None:\n self.rho = Function(self.A, self.rho_initial.vector(), name=\"Control\")\n print(\"Optimization Beginning\")\n self.state = Function(self.W)\n self.state2 = Function(self.W2)\n if self.cst_num >= 1: self.__vf_fun_var_assem__()\n if self.cst_num >= 2: self.__vf_fun_var_assem2__()\n if self.cst_num >= 3: self.__vf_fun_var_assem3__()\n self.nvars = len(self.rho.vector())\n\n # Number of Design Variables\n nvar = self.nvars\n # Upper and lower bounds\n x_L = np.ones(nvar) * 0. #bounds[0]\n x_U = np.ones(nvar) * 1. 
#bounds[1]\n # Number of non-zeros gradients\n constraints_nnz = nvar*self.cst_num\n acst_L = np.array(self.cst_L)\n acst_U = np.array(self.cst_U)\n OptObj = tobs.create(nvar, # number of the design variables\n x_L, # lower bounds of the design variables\n x_U, # upper bounds of the design variables\n self.cst_num, # number of constraints\n acst_L, # lower bounds on constraints,\n acst_U, # upper bounds on constraints,\n constraints_nnz, # number of nonzeros in the constraint Jacobian\n 0, # number of nonzeros in the Hessian\n self.obj_fun, # objective function\n self.obj_dfun, # gradient of the objective function\n self.cst_fval, # constraint function\n self.jacobian ) # gradient of the constraint function\n\n #Parameters\n\n def cb_post(rho_opt, rho_notfiltered, mesh_adapt):\n self.file_mesh_adapted << mesh_adapt\n self.rho = rho_opt\n\n self.file_mesh_adapted << self.rho.function_space().mesh()\n self.rho.full_geo = self.full_geo\n OptObj.solve(self.rho, minimize=self.minimize, filter_fun=self.density_filter, call_back=cb_post)\n return self.rho", "title": "" }, { "docid": "f9677a9be1a41a2715908cb4590b5c75", "score": "0.5306475", "text": "def run_optimizations(self, node):\n from . import optimize\n\n # TODO: support vectorized specializations\n if (self.context.optimize_broadcasting and not\n self.sp.is_contig_specializer or\n self.sp.is_vectorizing_specializer):\n optimizer = optimize.HoistBroadcastingExpressions(self.context)\n node = optimizer.visit(node)\n\n return node", "title": "" }, { "docid": "91232ab916407fdba77acba8ef5bb2e8", "score": "0.5303803", "text": "def eval_b(theta):\n pass;", "title": "" }, { "docid": "2ca0c06d1985cfbe92d285c64e443041", "score": "0.53021544", "text": "def _binary_optimize(self):\n self.B = np.sign(np.random.normal(size=(self.P.shape[0], self.dimensions)))\n for _ in tqdm(range(self.binarization_rounds), desc=\"Iteration\", leave=True):\n self._update_G()\n self._update_Q()\n self._update_B()", "title": "" }, { "docid": "bcc2cbe334053254b4bbd27fd091f277", "score": "0.52905345", "text": "def optimize(self, f=None, df=None, f_df=None, duplicate_manager=None):\n self.f = f\n self.df = df\n self.f_df = f_df\n\n ## --- Update the optimizer, in case context has beee passed.\n self.optimizer = choose_optimizer(self.optimizer_name, self.context_manager.noncontext_bounds)\n # print(\"In FlexibleAcquisitionOptimizer.optimize:\")\n # print(\" self.context_manager.nocontext_index : \", self.context_manager.noncontext_index)\n # print(\" self.context_manager.nocontext_bounds : \", self.context_manager.noncontext_bounds)\n # print(\" self.context_manager.nocontext_index_obj: \", self.context_manager.nocontext_index_obj)\n\n ## --- Selecting the anchor points and removing duplicates\n if self.type_anchor_points_logic == max_objective_anchor_points_logic:\n anchor_points_generator = ObjectiveAnchorPointsGenerator(self.space, random_design_type, f)\n elif self.type_anchor_points_logic == thompson_sampling_anchor_points_logic:\n anchor_points_generator = ThompsonSamplingAnchorPointsGenerator(self.space, sobol_design_type, self.model)\n\n ## -- Select the anchor points (with context)\n # print(\"In FlexibleAcquisitionOptimizer.optimize:\")\n # print(\" self.context_manager.nocontext_index : \", self.context_manager.noncontext_index)\n # print(\" self.context_manager.nocontext_bounds : \", self.context_manager.noncontext_bounds)\n # print(\" self.context_manager.nocontext_index_obj: \", self.context_manager.nocontext_index_obj)\n anchor_points = 
anchor_points_generator.get(duplicate_manager=duplicate_manager,\n context_manager=self.context_manager)\n\n ## --- Applying local optimizers at the anchor points and update bounds of the optimizer (according to the context)\n optimized_points = [\n apply_optimizer(self.optimizer, a, f=f, df=None, f_df=f_df, duplicate_manager=duplicate_manager,\n context_manager=self.context_manager, space=self.space) for a in anchor_points]\n x_min, fx_min = min(optimized_points, key=lambda t: t[1])\n\n # x_min, fx_min = min([apply_optimizer(self.optimizer, a, f=f, df=None, f_df=f_df, duplicate_manager=duplicate_manager, context_manager=self.context_manager, space = self.space) for a in anchor_points], key=lambda t:t[1])\n\n return x_min, fx_min", "title": "" }, { "docid": "6b0c5e966e718ac83e342ca0f8d9de6f", "score": "0.5284349", "text": "def optimize(self):\n logging.info(\"generating initial values for bayesian optimization\")\n if self.lambda_0 is None or self.c_lambda_0 is None:\n self.generate_initial_samples()\n for i in range(self.num_function_evaluations):\n logging.info(\"=\" * 20)\n logging.info(\"Evaluation iteration: \" + str(i + 1))\n self.gpr.fit(self.lambda_0, self.c_lambda_0)\n self.gpr.optimize_l()\n mu_lambda_1, sigma_lambda_1, samples_1 = self.gpr.predict(self.x_star, self.num_sample_curves)\n next_lambda = self.find_lambda_byEI()\n Y_new = self.c_lambda(next_lambda) # train_resnet(lr=next_lambda[0][0])\n if i % 4 == 0:\n plt.figure()\n plt.rc('font', size=8)\n plt.rc('legend', fontsize=8)\n logging.info(\"Lambda :\" + str(next_lambda) + \"C_lambda :\" + str(Y_new))\n plt.subplot(4, 2, 2 * (i % 4) + 1)\n self.plotutil.plot_gp(mu_lambda_1, sigma_lambda_1, self.x_star, self.lambda_0, self.c_lambda_0, samples_1,\n show_legend=i == 0, savefig=False)\n plt.title(f'Iteration {i + 1}')\n\n plt.subplot(4, 2, 2 * (i % 4) + 2)\n self.plotutil.plot_acquisition(self.x_star, self.expected_improvement(self.x_star), next_lambda,\n show_legend=i == 0, savefig=False)\n\n if self.plotutil is not None:\n if (i + 1) % 4 == 0 or (i + 1) == self.num_function_evaluations:\n self.plotutil.pp.savefig()\n\n self.lambda_0 = np.append(self.lambda_0, next_lambda)[np.newaxis][:].reshape(-1, 1)\n self.c_lambda_0 = np.append(self.c_lambda_0, Y_new)[np.newaxis][:].reshape(-1, 1)\n\n self.plotutil.plot_convergence(self.lambda_0, self.c_lambda_0, show_legend=True, savefig=False)\n if self.plotutil is not None:\n self.plotutil.pp.savefig()\n logging.info(\"Optimization complete\")\n return self.lambda_0, self.c_lambda_0", "title": "" }, { "docid": "0da2e1baab1b2a8c0b7afbe9c1121f80", "score": "0.5283377", "text": "def trainNN(mylambda=0.):\n\n randomThetas_unrolled = flattenParams(genRandThetas())\n startTime = time.time()\n result = scipy.optimize.fmin_cg(computeCost, x0=randomThetas_unrolled, fprime=backPropagate, \\\n args=(flattenX(X),y,mylambda),maxiter=50,disp=True,full_output=True)\n print \"elapsed time: \", time.time() - startTime\n return reshapeParams(result[0])", "title": "" }, { "docid": "a31cfd7905ad9a6efbc94194b59dcb6d", "score": "0.5283261", "text": "def optimiser(self):\n x0 = 3\n self.history.append(np.transpose(np.r_[x0, self.runAndComputeError(x0)]))\n #history= np.append(history, [[x0, runAndComputeError(x0,planet_name,input_file,nb_steps,freq,timestep)]], axis = 0)\n scaling_factor = soo.fmin(self.runAndComputeError, x0, callback = self.callback)\n #print('The scaling factor is equal to ' + scaling_factor)\n return scaling_factor", "title": "" }, { "docid": "e40ce6b3c4ed890b77248e83911b78fd", "score": 
"0.52723086", "text": "def f_factory(epoch, shrinkage, learner, g):\n return lambda vec: g[epoch](vec) + shrinkage * learner.predict(vec)", "title": "" }, { "docid": "bfa104eefd34e51dc7191e381225b486", "score": "0.52672315", "text": "def _run(self):\n if self._optimizer == 'steepest':\n hessian = self._pseudo_hessian()\n else:\n if self._hess_diag_approx:\n hessian = self._hessian_diag\n else:\n hessian = self._hessian\n bounds = None\n if not self._vmax is None:\n if self._family == 'factor_gaussian':\n theta2_max = -.5 / self._vmax - self._sample._context.theta[(self._dim + 1):]\n bounds = [(None, None) for i in range(self._dim + 1)]\n bounds += [(None, theta2_max[i]) for i in range(self._dim)]\n\n # Initial parameter guess: fit the sampled point with largest probability\n self._theta_init = np.zeros(self._F.shape[0])\n self._theta_init[0] = self._sample._log_fmax\n self._theta = self._theta_init\n m = minimizer(self._loss, self._theta, self._optimizer,\n self._gradient, hessian,\n maxiter=self._maxiter, tol=self._tol,\n bounds=bounds)\n theta = m.argmin()\n self._factor_fit = self._family_obj.from_theta(theta)\n return m.info()", "title": "" }, { "docid": "6d7b679fc7f670f071b2052ccab8895d", "score": "0.5264796", "text": "def minimize(function, parameters, verbose=False, minos=False,\n negate=False):\n\n if isinstance(function, str):\n function = hep.expr.parse(function)\n if not callable(function):\n raise TypeError, \\\n \"'function' must be callable or an expression string\"\n parameters = normalizeParameters(parameters)\n \n # Do the fit.\n start_time = time.time()\n result = ext.minuit_minimize(\n function, parameters, verbose, minos, negate)\n end_time = time.time()\n\n # Store the parameter settings.\n result.parameters = dict([ (p[0], p[1 :]) for p in parameters ])\n # Store the elapsed time.\n result.elapsed_time = end_time - start_time\n # All done.\n return result", "title": "" }, { "docid": "ba672f268975685d5e9f19631dd519de", "score": "0.526137", "text": "def solve_stagewise_optim(self, i, H, g, x_min, x_max, x_next_min, x_next_max):\n raise NotImplementedError", "title": "" }, { "docid": "1c5ae3140f717e171e3b5e679bcca7cb", "score": "0.5249138", "text": "def ApproximateJacobian(f, x, dx=1e-6):\n try:\n n = len(x)\n except TypeError:\n n = 1\n fx = f(x)\n Df_x = N.matrix(N.zeros((n,n)))\n\n for i in range(n):\n v = N.matrix(N.zeros((n,1)))\n v[i,0] = dx\n Df_x[:,i] = (f(x + v) - fx) / dx\n return Df_x", "title": "" }, { "docid": "8b42740b527e9cde66ee519725534672", "score": "0.5239783", "text": "def test_2():\n d = 5\n f = mt_obj.styblinski_tang_function\n g = mt_obj.styblinski_tang_gradient\n check_func = mt_obj.calc_minimizer_styb\n func_args = ()\n func_args_check_func = func_args\n num = 0\n projection = False\n option = 'minimize_scalar'\n met = 'Brent'\n initial_guess = 0.005\n relax_sd_it = 1\n bounds_1 = -5\n bounds_2 = 5\n tolerance = 0.00001\n num_points = 20\n number_its_compare = 3\n test_beta = [0.001, 0.01, 0.025]\n usage = 'metod_algorithm'\n num_functions = 3\n (fails_nsm_total, checks_nsm_total,\n fails_sm_total, checks_sm_total,\n max_b_calc_func_val_nsm,\n store_all_its,\n all_store_minimizer,\n store_all_norm_grad) = (mt_ays.main_analysis_other\n (d, f, g, check_func, func_args,\n func_args_check_func, test_beta, num_functions,\n num_points, projection, tolerance, option, met,\n initial_guess, bounds_1, bounds_2, usage,\n relax_sd_it, num, number_its_compare))\n assert(fails_nsm_total.shape == (len(test_beta), number_its_compare - num,\n number_its_compare - 
num))\n assert(fails_sm_total.shape == (len(test_beta), number_its_compare - num,\n number_its_compare - num))\n assert(checks_nsm_total.shape == (len(test_beta), number_its_compare - num,\n number_its_compare - num))\n assert(checks_sm_total.shape == (len(test_beta), number_its_compare - num,\n number_its_compare - num))\n assert(max_b_calc_func_val_nsm.shape == (len(test_beta), num_functions))\n assert(store_all_its.shape == (num_functions, num_points))\n assert(all_store_minimizer.shape == (num_functions, num_points))\n assert(store_all_norm_grad.shape == (num_functions, num_points))", "title": "" }, { "docid": "13d3dafeb0a2ae642bae8ff1b94aa8ad", "score": "0.52278584", "text": "def optimise(loss, n_runs):\n\n opt_runs = []\n\n for _ in range(5):\n\n # Weights (must add up to 1)\n w = np.random.random(n_runs)\n w /= np.linalg.norm(w)\n\n # Rescaling factor (preferably 0.95<r<1.05)\n r = np.random.random(n_runs)\n r = 0.95 + r/10\n\n # Shift factor\n s = 50.0 + 25.0 * np.random.randn()\n\n in_arg = [s]\n for weight, rescaling in zip(w, r):\n in_arg.append(weight)\n in_arg.append(rescaling)\n in_arg = np.array(in_arg)\n \n opt = minimize(loss, in_arg, method='BFGS')\n opt_runs.append(opt)\n \n opt_runs = sorted(opt_runs, key=lambda x: x['fun'])\n return opt_runs[0]['x']", "title": "" }, { "docid": "9b7a8f3295838954c8cef556ed0af252", "score": "0.5225455", "text": "def optimize(w, b, X, Y, num_iterations, learning_rate, print_cost = False):\n costs =[]\n for i in range(num_iterations):\n grads,cost=propagate(w,b,X,Y)\n \n dw = grads[\"dw\"]\n db = grads[\"db\"]\n \n w = w-learning_rate*dw\n b = b-learning_rate*db\n \n #print(\"i=\",i,\" cost=\",cost)\n \n if i%100==0:\n costs.append(cost)\n if print_cost:\n print(\"Cost after iteration %i:%f\" % (i,cost))\n \n params={\"w\":w,\n \"b\":b\n }\n grads = {\"dw\":dw,\n \"db\":db\n } \n return params,grads,costs", "title": "" }, { "docid": "e07f8d57ac136923cd1e2b81e15c2466", "score": "0.5220874", "text": "def objective(self, p):\n p = [0.0, p[0], p[1], 0.0, p[2]]\n return self.fbe(p) + 25*(self.fxf(p) + self.fyf(p)) + 30*self.ftf(p)", "title": "" }, { "docid": "9da537dcc6d57b04590994eef97ec7ab", "score": "0.52052826", "text": "def trainNN(mylambda=0.):\n\n randomThetas_unrolled = flattenParams(Rand_Thetas())\n result = scipy.optimize.fmin_cg(computeCost, x0=randomThetas_unrolled, fprime=backPropagate, \\\n args=(flattenX(X),y,mylambda),maxiter=50,disp=True,full_output=True)\n return reshapeParams(result[0])", "title": "" }, { "docid": "459b762cfee13e5dddf2f556c86f53fc", "score": "0.5200039", "text": "def objective_function(self, configuration, **kwargs):\n pass", "title": "" }, { "docid": "8809cb78107dadbda56aefb247da0d68", "score": "0.5196856", "text": "def spsa_minimize(\n self, func, guess, iterations=200, constrain=None, a=None, c=None, acap=None\n ):\n # c>0; a>0; acap is an integer > 0\n if c == None:\n c = 0.001 # Guideline (Spall 1998)\n if acap == None:\n acap = max([1, int(iterations / 10)]) # Guideline (Spall 1998)\n if a == None:\n # Guideline (Spall 1998). 
Assume the desired\n # movement in early iterations is 1/10 of magnitude\n # (norm) of ghat(guess,func,c,acap):\n # > ghats=[_norm(ghat(guess,func,c,acap)) for i in range(5)]\n # > meanghat=sum(ghats)/len(ghats)\n # > movementRatio=desiredMovement/meanghat\n movementRatio = 0.1\n a = ((acap + 1) ** 1.0) * movementRatio\n if a <= 0 or c <= 0 or acap <= 0:\n raise ValueError\n g = 1.0 / 6\n low = [x for x in guess]\n high = [x for x in guess]\n curguess = [x for x in guess]\n newguess = [x for x in guess]\n oldvalue = func(guess)\n bestguess = [x for x in guess]\n bestvalue = oldvalue\n nochangecount = 0\n for i in range(iterations):\n ci = c * 1.0 / (1 + i) ** g\n d = [ci * (self.rndint(1) * 2 - 1) for x in curguess]\n for j in range(len(curguess)):\n high[j] = curguess[j] + d[j]\n low[j] = curguess[j] - d[j]\n gr = func(high) - func(low)\n ai = a * 1.0 / (1 + i + acap)\n for j in range(len(curguess)):\n newguess[j] = curguess[j] - ai * gr / (d[j] * 2.0)\n # constraint\n if constrain != None:\n constrain(newguess)\n newvalue = func(newguess)\n if newvalue > oldvalue + 10:\n continue\n # update current guess\n for j in range(len(curguess)):\n curguess[j] = newguess[j]\n if newvalue < bestvalue:\n bestvalue = newvalue\n for j in range(len(curguess)):\n bestguess[j] = newguess[j]\n # NOTE: Here, 1e-5 is a tolerance\n # between successive iterations\n # of the algorithm; values within\n # tolerance are treated as changing\n # little\n if abs(newvalue - oldvalue) < 1e-5:\n nochangecount += 1\n if nochangecount > 10:\n break\n else:\n nochangecount = 0\n oldvalue = newvalue\n return bestguess", "title": "" }, { "docid": "1ae96c3296a24f96c2522685393829ad", "score": "0.5194547", "text": "def fb_cruz_nghia3(J, f, g, df, prox_g, x0, numb_iter=100):\n\n begin = time()\n th = 0.7\n delta = 0.49\n values = [J(x0)]\n iterates = [values, x0]\n\n def T(values, x):\n df_x = df(x)\n z = prox_g(x - df_x, 1)\n beta = 1\n for i in count(0):\n x1 = x - beta * (x - z)\n if f(x1) + g(x1) <= f(x) + g(x) - beta * (g(x) - g(z)) - beta * df_x.dot(x - z) + beta / 2 * LA.norm(x - z)**2:\n break\n else:\n beta *= th\n x = x1\n values.append(J(x))\n res = [values, x]\n return res\n\n for i in range(numb_iter):\n iterates = T(*iterates)\n end = time()\n print \"---- Forward-backward method 3----\"\n print \"Number of iterations:\", numb_iter\n # print \"Number of gradients, n_grad:\", iterates[-1]\n # print \"Number of prox_g:\", numb_iter\n print \"Time execution:\", end - begin\n\n return iterates", "title": "" }, { "docid": "abc3381fab55c47aafca999bf2bc96e1", "score": "0.5193821", "text": "def _run_optimiser(cls, raw_funcs, domain_config_file, worker_manager, max_capital,\n mode, *args, **kwargs):\n raise NotImplementedError('Implement in a child class.')", "title": "" }, { "docid": "ee6392509800d16ddfd67aacd68c2c14", "score": "0.5189675", "text": "def minimize(term):\n return _optimize(term, True)", "title": "" }, { "docid": "3c82d10ab90572147ffc2f4276e26052", "score": "0.51872575", "text": "def objective(\n self,\n x: Optional[Union[Array, BlockArray]] = None,\n ) -> float:\n if x is None:\n x = self.x\n return self.f(x) + self.g(self.C(x))", "title": "" }, { "docid": "c8c2911044d32da615b8794bcb52981e", "score": "0.5186707", "text": "def optimize(w, b, X, Y, num_iterations, learning_rate, print_cost = False):\r\n\r\n costs = []\r\n\r\n for i in range(num_iterations):\r\n\r\n grads,cost = propagate(w, b , X, Y)\r\n\r\n dw = grads[\"dw\"]\r\n db = grads[\"db\"]\r\n\r\n w = w - learning_rate*dw\r\n b = b - 
learning_rate*db\r\n\r\n if i % 100 == 0:\r\n costs.append(cost)\r\n\r\n if print_cost and i % 100 ==0:\r\n print (\"Cost after iteration %i: %f\" %(i, cost))\r\n\r\n params = {'w':w,\r\n 'b':b}\r\n\r\n grads = {'dw':dw,\r\n 'db':db}\r\n\r\n return params, grads, costs", "title": "" }, { "docid": "920be7535ebc79b5e8a91e46b79c9828", "score": "0.516906", "text": "def stab_BB(\n x0,\n costFn,\n gradFn,\n bb = 1,\n deltaUpdateStrategy = 'adaptative',\n deltaInput = 1e6,\n c = 0.2,\n maxIt = 10000,\n tol = 1e-7,\n verbose=False,\n):\n\n class StabBBInt:\n \"\"\" Internals of the function \n The motivation for using this internal class is to be able to return a\n value even if the user makes a keyboard interruption.\n \"\"\"\n\n def __init__(self):\n #: History of evaluated x's\n self.x = [x0, self.backtracking(x0, gradFn(x0))]\n #: History of evaluated alphas\n self.alpha = []\n\n def backtracking(self, x0, g0):\n \"\"\" Suggested algorithm to avoid poor choices of 2 initial points \"\"\"\n alpha0 = 1 / np.linalg.norm(x0, np.inf)\n s0 = -alpha0 * g0\n x1 = x0 + s0\n while costFn(x1) > costFn(x0):\n s0 = s0 / 4\n x1 = x0 + s0\n return x1\n\n def mainAlg(self):\n delta = deltaInput\n #: g_{k-1} for the algorithm\n gkant = gradFn(x0)\n #: g_{k} for the algorithm\n gk = gradFn(self.x[1])\n #: History of gradient norm\n self.normGrad = [np.linalg.norm(gk)]\n for k in range(int(maxIt)):\n if self.normGrad[-1] > tol:\n #: $s_{k-1} \\leftarrow x_{k-1} - x_{k-2}$\n skant = self.x[-1] - self.x[-2]\n #: $y_{k-1} \\leftarrow g_k - g_{k-1}\n ykant = gk - gkant\n \n # Compute $\\alpha_k$\n #\n alpha_bb = self.calcAlpha(skant, ykant)\n # Adaptation of method to deal with nonconvex functions\n alpha_bb_corrected = self.correctAlpha(alpha_bb, skant, ykant)\n # Applies the proposed stabilization\n alphak = min([alpha_bb_corrected, delta / np.linalg.norm(gk),])\n\n #: Set $x_{k+1} \\leftarrow x_k - \\alpha_k g_k$\n xk = self.x[-1] - alphak * gk\n \n # Update $g$ \n gkant = gk\n gk = gradFn(xk)\n\n # Update $\\Delta$ depending on strategy\n if deltaUpdateStrategy == \"adaptative\":\n delta = self.deltaUpdateFn(delta, k)\n\n # Store processed states\n self.alpha.append(alphak)\n self.x.append(xk)\n self.normGrad.append(np.linalg.norm(gk))\n\n if verbose:\n print(f\"New norm of gradient is {self.normGrad[-1]}\")\n\n def calcAlpha(self, skant, ykant):\n if bb == 1:\n return (skant.transpose() @ ykant) / (ykant.transpose() @ ykant)\n elif bb == 2:\n return (skant.transpose() @ skant) / (skant.transpose() @ ykant)\n else:\n raise Exception(\"parameter bb is not within range\")\n\n def correctAlpha(self, alpha_bb, skant, ykant):\n if alpha_bb <= 0:\n # eq 4.2\n # Although the paper says this correction for nonconvex\n # problems depends on sk for the current iteration,\n # it's not available up to this point. 
Will use k-1\n # regardless\n return np.linalg.norm(skant) / np.linalg.norm(ykant)\n else:\n return alpha_bb\n\n def deltaUpdateFn(self, lastDelta, k):\n if k == 4:\n # Adaptative choice of parameter delta as equation (4.1)\n return c * min(\n [\n np.linalg.norm(self.x[4] - self.x[3]),\n np.linalg.norm(self.x[3] - self.x[2]),\n np.linalg.norm(self.x[2] - self.x[1]),\n ]\n )\n else:\n return lastDelta\n\n stabint = StabBBInt()\n\n try:\n stabint.mainAlg()\n except KeyboardInterrupt:\n pass\n bestX = stabint.x[np.argmin(stabint.normGrad) + 1]\n xHistory, alphaHistory, normGradHistory = (\n stabint.x,\n stabint.alpha,\n stabint.normGrad,\n )\n return bestX, (xHistory, alphaHistory, normGradHistory)", "title": "" }, { "docid": "bd2f8227a088548dc1c5060d0e492609", "score": "0.5168162", "text": "def approximate(f, f_target, approximate_option):\n history = {}\n params_cur = copy.deepcopy(approximate_option[\"params\"])\n params_old = copy.deepcopy(approximate_option[\"params\"])\n steps_num = 0\n params_min = copy.deepcopy(approximate_option[\"params\"])\n f_target_val = []\n f_val = []\n var_list = approximate_option[\"x\"]\n var_list_validate = approximate_option[\"x_validate\"]\n loss_function = approximate_option[\"loss_function\"]\n opt = approximate_option[\"opt\"]\n eps = approximate_option[\"eps\"]\n max_steps = approximate_option.get(\"max_steps\", 10**10)\n val = approximate_option.get(\"val\", None)\n \"\"\"\n if val:\n f_target.val = val\n full_x = np.concatenate((np.array(var_list), np.array(var_list_validate)))\n full_y = []\n for x in full_x:\n f_target.set_var_list(x)\n full_y.append(f_target())\n full_y = np.array(full_y)\n snr = approximate_option.get(\"snr\", None)\n mean = approximate_option.get(\"mean\", 0)\n std = approximate_option.get(\"std\", 1)\n probability_threshold = approximate_option.get(\n \"probability_threshold\", 0.5\n )\n seed = approximate_option.get(\"seed\", 42)\n noise_type = approximate_option.get(\"noise_type\", \"\")\n x2y = add_noise(\n full_x, full_y, noise_type, snr, mean,\n std, probability_threshold, seed\n )\n\n if noise_type:\n f_target.set_x2y(x2y)\n \"\"\"\n for x in var_list:\n f.set_var_list(x)\n f_target.set_var_list(x)\n f_val.append(f(params_cur))\n f_target_val.append(f_target())\n # Calculate nearness of target and approximate function\n loss_min = loss_function(f_target_val, f_val)\n\n while True:\n # Approximate cycle\n if steps_num >= max_steps:\n break\n with tf.GradientTape() as t:\n f_target_val = []\n f_val = []\n for x in var_list:\n f.set_var_list(x)\n f_target.set_var_list(x)\n f_val.append(f(params_cur))\n f_target_val.append(f_target())\n # Calculate nearness of target and approximate function\n loss_val = loss_function(f_target_val, f_val)\n\n gradients = t.gradient(loss_val, params_cur)\n opt.apply_gradients(zip(gradients, params_cur))\n params_delta = get_delta(params_old, params_cur)\n history[steps_num] = {\n \"params\": get_numpy_array(params_cur), \"params_old\": get_numpy_array(params_old),\n \"loss\": loss_val.numpy(), \"params_delta (L2 Norm)\": params_delta,\n }\n\n params_old = copy.deepcopy(params_cur)\n if loss_val < loss_min:\n params_min = copy.deepcopy(params_cur)\n loss_min = loss_val\n if params_delta < eps:\n break\n steps_num += 1\n\n f_target_val = []\n f_val = []\n for x in var_list_validate:\n f.set_var_list(x)\n f_target.set_var_list(x)\n f_val.append(f(params_min))\n f_target_val.append(f_target())\n\n loss_validate = loss_function(f_target_val, f_val)\n\n result = {\n \"steps_num\": steps_num,\n 
\"history\": history,\n \"loss_min\": loss_min.numpy(),\n \"loss_validate\": loss_validate.numpy(),\n \"params_min\": get_numpy_array(params_min),\n }\n\n return result", "title": "" }, { "docid": "380077b4ecd9cf6952c9b7efcf3e6bc2", "score": "0.5163799", "text": "def minimize(self,\n function: Callable,\n bounds: Union[Tuple, List[Tuple]] = None,\n guess: np.ndarray = None) -> OptResult:\n raise NotImplementedError", "title": "" }, { "docid": "6e7f074cceaa253ef972827d307e744b", "score": "0.5155544", "text": "def Brody(x,b): \r\n a=(sp.special.gamma((b+2.0)/(b+1.0)))**(b+1.0)\r\n p=(b+1.0)*(a)*(x**b)*(np.exp(-a*(x**(b+1.0))))\r\n return p", "title": "" }, { "docid": "ab832bf3377f9e8834acdbacf940fcb2", "score": "0.51544976", "text": "def f_effect(n_reps=10, dir_id=1000, job_id=0):\n p = {}\n p['n_input_graphs'] = 10\n p['duplicates'] = 3\n p['density_multiplier'] = 1.1\n p['p_keep_edge'] = 0.8\n p['g'] = 0.5\n p['f'] = 0.4\n p['gap_cost'] = p['f']\n p['n_entities'] = 100\n p['n_input_graph_nodes'] = 30\n p['max_iters'] = 300\n\n varied_param = 'f'\n p[varied_param] = list(np.arange(0.05, 0.61, 0.05)) + \\\n [0.7, 0.8, 0.9, 1, 1.1, 1.2, 1.3, 1.5]\n methods = ['ICM', 'progmKlau', 'upProgmKlau', 'mKlau', 'LD', 'binB-LD5',\n 'meLD5_75', 'meLD5_100', 'meLD5_125', 'isorankn', 'LD5']\n e_seed = np.random.randint(0, 1000000) + int(job_id)\n experiment_template(n_reps, p, varied_param, cv=False, methods=methods,\n title='f_effect{}'.format(job_id), e_seed=e_seed,\n dir_id=dir_id)", "title": "" }, { "docid": "d33f6600cddb25bc35972fc7207686e6", "score": "0.5142141", "text": "def BFGS(f, x, iters=10, tol=1e-10):\n\tif isinstance(x, list):\n\t\t# Number of variables\n\t\tm = len(x)\n\n\t\t# Convert to da.Var type\n\t\tif m > 1:\n\t\t\tfor i in range(len(x)):\n\t\t\t\tif not isinstance(x[i], da.Var):\n\t\t\t\t\tx[i] = da.Var(x[i], _get_unit_vec(m, i))\n\t\t\treturn _BFGSVector(f, x, iters=iters, tol=tol)\n\t\tx = x[0]\n\tif not isinstance(x, da.Var):\n\t\tx = da.Var(x)\n\n\t# Number of variables\n\tm = 1\n\n\t# Initial step\n\tg = f(x)\n\tvar_path = [x.val]\n\tg_path = [g.val]\n\n\t# Initialize inv_hessian guess\n\tinv_hessian = np.eye(m)\n\n\tfor i in range(iters):\n\t\t# Take step in direction of steepest descent\n\t\tstep = da.Var((-1 * inv_hessian * g.der)[0], None)\n\t\tx_update = x + step\n\n\t\t# If step size is below tolerance, no need to continue\n\t\tcond = -step if step < 0 else step\n\t\tif cond < tol:\n\t\t\t# print (\"Reached tol in {} iterations\".format(i + 1))\n\t\t\tg = f(x_update)\n\t\t\tvar_path.append(x_update.val.flatten())\n\t\t\tg_path.append(g.val.flatten())\n\t\t\tbreak\n\n\t\ty = f(x_update).der - f(x).der\n\t\tidentity = np.eye(m)\n\t\trho = 1 / (y * step.val)\n\n\t\tA = identity - step.val * rho * y\n\t\tB = identity - rho * y * step.val\n\t\tC = rho * step.val * step.val\n\n\t\tdelta_hessian = A * inv_hessian * B + C\n\n\t\tg = f(x_update)\n\t\tvar_path.append(x_update.val.flatten())\n\t\tg_path.append(g.val.flatten())\n\n\t\tx = x_update\n\t\tinv_hessian = delta_hessian\n\n\telse:\n\t\t# print (\"Reached {} iterations without satisfying tolerance.\".format(iters))\n\t\tpass\n\n\tminimum = da.Var(x.val, g.der)\n\tvar_path = np.reshape(np.concatenate((var_path)), [-1])\n\tg_path = np.concatenate(g_path)\n\treturn minimum, var_path, g_path", "title": "" }, { "docid": "c18b4d9cc7bcd2a5806dc8453761ab62", "score": "0.51385486", "text": "def minimize(\r\n\t\tself,\r\n\t\tobjective,\r\n\t\tbackend: str = None,\r\n\t\tsilent: bool = True,\r\n\t\tmaxiter: int = None,\r\n\t):\r\n\t\tif 
not maxiter:\r\n\t\t\tmaxiter = self.maxiter\r\n\t\treturn self._minimize(\r\n\t\t\tobjective=objective,\r\n\t\t\tmethod=self._method,\r\n\t\t\tgradient=None,\r\n\t\t\thessian=None,\r\n\t\t\tinitial_values=None,\r\n\t\t\tvariables=None,\r\n\t\t\tbackend=backend,\r\n\t\t\tsilent=silent,\r\n\t\t\ttol=1.e-13,\r\n\t\t\tmaxiter=maxiter,\r\n\t\t\t*self._args,\r\n\t\t\t**self._kwarks\r\n\t\t)", "title": "" }, { "docid": "f410b9c0df08f777897137feb15878c1", "score": "0.51336545", "text": "def optimize(node, environment):\r\n optimizer = Optimizer(environment)\r\n return optimizer.visit(node)", "title": "" }, { "docid": "8323a244b97416eb9b9e4142b14ef700", "score": "0.51294893", "text": "def optimize(w, b, X, Y, num_iterations, learning_rate, print_cost=False):\r\n\r\n costs = []\r\n\r\n for i in range(num_iterations):\r\n\r\n grads, cost = propagate(w, b, X, Y)\r\n\r\n # Retrieve derivatives from grads\r\n dw = grads[\"dw\"]\r\n db = grads[\"db\"]\r\n\r\n # updates\r\n w = w - learning_rate * dw\r\n b = b - learning_rate * db\r\n\r\n # Record the costs\r\n if i % 100 == 0:\r\n costs.append(cost)\r\n\r\n # Print the cost every 100 training examples\r\n if print_cost and i % 100 == 0:\r\n print(\"Cost after iteration %i: %f\" % (i, cost))\r\n\r\n params = {\"w\": w,\r\n \"b\": b}\r\n\r\n grads = {\"dw\": dw,\r\n \"db\": db}\r\n\r\n return params, grads, costs", "title": "" }, { "docid": "6383e109b016ea83d42c47ecbe395503", "score": "0.5129055", "text": "def optimize(w, b, X, Y, num_iterations, learning_rate, print_cost = False):\n\n costs = []\n \n for i in range(num_iterations):\n \n \n # Cost and gradient calculation (≈ 1-4 lines of code)\n ### START CODE HERE ### \n grads, cost = propagate(w, b, X, Y)\n ### END CODE HERE ###\n \n # Retrieve derivatives from grads\n dw = grads[\"dw\"]\n db = grads[\"db\"]\n \n # update rule (≈ 2 lines of code)\n ### START CODE HERE ###\n w = w - learning_rate * dw\n b = b - learning_rate * db\n ### END CODE HERE ###\n \n # Record the costs\n if i % 100 == 0:\n costs.append(cost)\n \n # Print the cost every 100 training iterations\n if print_cost and i % 100 == 0:\n print (\"Cost after iteration %i: %f\" %(i, cost))\n \n params = {\"w\": w,\n \"b\": b}\n \n grads = {\"dw\": dw,\n \"db\": db}\n \n return params, grads, costs", "title": "" }, { "docid": "3f5852d6e4e9fe4f542d1f2e598cc03b", "score": "0.51271546", "text": "def fdpf(system):\n\n # sparse library set up\n sparselib = system.Settings.sparselib.lower()\n\n # general settings\n niter = 1\n iter_max = system.SPF.maxit\n convergence = True\n tol = system.Settings.tol\n system.Settings.error = tol + 1\n err_vec = []\n if (not system.Line.Bp) or (not system.Line.Bpp):\n system.Line.build_b()\n\n # initialize indexing and Jacobian\n ngen = system.SW.n + system.PV.n\n sw = system.SW.a\n sw.sort(reverse=True)\n no_sw = system.Bus.a[:]\n no_swv = system.Bus.v[:]\n for item in sw:\n no_sw.pop(item)\n no_swv.pop(item)\n gen = system.SW.a + system.PV.a\n gen.sort(reverse=True)\n no_g = system.Bus.a[:]\n no_gv = system.Bus.v[:]\n for item in gen:\n no_g.pop(item)\n no_gv.pop(item)\n Bp = system.Line.Bp[no_sw, no_sw]\n Bpp = system.Line.Bpp[no_g, no_g]\n\n # F: symbolic, N: numeric\n Fp = lib.symbolic(Bp)\n Fpp = lib.symbolic(Bpp)\n Np = lib.numeric(Bp, Fp)\n Npp = lib.numeric(Bpp, Fpp)\n exec(system.Call.fdpf)\n\n # main loop\n while system.Settings.error > tol:\n # P-theta\n da = matrix(div(system.DAE.g[no_sw], system.DAE.y[no_swv]))\n if sparselib == 'umfpack':\n lib.solve(Bp, Np, da)\n elif sparselib == 'klu':\n 
lib.solve(Bp, Fp, Np, da)\n system.DAE.y[no_sw] += da\n exec(system.Call.fdpf)\n normP = max(abs(system.DAE.g[no_sw]))\n\n # Q-V\n dV = matrix(div(system.DAE.g[no_gv], system.DAE.y[no_gv]))\n if sparselib == 'umfpack':\n lib.solve(Bpp, Npp, dV)\n elif sparselib == 'klu':\n lib.solve(Bpp, Fpp, Npp, dV)\n system.DAE.y[no_gv] += dV\n exec(system.Call.fdpf)\n normQ = max(abs(system.DAE.g[no_gv]))\n\n err = max([normP, normQ])\n err_vec.append(err)\n system.Settings.error = err\n\n msg = 'Iter{:4d}. Max. Mismatch = {:8.7f}'.format(niter, err_vec[-1])\n system.Log.info(msg)\n niter += 1\n system.SPF.iter = niter\n\n if niter > 4 and err_vec[-1] > 1000 * err_vec[0]:\n system.Log.info('Blown up in {0} iterations.'.format(niter))\n convergence = False\n break\n\n if niter > iter_max:\n system.Log.info('Reached maximum number of iterations.')\n convergence = False\n break\n\n return convergence, niter", "title": "" }, { "docid": "99c9df8985a4f4148d53bd51ded56ff5", "score": "0.5126891", "text": "def cost_func(p):\n # test_model.weights[:] = p\n return test_model.cost_function(\n X, y,\n weights=p,\n lambda_param=lambda_param\n )", "title": "" }, { "docid": "a664e80255b5b1fb28060500e4f75e29", "score": "0.5122287", "text": "def fit(self, data_X, *args, **kwargs):\n\t\tf = self.get_objective_fn(data_X)\n\t\tparam0 = self.initial_params_value()\n\t\toptimal_param = autodiff.optimize.fmin_l_bfgs_b(f, param0, *args, **kwargs)\n\t\tself.params = optimal_param\n\t\treturn self", "title": "" }, { "docid": "44154c72605b22ea6febe33c3be6c6c3", "score": "0.51196563", "text": "def fmin_bfgs(f,fprime, x0, gtol=1e-5, callback=None, maxiter=None):\n \n x0 = np.asarray(x0).flatten()\n if maxiter is None:\n maxiter = len(x0)*200\n \n gfk, old_drag = fprime(x0,1) # initial gradient\n n_grad_calls = 1 # number of calls to fprime()\n \n k = 0 # iteration counter\n N = len(x0) # degreees of freedom\n I = np.eye(N, dtype=int)\n \n Hk = I # initial guess of the Hessian\n xk = x0\n sk = [2*gtol]\n \n gnorm = np.linalg.norm(gfk)\n iterations = 0\n alpha_guess = 0.02 # Initial guess for the step size\n ls_grad_calls = 1\n while (gnorm > gtol) and (k < maxiter) and ls_grad_calls < 20: # 20 is number of max iters\n iterations += 1\n\t# search direction\n pk = -np.dot(Hk, gfk)\n \n alpha_k, gfkp1, ls_grad_calls, old_drag = _line_search(f,fprime, xk, gfk, pk, alpha_guess, old_drag)\n\talpha_guess = alpha_k * 1.5 # Update alpha guess size as gradient decreases\n n_grad_calls += ls_grad_calls\n \n # advance in the direction of the step\n xkp1 = xk + alpha_k * pk\n sk = xkp1 - xk\n xk = xkp1\n\n if gfkp1 is None:\n gfkp1 = fprime(xkp1,1)\n n_grad_calls += 1\n \n yk = gfkp1 - gfk\n gfk = gfkp1\n \n if callback is not None:\n callback(xk)\n \n k += 1\n gnorm = np.linalg.norm(gfk)\n if gnorm < gtol:\n break\n \n try: #this was handled in numeric, let it remaines for more safety\n rhok = 1.0 / (np.dot(yk, sk))\n except ZeroDivisionError:\n rhok = 1000.0\n print \"Divide-by-zero encountered: rhok assumed large\"\n if np.isinf(rhok): #this is patch for numpy\n rhok = 1000.0\n print \"Divide-by-zero encountered: rhok assumed large\"\n\n # main bfgs update here. 
this is copied straight from\n # scipy.optimize._minimize_bfgs\n A1 = I - sk[:, np.newaxis] * yk[np.newaxis, :] * rhok\n A2 = I - yk[:, np.newaxis] * sk[np.newaxis, :] * rhok\n Hk = np.dot(A1, np.dot(Hk, A2)) + rhok * sk[:, np.newaxis] \\\n * sk[np.newaxis, :]\n \n \n if k >= maxiter:\n print \"Warning: %d iterations exceeded\" % maxiter\n print \" Current gnorm: %f\" % gnorm\n print \" grad calls: %d\" % n_grad_calls\n print \" iterations: %d\" % k\n \n \n elif gnorm < gtol:\n print \"Optimization terminated successfully.\"\n print \" Current gnorm: %f\" % gnorm\n print \" grad calls: %d\" % n_grad_calls\n print \" iterations: %d\" % k\n \n xopt = xk\n gopt = gfk\n Hopt = Hk\n n_grad_calls = n_grad_calls\n iterations = iterations \n return xopt, gopt, Hopt, n_grad_calls, iterations", "title": "" }, { "docid": "2d2e31088305b47d7c1e8c124c307769", "score": "0.51060605", "text": "def optimize(w, b, X, Y, num_iterations, learning_rate, print_cost = False):\n \n costs = []\n \n for i in range(num_iterations):\n \n \n # Cost and gradient calculation (≈ 1-4 lines of code)\n ### START CODE HERE ### \n grads, cost = propagate(w, b, X, Y)\n ### END CODE HERE ###\n \n # Retrieve derivatives from grads\n dw = grads[\"dw\"]\n db = grads[\"db\"]\n \n # update rule (≈ 2 lines of code)\n ### START CODE HERE ###\n w = w - learning_rate*dw\n b = b - learning_rate*db\n ### END CODE HERE ###\n \n # Record the costs\n if i % 100 == 0:\n costs.append(cost)\n \n # Print the cost every 100 training examples\n if print_cost and i % 100 == 0:\n print (\"Cost after iteration %i: %f\" %(i, cost))\n \n params = {\"w\": w,\n \"b\": b}\n \n grads = {\"dw\": dw,\n \"db\": db}\n \n return params, grads, costs", "title": "" }, { "docid": "6aed6b6df47bc39f7fa66f45db134e02", "score": "0.50999814", "text": "def optimizer(self):\n pass", "title": "" }, { "docid": "1994f8393e98cdbf8795330555e39fc1", "score": "0.50932986", "text": "def optimize(w, b, X, Y, num_iterations, learning_rate, print_cost=False):\n costs = [] \n for i in range(num_iterations):\n # retrieve grad-parameters and cost\n grads, cost = propagate(w, b, X, Y)\n # retrieve parameters\n dw, db = grads['dw'], grads['db']\n # update parameters\n w = w - (learning_rate * dw)\n b = b - (learning_rate * db)\n # record cost for every 100th iteration\n if i % 100 == 0:\n costs.append(cost)\n\n if print_cost and i % 100 == 0:\n print(\"Cost after iteration %i: %f\" % (i, cost))\n\n grads = {'dw':dw, \n 'db':db }\n params = {'w': w,\n 'b': b}\n return grads, params, costs", "title": "" }, { "docid": "0395f497e8ba0ca87c649830a008e4a6", "score": "0.5088645", "text": "def init_optimiser(self):\n parameters = self.denoiser.parameters()\n self._optimizer = optim.Adam(parameters, betas=[0.9, 0.99])", "title": "" }, { "docid": "43cfddbb8cce51ad8a6a5a20dc6f182b", "score": "0.50840694", "text": "def opt_bst_dynamic(f):\n fsum = precompute_range_freqs(f)\n cost = [[0] * len(f) for _ in range(len(f))]\n for i in range(len(f)):\n cost[i][i] = f[i]\n\n for j in range(1, len(f)):\n for i in reversed(range(0, j)):\n cost_right = cost[i + 1][j] if j > i else 0\n cost[i][j] = cost_right + fsum[i][j]\n for root in range(i, j + 1):\n cost_left = cost[i][root - 1] if root > i else 0\n cost_right = cost[root + 1][j] if root < j else 0\n cost[i][j] = min([fsum[i][j] + cost_left + cost_right, cost[i][j]]) \n\n return cost", "title": "" }, { "docid": "4553a0a0e3c09524cbd5fc8633ef32e9", "score": "0.5082513", "text": "def Freeze(self) -> CoefficientFunction:", "title": "" }, { "docid": 
"4553a0a0e3c09524cbd5fc8633ef32e9", "score": "0.5082513", "text": "def Freeze(self) -> CoefficientFunction:", "title": "" }, { "docid": "4553a0a0e3c09524cbd5fc8633ef32e9", "score": "0.5082513", "text": "def Freeze(self) -> CoefficientFunction:", "title": "" }, { "docid": "fafdea8f720666f13bf7123057b1e491", "score": "0.50818664", "text": "def fb_cruz_nghia1(J, df, prox_g, x0, a0, beta=1.5, numb_iter=100):\n\n begin = time()\n th = 0.7\n delta = 0.49\n values = [J(x0)]\n iterates = [values, x0, a0]\n\n def T(values, x, a):\n df_x = df(x)\n for i in count(0):\n z = prox_g(x - a * df_x, a)\n if a * LA.norm(df(z) - df_x) <= delta * LA.norm(z - x):\n break\n else:\n a *= th\n x = z\n values.append(J(x))\n res = [values, x, beta * a]\n return res\n\n for i in range(numb_iter):\n iterates = T(*iterates)\n end = time()\n print \"---- Forward-backward method 1----\"\n print \"Number of iterations:\", numb_iter\n # print \"Number of gradients, n_grad:\", iterates[-1]\n # print \"Number of prox_g:\", numb_iter\n print \"Time execution:\", end - begin\n\n return iterates", "title": "" }, { "docid": "6151309c3a775055da1f5a9d4ae17de4", "score": "0.50759625", "text": "def run(self, x):\n \"*** YOUR CODE HERE ***\"\n #To get a node with (batch_size x num_outputfeatures)\n node = nn.Linear(x, self.w)\n return nn.AddBias(node, self.b)", "title": "" }, { "docid": "74c3bc3d914f99fe9484c10a6257ecfe", "score": "0.5072377", "text": "def _optimize(self,pv_generation,ev_demand,house_demand,costdata,storage):\n costsell=0.07\n E_max=480\n b_max=100\n C_max= (10*24)\n chargeff=0.9\n diseff=0.9\n chargeff_ev=0.9\n delta_t = 1\n num_variables = len(pv_generation)\n\n b_d = Variable(num_variables)\n b_c = Variable(num_variables)\n G_E = Variable(num_variables)\n C = Variable(num_variables)\n E = Variable(num_variables)\n Z = Variable(num_variables)\n G_I = Variable(num_variables)\n\n objective = Minimize((delta_t)*(costdata.T*G_I) - sum(costsell*G_E)*(delta_t))\n\n constraints = [ G_I == house_demand + b_c + C + G_E - pv_generation - b_d]\n constraints += [E[0] == storage[0]]\n constraints += [ Z[0] == ev_demand[0]]\n\n #state boundary conditions at time = 24\n constraints += [G_E[num_variables-1] >= 0]\n constraints += [G_I[num_variables-1] >= 0]\n\n\n constraints += [b_c[num_variables-1] >= 0]\n constraints += [b_c[num_variables-1] <= b_max]\n constraints += [b_d[num_variables-1] >= 0]\n constraints += [b_d[num_variables-1] <= b_max]\n\n for k in range(0,num_variables-1,1):\n constraints += [E[k+1] == E[k] + (chargeff*b_c[k] - (1/diseff)*b_d[k])*delta_t]\n constraints += [E[k] >= 0]\n constraints += [E[k+1] >= storage[k+1]]\n constraints += [G_E[k] >= 0]\n constraints += [G_I[k] >= 0]\n constraints += [E[k] <= E_max]\n constraints += [b_c[k] >= 0]\n constraints += [b_c[k] <= b_max]\n constraints += [b_d[k] >= 0]\n constraints += [b_d[k] <= b_max]\n\n # EV charging dynamics\n constraints += [Z[k+1] == Z[k] + (chargeff_ev*C[k]*delta_t)]\n # constraints += [Z[k] >= sum(ev_demand[:k])]\n constraints += [C[k] >= 0]\n constraints += [C[k] <= C_max]\n constraints += [Z[k+1] >= .95*sum(ev_demand[:(k+2)])]\n constraints += [Z[k+1] <= 1.05*sum(ev_demand[:(k+2)])]\n constraints += [C[k+1] >= 0]\n constraints += [C[k+1] <= C_max]\n\n prob = Problem(objective, constraints)\n prob.solve()\n\n b_d_answer = []\n b_c_answer = []\n G_E_answer = []\n C_answer = []\n E_answer = []\n Z_answer = []\n G_I_answer = []\n\n for i in range(0,num_variables,1):\n b_d_answer.append(b_d[i].value)\n b_c_answer.append(b_c[i].value)\n 
G_E_answer.append(G_E[i].value)\n C_answer.append(C[i].value)\n E_answer.append(E[i].value)\n Z_answer.append(Z[i].value)\n G_I_answer.append(G_I[i].value)\n\n return (b_d_answer,b_c_answer,G_E_answer,C_answer,E_answer,Z_answer,G_I_answer,prob.value)", "title": "" } ]
03837da3826d0bff0b22a4d8a399f93e
r"""Time clip the input (if time interval is TSeries clip between start and stop).
[ { "docid": "963146cf0cd2edfe32c5da92dfc5a943", "score": "0.65635616", "text": "def time_clip(inp, tint):\n\n if isinstance(inp, xr.Dataset):\n coords_data = [inp[k] for k in filter(lambda x: x != \"time\", inp.dims)]\n coords_data = [time_clip(inp.time, tint), *coords_data]\n out_dict = {dim: coords_data[i] for i, dim in enumerate(inp.coords)}\n\n for k in inp:\n if \"time\" in list(inp[k].coords):\n data = time_clip(inp[k], tint)\n out_dict[k] = (list(inp[k].dims), data)\n else:\n out_dict[k] = (list(inp[k].dims), inp[k])\n\n out = xr.Dataset(out_dict)\n out.attrs = inp.attrs\n return out\n\n if isinstance(tint, xr.DataArray):\n t_start, t_stop = tint.time.data[[0, -1]]\n\n elif isinstance(tint, np.ndarray):\n if isinstance(tint[0], datetime.datetime) \\\n and isinstance(tint[-1], datetime.datetime):\n t_start, t_stop = [tint.time[0], tint.time[-1]]\n\n else:\n raise TypeError('Values must be in Datetime64')\n\n elif isinstance(tint, list):\n t_start, t_stop = iso86012datetime64(np.array(tint))\n\n else:\n raise TypeError(\"invalid tint\")\n\n idx_min = bisect.bisect_left(inp.time.data, np.datetime64(t_start))\n idx_max = bisect.bisect_right(inp.time.data, np.datetime64(t_stop))\n\n coord = [inp.time.data[idx_min:idx_max]]\n\n if len(inp.coords) > 1:\n for k in inp.dims[1:]:\n coord.append(inp.coords[k].data)\n\n out = xr.DataArray(inp.data[idx_min:idx_max, ...], coords=coord,\n dims=inp.dims, attrs=inp.attrs)\n\n out.time.attrs = inp.time.attrs\n\n return out", "title": "" } ]
[ { "docid": "c6e85b5f14ed32323e7239a36a9143d9", "score": "0.6463437", "text": "def crop(self, tmin: Optional[float] = None, tmax: Optional[float] = None) -> None:\n logger.debug(\"Cropping the signal between %s and %s.\", tmin, tmax)\n self._tmin, self._tmax = Sound._check_tmin_tmax(\n tmin, tmax, self._original_times\n )\n logger.debug(\n \"'tmin' corresponds to the idx %i and 'tmax' corresponds \" \"to the idx %i.\",\n self._tmin,\n self._tmax,\n )\n self._duration = (\n self._original_times[self._tmax] - self._original_times[self._tmin]\n )\n # tmax + 1 for slice\n self._times = self._original_times[self._tmin : self._tmax + 1]\n self._set_signal()", "title": "" }, { "docid": "bae7d9d86a7efe0a7f6856c622c107ea", "score": "0.6058259", "text": "def clip(self) -> Clip:\n ...", "title": "" }, { "docid": "ef39e9c035be794cf0438810d8c0d7b9", "score": "0.5980725", "text": "def clip_ct(ct_numpy, min, max):\n clipped = ct_numpy.clip(min, max)\n clipped[clipped != max] = 1\n clipped[clipped == max] = 0\n return clipped", "title": "" }, { "docid": "5f921e7b4d307e1a00f413093ef27e25", "score": "0.5800672", "text": "def addClip(self, clip, time, videoTrackIndex=0, audioTrackIndex=-1):\n ...", "title": "" }, { "docid": "9ce75f5a4f4807299015764c1b7082a7", "score": "0.57851255", "text": "def clip(x):\n a, b = interval\n return max(min(x, b), a)", "title": "" }, { "docid": "8e1fdd0b118b7b6d45ca9cd85fe57bd1", "score": "0.57814085", "text": "def crop_data(self):\n if self.time_range != None:\n tlist = self.time_range.split(':')\n print \"--> Crop data to range: \", tlist\n if len(tlist) != 2:\n print 'Error parsing --time-range argument. Be sure to use <start-time>:<end-time> syntax.'\n sys.exit(1)\n \n self.t_start = 0 if tlist[0] == '-1' else int(self.convert_seconds_to_index(int(tlist[0])))\n self.t_end = len(self.fluor_data) if tlist[1] == '-1' else int(self.convert_seconds_to_index(int(tlist[1])))\n \n if self.t_start < self.t_end:\n self.fluor_data = self.fluor_data[self.t_start:self.t_end]\n self.trigger_data = self.trigger_data[self.t_start:self.t_end]\n self.time_stamps = self.time_stamps[self.t_start:self.t_end]\n else:\n print \"--> Data not cropped. (End - start) time must be positive.\"\n else:\n print \"--> Data not cropped. 
No range has been specified.\"", "title": "" }, { "docid": "fd58274a724a6e255b5b0ec377cc86fc", "score": "0.57777494", "text": "def time_slice(self,t_start,t_stop):\n \n t_start = t_start.rescale(self.sampling_period.units)\n t_stop = t_stop.rescale(self.sampling_period.units)\n i = (t_start - self.t_start)/self.sampling_period\n j = (t_stop - self.t_start)/self.sampling_period\n i = int(np.rint(i.magnitude))\n j = int(np.rint(j.magnitude))\n\n if (i < 0) or (j > len(self)):\n raise ValueError('t_start, t_stop have to be withing the analog signal duration')\n \n # we gonna send the list of indicies so that we get *copy* of the sliced data\n obj = super(BaseAnalogSignal, self).__getitem__(np.arange(i,j,1))\n obj.t_start = self.t_start + i*self.sampling_period\n \n return obj", "title": "" }, { "docid": "8d55530939e7d5b492d06f035e7f648f", "score": "0.5776411", "text": "def time_symmetrize(clip):\n return concatenate_videoclips([clip, clip.fx(time_mirror)])", "title": "" }, { "docid": "97531ffdbc3defbd3ad6932edaa4c6bd", "score": "0.5744889", "text": "def clip(self, a_min, a_max, units=None, inplace=False, i=False):\n return self._apply_data_oper(\n _inplace_enabled_define_and_cleanup(self),\n \"clip\",\n (a_min, a_max),\n inplace=inplace,\n i=i,\n units=units,\n )", "title": "" }, { "docid": "67d143647d7edd7820edbf6defd5e2bd", "score": "0.57428694", "text": "def create_clip(self,\n save_path: str,\n file_name: str,\n start_t: float,\n end_t: float):\n\n clip_length = end_t - start_t\n\n ffmpeg_process = subprocess.run(\" \".join([\n self.env[\"ffmpeg_location\"], # ffmpeg executable\n \"-ss\", str(start_t), # Starting point\n \"-avoid_negative_ts\", \"1\", # Try to avoid artifacts (TODO)\n \"-i\", self.current_file, # Input file\n \"-c\", \"copy\", # Copy the file instead of editing it\n \"-t\", str(clip_length), # The duration of the clip\n f\"{save_path}/{file_name}.mp4\" # Output file\n ]), stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)", "title": "" }, { "docid": "f73b72a018c06be6b5310d976f6dea8b", "score": "0.5695154", "text": "def test_shorten_interval_moved_into_exclusion(self):\n self.t.configure_exclusions([(time(16, 30, 0), time(7, 30, 0)),\n (time(12, 0, 0), time(13, 0, 0))])\n\n self.t('track 20170308T140000 - 20170308T161500')\n\n self.t(\"move @1 20170308T113000\")\n self.t(\"shorten @1 5min\") # Does not work.", "title": "" }, { "docid": "d78328edc32ad0b61a6210f2684d4a08", "score": "0.5606434", "text": "def clip(horName, min, max, clipType, areaOfInterest, propertyName, setValue):\r\n ScriptImpl.runScript7(\"ClipScript\", 7, horName, min, max, clipType, areaOfInterest, propertyName, setValue)", "title": "" }, { "docid": "ece63e1dec2e06082fdbc980e393287c", "score": "0.5572831", "text": "def cut(self, starttime=None, endtime=None):\n\n if starttime != None and endtime == None:\n new_inds = np.where(self.data[0] >= date2num(starttime))[0]\n self.data = self.data[:,new_inds]\n if self.pos != None:\n self.pos.positions = self.pos.positions[:,new_inds]\n elif starttime == None and endtime != None:\n new_inds = np.where(self.data[0] < date2num(endtime))[0]\n self.data = self.data[:,new_inds]\n if self.pos != None:\n self.pos.positions = self.pos.positions[:,new_inds]\n elif starttime != None and endtime != None:\n new_inds = np.where((self.data[0] >= date2num(starttime)) & (self.data[0] < date2num(endtime)))[0]\n self.data = self.data[:,new_inds]\n if self.pos != None:\n self.pos.positions = self.pos.positions[:,new_inds]\n return self", "title": "" }, { "docid": 
"0cf44c32f17a5a779816eb5c6d82a1a1", "score": "0.5548018", "text": "def trimIn(self, time):\n ...", "title": "" }, { "docid": "8884f18cbe0bb71897e87d9abee9a39b", "score": "0.54991126", "text": "def clip_data(data, fs, start, end):\n sf = int(np.floor(fs * start))\n ff = int(np.floor(fs * end))\n return data[sf:ff]", "title": "" }, { "docid": "5b708d69d7be168029f7f66acdf5021c", "score": "0.5418287", "text": "def _clip(x, lower, upper):\n return max(lower, min(x, upper))", "title": "" }, { "docid": "80eb2a5f380cbba39bacf8246add0140", "score": "0.5404695", "text": "def _time_mask(times, tmin=None, tmax=None, sfreq=None, raise_error=True):\n orig_tmin = tmin\n orig_tmax = tmax\n tmin = -np.inf if tmin is None else tmin\n tmax = np.inf if tmax is None else tmax\n if not np.isfinite(tmin):\n tmin = times[0]\n if not np.isfinite(tmax):\n tmax = times[-1]\n if sfreq is not None:\n # Push to a bit past the nearest sample boundary first\n sfreq = float(sfreq)\n tmin = int(round(tmin * sfreq)) / sfreq - 0.5 / sfreq\n tmax = int(round(tmax * sfreq)) / sfreq + 0.5 / sfreq\n if raise_error and tmin > tmax:\n raise ValueError('tmin (%s) must be less than or equal to tmax (%s)'\n % (orig_tmin, orig_tmax))\n mask = (times >= tmin)\n mask &= (times <= tmax)\n if raise_error and not mask.any():\n raise ValueError('No samples remain when using tmin=%s and tmax=%s '\n '(original time bounds are [%s, %s])'\n % (orig_tmin, orig_tmax, times[0], times[-1]))\n return mask", "title": "" }, { "docid": "8556b281b5934842948564a85322fe81", "score": "0.53886753", "text": "def clip_file(file, x_min, x_max, y_min, y_max, outfile=None):\r\n \r\n ds, array = read(file)\r\n\r\n ds, array = clip(ds, array, x_min, x_max, y_min, y_max)\r\n \r\n if outfile == None:\r\n outfile = '%s_clipped.tif' % (os.path.splitext(file)[0])\r\n \r\n export(array, ds, filename=outfile, bands='all')", "title": "" }, { "docid": "2a15bf4e3c3623c9913958a19b8a57b4", "score": "0.53648114", "text": "def truncate_times(self, start: float, end: float):\n ts = self.truncate_start_time(start)\n return ts.truncate_end_time(end)", "title": "" }, { "docid": "61a3810f8c623d56094cbdb3406e2e7f", "score": "0.5319245", "text": "def clip_action(self, action):\n action = np.clip(action, a_min=0, a_max=np.inf)\n return action", "title": "" }, { "docid": "be7916e1dfe36652040dabe97125e889", "score": "0.5316335", "text": "def clip(x: Tensor, lower_limit: float or Tensor, upper_limit: float or Tensor):\n if isinstance(lower_limit, Number) and isinstance(upper_limit, Number):\n\n def clip_(x):\n return x._op1(lambda native: choose_backend(native).clip(native, lower_limit, upper_limit))\n\n return broadcast_op(clip_, [x])\n else:\n return maximum(lower_limit, minimum(x, upper_limit))", "title": "" }, { "docid": "31af5cceebcf7630beaf4fca0418dee3", "score": "0.52941823", "text": "def test_clip_stringout(self):\n start_timecode = Timecode(24, '01:00:00:00')\n vfxlist = list()\n for vfx in self.vfxlist:\n vfxlist.append(mobs.VFXEvent(**vfx))\n stringout = vfxpull.clip_stringout(vfxlist, start_timecode)\n\n next_tc = start_timecode\n for event, vfxevent in zip(stringout, vfxlist):\n self.assertEqual(str(event.rec_start_tc), str(next_tc))\n next_tc += vfxevent.src_duration", "title": "" }, { "docid": "6ec5db49f0429f12106c5f7e19e6c518", "score": "0.52890736", "text": "def test_clip_property(self, data, arr):\n numeric_dtypes = hynp.integer_dtypes() | hynp.floating_dtypes()\n # Generate shapes for the bounds which can be broadcast with each other\n # and with the base shape. 
Below, we might decide to use scalar bounds,\n # but it's clearer to generate these shapes unconditionally in advance.\n in_shapes, result_shape = data.draw(\n hynp.mutually_broadcastable_shapes(\n num_shapes=2, base_shape=arr.shape\n )\n )\n # Scalar `nan` is deprecated due to the differing behaviour it shows.\n s = numeric_dtypes.flatmap(\n lambda x: hynp.from_dtype(x, allow_nan=False))\n amin = data.draw(s | hynp.arrays(dtype=numeric_dtypes,\n shape=in_shapes[0], elements={\"allow_nan\": False}))\n amax = data.draw(s | hynp.arrays(dtype=numeric_dtypes,\n shape=in_shapes[1], elements={\"allow_nan\": False}))\n\n # Then calculate our result and expected result and check that they're\n # equal! See gh-12519 and gh-19457 for discussion deciding on this\n # property and the result_type argument.\n result = np.clip(arr, amin, amax)\n t = np.result_type(arr, amin, amax)\n expected = np.minimum(amax, np.maximum(arr, amin, dtype=t), dtype=t)\n assert result.dtype == t\n assert_array_equal(result, expected)", "title": "" }, { "docid": "c8fecde162f0d6766cf66802ea379cc6", "score": "0.5273491", "text": "def clip(self, lower=None, upper=None):\n\n if lower is not None:\n self.data[self.data < lower] = lower\n\n if upper is not None:\n self.data[self.data > upper] = upper", "title": "" }, { "docid": "b6855381cad0b2805198f4a038dd2076", "score": "0.52648884", "text": "def _clip(val, lo, hi):\n if val < lo:\n val = lo\n if val > hi:\n val = hi\n return val", "title": "" }, { "docid": "14e787b48267a43bbfdeff372451b820", "score": "0.52617556", "text": "def slice_time_window(gaze_data, t=112.5, dt=225):\n slice_ = gaze_data[gaze_data[:,2] >= t - dt/2]\n if len(slice_) == 0:\n print(\"WARNING: empty slice\")\n return slice_\n slice_ = slice_[slice_[:,2] <= t + dt/2]\n if len(slice_) == 0:\n print(\"WARNING: empty slice\")\n return slice_", "title": "" }, { "docid": "4613703f9183c9f8c653fee441480667", "score": "0.52564883", "text": "def play_clip(self, clip, delay=0):\n clip.play()\n while pygame.mixer.get_busy():\n self.skip_img.draw(self.screen, 0, SKIP_X, SKIP_Y, SKIP_COLOR, True)\n mouse_pos = pygame.mouse.get_pos()\n self.clock.tick(delay)\n for event in pygame.event.get():\n if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:\n if (SKIP_X + SKIP_WIDTH >= mouse_pos[0] >= SKIP_X) and (\n SKIP_Y + SKIP_HEIGHT >= mouse_pos[1] >= SKIP_Y):\n clip.stop()\n else:\n pygame.event.clear()", "title": "" }, { "docid": "53f7f3e0e006d7aece6c567fd1b37ff8", "score": "0.52198607", "text": "def _clip(value, minimum, maximum):\n\n if value < minimum:\n return minimum\n elif value > maximum:\n return maximum\n return value", "title": "" }, { "docid": "b44866bd43512a45fa34b83ed63165c8", "score": "0.52139527", "text": "def set_interval(self, start_time, stop_time):\n cp = self.__copy()\n cp.__start_time = start_time\n cp.__stop_time = stop_time\n return cp", "title": "" }, { "docid": "510154af90407dcb2af1c70437e247e3", "score": "0.5212739", "text": "def trimOut(self, time):\n ...", "title": "" }, { "docid": "ad838abd2f81a7ec58970942b5ab3665", "score": "0.5197755", "text": "def slice_time_window(gaze_data, t=112.5, dt=225):\n slice = gaze_data[gaze_data[:,2] > t - dt/2]\n if slice.shape[0] == 0:\n return slice\n slice = slice[slice[:,2] < t + dt/2]\n return slice", "title": "" }, { "docid": "f106409d054ccc414a5b917bac2e7070", "score": "0.51874524", "text": "def time_range_cutter_at_time(local,time_range,time_cut=(0,0,0)):\n\n ( start, end ) = time_range.get(local)\n index = start.replace(\n hour=time_cut[0],\n 
minute=time_cut[1],\n second=time_cut[2]\n )\n cuts = []\n index += datetime.timedelta(days=1)\n while index < end:\n cuts.append(index)\n\n index += datetime.timedelta(days=1)\n if local:\n index = time_range.normalize(index)\n return cuts", "title": "" }, { "docid": "71c45d8ce69dfd86c1b86f2764fcdac5", "score": "0.51848006", "text": "def clip_velocity(generated, min_velocity=30, max_velocity=100):\n max_velocity_encoded = max_velocity*32//128 + RANGES_SUM[1]\n min_velocity_encoded = min_velocity*32//128 + RANGES_SUM[1]\n \n mask = (generated>=RANGES_SUM[1]) & (generated<RANGES_SUM[2])\n generated[mask] = np.clip(generated[mask], min_velocity_encoded, max_velocity_encoded)", "title": "" }, { "docid": "ec5842d20d425beed374ddb28145e3cc", "score": "0.517242", "text": "def slip_time_fn():\n return BruneSlipFn()", "title": "" }, { "docid": "b169ccb874d63d9f6aea5eab938e9ce7", "score": "0.5140411", "text": "def clip(value, min_value, max_value):\n return min(max(value, min_value), max_value)", "title": "" }, { "docid": "8be59fab662d12d64bb1c5f3f36c9cec", "score": "0.5130502", "text": "async def ttsclip(self, ctx, *, clip : str):\n\t\tif \":\" not in clip:\n\t\t\ttry:\n\t\t\t\tclip = await self.get_clip(f\"local:{clip}\", ctx)\n\t\t\texcept ClipNotFound:\n\t\t\t\tawait ctx.send(f\"'{clip}' is not a valid clip. 🤦 Try `{self.cmdpfx(ctx)}playlist`\")\n\t\t\t\treturn\n\t\telse:\n\t\t\tclip = await self.get_clip(clip, ctx)\n\t\ttext = clip.text.lower()\n\t\tif text == \"\":\n\t\t\tawait ctx.send(f\"I can't read this clip for tts 😕. Try a different one.\")\n\t\t\treturn\n\n\t\tawait self.play_clip(f\"tts:{text}\", ctx)", "title": "" }, { "docid": "79f61edbdfd89268aa55cd2f96ccdffc", "score": "0.51134396", "text": "def truncate_start_time(self, time: float):\n if (not self.times) or time is None or time < min(self.times):\n return self\n else:\n start_idx = next(x[0] for x in enumerate(self.times) if x[1] > time)\n return TimeSeries(\n self.name,\n times=self.times[start_idx:],\n values=self.values[start_idx:],\n )", "title": "" }, { "docid": "a18da757b2edb01b9d75894f668b14ea", "score": "0.5099025", "text": "def clip(tensor: ITensor, min, max) -> ITensor:\n if is_backend(PYTORCH_BACKEND):\n from babilim.core.tmath.pytorch import clip as _clip\n return _clip(tensor, min, max)\n elif is_backend(TF_BACKEND):\n from babilim.core.tmath.tf import clip as _clip\n return _clip(tensor, min, max)", "title": "" }, { "docid": "32de1b024a6a0c51f7f9f49c03400ca5", "score": "0.50976825", "text": "def get_clip(self, index, dt) -> Clip:\n return self._get_ref(index, dt).clip", "title": "" }, { "docid": "5ca3e948675fdac3073899b411e9466e", "score": "0.5092431", "text": "def clip(x, _min=None, _max=None):\n limit = lambda f, k, lim: f(k, lim) if lim is not None else k\n return limit(min, limit(max, x, _min), _max)", "title": "" }, { "docid": "02b13f240b7044aade4112baaee90dc1", "score": "0.5090742", "text": "def set_timeSlider(self, time):\r\n cmds.playbackOptions(e=1, min=time[0])\r\n cmds.playbackOptions(e=1, max=time[1])", "title": "" }, { "docid": "f8bae39eeb26c71b7cfdda8c241752a2", "score": "0.5076869", "text": "def extract_clip(source_file, output_filename, start_time, end_time):\n # Defensive argument checking.\n assert isinstance(output_filename, str), \"output_filename must be string\"\n status = False\n # Construct command to trim the videos (ffmpeg required).\n command = [\n \"ffmpeg\",\n \"-ss\",\n str(start_time),\n \"-t\",\n str(end_time - start_time),\n \"-i\",\n \"'%s'\" % source_file,\n \"-c:v\",\n \"copy\",\n 
\"-c:a\",\n \"copy\",\n \"-threads\",\n \"1\",\n \"-loglevel\",\n \"panic\",\n '\"%s\"' % output_filename,\n ]\n command = \" \".join(command)\n try:\n subprocess.check_output(command, shell=True, stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as err:\n return status, err.output\n\n # Check if the video was successfully saved.\n status = os.path.exists(output_filename)\n return status", "title": "" }, { "docid": "aafcbb7ee36a2fd328b3229d2d1e9534", "score": "0.50718915", "text": "def soundcut (self, sound, time_to_cut):\r\n\t\tlength = sound.get_length()\r\n\t\tsnd_array = pygame.sndarray.array(sound)\r\n\t\tframe_rate = len(snd_array) / length\r\n\t\tcut_frames = frame_rate * time_to_cut\r\n\t\tresult = snd_array [cut_frames:]\r\n\t\tsoundresult = pygame.sndarray.make_sound (result)\r\n\t\treturn soundresult", "title": "" }, { "docid": "ad0505a18ef2176ab71a08ac882220aa", "score": "0.50682265", "text": "def pasteOverTime(self, nodes, baseNode=None):\n timeRange = None\n # timeRange = utils.getSelectedTimeRange()\n\n if timeRange is not None:\n pass\n # for f in timeRange.times:\n # pm.currentTime(f)\n # self.paste(sel, relative=relative, baseNode=relObj)\n # pm.setKeyframe(sel, at=['t', 'r', 's'])\n else:\n self.paste(nodes, baseNode=baseNode)", "title": "" }, { "docid": "0137dd7fd436dc4396d1a3c965c7ea76", "score": "0.5058165", "text": "def clip_shift(shift: float, record: Record, params: ConfigType) -> float:\n shift_meters = shift * record.grid_size[1:]\n shift_mag = np.linalg.norm(shift_meters)\n interval = get_interval(\n cast(Union[timedelta, np.timedelta64], record.interval)\n )\n velocity = shift_mag / interval\n\n unit = shift_meters / shift_mag\n if velocity > params[\"MAX_FLOW_MAG\"]:\n clipped = unit * params[\"MAX_FLOW_MAG\"] * interval\n clipped_pix = clipped / record.grid_size[1:]\n return clipped_pix\n return shift", "title": "" }, { "docid": "bbe697667a669df7d01b4457873a5021", "score": "0.5051844", "text": "def cut(surface, rect):# pygame.surface\n surface.set_clip(rect)\n clip = surface.subsurface(surface.get_clip())\n\n return clip", "title": "" }, { "docid": "2a6cd3b73cfcc7836dc39d4810995ee6", "score": "0.50444555", "text": "def time_slice(self, time_min, time_max):\n return self.energies[np.where((self.times >= time_min) & (self.times <= time_max))]", "title": "" }, { "docid": "f2d956a26ad8e1c97e39029669043fa5", "score": "0.50412196", "text": "def clip(z):\n z = K.clip(z, 1e-7, 1)\n return z", "title": "" }, { "docid": "65d99bd7469f074c024f713bc4ce6911", "score": "0.50167257", "text": "def Clip(self, p_float, vtkDataArray, vtkIncrementalPointLocator, vtkCellArray, vtkPointData, vtkPointData_1, vtkCellData, p_int, vtkCellData_1, p_int_1):\n ...", "title": "" }, { "docid": "2d0019fc69741dece196fdcc4965516a", "score": "0.50064844", "text": "def clip_image(image, clip_min, clip_max):\n return np.minimum(np.maximum(clip_min, image), clip_max)", "title": "" }, { "docid": "fd33ed1c806e19f0398f2709b2699bc5", "score": "0.49947357", "text": "def truncate_end_time(self, time: float):\n if (not self.times) or time is None or time > max(self.times):\n return self\n else:\n end_idx = next(x[0] for x in enumerate(self.times) if x[1] > time)\n return TimeSeries(\n self.name,\n times=self.times[:end_idx],\n values=self.values[:end_idx],\n )", "title": "" }, { "docid": "6fc7ca487da8817bfecd7fb3e5b515ab", "score": "0.4991876", "text": "def enhance_clip(image, *args):\n pass", "title": "" }, { "docid": "e58fbcdc082e4a2283b3c960ab2fd3a9", "score": "0.49455088", "text": "def 
clip_by_value_preserve_gradient(t, clip_value_min, clip_value_max,\n name=None):\n with tf.name_scope(name, 'clip_by_value_preserve_gradient',\n [t, clip_value_min, clip_value_max]):\n t = tf.convert_to_tensor(value=t, name='t')\n clip_t = tf.clip_by_value(t, clip_value_min, clip_value_max)\n return t + tf.stop_gradient(clip_t - t)", "title": "" }, { "docid": "9866ee79e6c0f433088761c34d21b11b", "score": "0.4924519", "text": "def local_clip(self, percept, action_set, clip):\r\n\r\n return clip.copy()", "title": "" }, { "docid": "da6e4cc8b40bf8368b7a77c9ba8fcb71", "score": "0.4922949", "text": "def restore_sin(data, t_range=None, chan=None, method=2, sweep_freq= 500, Vpp=90*2, clip_level_minus=-88, verbose=1): \n if t_range is not None:\n rd = data.reduce_time(t_range)\n else:\n rd = data.copy() # so fudge below will work\n\n # a big fudge to allow this to work on one channel!!! beware\n if chan is not None:\n rd.signal = rd.signal[chan]\n rd.channels = rd.channels[chan]\n\n if verbose>0:\n rd.plot_signals()\n # fourier filter to retain the fundamental - the amplitude with be reduced by the clipping\n stopband = sweep_freq * np.array([0.8,1.2])\n passband = sweep_freq * np.array([0.9,1.1])\n fd = rd.filter_fourier_bandpass(stopband=stopband,passband=passband)\n\n # calculate the time-varying amplitude of the filtered sinusoid\n amp = np.abs(analytic_signal(fd.signal))/np.sqrt(2)\n # normalise to one\n amp = amp/np.average(amp)\n if verbose > 0:\n fig, ax1 = plt.subplots(1, 1)\n if verbose > 0:\n ax1.plot(rd.timebase, rd.signal, 'b', label='orig', linewidth=.3)\n if verbose > 1:\n ax1.plot(rd.timebase, fd.signal, 'g', label='filtered', linewidth=.3)\n if method == 1:\n ax1.plot(rd.timebase, fd.signal/amp, 'm', label='corrected', linewidth=.3)\n ax1.plot(rd.timebase, 50*(1.3*amp-2.1) + 1.2*fd.signal/amp, 'r', label='corrected')\n\n # not bad, but try making the amplitude constant first, then take the\n # difference , excluding the clipped part, boxcar averaged over a\n # small, whole number of periods\n\n for i in range(2): # iterate to restore amplitude to a constant\n # first the reconstructed amplitude\n reconst = 1.0 * fd.signal # make a copy of the signal\n amprec = np.abs(analytic_signal(reconst))\n reconst = Vpp/2.0 * reconst/amprec\n if method == 2:\n if verbose > 0:\n ax1.plot(rd.timebase, reconst, 'm', label='reconst before DC adjust')\n\n # should have a very nice constant ampl. 
sinusoid\n # now blank out the clipped, use given value because amplifier clipping\n # is 'soft', so automatic detection of clipping is not simple.\n wc = np.where(rd.signal < clip_level_minus)[0]\n weight = 1 + 0*reconst\n weight[wc] = 0\n period = int(round(data.timebase.sample_freq/sweep_freq))\n from pyfusion.data.signal_processing import smooth\n # iterate to make waves match where there is no clipping\n for i in range(6):\n err = rd.signal - reconst\n err[wc] = 0\n if verbose > 0:\n print('average error {e:.3g}'.format(e=float(np.sum(err)/np.sum(weight))))\n corrn = np.cumsum(err[0:-period]) - np.cumsum(err[period:])\n divisor = np.cumsum(weight[0:-period]) - np.cumsum(weight[period:]) \n wnef = np.where(divisor <= 100)[0]\n divisor[wnef] = 100\n if verbose > 1:\n ax1.plot(rd.timebase, reconst, '--',\n label='reconst, offset {i}'.format(i=i))\n # reconst[period//2:-period//2] = reconst[period//2:-period//2] - corrn/divisor\n reconst[period//2:1-period//2] = reconst[period//2:1-period//2] + smooth(err,period)/smooth(weight,period)\n # plot(smooth(err,period)/smooth(weight,period))\n\n debug_(pyfusion.DEBUG, 1, key='restore_sin')\n\n if verbose>0:\n ax1.plot(rd.timebase, reconst,'r', label='reconst, final offset')\n ax1.legend()\n fig.show()\n return(reconst)", "title": "" }, { "docid": "946bad52bd97c7c82bd11fe6abef72d4", "score": "0.49111", "text": "def clip_boi(boi):\n for sample in range(boi.shape[0]):\n sample_mean = torch.mean(boi[sample, :, :, :])\n sample_std = torch.std(boi[sample, :, :, :])\n min_clip = sample_mean - 2*sample_std\n max_clip = sample_mean + 2*sample_std\n\n boi[sample, :, :, :][boi[sample, :, :, :] < min_clip] = min_clip\n boi[sample, :, :, :][boi[sample, :, :, :] > max_clip] = max_clip\n \n boi[sample, :, :, :] = (boi[sample, :, :, :] - min_clip)/(max_clip - min_clip)\n return boi", "title": "" }, { "docid": "fbab86598d02d5a5fdeedb9535df849a", "score": "0.49020982", "text": "def clip(x, lowest, highest):\n return max(lowest, min(x, highest))", "title": "" }, { "docid": "f0da100d849a6409367ac699ae1dd9bc", "score": "0.48753196", "text": "def test_clamp():\n x = np.linspace(-6, 6, dtype=\"float32\")\n np.testing.assert_allclose(\n F.clip(tensor(x) + 3, 0, 6).numpy(), np.clip(x + 3, 0, 6)\n )\n np.testing.assert_allclose(\n F.clip(tensor(x) - 3, -6, 0).numpy(), np.clip(x - 3, -6, 0)\n )", "title": "" }, { "docid": "110f14382042dd9aa5ec22f3720029c6", "score": "0.48644397", "text": "def get_trackletpair_t_range(t_min_1, t_max_1, t_min_2, t_max_2, window_len):\n assert t_min_1 <= t_max_1 <= t_min_2 <= t_max_2\n if t_max_1 == t_min_2:\n if t_max_1 - t_min_1 > t_max_2 - t_min_2 and t_max_1 > t_min_1:\n t_max_1 -= 1\n elif t_max_1 - t_min_1 <= t_max_2 - t_min_2:\n assert t_max_2 > t_min_2\n t_min_2 += 1\n if t_max_2 - t_min_1 + 1 <= window_len:\n # window covers both of the tracklets\n return t_min_1, t_max_1, t_min_2, t_max_2\n # window can't cover both of the tracklets\n mid_gap_t = int((t_max_1 + t_min_2) / 2) # the mid t point of the gap between two tracklets\n if mid_gap_t - t_min_1 + 1 >= 0.5 * window_len and t_max_2 - mid_gap_t + 1 <= 0.5 * window_len:\n # crop tracklet_1\n return t_max_2-window_len+1, t_max_1, t_min_2, t_max_2\n elif mid_gap_t - t_min_1 + 1 <= 0.5 * window_len and t_max_2 - mid_gap_t + 1 >= 0.5 * window_len:\n # crop tracklet_2\n return t_min_1, t_max_1, t_min_2, t_min_1+window_len-1\n else:\n # crop both tracklet_1 and tracklet_2\n t_start_1 = mid_gap_t - int(0.5 * window_len) + 1\n return t_start_1, t_max_1, t_min_2, t_start_1+window_len-1", 
"title": "" }, { "docid": "c6216c6cc9c35ca80e56172a592c838e", "score": "0.4861909", "text": "def time_range(self):\n if self._time_range is None:\n startT = self._obj.attrs['startazT']\n stopT = self._obj.attrs['stopazT']\n times = (startT + stopT) / 2.\n self._time_range = times\n\n return self._time_range", "title": "" }, { "docid": "48330734147a5ff2a7fce1a9199c0ddf", "score": "0.4849978", "text": "def clip(\n self,\n min: Union[int, float, npt.ArrayLike, None] = None,\n max: Union[int, float, npt.ArrayLike, None] = None,\n out: Union[npt.NDArray[Any], ndarray, None] = None,\n ) -> ndarray:\n args = (\n np.array(min, dtype=self.dtype),\n np.array(max, dtype=self.dtype),\n )\n if args[0].size != 1 or args[1].size != 1:\n runtime.warn(\n \"cuNumeric has not implemented clip with array-like \"\n \"arguments and is falling back to canonical numpy. You \"\n \"may notice significantly decreased performance for this \"\n \"function call.\",\n category=RuntimeWarning,\n )\n if isinstance(out, np.ndarray):\n self.__array__().clip(args[0], args[1], out=out)\n return convert_to_cunumeric_ndarray(out, share=True)\n elif isinstance(out, ndarray):\n self.__array__().clip(args[0], args[1], out=out.__array__())\n return out\n else:\n return convert_to_cunumeric_ndarray(\n self.__array__().clip(args[0], args[1])\n )\n return self._perform_unary_op(\n UnaryOpCode.CLIP, self, out=out, extra_args=args\n )", "title": "" }, { "docid": "180ef0cbbb449d0ecb9b7b74d9c1fa85", "score": "0.4849802", "text": "def _short_sample_time(self, min_time=100):\n return max(min_time,self._get_t_lande())", "title": "" }, { "docid": "f0499f46f682a82a8d9cbdaf98614528", "score": "0.48484287", "text": "def Truncate(self, t):\n self.series = self.series[self.series.index < t]", "title": "" }, { "docid": "2ae4bfb48e3b7f48bfc1e66ebe0f3b79", "score": "0.48415938", "text": "def timeInRange(start, end, x):\n if start <= end:\n return start <= x <= end\n else:\n return start <= x or x <= end", "title": "" }, { "docid": "54fe1662ae6528a742c8366fe04a5bc1", "score": "0.48394006", "text": "def _conv_time(in_t):\n in_t = in_t % 24\n return 2 * np.sin((2 * np.pi * in_t) / 48) - 1", "title": "" }, { "docid": "287b7a8e11e78b3a2b0cddbf8b7bb495", "score": "0.48278007", "text": "def make_non_realtime(self):\n \n self.start_time = self.remove_rt_from_time(self.start_time)\n self.end_time = self.remove_rt_from_time(self.end_time)", "title": "" }, { "docid": "cae8fdfc3e0bfdc7119dfd11ab5192d1", "score": "0.48237222", "text": "def get_clip(\n self, start_sec: float, end_sec: float\n ) -> Dict[str, Optional[torch.Tensor]]:\n video_frames = None\n if self._video is not None:\n video_start_pts = secs_to_pts(\n start_sec, self._video_time_base, self._video_start_pts\n )\n video_end_pts = secs_to_pts(\n end_sec, self._video_time_base, self._video_start_pts\n )\n video_frames = [\n f\n for f, pts in self._video\n if pts >= video_start_pts and pts <= video_end_pts\n ]\n\n audio_samples = None\n if self._decode_audio and self._audio:\n audio_start_pts = secs_to_pts(\n start_sec, self._audio_time_base, self._audio_start_pts\n )\n audio_end_pts = secs_to_pts(\n end_sec, self._audio_time_base, self._audio_start_pts\n )\n audio_samples = [\n f\n for f, pts in self._audio\n if pts >= audio_start_pts and pts <= audio_end_pts\n ]\n audio_samples = torch.cat(audio_samples, axis=0)\n audio_samples = audio_samples.to(torch.float32)\n\n if video_frames is None or len(video_frames) == 0:\n logger.warning(\n f\"No video found within {start_sec} and {end_sec} seconds. 
\"\n f\"Video starts at time 0 and ends at {self.duration}.\"\n )\n\n video_frames = None\n\n if video_frames is not None:\n video_frames = thwc_to_cthw(torch.stack(video_frames)).to(torch.float32)\n\n return {\n \"video\": video_frames,\n \"audio\": audio_samples,\n }", "title": "" }, { "docid": "8d8521474a0fa2188d092d982ad34df8", "score": "0.48235446", "text": "def clip(tensor, to=8.0):\n return torch.clamp(tensor, -to, to)", "title": "" }, { "docid": "227fdfac2590f1f40fe61df5967ee120", "score": "0.48140028", "text": "def snippet(self, start_time=None, end_time=None):\n start_point = self.time_to_position(start_time)\n end_point = self.time_to_position(end_time)\n\n wav_snippet = self.series[start_point:end_point]\n\n return self.__class__(wav_snippet, self.sample_rate)\n # self.series[start_point:end_point]\n #", "title": "" }, { "docid": "93effefc5e1037ae062e11f7ea99c312", "score": "0.48015875", "text": "def _trim_data(self, data, time, tstart = None, tend = None):\n idx_start = bisect(time, tstart) if tstart else None\n idx_end = bisect(time, tend) if tend else None\n\n return data[idx_start:idx_end]", "title": "" }, { "docid": "01289ea4119a63cd32ce6fd2ee7c49dd", "score": "0.4799413", "text": "def testSeconds(self):\n\t\tself.assertTrue(Time.FromString(\"000000\").GetTotalSeconds()==0,\"zero test\")\n\t\tself.assertTrue(Time.FromString(\"201740\").GetTotalSeconds()==73060,\"sample test\")\n\t\tself.assertTrue(Time.FromString(\"240000\").GetTotalSeconds()==86400,\"full day\")\n\t\t# leap second is equivalent to the second before, not the second after!\n\t\tself.assertTrue(Time.FromString(\"235960\").GetTotalSeconds()==86399,\"leap second before midnight\")\n\t\tt=Time()\n\t\tt,overflow=Time().Offset(seconds=0)\n\t\tself.assertTrue(t.GetTime()==(0,0,0) and not overflow,\"set zero\")\n\t\tt,overflow=Time().Offset(seconds=73060)\n\t\tself.assertTrue(t.GetTime()==(20,17,40) and not overflow,\"set sample time\")\n\t\tt,overflow=Time().Offset(seconds=73060.5)\n\t\tself.assertTrue(t.GetTime()==(20,17,40.5) and not overflow,\"set sample time with fraction\")\n\t\tt,overflow=Time().Offset(seconds=86400)\n\t\tself.assertTrue(t.GetTime()==(0,0,0) and overflow==1,\"set midnight end of day\")\n\t\tt,overflow=Time().Offset(seconds=677860)\n\t\tself.assertTrue(t.GetTime()==(20,17,40) and overflow==7,\"set sample time next week\")\n\t\tt,overflow=Time().Offset(seconds=-531740)\n\t\tself.assertTrue(t.GetTime()==(20,17,40) and overflow==-7,\"set sample time last week\")", "title": "" }, { "docid": "543b904a6ed2890339d485ff09f91abc", "score": "0.47984993", "text": "def segment_axis(self,a, length, overlap=0, axis=None, end='cut', endvalue=0):\n\n if axis is None:\n a = np.ravel(a) # may copy\n axis = 0\n\n l = a.shape[axis]\n\n if overlap>=length:\n raise ValueError, \"frames cannot overlap by more than 100%\"\n## if overlap<0 or length<=0:\n## raise ValueError, \"overlap must be nonnegative and length must be positive\"\n\n if l<length or (l-length)%(length-overlap):\n if l>length:\n roundup = length + (1+(l-length)//(length-overlap))*(length-overlap)\n rounddown = length + ((l-length)//(length-overlap))*(length-overlap)\n else:\n roundup = length\n rounddown = 0\n assert rounddown<l<roundup\n assert roundup==rounddown+(length-overlap) or (roundup==length and rounddown==0)\n a = a.swapaxes(-1,axis)\n\n if end=='cut':\n a = a[...,:rounddown]\n elif end in ['pad','wrap']: # copying will be necessary\n s = list(a.shape)\n s[-1]=roundup\n b = np.empty(s,dtype=a.dtype)\n b[...,:l] = a\n if end=='pad':\n 
b[...,l:] = endvalue\n elif end=='wrap':\n b[...,l:] = a[...,:roundup-l]\n a = b\n\n a = a.swapaxes(-1,axis)\n\n\n l = a.shape[axis]\n if l==0:\n raise ValueError, \"Not enough data points to segment array in 'cut' mode; try 'pad' or 'wrap'\"\n assert l>=length\n assert (l-length)%(length-overlap) == 0\n n = 1+(l-length)//(length-overlap)\n s = a.strides[axis]\n newshape = a.shape[:axis]+(n,length)+a.shape[axis+1:]\n newstrides = a.strides[:axis]+((length-overlap)*s,s) + a.strides[axis+1:]\n\n try:\n return np.ndarray.__new__(np.ndarray,strides=newstrides,shape=newshape,buffer=a,dtype=a.dtype).astype(np.complex)\n except TypeError:\n a = a.copy()\n # Shape doesn't change but strides does\n newstrides = a.strides[:axis]+((length-overlap)*s,s) + a.strides[axis+1:]\n return np.ndarray.__new__(np.ndarray,strides=newstrides,shape=newshape,buffer=a,dtype=a.dtype).astype(np.complex)", "title": "" }, { "docid": "e4d1d1cd51164d608936722ec9729af6", "score": "0.4792707", "text": "def set_min_max_time(self):\n m = self.start_time + datetime.timedelta(days=-1)\n self.min_time = datetime.datetime(m.year, m.month, m.day)\n M = self.stop_time + datetime.timedelta(days=1)\n self.max_time = datetime.datetime(M.year, M.month, M.day)", "title": "" }, { "docid": "5524f1de672941d6cff768c1472801d8", "score": "0.47914174", "text": "def time_in_range(start, end, x):\r\n if start < end:\r\n return start <= x < end\r\n else:\r\n return start <= x or x < end;", "title": "" }, { "docid": "5e6c191f89698ce7874f04d1476a2e3d", "score": "0.47903806", "text": "def filter_calibration(self, start, stop):\n return _filter_calibration(self._time_field, self._items, start, stop)", "title": "" }, { "docid": "701f6339fd3e78ebc1b1ed4ff8ae147f", "score": "0.47749156", "text": "def timestep(self, tslice):\n return NotImplementedError(\"You need to implement this function\")", "title": "" }, { "docid": "87be7e4bbadd16750d09050fb1878259", "score": "0.47735083", "text": "def mask_between_time(dts, start, end, include_start=True, include_end=True):\n # This function is adapted from\n # `pandas.Datetime.Index.indexer_between_time` which was originally\n # written by Wes McKinney, Chang She, and Grant Roch.\n time_micros = dts._get_time_micros()\n start_micros = _time_to_micros(start)\n end_micros = _time_to_micros(end)\n\n left_op, right_op, join_op = _opmap[\n bool(include_start),\n bool(include_end),\n start_micros <= end_micros,\n ]\n\n return join_op(\n left_op(start_micros, time_micros),\n right_op(time_micros, end_micros),\n )", "title": "" }, { "docid": "1b57d9ebd03069c7daa05d9d6d2eb025", "score": "0.4756291", "text": "def snodas_raster_clip(tif_file_path: Path, vector_extent: Path) -> None:\n\n # Initialize this module (if it has not already been done) so that configuration data are available.\n init_snodas_util()\n\n logger = logging.getLogger(__name__)\n logger.info('Start clipping raster {}'.format(tif_file_path))\n\n # Check for file extension .tif.\n if str(tif_file_path).upper().endswith('_WGS84.TIF'):\n\n # Change name from 20030930_WGS84.tif to 'Clip_YYYYMMDD.tif'.\n date_name = str(tif_file_path.name).replace('_WGS84', '')\n new_name = tif_file_path.parent / ('Clip_' + date_name)\n\n # Set full pathname of both input and output files to be used in the gdal.Warp tool.\n file_full_input = tif_file_path\n file_full_output = new_name\n\n # Clip .tif file by the input extent shapefile.\n # For more info on gdal.WarpOptions parameters, reference:\n # osgeo.gdal.Warp & osgeo.gdal.WarpOptions in the Table of Contents of URL: 
http://gdal.org/python/.\n #\n # Parameters Explained:\n # (1) destNameOrDestDS --- Output dataset name or object\n # (2) srcDSOrSrcDSTab --- an array of Dataset objects or filenames, or a Dataset object or a filename\n # (3) format --- output format (\"GTiff\", etc...)\n # (4) dstNodata --- output nodata value(s)\n # (5) cutlineDSName --- cutline dataset name\n # (6) cropToCutline --- whether to use cutline extent for output bounds\n # raster_layer = QgsRasterLayer(str(file_full_input), '{}'.format(file))\n gdal.Warp(str(file_full_output), str(file_full_input), format='GTiff',\n dstNodata=NULL_VAL, cutlineDSName=str(vector_extent), cropToCutline=True)\n\n # Delete un-clipped raster files.\n file_full_input.unlink()\n # Writes the projection to the log file.\n ds = gdal.Open(str(file_full_output))\n if not ds:\n logger.warning(' Null reading file: {}'.format(file_full_output))\n prj = ds.GetProjection()\n srs = osr.SpatialReference(wkt=prj)\n datum = srs.GetAttrValue('GEOGCS')\n\n if srs.IsProjected:\n proj_name = srs.GetAttrValue('AUTHORITY')\n proj_num = srs.GetAttrValue('AUTHORITY', 1)\n logger.info(\" Have projection {}:{} and datum {} for: {}\".format(\n proj_name, proj_num, datum, file_full_output.name))\n else:\n logger.info(\" Have projection {} and datum {} for: {}\".format(prj, datum, file_full_output.name))\n logger.info(' Successfully clipped:')\n logger.info(' from: {}.'.format(tif_file_path))\n logger.info(' to: {}.'.format(file_full_output))\n else:\n logger.info(' File does not end with _WGS84.tif so the clip was not processed:')\n logger.info(' {}'.format(tif_file_path))\n\n return", "title": "" }, { "docid": "ed3fcc3eda547db88ae1bd895ab6add3", "score": "0.47469732", "text": "def SNODAS_raster_clip(file, folder, vector_extent):\n\n logger.info('SNODAS_raster_clip: Starting %s' % file)\n\n # Check for file extension .tif\n file_upper = file.upper()\n if file_upper.endswith('WGS84.TIF'):\n\n # Change name from 20030930WGS84.tif.tif to 'ClipYYYYMMDD.tif'\n date_name = file.replace('WGS84', '')\n new_name = 'Clip' + date_name\n\n # Set full pathname of both input and output files to be used in the gdal.Warp tool\n file_full_input = os.path.join(folder, file)\n file_full_output = os.path.join(folder, new_name)\n\n # Clip .tif file by the input extent shapefile. For more info on gdal.WarpOptions parameters, reference\n # osgeo.gdal.Warp & osgeo.gdal.WarpOptions in the Table of Contents of URL: http://gdal.org/python/.\n #\n # Parameters Explained:\n # (1) destNameOrDestDS --- Output dataset name or object\n # (2) srcDSOrSrcDSTab --- an array of Dataset objects or filenames, or a Dataset object or a filename\n # (3) format --- output format (\"GTiff\", etc...)\n # (4) dstNodata --- output nodata value(s)\n # (5) cutlineDSName --- cutline dataset name\n # (6) cropToCutline --- whether to use cutline extent for output bounds\n gdal.Warp(file_full_output, file_full_input, format='GTiff',\n dstNodata=null_value, cutlineDSName=vector_extent, cropToCutline=True)\n\n # Delete unclipped raster files\n os.remove(file_full_input)\n\n # Writes the projection to the log file\n ds = gdal.Open(file_full_output)\n prj = ds.GetProjection()\n srs = osr.SpatialReference(wkt=prj)\n if srs.IsProjected:\n prj = srs.GetAttrValue('projcs')\n datum = srs.GetAttrValue('geogcs')\n logger.info(\"SNODAS_raster_clip: %s has projection %s and datum %s\" % (file_full_output, prj, datum))\n\n logger.info('SNODAS_raster_clip: %s has been clipped.' 
% file)\n\n else:\n logger.info('SNODAS_raster_clip: %s does not end with PRJCT.tif. The clip was not processed.' % file)\n\n logger.info('SNODAS_raster_clip: Finished %s \\n' % file)", "title": "" }, { "docid": "9fe005277ca01f1572defbd6364681b5", "score": "0.47431976", "text": "def test_subset_time(tmpdir):\n result = subset(\n ds=CMIP5_TAS_FILE,\n time=(\"2005-01-01T00:00:00\", \"2020-12-30T00:00:00\"),\n area=(0, -90.0, 360.0, 90.0),\n output_dir=tmpdir,\n output_type=\"nc\",\n file_namer=\"simple\",\n )\n _check_output_nc(result)", "title": "" }, { "docid": "2c682b201bf1ecf26942b83642349d94", "score": "0.4741398", "text": "def vector_clip(vector, lowest, highest):\n return type(vector)(map(clip, vector, lowest, highest))", "title": "" }, { "docid": "2c682b201bf1ecf26942b83642349d94", "score": "0.4741398", "text": "def vector_clip(vector, lowest, highest):\n return type(vector)(map(clip, vector, lowest, highest))", "title": "" }, { "docid": "d3a3251d2967f55c54f4b0592e3462ed", "score": "0.47310272", "text": "def get_shifted_waveforms(self, shifts, clip_value):\n unit_time_window = np.arange(\n self.n_time - 2 * clip_value) + shifts[:, None]\n default_range = np.arange(self.n_time - 2 * clip_value)\n sub_shifts = shifts - np.floor(shifts)\n shifts = np.floor(shifts).astype(np.int)\n\n def sub(i, shift, sub=None):\n if sub is None:\n return self.wave_forms[i, :, default_range + shift]\n return sub(i, shift) * sub + sub(i, shift + 1) * (1 - sub)\n\n if sub_shifts.sum() > 0.:\n # Linear interpolation.\n np.array(\n [sub(i, s, sub_shifts[i]) for i, s in enumerate(\n shifts)]).transpose([0, 2, 1])\n\n return np.array(\n [sub(i, s) for i, s in enumerate(shifts)]).transpose([0, 2, 1])", "title": "" }, { "docid": "6f1ef359df187cb6b291db28a70c8b9c", "score": "0.47266287", "text": "def set_clip(self, rect):\n self.draw_area = rect", "title": "" }, { "docid": "3d93d57ae880330a11d328e1c425363c", "score": "0.47185743", "text": "def crop(self, start, end):\n raise NotImplementedError", "title": "" }, { "docid": "acc7c8d792868413cfff39b58eea3e14", "score": "0.47110027", "text": "def __init__(self, start_time, end_time):\n CronTimeField.__init__(self)\n self._kind = 'range'\n self._start = start_time\n self._end = end_time\n self._text = '%d-%d' % (start_time, end_time)", "title": "" }, { "docid": "35aa9f3bebf0039a4508530c7b6a4343", "score": "0.4706849", "text": "def makespan(self):\n if self.start_time and self.end_time:\n return self.end_time - self.start_time\n return None", "title": "" }, { "docid": "d6d1f9c68b935e422e65d4604a8c48ca", "score": "0.46974742", "text": "def time_range(sel=None):\n\n if sel is None:\n sel = ls()\n\n k = m.keyframe(sel, q=True)\n\n return min(k), max(k)", "title": "" }, { "docid": "c949a7002bfbf561b5128c0541243aca", "score": "0.4688275", "text": "def __init__(self, time: TimeInt):\n self.time = time.trunc(self.unit, num=self.num)", "title": "" }, { "docid": "b56e2f4948c32cb1fcd05cb191626cce", "score": "0.46723476", "text": "def tf_clip(im, ref):\r\n\r\n ws = 5 # window size\r\n N = int(ws**2)\r\n ps = int((ws-1)/2) # padding\r\n\r\n y = tf.pad(ref, [[0, 0], [ps, ps], [ps, ps], [0, 0]], \"REFLECT\")\r\n y = tf.extract_image_patches(y, ksizes=[1, ws, ws, 1], strides=[1, 1, 1, 1], rates=[1, 1, 1, 1], padding='VALID') # (1, H, W, 3*25)\r\n y = tf.split(y, N, axis=-1)\r\n y = tf.stack(y, axis=-1)\r\n y_min = tf.reduce_min(y, axis=-1)\r\n y_max = tf.reduce_max(y, axis=-1)\r\n\r\n im_sharp_clip = tf.clip_by_value(im, y_min, y_max)\r\n\r\n return im_sharp_clip", "title": "" }, { 
"docid": "145db3cbf983640e1715e4eb79f19601", "score": "0.46714646", "text": "def clip_and_scale_image(img, img_type='naip', clip_min=0, clip_max=10000):\n if img_type in ['naip', 'rgb']:\n return img / 255\n elif img_type == 'landsat':\n return np.clip(img, clip_min, clip_max) / (clip_max - clip_min)", "title": "" }, { "docid": "eef4b72c237ccac4944d06d4d3a8a991", "score": "0.46709698", "text": "def cut_video(raw_video_path, slice_path, start, end):\n\n return_code = subprocess.call([\"ffmpeg\", \"-loglevel\", \"quiet\", \"-i\", raw_video_path, \"-strict\", \"-2\",\n \"-ss\", str(start), \"-to\", str(end), slice_path])\n success = return_code == 0\n\n return success", "title": "" }, { "docid": "57bb01b07c621eaf097030ec19019c7d", "score": "0.46701857", "text": "def test_trim_t_results_no_overlap_backwards(self):\n\n # empty object to assign attributes to\n empty_obj = type(\"\", (), {})()\n\n empty_obj.t = np.array([0.0, -1.0, -2.0])\n empty_obj.y = np.array([[0.0, 1.0], [0.5, 0.5], [1.0, 0.0]])\n\n t_span = np.array([0.0, -2.0])\n t_eval = np.array([-1.0])\n trimmed_obj = trim_t_results(empty_obj, t_span, t_eval)\n\n self.assertAllClose(trimmed_obj.t, np.array([-1.0]))\n self.assertAllClose(trimmed_obj.y, np.array([[0.5, 0.5]]))", "title": "" }, { "docid": "c20efb1af3dd30d2f9a24fa4190b52f9", "score": "0.46696693", "text": "def time_in_range(start, end, x):\n if start <= end:\n return start <= x <= end\n else:\n return start <= x or x <= end", "title": "" }, { "docid": "c20efb1af3dd30d2f9a24fa4190b52f9", "score": "0.46696693", "text": "def time_in_range(start, end, x):\n if start <= end:\n return start <= x <= end\n else:\n return start <= x or x <= end", "title": "" }, { "docid": "c20efb1af3dd30d2f9a24fa4190b52f9", "score": "0.46696693", "text": "def time_in_range(start, end, x):\n if start <= end:\n return start <= x <= end\n else:\n return start <= x or x <= end", "title": "" }, { "docid": "c20efb1af3dd30d2f9a24fa4190b52f9", "score": "0.46696693", "text": "def time_in_range(start, end, x):\n if start <= end:\n return start <= x <= end\n else:\n return start <= x or x <= end", "title": "" }, { "docid": "ef43c98cce0fb090107a5e406608e66a", "score": "0.46613812", "text": "def timePause(time_in, t0):\n t1 = datetime.now()\n t_diff = (t1-t0).seconds\n print \"t_diff = t1-t0 =\", t_diff\n\n if t_diff <2:\n \tprint \"Time Scale: \"+ \"=\" + str(time_in) + \" (\" +str(time_in)+ \" sec)\"\n print \">>>time.sleep(\" +str(time_in)+ \")\"; time.sleep(time_in);\n\n\telse:\n print \"Time Scale: \"+ \"=\" * t_diff + \" (\"+str(t_diff)+ \" sec)\"", "title": "" }, { "docid": "92a63f522b4690ecc8a1625097622d14", "score": "0.4660318", "text": "def draw_subtitles(clip):\n # Creates temp directory for storing temporary files\n if not os.path.exists('temp'):\n os.mkdir('temp')\n else:\n cleanup()\n # This program uses Arial font for drawing subtitles, can be used anything else\n font = ImageFont.truetype('fonts/arial.ttf', size=clip.size[1]//30)\n\n # Gets a sequence of durations after finding close to optimal sound division\n durations = get_durations(clip)\n\n # Writes audio files for previously specified durations to be then\n # processed via speech_recognition package\n audio_filenames = extract_audio(clip, durations)\n subtitles = recognize(audio_filenames) # text for corresponding duration\n cleanup()\n\n clips = []\n start = 0\n # Creates distinct clips for each duration and concatenates them\n for duration, text in zip(durations, subtitles):\n end = start + duration if start + duration < clip.duration 
else clip.duration\n\n subclip = clip.subclip(start, end)\n clips.append(add_overlay(subclip, text, font))\n\n start += duration\n\n clip = mp.concatenate_videoclips(clips)\n cleanup('text.png')\n if os.path.exists('temp'):\n os.rmdir('temp')\n return clip", "title": "" }, { "docid": "f5809a3de94c011b0e35b92e5ebf5e91", "score": "0.46602926", "text": "def clip(self, max_c=0, max_r=0):\n new_c = min(len(self.c), max_c)\n new_r = min(len(self.r), max_r)\n self.reshape(new_c, new_r)", "title": "" } ]
cd64915f0781bb045fb400e7aa888318
SQL insert 1. insert help 2. insert query
[ { "docid": "e62ef99df5246c1e629392599cafe13c", "score": "0.64105207", "text": "def insert(self, arg):\n if arg == '' or arg.lower() == 'help':\n return dbhelp(self, 'insert')\n if not db_ready():\n return\n DB.execute(\"insert \"+arg.replace(\";\", \"\"), db_alias(), list_results = 0)", "title": "" } ]
[ { "docid": "c4b65951d0e69b1758b99d9c093b7e15", "score": "0.72062004", "text": "def insert(self):\n try:\n \n self.cursor.execute(self.get_insert_query(), self.get_values())\n except Exception as e:\n print(e)\n \n self.conn.commit()", "title": "" }, { "docid": "c4b65951d0e69b1758b99d9c093b7e15", "score": "0.72062004", "text": "def insert(self):\n try:\n \n self.cursor.execute(self.get_insert_query(), self.get_values())\n except Exception as e:\n print(e)\n \n self.conn.commit()", "title": "" }, { "docid": "c4b65951d0e69b1758b99d9c093b7e15", "score": "0.72062004", "text": "def insert(self):\n try:\n \n self.cursor.execute(self.get_insert_query(), self.get_values())\n except Exception as e:\n print(e)\n \n self.conn.commit()", "title": "" }, { "docid": "c4b65951d0e69b1758b99d9c093b7e15", "score": "0.72062004", "text": "def insert(self):\n try:\n \n self.cursor.execute(self.get_insert_query(), self.get_values())\n except Exception as e:\n print(e)\n \n self.conn.commit()", "title": "" }, { "docid": "c4b65951d0e69b1758b99d9c093b7e15", "score": "0.72062004", "text": "def insert(self):\n try:\n \n self.cursor.execute(self.get_insert_query(), self.get_values())\n except Exception as e:\n print(e)\n \n self.conn.commit()", "title": "" }, { "docid": "f5d7c26a024c91778d73cd707b9f3d55", "score": "0.6822893", "text": "def insert(self, *args):\n data = \"\"\n for arg in args: #convert data into string mysql format\n data += \"\\\"%s\\\",\" %(arg)\n\n cur = mysql.connection.cursor()\n cur.execute(\"INSERT INTO %s%s VALUES(%s)\" %(self.table, self.columns, data[:len(data)-1]))\n mysql.connection.commit()\n cur.close()", "title": "" }, { "docid": "c452decd30709ff0fb5103ca2127fb0e", "score": "0.6770061", "text": "def do_insert(self, args):\n return self.__execute_query(self.__create_beeswax_query(\"insert %s\" % args),\n is_insert=True)", "title": "" }, { "docid": "2ef4e4c66964af51e1286200fcff6d65", "score": "0.6757796", "text": "def __insert_data(self, sql, params = None):\n try:\n self.__db.execute(sql, params)\n except IntegrityError as e:\n pass\n except Exception as e:\n print(str(e))", "title": "" }, { "docid": "7d670287e6f86718d79ef029790790a6", "score": "0.6736357", "text": "def insert(self, table_name, data):\n data = to_ascii(data)\n if data is None:\n return None\n sql = \"INSERT INTO %s\" % table_name\n sql += \"(id, %s) VALUES\" % ', '.join(data.keys())\n sql += \"(NULL, \"\n sql += ', '.join(['?'] * len(data.values()))\n sql = \"%s);\" % sql\n params = data.values()\n self.raw_sql(sql, params)", "title": "" }, { "docid": "06485170215bb2d9d9465a120b3af5fd", "score": "0.67226845", "text": "def insert(self,tableName,data):\n columns = ', '.join(data.keys())\n placeholders = ', '.join('?' 
* len(data))\n sql = 'INSERT INTO {} ({}) VALUES ({})'.format(tableName, columns, placeholders)\n cur = self.execQuery(sql, data.values())\n self.conn.commit()\n return cur", "title": "" }, { "docid": "75d1e65064be2dfa2ad0f25073f1560a", "score": "0.67039263", "text": "def insert(self, records, context):", "title": "" }, { "docid": "1ef088cc3962eada20c67aa1eb110062", "score": "0.66985285", "text": "def insert(self, **kwargs):\n\t\t# Set field names and values for inserting\n\t\tkeys, values = list(kwargs.keys()), list(kwargs.values())\n\n\t\ttry:\n\t\t\t# Build sql query for all passed parameters\n\t\t\tquery = sql.SQL(\"INSERT INTO {} ({}) values ({})\").format(\n\t\t\t\t\t\t\t# table name\n\t\t\t\t\t\t\tsql.Identifier(self.table),\n\t\t\t\t\t\t\t# field names\n\t\t\t\t\t\t\tsql.SQL(', ').join(map(sql.Identifier, keys)),\n\t\t\t\t\t\t\t# placeholders for values\n\t\t\t\t\t\t\tsql.SQL(', ').join(sql.Placeholder() * len(keys)))\n\t\t\t# Execute sql string with values\n\t\t\tself.cursor.execute(query, values)\n\t\t\t\n\t\t\treturn DBResult(3)\n\t\texcept Exception as e:\n\t\t\treturn DBResult(5, str(e))", "title": "" }, { "docid": "6f410576f9d61c45a17c13bc31e7e3bc", "score": "0.654342", "text": "def insert(self, query):\n self.cursor.execute(query)\n self.connection.commit()", "title": "" }, { "docid": "41af8be5304b2ba8b181046ce0e3da9e", "score": "0.65272415", "text": "def _insert_1_to_1_assoc():\n query = \"\"\"\\\nINSERT INTO assocxtrsource\n (runcat\n ,xtrsrc\n ,type\n ,distance_arcsec\n ,r\n ,v_int\n ,eta_int\n )\n SELECT t.runcat\n ,t.xtrsrc\n ,9 AS type\n ,0 AS distance_arcsec\n ,0 AS r\n ,t.v_int_inter / t.avg_f_int\n ,t.eta_int_inter / t.avg_f_int_weight\n FROM (SELECT runcat\n ,xtrsrc\n ,CASE WHEN avg_f_int = 0.0\n THEN 0.000001\n ELSE avg_f_int\n END AS avg_f_int\n ,avg_f_int_weight\n ,CASE WHEN f_datapoints = 1\n THEN 0\n ELSE CASE WHEN ABS(avg_f_int_sq - avg_f_int * avg_f_int) < 8e-14\n THEN 0\n ELSE SQRT(CAST(f_datapoints AS DOUBLE PRECISION)\n * (avg_f_int_sq - avg_f_int * avg_f_int)\n / (CAST(f_datapoints AS DOUBLE PRECISION) - 1.0)\n )\n END\n END AS v_int_inter\n ,CASE WHEN f_datapoints = 1\n THEN 0\n ELSE (CAST(f_datapoints AS DOUBLE PRECISION)\n / (CAST(f_datapoints AS DOUBLE PRECISION) - 1.0))\n * (avg_f_int_weight * avg_weighted_f_int_sq\n - avg_weighted_f_int * avg_weighted_f_int)\n END AS eta_int_inter\n FROM temprunningcatalog\n ) t\n\"\"\"\n cursor = execute(query, commit=True)\n cnt = cursor.rowcount\n if cnt > 0:\n logger.info(\"Inserted %s runcat-monitoring source pairs in assocxtrsource\" % cnt)", "title": "" }, { "docid": "d4372a2bdcb0c70286c3efd8244eec9b", "score": "0.6520704", "text": "def insert(self, data=None):\n data = self.chose_data(data)\n return self.execute(str(self.query.into(self.table).columns(*data.keys()).insert(*data.values())))", "title": "" }, { "docid": "217db6e3587a67f920984f783d307b63", "score": "0.65046877", "text": "def insert_data(self, table_name, args, vals):\n my_query = 'INSERT INTO ' + table_name + ' (' + args + ') VALUES (' + vals + ');'\n return self.query(my_query)", "title": "" }, { "docid": "7038f1c36f25532c1d9cd0673af3ad30", "score": "0.6491292", "text": "def _insert(self):\n self._cursor = self._cnx.cursor()\n max_query_size = 65535 // (len(self.dimension_list) + len(self.metric_list)) #largest number of rows allowed in a single insert statement\n insert_list_index = 0\n insert_query_intro = self._constructinsert_query_intro()\n while insert_list_index < len(self._insert_list):\n insert_query = insert_query_intro\n 
insert_query_size = 0\n while insert_query_size < max_query_size and insert_list_index < len(self._insert_list):\n insert_query += \"(\\\"%s\\\"\" %(self._insert_list[insert_list_index][0])\n for value in self._insert_list[insert_list_index][1:len(self._insert_list[insert_list_index])-1]:\n insert_query += \", \\\"%s\\\"\" % (value)\n for key in self.metric_list:\n if self._insert_list[insert_list_index][-1][key] == \"NULL\":\n insert_query += \", %s\" % (self._insert_list[insert_list_index][-1][key])\n else:\n insert_query += \", \\\"%s\\\"\" % (self._insert_list[insert_list_index][-1][key])\n insert_query += \"), \"\n insert_query_size += 1\n insert_list_index += 1\n insert_query = insert_query[0:len(insert_query)-2] + \";\"\n print(insert_query)\n self._cursor.execute(insert_query)\n self._cnx.commit() \n self._cursor.close()", "title": "" }, { "docid": "1cbd15cbf34357f5923bf399044dfe10", "score": "0.64825976", "text": "def insert(self, tablename, seqname=None, _test=False, **values):\r\n def q(x): return \"(\" + x + \")\"\r\n \r\n if values:\r\n _keys = SQLQuery.join(values.keys(), ', ')\r\n _values = SQLQuery.join([sqlparam(v) for v in values.values()], ', ')\r\n sql_query = \"INSERT INTO %s \" % tablename + q(_keys) + ' VALUES ' + q(_values)\r\n else:\r\n sql_query = SQLQuery(\"INSERT INTO %s DEFAULT VALUES\" % tablename)\r\n\r\n if _test: return sql_query\r\n \r\n db_cursor = self._db_cursor()\r\n if seqname is not False: \r\n sql_query = self._process_insert_query(sql_query, tablename, seqname)\r\n\r\n if isinstance(sql_query, tuple):\r\n # for some databases, a separate query has to be made to find \r\n # the id of the inserted row.\r\n q1, q2 = sql_query\r\n self._db_execute(db_cursor, q1)\r\n self._db_execute(db_cursor, q2)\r\n else:\r\n self._db_execute(db_cursor, sql_query)\r\n\r\n try: \r\n out = db_cursor.fetchone()[0]\r\n except Exception: \r\n out = None\r\n \r\n if not self.ctx.transactions: \r\n self.ctx.commit()\r\n return out", "title": "" }, { "docid": "c1a8ea56a4b0e3a145c7e11a65e19722", "score": "0.64529437", "text": "def insert(cnx, table_name, columns, values, commit=True):\n q = \"insert into \" + table_name + \" \"\n column_count = len(columns)\n column_list = \",\".join(columns)\n column_list = \"(\" + column_list + \")\"\n v = [\"%s\"] * column_count\n v = \",\".join(v)\n v = \" values (\" + v + \")\"\n q += \" \" + column_list + \" \" + v\n rr = run_q(cnx, q, values, False, commit=commit)\n return rr", "title": "" }, { "docid": "73488e2e49ecaa900b9dcc5260b839b0", "score": "0.6441834", "text": "def insert(self, query, params_tuple=None):\n success = 1\n try:\n cursor = self.connection.cursor()\n if isinstance(query, str) and params_tuple is None:\n print(\"Warning -- SQL injection -- candidate (insert)\")\n cursor.execute(query)\n elif isinstance(query, str) and isinstance(params_tuple, (tuple, dict)):\n cursor.execute(query, params_tuple)\n elif isinstance(query, tuple) and isinstance(params_tuple, (tuple, dict)):\n cursor.execute(query, params_tuple)\n else:\n raise ValueError(\"SQL (query) type combination not supported\")\n\n self.connection.commit()\n success = 0\n except ValueError as v:\n print(\"Failed Insert {}\".format(v))\n except mysqldb.Error as err:\n print(\"Failed Insert: {}\".format(err))\n self.connection.rollback()\n\n return success", "title": "" }, { "docid": "99b7f09b0c965862110a9221474cef8d", "score": "0.64381176", "text": "def multiple_insert(self, tablename, values, seqname=None, _test=False): \r\n if not values:\r\n return []\r\n \r\n 
if not self.supports_multiple_insert:\r\n out = [self.insert(tablename, seqname=seqname, _test=_test, **v) for v in values]\r\n if seqname is False:\r\n return None\r\n else:\r\n return out\r\n \r\n keys = values[0].keys()\r\n #@@ make sure all keys are valid\r\n\r\n # make sure all rows have same keys.\r\n for v in values:\r\n if v.keys() != keys:\r\n raise ValueError, 'Bad data'\r\n\r\n sql_query = SQLQuery('INSERT INTO %s (%s) VALUES ' % (tablename, ', '.join(keys))) \r\n\r\n data = []\r\n for row in values:\r\n d = SQLQuery.join([SQLParam(row[k]) for k in keys], ', ')\r\n data.append('(' + d + ')')\r\n sql_query += SQLQuery.join(data, ', ')\r\n\r\n if _test: return sql_query\r\n\r\n db_cursor = self._db_cursor()\r\n if seqname is not False: \r\n sql_query = self._process_insert_query(sql_query, tablename, seqname)\r\n\r\n if isinstance(sql_query, tuple):\r\n # for some databases, a separate query has to be made to find \r\n # the id of the inserted row.\r\n q1, q2 = sql_query\r\n self._db_execute(db_cursor, q1)\r\n self._db_execute(db_cursor, q2)\r\n else:\r\n self._db_execute(db_cursor, sql_query)\r\n\r\n try: \r\n out = db_cursor.fetchone()[0]\r\n out = range(out-len(values)+1, out+1) \r\n except Exception: \r\n out = None\r\n\r\n if not self.ctx.transactions: \r\n self.ctx.commit()\r\n return out", "title": "" }, { "docid": "8d6f510f87b7e07f3169c073f23f8776", "score": "0.64269686", "text": "def insert(self, table, data):\n\n # Init\n attributes = list(data[0]) # listing all attributes from data\n # Asumtions\n assert table in self.tables, \"Table is not regisered.\"\n\n # data\n insert_header = f\"INSERT INTO {table}\\n\"\n for dp in data:\n q = insert_header\n q += \"VALUES (\"\n for attr in attributes[:-1]:\n q += f\":{attr}, \"\n q += f\":{attributes[-1]})\"\n with self.conn:\n self.cur.execute(q, dp)", "title": "" }, { "docid": "a601a070844cdf9376934f049a24f60a", "score": "0.64241755", "text": "def insert(self, title, content):\n sql_query=\"INSERT INTO {} (title, content) VALUES (?,?)\".format(self.table)\n #.format(self.table,title, content)\n insert_result=self.cursor.execute(sql_query,(title, content))\n self.conn.commit()", "title": "" }, { "docid": "9b687c6d1e6ae748ffdac946cbfccd18", "score": "0.641938", "text": "def ejecuta_inserts(self, query, conn , valores):\r\n \r\n cursor = conn.cursor()\r\n cursor.execute(query, valores)\r\n conn.commit()\r\n conn.close()", "title": "" }, { "docid": "9876c6e15e9615afe03f7b01527d1882", "score": "0.64099705", "text": "def insert(self, n, t, i, p):\n self.open()\n # add the real object to the database\n o = None\n if t == self.TREASURES:\n o = Treasure()\n if t == self.LANDMARKS:\n o = Landmark()\n if t == self.ROBOTS:\n o = robot()\n # insert the query into the db\n self.cursor.execute(\"INSERT INTO data VALUES (?, ?, ?, ?, ?)\", (n, t, i, p, str(o)))\n self.connection.commit()\n self.close()", "title": "" }, { "docid": "42e6d55ed9b8167441d0c0db9c55427c", "score": "0.63734204", "text": "def test_insert(self):\n\n ids = self._helper.insert('test_table', (\n {'col_b': randint(0, 99)},\n {'col_b': randint(0, 99)},\n {'col_b': randint(0, 99)}\n ))\n self.assertEqual(len(ids), 3)\n self.assertEqual(ids[0] + 1, ids[1])\n self.assertEqual(ids[0] + 2, ids[2])\n last = ids[2]\n ids = self._helper.insert('test_table', {'col_b': randint(0, 99)})\n self.assertEqual(ids[0], last + 1)\n self.assertRaises(\n OperationalError,\n self._helper.insert,\n 'missing_table',\n {'col_b': 0}\n )\n ids = self._helper.insert('test_table', {})\n 
self.assertEqual(len(ids), 0)", "title": "" }, { "docid": "4bbcca9a5784712585487a411ca8e52f", "score": "0.6370255", "text": "def insert(self, table, **kwargs):\n sql = 'INSERT INTO %s (%s)' \\\n % (table, ', '.join([k for k in kwargs]))\n sql += ' VALUES (%s)' % ', '.join(['?' for k in kwargs])\n data = tuple([kwargs[k] for k in kwargs])\n self.execute(sql, data)", "title": "" }, { "docid": "1b57af82d9136c97db809b6b5d820a6c", "score": "0.636654", "text": "def insert_into_db(self):\n sql = '''INSERT INTO PCD (\n parentId,\n cls_nome,\n cls_codigo,\n cls_sub,\n reg_abertura,\n reg_desativacao,\n reg_reativacao,\n reg_mudanca_nome,\n reg_deslocamento,\n reg_extincao,\n cls_indicador,\n\n fase_corrente,\n evento_fase_corrente,\n fase_intermediaria,\n evento_fase_inter,\n dest_final,\n reg_alteracao,\n observacoes) \n VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)'''\n exclude = ('legacyId','parentId','codigo_ref')\n items = [ item for x,item in self.item_data.items() if x not in exclude ]\n values = (self.parent_id, *items)\n self.app.cursor.execute(sql, values)\n self.app.connection.commit()", "title": "" }, { "docid": "2efefc1018841102ad1592167464919b", "score": "0.6361568", "text": "def sqlInsert(params,extraParams=None,dbType='postgres'):\n\n i = sqlhelp.insert('b_pos_and_shipdata',dbType=dbType)\n\n if dbType=='postgres':\n finished = []\n for key in params:\n if key in finished:\n continue\n\n if key not in toPgFields and key not in fromPgFields:\n if type(params[key])==Decimal: i.add(key,float(params[key]))\n else: i.add(key,params[key])\n else:\n if key in fromPgFields:\n val = params[key]\n # Had better be a WKT type like POINT(-88.1 30.321)\n i.addPostGIS(key,val)\n finished.append(key)\n else:\n # Need to construct the type.\n pgName = toPgFields[key]\n #valStr='GeomFromText(\\''+pgTypes[pgName]+'('\n valStr=pgTypes[pgName]+'('\n vals = []\n for nonPgKey in fromPgFields[pgName]:\n vals.append(str(params[nonPgKey]))\n finished.append(nonPgKey)\n valStr+=' '.join(vals)+')'\n i.addPostGIS(pgName,valStr)\n else:\n for key in params:\n if type(params[key])==Decimal: i.add(key,float(params[key]))\n else: i.add(key,params[key])\n\n if None != extraParams:\n for key in extraParams:\n i.add(key,extraParams[key])\n\n return i", "title": "" }, { "docid": "003971b00f46cd8540a91e2edc3d4dd8", "score": "0.6360507", "text": "def insert_into_db(query, args=()):\r\n db = get_db()\r\n cur = db.execute(query, args)\r\n db.commit()\r\n cur.close()", "title": "" }, { "docid": "ee4fb37fe67f9cd71c546266f5e8e32c", "score": "0.63529164", "text": "def insert_diary_record(username,country,place,date_from,date_to,text):\n conn = get_conn()\n c=conn.cursor()\n c.execute(\n \"INSERT INTO diary_records (username,country,place,date_from,date_to,text) VALUES (?,?,?,?,?,?)\",\n (username,country,place,date_from,date_to,text))\n conn.commit()", "title": "" }, { "docid": "2c3ee943d57fa34c9d674d536c343683", "score": "0.63453776", "text": "def insert(self,tablename,objects):\n\n if self.connect_check == True:\n\n if isinstance(objects,dict) or (isinstance(objects,list) and len(objects)==1): #either it is a single dicitionary or list with only one dictionary object\n\n #insert one logic\n\n if isinstance(objects,dict):\n pass\n else:\n objects = objects[0] #getting the dictionary from the list\n\n #processing query\n\n query = insertone_query(tablename=tablename,objects=objects)\n\n\n try:\n connection = connect_(username=self.__username, pwd=self.__pwd, database=self.__database, port=self.__port,\n 
host=self.__host)\n cursor = connection.cursor()\n\n except:\n\n raise ValueError(str(sys.exc_info()[1]))\n\n try:\n\n cursor.execute(query)\n connection.commit()\n connection.close()\n\n return True\n\n except Exception as e:\n\n # closing the connection and cursor\n\n connection.close()\n\n raise ValueError(str(sys.exc_info()[1]))\n\n else:\n #bulk insert logic\n\n #processing query\n query,data = insertmany_query(tablename=tablename, objects=objects)\n\n\n try:\n connection = connect_(username=self.__username, pwd=self.__pwd, database=self.__database, port=self.__port,\n host=self.__host)\n cursor = connection.cursor()\n\n except:\n\n raise ValueError(str(sys.exc_info()[1]))\n\n try:\n cursor.executemany(query, data)\n connection.commit()\n connection.close()\n\n return True\n\n except Exception as e:\n\n # closing the connection and cursor\n\n connection.close()\n\n raise ValueError(str(sys.exc_info()[1]))\n\n else:\n\n raise ValueError(\"Database credentials not initialised : call set_connect function\")", "title": "" }, { "docid": "398d50286cb0313f389b3105f7d979c2", "score": "0.6333495", "text": "def insert_item(db, type_name, fields):\n\t#lookup the table\n\ttable = lookup_table(type_name)\n\n\t#make cursor\n\tcursor = db.cursor()\n\n\tsql = \"insert into %s (item_code, name, year, value, unit) values (?, ?, ?, ?, ?);\" % table\n\n\t#prepare the statement\n\tcursor.execute(sql, fields)", "title": "" }, { "docid": "2b4000596f0153182aa567edc35a8506", "score": "0.63066816", "text": "def db_insert(self, data):\n print(\"INSERT\", data)", "title": "" }, { "docid": "f8b5771315554ca1288ad0593c557cde", "score": "0.63009495", "text": "def _insert_1_to_1_assoc():\n tkp.db.execute(ONE_TO_ONE_ASSOC_QUERY, {'type': 3}, commit=True)", "title": "" }, { "docid": "b3bb685403382dbd0777127b21720b20", "score": "0.6299678", "text": "def _insert_statement(self):\n columns = self.data.columns\n insert_statement = \"INSERT INTO [{}].[{}] ({}) VALUES ({});\"\n\n try:\n insert_statement = insert_statement.format(\n self.schema_name\n , self.table_name\n , \",\".join(column for column in columns)\n , \",\".join(\"?\" for column in columns)\n )\n except Exception as e:\n #logger.error(\"Unable to parse SQL INSERT statements.\", exc_info=True)\n raise (e)\n\n return insert_statement", "title": "" }, { "docid": "c697a8207998b93c56b827e300442765", "score": "0.6292934", "text": "def insert(self, table_name, **kwargs):\n args = ','.join('?' 
for _ in kwargs)\n parameter = ','.join([k for k in kwargs])\n query = queries['INSERT'].format(table_name, parameter, args)\n values = [kwargs[k] for k in kwargs]\n return self.execute(query, values)", "title": "" }, { "docid": "6a842275782d55e563faf63f7cf72491", "score": "0.6287664", "text": "def insert(self, data):\n \n fields, values = self.str_helper(data)\n\n query = (\"INSERT INTO trips \"\n \"(\" + fields + \") \"\n \"VALUES (\" + values + \")\")\n try:\n self.query(query)\n self.connection.commit()\n except Exception as e:\n logging.error(traceback.format_exc())\n self.query(query)", "title": "" }, { "docid": "25b607cea82561b69bd5a4a8ccfc8978", "score": "0.6281777", "text": "def _insert_item(self, cursor, item):\n insert_sql, params = item.get_insert_sql()\n try:\n cursor.execute(insert_sql, params)\n print('插入成功')\n except psycopg2.errors.UniqueViolation:\n print('数据重复插入,跳过')\n except Exception as e:\n self.logger.error('插入失败', e)", "title": "" }, { "docid": "90a73be71d9d1a7993980ca61c7b9a1c", "score": "0.62773794", "text": "def do_insert(self, cursor, item):\n # 对数据库进行插入操作,并不需要commit,twisted会自动commit\n insert_sql = \"select * from Students where Id_P={};\".format(item)\n cursor.execute(insert_sql)#, (item['title'], item['text'], item['kind'],item[\"source\"]))\n res=cursor.fetchone()\n global query_res\n query_res =res", "title": "" }, { "docid": "a1bc48eddb327ed9bded86c6d4467599", "score": "0.62700915", "text": "def insert_into(table, data):\n db = get_db()\n if table in {\"textfiles\"}:\n _insert_into_textfiles(db, data)\n elif table in {\"token_freqs\"}:\n _insert_into_token_freqs(db, data)\n elif table in {\"stopwords\"}:\n _insert_into_stopwords(db, data)\n elif table in {\"model\"}:\n _insert_into_model(db, data)\n elif table in {\"parameters\"}:\n _insert_into_parameters(db, data)\n db.commit()\n close_db()", "title": "" }, { "docid": "7576526cc889bf774a9369eaf3e0f682", "score": "0.62676096", "text": "def insert(table, columns, values):\n statement = 'insert into ' + table + ' (' + columns + ') values ({0})'.format(values)\n print statement\n\n cursor.execute(statement)\n connection.commit()", "title": "" }, { "docid": "a61f9da034f153bfb9af63dbcf19a5bd", "score": "0.62522125", "text": "def insert(self, tabla, datos: tuple): # [param : type] is used to show a warning if tabla isn't tuple\n # use join to convert tuple to string who contains \", %s\" for each element of the tuple\n if funciones.comprueba_tupla(datos):\n args = \", \".join((\"%s \" * len(datos)).split())\n self.cursor.execute(\"insert into \" + tabla + \" values(\" + args + \")\", datos)\n self.conexion.commit()\n else:\n raise ValueError(\"datos must be a tuple of two params\")", "title": "" }, { "docid": "8481e2378a3331feb3bd6fef474254fc", "score": "0.6246843", "text": "def insert_query(insert_values, table):\n field_names = []\n key_names = []\n for key in insert_values:\n field_names.append(key)\n key_names.append(':' + key)\n\n key_names = ' , '.join(key_names)\n field_names = ' , '.join(field_names)\n query = 'INSERT INTO {} ({}) VALUES ({})'.format(\n table, field_names, key_names)\n return query", "title": "" }, { "docid": "4ec18d26b770ce1f6e762ae4f8c20510", "score": "0.6243886", "text": "def insert_statement(self, values):\n columns = self.table.get_insert_columns()\n types = self.table.get_column_datatypes()\n columncount = len(self.table.get_insert_columns(False))\n insert_stmt = \"INSERT INTO \" + self.table_name()\n insert_stmt += \" (\" + columns + \")\"\n insert_stmt += \" VALUES (\"\n for i in 
range(0, columncount):\n insert_stmt += \"%s, \"\n insert_stmt = insert_stmt.rstrip(\", \") + \");\"\n n = 0\n while len(values) < insert_stmt.count(\"%s\"):\n values.append(self.format_insert_value(None,\n types[n]))\n n += 1\n insert_stmt %= tuple([str(value) for value in values])\n if self.debug:\n print(insert_stmt)\n return insert_stmt", "title": "" }, { "docid": "2053b25e84f3181ebc46039950cfb882", "score": "0.62325424", "text": "def insertion_info(nom, sexe, haut, bas, taille_haut, taille_bas):\r\n\r\n conn = psycopg2.connect(database=DATABASE,\r\n user=USER,\r\n host=HOST,\r\n password=PASSWORD)\r\n cur = conn.cursor()\r\n cur.execute(\"\"\"insert into bobo1\r\n (image, sexe, haut, bas, taille_haut, taille_bas)\r\n values(%s, %s, %s, %s, %s, %s);\"\"\",\r\n (nom, sexe, haut, bas, taille_haut,\r\n taille_bas))\r\n\r\n conn.commit()", "title": "" }, { "docid": "76b544b16f8667f3216e7cf1356a7269", "score": "0.622796", "text": "def insertDataInto(self, table, **kwargs):\n fields = []\n values = []\n unknownValues = []\n self.query = \"INSERT INTO \" + self.schema + \".\" + table\n\n for key in kwargs:\n # Table's fields\n fields.append(key)\n # Table's values\n values.append(kwargs[key])\n # Placeholders\n unknownValues.append(\"%s\")\n\n # Reversing to keep fields and values in the right order\n fields.reverse()\n values.reverse()\n\n # Converting the lists into string\n knownFields = \", \".join(fields)\n placehold = ', '.join(unknownValues)\n\n # Converting known values into a tuple\n knownValues = tuple(values)\n self.query = \"INSERT INTO \" + self.schema + \".\" + table \\\n + \"(\" + knownFields + \") \" \\\n + \"VALUES(\" + placehold + \")\"\n # DEBUG: print(self.query)\n # DEBUG: print(knownValues)\n\n self.cur.execute(self.query, knownValues)\n self.con.commit()", "title": "" }, { "docid": "84efa364e49c2b06d7576d0416d1e7af", "score": "0.6216422", "text": "def perform_insert(query, params):\n con = None\n try:\n con = psycopg2.connect(database=DATABASE, user=USER)\n cursor = con.cursor()\n print(\"Query:\", query)\n print(\"Parameters:\", params)\n cursor.execute(query, params)\n return params\n except psycopg2.DatabaseError as e:\n print('Database error: %s' % e)\n finally:\n if con:\n con.commit()\n con.close()", "title": "" }, { "docid": "ce15dcd0e5acc298d8a05dc2d51a8b11", "score": "0.62157804", "text": "def insertdata(self,data):\n con = pymysql.connect(host=self.data[0], user=self.data[1], password=self.data[2],\n port=self.data[3], db=self.data[4], charset=self.data[5])\n try:\n with con.cursor() as curs:\n sql = \"insert into \"+self.table+self.ca+\" values \"+self.val\n curs.execute(sql, data)\n con.commit()\n except:\n raise MyError(deferror())\n finally:\n con.close()", "title": "" }, { "docid": "4e67f72cf609e70ecd156c4b475e48fa", "score": "0.620827", "text": "def insert(conn, table_name, columns, values):\n insert = \"INSERT INTO {0} {1} VALUES {2};\"\n insert = insert.format(table_name, columns, values)\n insert = insert.replace(\"'\", \"\")\n\n try:\n cursor = conn.cursor()\n cursor.execute(insert)\n cursor.close()\n conn.commit()\n\n except (Exception, psycopg2.DatabaseError) as error:\n raise error", "title": "" }, { "docid": "4122a277b28a90d8623201a5686ede23", "score": "0.6208084", "text": "def insert(*args, **kwargs):\n table=args[0]\n values = dict()\n elmts_list = list(zip(args[1].split(\",\"), args[2:]))\n for elmts in elmts_list:\n values.update({elmts[0] : elmts[-1]})\n if not kwargs:\n query = sql.SQL(\"INSERT INTO {} ({}) VALUES({})\")\\\n 
.format(sql.Identifier(table),\\\n sql.SQL(\",\").join(map(sql.Identifier, [a.strip() for a in args[1].split(\",\")])),\\\n sql.SQL(\",\").join(map(lambda x : sql.Placeholder(name=x), [a.strip() for a in args[1].split(\",\")])))\n if kwargs:\n l_kwargs_keys = list(kwargs.keys())\n query = sql.SQL(\"INSERT INTO {} ({}) SELECT {} WHERE NOT EXISTS (SELECT * FROM {} WHERE {} = {})\")\\\n .format(sql.Identifier(table),\\\n sql.SQL(\",\").join(map(sql.Identifier, [a.strip() for a in args[1].split(\",\")])),\\\n sql.SQL(\",\").join(map(lambda x : sql.Placeholder(name=x), [a.strip() for a in args[1].split(\",\")])),\\\n sql.Identifier(table),\\\n sql.Identifier(str(l_kwargs_keys[0])),\\\n sql.Placeholder(name=str(l_kwargs_keys[0])))\n values.update({str(l_kwargs_keys[0]): kwargs[l_kwargs_keys[0]]})\n conn = db.__conn__()\n db.__cursor__(conn).execute(query, values)\n db.__close__(conn)", "title": "" }, { "docid": "d81f324c665dbaee36b1d8704e229716", "score": "0.61975825", "text": "def insert(self, table=None, data=None, out=None):\n if data is None:\n data = {}\n if table is not None:\n sql = 'INSERT INTO ' + table + self._keys_to_insert_sql(data)\n self.connect()\n try:\n self.cursor.execute(sql, self.temp_insert_values)\n except sqlite3.OperationalError as error:\n self.conn.rollback()\n del self.temp_insert_values\n if out == 'output':\n write(\"Error running SQL: %s\" % (sql,))\n return 'SQL Error: %s' % error\n else:\n if out == 'output':\n write(\"Successfully ran: %s\" % (sql,))\n write(\"With data : %s\" % (self.temp_insert_values,))\n del self.temp_insert_values\n # TODO Fix the last insert id\n # self.last_insert_id = self.cursor.lastrowid()\n self.conn.commit()\n self.cursor.close()\n return True\n else:\n raise NameError('Table not specified!')", "title": "" }, { "docid": "f9403e7c086132b2d104bcd5d25b42b9", "score": "0.6183547", "text": "def insert(query):\r\n conn = None\r\n try:\r\n # read connection parameters\r\n params = config()\r\n # connect to the PostgreSQL server\r\n conn = psycopg2.connect(**params)\r\n # create a cursor\r\n cur = conn.cursor() \r\n\t # execute a statement\r\n cur.execute(query)\r\n conn.commit()\r\n cur.close()\r\n return\r\n except (Exception, psycopg2.DatabaseError) as error:\r\n print(error)\r\n finally:\r\n if conn is not None:\r\n conn.close()", "title": "" }, { "docid": "067389f886fb47257f7c776aa79f90d2", "score": "0.61808866", "text": "def _insert(self, tree):\n tablename = tree.table\n keys = tree.attrs\n count = 0\n with self.connection.batch_write(tablename) as batch:\n for values in tree.data:\n if len(keys) != len(values):\n raise SyntaxError(\"Values '%s' do not match attributes \"\n \"'%s'\" % (values, keys))\n data = dict(zip(keys, map(self.resolve, values)))\n batch.put(data)\n count += 1\n return \"Inserted %d items\" % count", "title": "" }, { "docid": "67f56f8fe347b181e60f4062d3e0ac60", "score": "0.61723685", "text": "def insert_record(con, table, data):\n cur = con.cursor()\n cur.execute('INSERT INTO {0} VALUES {1}'.format(table, data))", "title": "" }, { "docid": "19255cf04b8aa844f1e0d03a122e1464", "score": "0.6167267", "text": "def insert_substituts_db(id_sub, prod_sub):\n data_sub = id_sub, prod_sub\n curs, con_db = db_connection() #tuples\n\n curs.execute(\n \"INSERT INTO Substituts (id_substitut, id_produit) VALUES (%s, %s)\",data_sub\n )\n\n con_db.commit()", "title": "" }, { "docid": "702d3c81dbc298f37cd30b712ec1180d", "score": "0.6167206", "text": "def _insert(self, objs, fields, return_id=False, raw=False, using=None):\r\n 
self._for_write = True\r\n if using is None:\r\n using = self.db\r\n query = sql.InsertQuery(self.model)\r\n query.insert_values(fields, objs, raw=raw)\r\n return query.get_compiler(using=using).execute_sql(return_id)", "title": "" }, { "docid": "04ea319d870868b074a8bff074e47d34", "score": "0.61363053", "text": "def _insert_record(self, record, commit=True):\n\n if self.columns != set(map(str.lower, list(record.keys()))):\n self.expand(record)\n c = self.connection.cursor()\n cols = []\n cols_q = []\n vals =[]\n for k in record:\n if record[k] == None:\n # dont insert none values\n continue\n cols.append(escape_identifier(k))\n cols_q.append('?')\n vals.append(record[k])\n sql = (\"INSERT INTO %s \" % (escape_identifier(self.table))) + '(' + ','.join(cols) + ') VALUES (' + ','.join(cols_q) + ')'\n #print sql\n r = c.execute(sql, vals)\n if commit:\n self.connection.commit()\n #pprint.pprint(record, indent=2)\n return r", "title": "" }, { "docid": "08dad02455658778043884c4770c0778", "score": "0.613414", "text": "def _do_insert(self, manager, using, fields, update_pk, raw):\r\n return manager._insert([self], fields=fields, return_id=update_pk,\r\n using=using, raw=raw)", "title": "" }, { "docid": "62b974e0da2c929377ebb68acf3ebd3d", "score": "0.61291486", "text": "def fucked_up_insert():\n pass # pragma: no cover", "title": "" }, { "docid": "4ddf2644712d227903ec22cfa468bba0", "score": "0.61277425", "text": "def single_insert(conn, insert_req):\n cursor = conn.cursor()\n try:\n cursor.execute(insert_req)\n conn.commit()\n except (Exception, psycopg2.DatabaseError) as error:\n print(\"Error: %s\" % error)\n conn.rollback()\n cursor.close()\n return 1\n cursor.close()", "title": "" }, { "docid": "cc176a976253c30c53d764cc59cfe110", "score": "0.61252725", "text": "def insert(self):\n pass", "title": "" }, { "docid": "2e9a585ec8810057575bcdec7a017b97", "score": "0.61241925", "text": "def sqlInsertStr(params, outfile=sys.stdout, extraParams=None, dbType='postgres'):\n outfile.write(str(sqlInsert(params,extraParams,dbType=dbType)))", "title": "" }, { "docid": "94087f72cbd30a0a65f232b40e55605f", "score": "0.6121919", "text": "def _Insert(self, table, values):\n self._ReflectToMetadata()\n ins = self._meta.tables[table].insert()\n r = self._Execute(ins, values)\n return r.inserted_primary_key[0]", "title": "" }, { "docid": "c012ad54b321cfe5d07b7f23508c57de", "score": "0.61216605", "text": "def insert_iterative(self, data):", "title": "" }, { "docid": "3ab1f299d65ca57ec64ce63026a984e2", "score": "0.6118437", "text": "def get_insert_query():\n return \"INSERT INTO words VALUES ({})\".format(\", \".join(\"?\" * 28))", "title": "" }, { "docid": "672742809ce5ad9d23a52056c5aedb1a", "score": "0.6105644", "text": "def insert(sql, clue):\n # Clue is [game, air_date, round, category, value, clue, answer]\n # Note that at this point, clue[4] is False if round is 3\n if \"\\\\\\'\" in clue[6]:\n clue[6] = clue[6].replace(\"\\\\\\'\", \"'\")\n if \"\\\\\\\"\" in clue[6]:\n clue[6] = clue[6].replace(\"\\\\\\\"\", \"\\\"\")\n if not sql:\n print(clue)\n return\n sql.execute(\"INSERT OR IGNORE INTO categories(category) VALUES(?);\", (clue[3], ))\n category_id = sql.execute(\"SELECT category_id FROM categories WHERE category=?;\", (clue[3], )).fetchone()[0]\n \n right_player_id = sql.execute(\"SELECT players.player_id FROM players JOIN game_players ON game_players.player_id = players.player_id WHERE game_players.game_id=? 
AND players.nickname=?\", (clue[0], clue[8].replace(\"\\\\'\", \"'\"))).fetchone()[0] if clue[8] else None\n clue_id = sql.execute(\"INSERT INTO clues(game_id, round, value, category_id, clue, answer, answer_player_id, order_number, is_daily_double, column, row) VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?);\", (clue[0], clue[2], clue[4], category_id, clue[5], clue[6], right_player_id, clue[9], clue[10], clue[11], clue[12], )).lastrowid\n \n return clue_id", "title": "" }, { "docid": "be0c8cebbc201202993c42239fd40509", "score": "0.6104948", "text": "def insert(self, db_table, records): \n with self.engine.connect() as conn:\n for record in records:\n try:\n insert_stmt = insert(db_table).values(record)\n result = conn.execute(insert_stmt)\n except SQLAlchemyError as e:\n print(\"DB insert error: {}\".format(e))", "title": "" }, { "docid": "43e456abae1b56652542416a0ef26bd5", "score": "0.6104285", "text": "def insert(self, data: dict):\n sql = 'INSERT INTO {} ( {} ) VALUES ( {} )'.format(\n self._table,\n ', '.join(data),\n ', '.join(['%({})s'.format(column) for column in data]),\n )\n\n self.execute(sql, data)", "title": "" }, { "docid": "5639d7006dd6a6106acbccac3b7e7ed1", "score": "0.61005497", "text": "def insert_data(self, values):\n table = self.db_tablename\n params = self.insert_params\n sql_cmd = 'INSERT INTO %s VALUES %s' % (table, params)\n\n # Insert given parameters to database.\n self._cursor.execute(sql_cmd, values)\n self._conn.commit() # Save changes to db.", "title": "" }, { "docid": "f0858c328097f020860516e072779376", "score": "0.6095106", "text": "def _constructinsert_query_intro(self):\n insert_query_intro = \"INSERT INTO %s (%s\" % (self.table,self.dimension_list[0])\n for dimension in self.dimension_list[1:]:\n insert_query_intro += \", %s\" % (dimension)\n for metric in self.metric_list:\n insert_query_intro += \", %s\" % (metric)\n insert_query_intro += \") VALUES \"\n return insert_query_intro", "title": "" }, { "docid": "d0acdcc29555af53dbd0919bf51d3ff6", "score": "0.6086631", "text": "def insertion(insert_dict, form_info, con):\n # get the table we are interested in\n table = form_info['table-name']\n # initialize an empty dictionnary to store the values to add\n inserted_elem = {}\n # get an unused ID\n inserted_elem['ID'] = get_new_id(con, table)\n # get the fields of our table and loop through them\n fields = insert_dict[table]\n for field, descr in fields.items():\n # check if field is non nullable and empty return error if it is the case\n # and cancel query\n if descr['nullable'] == False and form_info[field] == '':\n print('error missing field {}'.format(field))\n return\n # add element only if user entered something\n if not form_info[field] == '':\n # case 1\n if descr['case'] == 1:\n inserted_elem[field] = form_info[field]\n else:\n # case 2\n if descr['case'] == 2:\n # get value to add to the tuple\n result = insert_case_2_3(con, descr, form_info[field])\n # if no result was given cancel query\n if not result:\n return\n inserted_elem[field] = result\n # case 3\n if descr['case'] == 3:\n result = insert_case_2_3(\n con, descr, form_info[field], inserted_elem['ID'])\n if not result:\n return\n # make the final query and execute it\n query = insert_query(inserted_elem, table)\n print(query)\n execute_query(con, query, inserted_elem)\n con.commit()", "title": "" }, { "docid": "1a233fcf4ae3377a7a17bae32948f4c6", "score": "0.60794014", "text": "def insert_statement(self, values):\n columns = self.table.get_insert_columns()\n types = 
self.table.get_column_datatypes()\n column_count = len(self.table.get_insert_columns(join=False, create=False))\n for row in values:\n row_length = len(row)\n # Add None with appropriate value type for empty cells\n for i in range(column_count - row_length):\n row.append(self.format_insert_value(None, types[row_length + i]))\n\n insert_stmt = \"INSERT INTO {table}\".format(table=self.table_name())\n insert_stmt += \" ( {columns} )\".format(columns=columns)\n insert_stmt += \" VALUES (\"\n for i in range(0, column_count):\n insert_stmt += \"{}, \".format(self.placeholder)\n insert_stmt = insert_stmt.rstrip(\", \") + \")\"\n\n if self.debug:\n print(insert_stmt)\n return insert_stmt", "title": "" }, { "docid": "59a11c23596f747a54d201048452a976", "score": "0.60774326", "text": "def add_data(self,qry, values):\n self.my_cursor.execute(qry, values)\n self.my_connection.commit()", "title": "" }, { "docid": "abb4d37c520d8b26e6da5c713fe8d199", "score": "0.60761213", "text": "def insert(self, q, value=None):\n pass", "title": "" }, { "docid": "0281fa929ee4713490f083e5d4f89fc7", "score": "0.6063293", "text": "def insert(self, name, data):\n if \"id\" in data.keys():\n del data[\"id\"]\n if self.isSQL():\n cursor = self.connection.cursor()\n\n keys = list(data.keys())\n\n insert = \"INSERT INTO \" + name + \"(\"\n insert += ','.join(map(str, keys))\n insert += \") VALUES (\"\n insert += \"?,\" * (len(keys) - 1)\n insert += \"?)\"\n\n values = tuple()\n\n for key in keys:\n if isinstance(data[key], list) or isinstance(data[key], dict):\n try:\n data[key] = json.dumps(data[key])\n except Exception as e:\n pass\n\n values += (data[key], )\n\n res = cursor.execute(insert, values)\n self.connection.commit()\n cursor.close()\n return self.get(name, \"id\", cursor.lastrowid)\n else:\n result = self.db[name].insert_one(data)\n\n return self.get(name, \"id\", str(result.inserted_id))", "title": "" }, { "docid": "1d5eac83469c3e038c05c9d7fd24a796", "score": "0.6062178", "text": "def _insert_new_assocxtrsource(image_id):\n\n query = \"\"\"\\\nINSERT INTO assocxtrsource\n (runcat\n ,xtrsrc\n ,type\n ,distance_arcsec\n ,r\n ,v_int\n ,eta_int\n ,f_datapoints\n )\n SELECT r0.id AS runcat\n ,r0.xtrsrc\n ,4\n ,0\n ,0\n ,0\n ,0\n ,1\n FROM (SELECT x1.id AS xtrsrc\n FROM extractedsource x1\n LEFT OUTER JOIN temprunningcatalog tmprc\n ON x1.id = tmprc.xtrsrc\n WHERE x1.image = %(image_id)s\n AND x1.extract_type = 0\n AND tmprc.xtrsrc IS NULL\n ) new_src\n ,runningcatalog r0\n WHERE r0.xtrsrc = new_src.xtrsrc\n\"\"\"\n tkp.db.execute(query, {'image_id':image_id}, True)", "title": "" }, { "docid": "b29b1ed19c54842041ab17f65b7bd444", "score": "0.6054349", "text": "def make_insert_statement(table, fields=(), values=()):\n statement = 'INSERT INTO %s (%s) VALUES (%s)' % (\n table,\n ', '.join(fields),\n ', '.join(['?'] * len(values))\n )\n return statement", "title": "" }, { "docid": "1286171eea760ee1907abffbd744d09f", "score": "0.6045893", "text": "def insertSQL(self, rec, db=None):\n lstFields = []\n lstValues = []\n\n # build sFields and sValues for all parameters\n for fld in self.lstFields:\n newValue = getattr(rec,fld.name, None)\n if newValue is not None:\n lstFields.append( fld.name )\n lstValues.append( fld.formatValue( newValue ))\n\n sql = 'INSERT INTO %s ' % self.tableName\n sql += ' ( %s ) ' % ','.join(lstFields)\n sql += ' VALUES ( %s )' % ','.join( lstValues )\n if db:\n db.execute( sql, commit=True )\n else:\n return sql", "title": "" }, { "docid": "b932e3cb891578e347cecbd1b6885c28", "score": "0.6044674", 
"text": "def insert_record(self, table, keys, values):\n joined_keys = \", \".join(keys)\n joined_values = \", \".join(map(\"'{}'\".format, values))\n\n cursor = connection.cursor()\n command = self.INSERT_RECORD_COMMAND.format(table, joined_keys, joined_values)\n cursor.execute(command)", "title": "" }, { "docid": "6cece035fd37c4092fd7052876732196", "score": "0.604345", "text": "def insert(self, data):\n try:\n session.add(data)\n session.commit()\n print('Insert successful')\n except DatabaseIntegrityError as dberr:\n session.rollback()\n print(dberr)\n print('unable to insert & ROLLBACK IS DONE')\n except Exception as exception:\n print(exception)\n print(\"unable to insert\")", "title": "" }, { "docid": "2681519f69c06131f990e7aae368e5ec", "score": "0.6039181", "text": "def insert(self, insert_str, value_str):\n with self.get_cursor() as cur:\n try:\n cur.execute(insert_str, value_str)\n except IntegrityError, i:\n logger.error('Duplicated data. {i}'.format(i=i))\n except Exception, e:\n logger.error(e)", "title": "" }, { "docid": "ec7e84d8b3988777268273657f750911", "score": "0.60343933", "text": "def _insert_1_to_1_runcat_flux():\n\n query = \"\"\"\\\nINSERT INTO runningcatalog_flux\n (runcat\n ,band\n ,stokes\n ,f_datapoints\n ,avg_f_peak\n ,avg_f_peak_sq\n ,avg_f_peak_weight\n ,avg_weighted_f_peak\n ,avg_weighted_f_peak_sq\n ,avg_f_int\n ,avg_f_int_sq\n ,avg_f_int_weight\n ,avg_weighted_f_int\n ,avg_weighted_f_int_sq\n )\n SELECT runcat\n ,band\n ,stokes\n ,f_datapoints\n ,avg_f_peak\n ,avg_f_peak_sq\n ,avg_f_peak_weight\n ,avg_weighted_f_peak\n ,avg_weighted_f_peak_sq\n ,avg_f_int\n ,avg_f_int_sq\n ,avg_f_int_weight\n ,avg_weighted_f_int\n ,avg_weighted_f_int_sq\n FROM temprunningcatalog\n WHERE inactive = FALSE\n AND f_datapoints=1\n\"\"\"\n cursor = tkp.db.execute(query, commit=True)\n return cursor.rowcount", "title": "" }, { "docid": "31e2b758002590811d95bfa058bce3d7", "score": "0.6033739", "text": "def multiple_insert(table, values, seqname=None):\r\n def escape(value):\r\n if value is None:\r\n return \"\\N\"\r\n elif isinstance(value, basestring): \r\n value = value.replace('\\\\', r'\\\\') # this must be the first one\r\n value = value.replace('\\t', r'\\t')\r\n value = value.replace('\\r', r'\\r')\r\n value = value.replace('\\n', r'\\n')\r\n return value\r\n elif isinstance(value, bool):\r\n return value and 't' or 'f'\r\n else:\r\n return str(value)\r\n \r\n def increment_sequence(seqname, n):\r\n \"\"\"Increments a sequence by the given amount.\"\"\"\r\n d = web.query(\r\n \"SELECT setval('%s', $n + (SELECT last_value FROM %s), true) + 1 - $n AS START\" % (seqname, seqname), \r\n locals())\r\n return d[0].start\r\n \r\n def write(path, data):\r\n f = open(path, 'w')\r\n f.write(web.utf8(data))\r\n f.close()\r\n \r\n if not values:\r\n return []\r\n \r\n if seqname is None:\r\n seqname = table + \"_id_seq\"\r\n \r\n #print \"inserting %d rows into %s\" % (len(values), table)\r\n \r\n columns = get_table_columns(table)\r\n if seqname:\r\n n = len(values)\r\n start = increment_sequence(seqname, n)\r\n ids = range(start, start+n)\r\n for v, id in zip(values, ids):\r\n v['id'] = id\r\n else:\r\n ids = None\r\n \r\n data = []\r\n for v in values:\r\n assert set(v.keys()) == set(columns)\r\n data.append(\"\\t\".join([escape(v[c]) for c in columns]))\r\n \r\n filename = tempfile.mktemp(suffix='.copy', prefix=table)\r\n write(filename, \"\\n\".join(data))\r\n web.query(\"COPY %s FROM '%s'\" % (table, filename))\r\n return ids", "title": "" }, { "docid": 
"aafde0a036a21bc7b9de37dac426df9a", "score": "0.6023386", "text": "def add_iden_pram_to_db(params,conn):\n\n conn.execute(\"insert into Iden_prams\" +\n \" (dset_key,comp_key,threshold,top_cut,p_rad,hwhm,d_rad,mask_rad,shift_cut,rg_cut,e_cut) \" +\n \"values (?,?,?,?,?,?,?,?,?,?,?);\",params)\n conn.commit()", "title": "" }, { "docid": "9a29a5ee22ab46ed85be245865136fff", "score": "0.6020144", "text": "def insert(self, data):\n if (not isinstance(data, list) and\n not isinstance(data, dict)) or not len(data):\n raise AttributeError(\"data type error.\")\n\n fields = data.keys() if isinstance(data, dict) else random.choice(data).keys()\n sql = self._insertstr % (self._table_name, self._buildInserFieldsString(fields),\n \"(%s)\" % \", \".join([\"%s\" for index in range(len(fields))]))\n\n return MySQLConnect.execute(sql,\n parameter=self.__buildInsertValueParam(data),\n many=isinstance(data, list))", "title": "" }, { "docid": "fa732882269b00c894d64e6c4afd0692", "score": "0.60160387", "text": "def test_insert(self):\n sqltap.start(self.engine)\n\n sess = self.Session()\n sess.add(self.A())\n sess.flush()\n\n stats = sqltap.collect()\n assert len(_startswith(stats, 'INSERT')) == 1", "title": "" }, { "docid": "8e100b6f49bb0f51f41298d4c25e2d14", "score": "0.6010963", "text": "def insert_token(self, *args):\n try:\n sql = \"insert into token(contract_addr, `type`,fullname, `name`, decimals, deciml, logo, \" \\\n \"daddy) values(%s) on DUPLICATE KEY UPDATE update_time = NOW()\"\n arglist = ','.join(['%s'] * len(args))\n ret = self.execute(sql % arglist, *args)\n return ret\n except Exception as e:\n return False\n pass", "title": "" }, { "docid": "2f7ed43618937b4af09f3a16ebe01d22", "score": "0.5998734", "text": "def compile_insert(self, query, values):\n table = self.wrap_table(query.from__)\n\n if not isinstance(values, list):\n values = [values]\n\n # If there is only one row to insert, we just use the normal grammar\n if len(values) == 1:\n return super(SQLiteQueryGrammar, self).compile_insert(query, values)\n\n names = self.columnize(values[0].keys())\n\n columns = []\n\n # SQLite requires us to build the multi-row insert as a listing of select with\n # unions joining them together. So we'll build out this list of columns and\n # then join them all together with select unions to complete the queries.\n for column in values[0].keys():\n columns.append(\"%s AS %s\" % (self.get_marker(), self.wrap(column)))\n\n columns = [\", \".join(columns)] * len(values)\n\n return \"INSERT INTO %s (%s) SELECT %s\" % (\n table,\n names,\n \" UNION ALL SELECT \".join(columns),\n )", "title": "" }, { "docid": "7be9239c69643545bd5b584606972741", "score": "0.59981793", "text": "def check_specific_insert(self, table, data):\n # Asserts\n\n # Init\n attributes = list(data[0])\n \n # prossesing\n # Select\n attr_str = \"\"\n for attr in attributes[:-1]:\n attr_str += f\"{attr}, \"\n attr_str += f\"{attributes[-1]}\"\n select_str = \"SELECT \" + attr_str\n # From\n from_str = f\"FROM {table}\"\n # Building the query\n query = select_str + \"\\n\" + from_str\n\n # Query\n with self.conn:\n res = self.cur.execute(query)\n res_data = res.fetchall()\n print(f\"check_specific_insert: \\n{res_data}\")", "title": "" }, { "docid": "77c8840454af72cb56604ac47c3bfd83", "score": "0.5996024", "text": "def sql_insertion(conn, tablename, dictionary: dict, silent=True):\n try:\n query = f\"INSERT INTO {tablename}({','.join(dictionary.keys())}) \" \\\n f\"VALUES({','.join('?' 
* len(dictionary.keys()))})\", \\\n [tuple([item[1] for item in dictionary.items()])]\n\n conn.executemany(query[0], query[1])\n\n if not silent:\n print(f'[success] data inserted on {tablename}.')\n\n except:\n print(f'[error] on inserting data on {tablename}.')", "title": "" }, { "docid": "d4c6e1e58aee957e6e2b6d1abcb7e427", "score": "0.5993183", "text": "def insert(self, sql, *args, **kwargs):\n\n assert \"insert into\" in sql.lower(), 'This function requires an insert statement, provided: {}'.format(sql)\n cursor = self.execute_and_commit(sql, *args, **kwargs)\n\n # now get that id\n last_row_id = cursor.lastrowid\n cursor.close()\n\n return last_row_id", "title": "" }, { "docid": "564d259ef5c4ed41a8afdbb3e3aeac8e", "score": "0.597577", "text": "def insert_in_db(self, values, method='many'):\n try:\n c = self.conn.cursor()\n try:\n request_insert = 'INSERT INTO \"' + self.table + \\\n '\" VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?);'\n if method == \"many\":\n c.executemany(request_insert, values)\n else:\n c.execute(request_insert, values)\n except sqlite3.IntegrityError:\n # data already exists in database\n self.__logger.debug(\n f\"{values} data already exist in database.\")\n pass\n self.conn.commit()\n except Error as e:\n self.__logger.error(e, values)\n return None", "title": "" }, { "docid": "234737724b65556284754b93b337f34e", "score": "0.5975204", "text": "def insert(self, data: dict):\n\n assert isinstance(data, dict)\n\n # NOTE: you only need to convert lists for CSVs!!! Not here...\n for key, val in data.items():\n # Can't have this in one place because numpy.ndarray is the type\n # But numpy.array is the comprehension\n if isinstance(val, tuple):\n data[key] = list(data[key])\n elif isinstance(val, np.ndarray):\n # Must convert inner types to not be numpy types\n data[key] = list(float(x) for x in data[key])\n values_str = \", \".join([\"%s\"] * len(data))\n\n sql = (f\"INSERT INTO {self.name} ({','.join(data.keys())})\"\n f\" VALUES ({values_str})\")\n\n if self.id_col:\n sql += f\" RETURNING {self.id_col};\"\n\n # https://stackoverflow.com/a/41779401/8903959\n # If there is no data, replace part of the query\n sql = sql.replace(\"() VALUES ()\", \"DEFAULT VALUES\")\n\n logging.debug(f\"About to execute: {sql}\")\n logging.debug(f\"With data: {str(data.values())}\")\n result = self.execute(sql, tuple(data.values()))\n\n # Return the new ID\n if self.id_col:\n return result[0][self.id_col]", "title": "" }, { "docid": "e66f4dd70d124186a7706c0dcd706949", "score": "0.5970151", "text": "def insert_mutiple_rows(self, query, vals):\n success = 1\n try:\n #if isinstance(vals[0], dict):\n # for itm in vals:\n # self.insert(query, params_tuple=itm)\n #else:\n self.cursor.executemany(query, vals)\n self.connection.commit()\n success = 0\n except mysqldb.Error as err:\n print(\"Failed Insert: {}\".format(err))\n self.connection.rollback()\n\n return success", "title": "" }, { "docid": "26866bb2b9aa4329d9a64d5195e7da4f", "score": "0.5968506", "text": "def insert_variant(conn, values):\n cur = conn.cursor()\n sel = \"\"\"INSERT INTO Variants (CHROM, POS, ID, REF, ALT, QUAL, FILTER, thousandg2015aug_all,\n ExAC_ALL,\n FATHMM_predID,\n LRT_predID,\n MetaLR_predID,\n MetaSVM_predID,\n MutationAssessor_predID,\n MutationTaster_predID,\n PROVEAN_predID,\n Polyphen2_HDIV_predID,\n Polyphen2_HVAR_predID,\n SIFT_predID,\n fathmm_MKL_coding_predID) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\"\"\"\n \n with conn:\n cur.execute(sel,values)\n \n sel = \"SELECT 
COUNT(*) FROM Variants\"\n cur.execute(sel)\n rs = cur.fetchall()\n if rs[0][0]%1000==0:\n print(rs[0][0])\n return rs[0][0]", "title": "" }, { "docid": "109937a4938687982fca2e8038d2f5d3", "score": "0.5960648", "text": "def insert(self, cls, rows, force=False):\n tableInfo= self.getTables()[ cls.tableName() ]\n commands={True : \"REPLACE\", False : \"INSERT IGNORE\"}\n command= \"{0} INTO {1} VALUES {2}\".format( commands[force],\n cls.tableName(),\n tableInfo.format() )\n \n for row in rows: \n self._cursor.execute(command, tableInfo.toTuple(row))", "title": "" }, { "docid": "1b80e80fb1025992a1463144e22577a7", "score": "0.59599984", "text": "def insert_many(self, insert_str, values=[]):\n with self.get_cursor() as cur:\n for i in range(0, int(ceil(len(values) / 1000.0))):\n logger.debug('Storing ' + insert_str)\n try:\n cur.executemany(insert_str, values[i * 1000:(i + 1) * 1000])\n except IntegrityError, i:\n logger.error('Duplicated data. {i}'.format(i=i))\n for value in values:\n self.insert(insert_str, value)\n except Exception, e:\n logger.error(e)", "title": "" }, { "docid": "457d37b740d6fd500fac94ff37daa829", "score": "0.5957257", "text": "def insert_into_db(count=51):\n cur = conn.cursor()\n query = \"INSERT INTO tb_customer_account(id_customer, cpf_cnpj, nm_customer, is_active, vl_total) VALUES\"\n id_random = random.randint(1,500)\n values = list()\n\n for item in range(1, count):\n customer = '({id_customer}, \\'13089778\\', \\'Lapis da Silva\\', 1, {vl_total})'\n values.append(customer.format(id_customer=id_random*item, vl_total=item*200))\n\n query += ', '.join(values)\n cur.execute(query)\n cur.close()", "title": "" }, { "docid": "47d5d8c15966d2d2c4447c5283a38d6f", "score": "0.592578", "text": "def insert_products_db(id_prod, name_prod, brand_prod, grade_prod, detail_prod, stores_prod, url_prod, cat_prod):\n product_var = id_prod, name_prod, brand_prod, grade_prod, detail_prod, stores_prod, url_prod, cat_prod, name_prod\n curs, con_db = db_connection() #tuples\n try:\n curs.execute(\n \"INSERT INTO Produits VALUES (NULL, %s, %s, %s, %s, %s, %s, %s, %s) ON DUPLICATE KEY UPDATE nom=%s\",product_var\n )\n except Warning:\n pass\n con_db.commit()", "title": "" } ]
3f38572930214b31e362d80d752a5bef
Loops the queue. Invoke this command again to unloop the queue.
[ { "docid": "85398434c04e430f30d630ab4c452835", "score": "0.7258371", "text": "async def _loop(self, ctx: commands.Context):\n\n await ctx.message.delete(delay=5)\n if not ctx.voice_state.is_playing:\n return await ctx.send('Nothing being played at the moment.', delete_after=5)\n\n # Inverse boolean value to loop and unloop.\n ctx.voice_state.loop = not ctx.voice_state.loop\n await ctx.send('Currently ' + ('' if ctx.voice_state.loop else 'not ') + 'looping queue.', delete_after=5)", "title": "" } ]
[ { "docid": "89b15d25c448917b330f9c88c7c4275a", "score": "0.7013208", "text": "def loop(self, queue, name):\n try:\n while self.running:\n try:\n gcode = queue.get(block=True, timeout=1)\n except Queue.Empty:\n continue\n\n logging.debug(\"Executing \"+gcode.code()+\" from \"+name + \" \" + gcode.message)\n\n self._execute(gcode)\n\n self.printer.reply(gcode)\n\n queue.task_done()\n except Exception:\n logging.exception(\"Exception in {} loop: \".format(name))", "title": "" }, { "docid": "4061d9f6769ca49e60f3e8bcaba4c71b", "score": "0.681251", "text": "def loop(self, q):\n while True:\n x = q.get()\n command_type = x[0]\n connection_id = x[1]\n content = None if len(x) == 2 else x[2]\n if command_type == 'ADD_QUEUE':\n self.queues_out[connection_id] = content\n elif command_type == 'KILL_QUEUE':\n del self.queues_out[connection_id]\n elif command_type == 'COMMAND':\n self.handle_input(content, connection_id)", "title": "" }, { "docid": "16c8f9aa9e18919bda329867a52ec06d", "score": "0.6792188", "text": "def run(self):\n \n while self.running:\n if len(self.queue):\n print(self.queue[0])\n self.queue.pop(0)", "title": "" }, { "docid": "182103f28d47def21baeb04cbdbd51c7", "score": "0.6625575", "text": "def loop(self):\n\t\tpass", "title": "" }, { "docid": "55b00c4920f0906c47efc8625934dc0d", "score": "0.6574884", "text": "def loop(self):\n pass", "title": "" }, { "docid": "cfbb6740823fa11b47b10da4d5d1b73c", "score": "0.6490017", "text": "def stop(self):\n self._command_queue.put(None)", "title": "" }, { "docid": "29fca24f5df85d14cfe4256d8044d3bf", "score": "0.6460909", "text": "async def reclaim_loop():\n while True:\n logger.debug(\"Checking for any events which need reclaiming\")\n async for messages in self._reclaim_lost_messages(\n stream_names, consumer_group, expected_events\n ):\n await queue.put(messages)\n # Wait for the queue to empty before getting trying to get another message\n await queue.join()\n\n initial_reclaiming_complete.set()\n await asyncio.sleep(self.reclaim_interval)", "title": "" }, { "docid": "854336a5ab3878cebe49419319bf52cf", "score": "0.64506465", "text": "def _loop(self):\n pass", "title": "" }, { "docid": "bf5b5d0df4efa44b172cf262bfd6294e", "score": "0.6443101", "text": "def __loop(self):\n while True:\n record = self.__queue.get()\n try:\n super(_AsyncHandler, self).emit(record)\n except:\n pass", "title": "" }, { "docid": "ef572a98a2d0e476494277efc76b230e", "score": "0.6394394", "text": "def hold_loop(self, loop = True):\n was_looping = self.clip.looping # Remember whether we were looping\n self.clip.looping = loop\n yield\n self.clip.looping = was_looping", "title": "" }, { "docid": "36e7645679e22572dddd8c59f086ff03", "score": "0.63876754", "text": "def run(self):\n self._running.set()\n while self._running.isSet():\n e = self._queue.get() # Get next event or hang on empty queue\n if e is None: # Reached sentinel value\n break\n qp.Hsm.dispatch(self, e)\n self.unsubscribe_all()\n QF.remove(self)", "title": "" }, { "docid": "04b569b278e13ee0b8102db9063c0eae", "score": "0.63791907", "text": "def block_loop(self):\n while not self.block_loop_quit:\n self.block_once()", "title": "" }, { "docid": "9743055ae4acd6228eb481edc121848b", "score": "0.63707286", "text": "def loop(self):", "title": "" }, { "docid": "ff97ead2955c4d48a1879ddef5c4e95c", "score": "0.6368909", "text": "async def loop(self, ctx):\n\n reply = self._loop(ctx)\n await ctx.send(reply)", "title": "" }, { "docid": "dedfaaf52a8728a6c2af649596e0080f", "score": "0.6366", "text": "async def off(self, 
ctx):\n\n state = self.get_state(ctx.guild)\n\n state.loop = False\n state.loop_queue = False\n await ctx.send(\"Looping Disabeld\")", "title": "" }, { "docid": "cc0142c1af3451f7ab6a83ef76fb53ae", "score": "0.63530946", "text": "def loop(self):\n while not self.STOP:\n self.tick()\n time.sleep(tick_time)", "title": "" }, { "docid": "7511ac3a506a08478f78209320cf7b26", "score": "0.635098", "text": "def _handleloop(self):\n while not self.stop:\n (input, addr) = self.queue.get()\n if not input or not addr:\n continue\n if self.stop:\n break \n self.handle(input, addr)\n if cfg['udpsleep']:\n time.sleep(cfg['udpsleep'] or 0.01)\n logging.info('udp - shutting down udplistener')", "title": "" }, { "docid": "914713c8f0cf16c154297b06b72fa921", "score": "0.6349078", "text": "def stop(self):\n self.queue.put(None)", "title": "" }, { "docid": "f5d8819cacef777232c03bd3b1d05874", "score": "0.6341603", "text": "def loop(self):\n self.logger.info('Entering daemon loop.')\n if constants.POST_LOOP_BREAK:\n self.schedule_action(self.kill)\n\n while not self.stop.isSet():\n action, keyworded_args = self.queue.get()\n if len(keyworded_args):\n action(keyworded_args)\n else:\n action()\n\n self.logger.info('Exiting daemon loop.')", "title": "" }, { "docid": "a152f2bf948eac4b2be43a48070f73bf", "score": "0.6330632", "text": "def stop(self):\r\n self.is_looping.clear()", "title": "" }, { "docid": "5a84d9131da0d568ed66c4e5700e8cd8", "score": "0.6299489", "text": "def interruptLoop(self):\n self._status[2] = True", "title": "" }, { "docid": "977ec43f3defa1811408c45df5d1edfa", "score": "0.62983024", "text": "def _send_queue(self):\n while not LWLink.the_queue.empty():\n self._send_reliable_message(LWLink.the_queue.get_nowait())", "title": "" }, { "docid": "af7e2108959ae1422da3d25dafaca5e1", "score": "0.62285167", "text": "def loop_forever(self):\n self.usb_poll_thread()", "title": "" }, { "docid": "8a0fda85eee2789644e6f2475e3e24c5", "score": "0.62249994", "text": "def pop_loop(self):\n del self.break_stack[-1]", "title": "" }, { "docid": "923156e79f7475faff61ef92c19085f3", "score": "0.61899656", "text": "def _renotify(self) -> None:\n queue = self._queue.copy()\n self._queue.clear()\n while queue:\n self._notify(queue.pop(0))", "title": "" }, { "docid": "34572abe9b4f8c930d69b7618950ee65", "score": "0.61736786", "text": "def _ProcessQueue(self, queue):\n if self._paused:\n return\n\n while not queue.IsEmpty() and not queue.NeedsBackoff():\n dyn_req = queue.Pop()\n dyn_req.execute_cb(dyn_req)\n\n queue.ResetTimeout(partial(self._ProcessQueue, queue))", "title": "" }, { "docid": "9b03d2b3f22525d532217eb76c239672", "score": "0.6172339", "text": "def run(self):\n while True:\n fun, args, kargs = self.task_queue.get()\n fun(*args, **kargs)\n self.task_queue.task_done()", "title": "" }, { "docid": "3e70abd32bb41026d732abf410f96a52", "score": "0.6161003", "text": "def process_queue(self):\n start = time.clock()\n while self.queue and time.clock() - start < 1.0 / TICKS_PER_SEC: #The TICKS is used as the time it will parse through the QUEUE before taking a break \n self._dequeue() #this is hard coded as 60 above", "title": "" }, { "docid": "dfd3f39328c7c3ebfda66831c22157de", "score": "0.6153084", "text": "def reset(self):\n self.queue = []", "title": "" }, { "docid": "cfccca1320236c73784c8dca958ef27e", "score": "0.6146622", "text": "def _receive_loop(self):\n while self.doReceive :\n reply = self._receive()\n \n if reply:\n repel = reply.getElementsByTagName('reply');\n if repel and repel[0].getAttribute(\"isdummy\") :\n 
self.indQueue.put(reply);\n else :\n self.msgQueue.put(reply);\n else:\n self.doReceive = False\n break;", "title": "" }, { "docid": "ab43a0e7d3cc745a7ad23be786a55cde", "score": "0.61451393", "text": "def postloop(self):\n cmd.Cmd.postloop(self) ## Clean up command completion\n print \"Exiting...\"", "title": "" }, { "docid": "8c58b809d50e23b1ab2e3f5670015481", "score": "0.61431044", "text": "async def _loop(self, ctx: commands.Context):\r\n\r\n if not ctx.voice_state.is_playing:\r\n return await ctx.send('Ничего не играет в данный момент.')\r\n\r\n # Inverse boolean value to loop and unloop.\r\n ctx.voice_state.loop = not ctx.voice_state.loop\r\n await ctx.message.add_reaction('✅')", "title": "" }, { "docid": "b21ae6fd22f365d4e876d3363d796705", "score": "0.61287045", "text": "def _read_retry_queue(self, queue, **kwargs):\n while True:\n queue.get()\n queue.task_done()", "title": "" }, { "docid": "5b74ab54f64beaee9cead1dfaec2e70d", "score": "0.61155695", "text": "def execute_task_loop(self) -> None:\n # Event to indicate that the thread should terminate\n while self.alive.isSet():\n # Get an item from the outgoing queue, timeout ensures that the loop condition can be checked\n try:\n task: Optional[CommandTask] = self.send_q.get(timeout=self.queue_delay)\n except Empty:\n # No task obtained, retry queue\n continue\n else:\n if task is None:\n # End of queue\n self.send_q.task_done()\n break\n else:\n # Component-specific command handling\n logging.debug(f'{self.name} is handling {task.cmd}')\n return_val = self.hook_handle_gcode(task.cmd, task.b)\n logging.debug(f'{self.name} is done with {task.cmd}')\n self.send_q.task_done()\n\n if isinstance(return_val, Exception):\n cmd_response = CommandResponse(-1, return_val)\n else:\n cmd_response = CommandResponse(0, return_val)\n\n self.recv_q.put(cmd_response)", "title": "" }, { "docid": "9aab6a880561aa37f181c3743495e3a0", "score": "0.61081666", "text": "def terminate(self): # pragma: no cover ; not tested / running over multiprocessing\n\n self.loop = False", "title": "" }, { "docid": "0904efec7760751f4a05e7668159a47b", "score": "0.6084702", "text": "def postloop(self):\n Cmd.postloop(self) ## Clean up command completion\n print \"Exiting...\"", "title": "" }, { "docid": "6972c0ad104198f22012e623bdc4a569", "score": "0.6060405", "text": "def stop_crawling(self):\n self.lock.acquire()\n self.queue.put(None)\n self.lock.release()", "title": "" }, { "docid": "eecf9535bd0de0e50418ea7eacfac8be", "score": "0.6056358", "text": "def _outloop(self):\n logging.info('udp - starting outloop')\n while not self.stop:\n (printto, txt) = self.outqueue.get()\n if self.stop:\n return\n self.dosay(printto, txt)\n logging.info('udp - stopping outloop')", "title": "" }, { "docid": "232dd2c9784864f2e074817f296c8305", "score": "0.6050516", "text": "def stop(self):\n self._running.clear()\n self._queue.put(None) # Insert sentinel value", "title": "" }, { "docid": "9491978e9beb5d01681227d4b43da6cb", "score": "0.60500413", "text": "def stop(self):\n self.queue.put(\"STOP\")", "title": "" }, { "docid": "1cfa9ccbb431f1d287520afcc8b56c76", "score": "0.6048061", "text": "def loop(ctx):\n def gen_turns():\n turn = True\n while True:\n yield turn\n turn = not turn\n turn = gen_turns()\n\n while not rospy.is_shutdown():\n msg = encode_command(OPERAND['pump_on']\n if next(turn) else\n OPERAND['pump_off'], 0)\n rospy.loginfo(msg)\n ctx['uarm'].publish(msg)\n ctx['rate'].sleep()", "title": "" }, { "docid": "9a8cef6d88538515c8bda6b65e4ce6e4", "score": "0.6037269", "text": "def 
audio_queue_thread():\n global audio_killed\n logging.debug(\"Audio Queue Thread started\")\n while True :\n while not q.empty():\n #Uncomment following to purge all but most recent item in audio queue\n #while q.qsize() > 1:\n #next_function=q.get()\n #q.task_done()\n next_function = q.get()\n func = next_function[0]\n args = next_function[1:]\n logging.debug(\"Handling next audio job from queue: %s %s\" % (func.__name__ ,args , ))\n audio_killed = False\n unmute()\n func(*args)\n while not audio_killed and running_process.poll() is None: time.sleep(0.01)\n logging.debug(\"Audio task finished - muting audio\")\n q.task_done()\n mute()\n time.sleep(0.01)", "title": "" }, { "docid": "cb049c6120ae1516b52e84e5ed19d957", "score": "0.5979214", "text": "def process_entire_queue(self):\n while self.queue:\n self._dequeue()", "title": "" }, { "docid": "99d43dcae086a820546640b97838b586", "score": "0.5968414", "text": "def async_command_runner():\n\twhile True:\n\t\tfunc, self, bot, update = async_command_queue.get()\n\t\t# print(\"running processor\")#debug\n\t\tfunc(self, bot, update)", "title": "" }, { "docid": "9f92d9ee723083db3c91333df1096e68", "score": "0.595028", "text": "def discard(self):\n try:\n while True:\n item = self.queue.get_nowait()\n self.queue.task_done()\n except:\n pass\n return self", "title": "" }, { "docid": "e5808254706d5f83c657dc8b2ad054d3", "score": "0.5932518", "text": "def run(self):\n\n while True:\n item = self.queue.get()\n logging.warning('(frame_rcv, rssi, crc, mcs): {0}'.format(item))", "title": "" }, { "docid": "0760797d6577f1328f6de3c998f5b99f", "score": "0.59106886", "text": "def send_loop():\n \n while True:\n while not Message.objects.all():\n logging.debug(\"sleeping for %s seconds before checking queue again\" % EMPTY_QUEUE_SLEEP)\n time.sleep(EMPTY_QUEUE_SLEEP)\n send_all()", "title": "" }, { "docid": "f337b1a8b2026b6a6e846faf9018603e", "score": "0.59076655", "text": "def loop(self, ge):\n \n while self.cur_counter < self.max_counter:\n try:\n for key in self.load_queue.keys():\n msg = self.load_queue[key]\n\n if msg[0] == 'image':\n ge.GM.load(msg[1], msg[2])\n \n elif msg[0] == 'font':\n ge.GM.load_font(msg[1], msg[2])\n \n elif msg[0] == 'sound':\n ge.AM.load_sound(msg[1], msg[2], msg[3])\n \n elif msg[0] == 'music':\n ge.AM.load_music(msg[1]. 
msg[2], msg[3])\n\n self.cur_counter += 1\n \n self.load_queue.clear()\n \n except Exception as e:\n print(e)\n \n self.cur_counter, self.max_counter = 0, 0", "title": "" }, { "docid": "a1d071cfc7d198b3aa8438dec201189a", "score": "0.59053713", "text": "def _run(self, index):\n while 1:\n try:\n item = self._queue.get(timeout=2)\n self.process_item(item)\n except Queue.Empty:\n break\n except Exception as err:\n _log.error(\"In _run(): {}\".format(err))\n if _log.isEnabledFor(logging.DEBUG):\n _log.error(traceback.format_exc())\n self._status.fail(index)\n raise\n self._status.success(index)", "title": "" }, { "docid": "b14355c6936399b1c388cb114f47739e", "score": "0.58980477", "text": "def wait(self) -> None:\n while self.is_running():\n for _ in self:\n pass", "title": "" }, { "docid": "c21a2a30ee5b43c4ae4204cfaa72a3ec", "score": "0.58723915", "text": "def shutdown(self):\n self.queue.put(None)\n self.flush()", "title": "" }, { "docid": "a9c3226a335b7ee010242d4034846904", "score": "0.5865494", "text": "def send_queue(self):\n while not self.com.output.empty():\n # Unpack this message, calculate delay, pause for appropriate\n # time\n (target, phrase) = self.com.output.get()\n \n delay = (len(tuple(phrase))*1.0 / settings.TYPING_SPEED*1.0) * 60\n delay_send = simple_thread(lambda com,tar,phr,delay: time.sleep(delay) or com.privmsg(tar, phr))\n delay_send(self.com, target, phrase, delay)", "title": "" }, { "docid": "467208596df6ca65bb833ce7c8a33e93", "score": "0.58650875", "text": "def main_loop():\n while mqttc.loop() == 0:\n logging.debug(\"Looping\")", "title": "" }, { "docid": "060648f799db475ae7eacc5398c01f4b", "score": "0.586118", "text": "def loop_forever(self):\n self.client.loop_forever()", "title": "" }, { "docid": "362a2d06aa1b81da93854631cabfd70d", "score": "0.5858848", "text": "def _dequeue(self):\n func, args = self.queue.popleft() #after doing a commadn fucntion it will then be removed from the overall queue\n func(*args)", "title": "" }, { "docid": "2a1b413b1ee85d15d27f7e1e658427df", "score": "0.58250266", "text": "def send_loop(self):\n key = REDIS_SEND_NOTIFICATION_KEY\n while True:\n noti = self.redis.lpop(key)\n if noti == None:\n # no message\n return\n body = json.loads(noti)\n self.send_handler.task(body)\n return", "title": "" }, { "docid": "3a862e03d3df5120b838b56d6bd7e676", "score": "0.5807057", "text": "def resetQueue(self):\n\n# The first dude in the queue is the player, but there is no real necessity\n# for this - it is a design decision. The first object in the queue, however,\n# is an instance of the LevelTick class, which is not a dude. Rather,\n# the LevelTick instance updates things in the Level which should only be\n# updated once per turn. 
Currently, this LevelTick instance is a piece of\n# state local to the Level it updates.\n\n q = queue.PriorityQueue()\n for e in self.events:\n q.put(e, 0)\n q.put(self.player, 0)\n for p in self.dudeLayer:\n if not p.isPlayer():\n q.put(p, 0)\n\n self.__queue = q", "title": "" }, { "docid": "a3e30cd87309d60672ecebf319c52ac3", "score": "0.5801833", "text": "def processor(self):\n while not self.kill_flag:\n time.sleep(0.1)", "title": "" }, { "docid": "e5a125a7273c021ca746c1c1d4cb2a05", "score": "0.57823366", "text": "def _zpop(self):\n pipe = self.r.pipeline()\n while 1:\n data = None\n try:\n pipe.watch(self.queue)\n rs = pipe.zrange(self.queue, 0, 0)\n if len(rs) > 0:\n element = rs[0]\n pipe.multi()\n pipe.zrem(self.queue, element)\n pipe.execute()\n data = element\n else:\n data = None\n return data\n break\n except (redis.WatchError, IndexError):\n continue", "title": "" }, { "docid": "e74ad3053564c20d8db67b6179558e97", "score": "0.5782118", "text": "async def clearqueue(self, ctx):\n state = self.get_state(ctx.guild)\n state.playlist = []", "title": "" }, { "docid": "cd4ba2b65b47321738d58d153b362ae2", "score": "0.57771873", "text": "def run(self):\n while self._stop_event.is_set() is False:\n while self._message_queue.empty() is False:\n message = self._message_queue.get()\n\n # It is possible that we a signal is dispatched to a receiver\n # not present during enqueue of the message, how ever is present\n # now. YAGNI call for the moment.\n logger.debug(\"MessageDispatcher: dispatch message: \" +\n message.kwargs.__str__())\n logger.debug(\"MessageDispatcher: receivers: \" +\n message.signal.receivers.__str__())\n message.send()\n logger.debug(\"MessageDispatcher: message dispatched!\")\n self._message_queue.task_done()\n self._stop_event.wait(0.01)", "title": "" }, { "docid": "acbd0a007f5116e8bdb6bfd594c8dbc5", "score": "0.57702994", "text": "def __clear_queue(self):\n while not self.__queue.empty():\n try:\n (message, item) = self.__queue.get_nowait()\n except queue.Empty:\n break\n\n if message == self.__MSG_ADD:\n self.__dict[item] = 1\n elif message == self.__MSG_REMOVE:\n del self.__dict[item]", "title": "" }, { "docid": "9236c0a8063bec3e9049c7e6f25570b0", "score": "0.5759456", "text": "def _run(self):\n \n while True:\n self.send_messages()\n try:\n self.recieve_messages()\n except ConnectionResetError:\n self.stop()\n except BlockingIOError:\n pass", "title": "" }, { "docid": "90d4e5a52196b2f7bf3ab2542d49d2a8", "score": "0.5748325", "text": "def quit(self):\n\n self.loop = False", "title": "" }, { "docid": "907728e7ae18523205aa2c35d81d2eba", "score": "0.57477176", "text": "async def _drain_loop(self):\n # Note: this method should not call async methods apart from\n # waiting for the drain event, to avoid yielding to other queue methods\n while True:\n self._drain_evt.clear()\n while self.pending_tasks and (\n not self._max_active or len(self.active_tasks) < self._max_active\n ):\n pending: PendingTask = self.pending_tasks.pop(0)\n if pending.queued_time:\n pending.unqueued_time = time.perf_counter()\n timing = {\n \"queued\": pending.queued_time,\n \"unqueued\": pending.unqueued_time,\n }\n else:\n timing = None\n task = self.run(\n pending.coro, pending.complete_hook, pending.ident, timing\n )\n try:\n pending.task = task\n except ValueError:\n LOGGER.warning(\"Pending task future already fulfilled\")\n if self.pending_tasks:\n await self._drain_evt.wait()\n else:\n break", "title": "" }, { "docid": "c2d7ff500fbc1217e6368e235e274254", "score": "0.574504", "text": "def 
run(self):\n\n while len(self.skill._messages) > 0:\n self.skill._message_lock.acquire()\n if len(self.skill._messages) > 0:\n # print(\"thread {} acting\".format(self.id))\n skill = self.skill._messages.pop(0)\n self.skill._message_lock.release()\n # run skill\n self.skill.main(skill)\n else:\n self.skill._message_lock.release()\n #\n # print(\"thread {} clear\".format(self.id))\n with self.skill._thread_lock:\n # print(\"thread {} terminated\".format(self.id))\n self.skill._threads.remove(self)", "title": "" }, { "docid": "85872894ea6da5ec2ca6a8f71360f951", "score": "0.57355773", "text": "def dequeue(self):\n pass", "title": "" }, { "docid": "85872894ea6da5ec2ca6a8f71360f951", "score": "0.57355773", "text": "def dequeue(self):\n pass", "title": "" }, { "docid": "4e3bad32dd0db08b6066e368752dee73", "score": "0.573245", "text": "def _get_from_queue(self, dt):\n del dt\n\n while not self.receive_queue.empty():\n cmd, kwargs = self.receive_queue.get(False)\n self._process_command(cmd, **kwargs)", "title": "" }, { "docid": "ddd65b57b49d556f5e6df665d89538ba", "score": "0.5728467", "text": "def loop(self, *args):\n self.curr_loop_time_ms = utime.ticks_ms()\n self._check_wifi()\n self._check_mqtt()\n self._maybe_mqtt_disconnect_timeout()\n if self.mqtt_connected():\n self._mqtt_client.check_msg()\n for auto_with_loop in self.autos_with_loop:\n auto_with_loop.loop(self.curr_loop_time_ms)\n try:\n api.cb.loop(self.curr_loop_time_ms)\n except Exception as err:\n print(\"Error when evaluating user's loop function:\\n%s\" % err)\n if self._is_running:\n machine.Timer(1).init(mode=machine.Timer.ONE_SHOT, period=150, callback=schedule_run)", "title": "" }, { "docid": "93a358420135d8af1b15fa8862c6562f", "score": "0.57267714", "text": "def loop(self):\n\n while self.is_running:\n if self.player.is_playing():\n self.on_playing()\n elif self.player.finished_book():\n # when at the end of a book, delete its progress from the db\n # so we can listen to it again\n self.setup_db()\n self.db_cursor.execute(\n 'DELETE FROM progress WHERE book_id = %d' % self.player.book.book_id)\n self.db_conn.commit()\n self.db_conn.close()\n self.player.book.reset()\n\n# rfid_card = self.rfid_reader.read()\n\n# if not rfid_card:\n# continue\n \n# book_id = rfid_card.get_id()\n book_id = self.id_book\n\n if book_id and book_id != self.player.book.book_id: # a change in book id\n\n self.setup_db()\n progress = self.db_cursor.execute(\n 'SELECT * FROM progress WHERE book_id = \"%s\"' % book_id).fetchone()\n\n self.db_conn.close()\n self.player.play(book_id, progress)\n print \"Outside loop\"\n return", "title": "" }, { "docid": "64d6c66348c64eaac6325ef5538b3608", "score": "0.57195616", "text": "def sleep_til_next(self):\n if not self._queue:\n return\n sleep_time = self._queue[0][0]\n self.sleep_func(sleep_time)\n for item in self._queue:\n item[0] -= sleep_time", "title": "" }, { "docid": "ea8a71a014b806793eeaeecbcf58d5e6", "score": "0.57192475", "text": "def eventloop(self, queue, name):\n try:\n\n while self.running:\n # Returns False on timeout, else True\n if self.printer.path_planner.wait_until_sync_event():\n try:\n gcode = queue.get(block=True, timeout=1)\n except Queue.Empty:\n continue\n\n self._synchronize(gcode)\n logging.info(\"Event handled for \" + gcode.code() + \" from \" + name + \" \" + gcode.message)\n queue.task_done()\n except Exception:\n logging.exception(\"Exception in {} eventloop: \".format(name))", "title": "" }, { "docid": "3c95b69cbf00e06cda736b4c0c36b601", "score": "0.57174104", "text": "async def 
stop(ctx):\n if ctx.message.channel.id in channel_loop: # Checks if there is a loop in that channel:\n task_to_cancel = channel_loop[ctx.message.channel.id] # Gets the task object.\n task_to_cancel.cancel() # Cancels.\n await client.send_message(ctx.message.channel, \"You have cancelled the loop in this channel.\") # Alert the user.\n del channel_loop[ctx.message.channel.id] # Delete the channel loop entry.\n else: # If there is no loop:\n await client.send_message(ctx.message.channel, \"There is no loop in this channel.\") # Alert the user.", "title": "" }, { "docid": "b7fe88ef5552db52d5845472bd5d31b7", "score": "0.57171386", "text": "def pop(self):\n self._queue.popleft()", "title": "" }, { "docid": "5c07ef23e8044452a730572f6dbd65fc", "score": "0.5709105", "text": "def _loop(self):\n if self._alarms or self._did_something:\n if self._alarms:\n state = 'alarm'\n timeout = max(0, self._alarms[0][0] - time())\n if self._did_something and (not self._alarms or\n (self._alarms and timeout > 0)):\n state = 'idle'\n timeout = 0\n ready = self._poller.poll(timeout)\n else:\n state = 'wait'\n ready = self._poller.poll()\n\n if not ready:\n if state == 'idle':\n self._entering_idle()\n self._did_something = False\n elif state == 'alarm':\n task = heapq.heappop(self._alarms)\n task.callback()\n self._did_something = True\n\n for queue, _ in ready.items():\n self._queue_callbacks[queue]()\n self._did_something = True", "title": "" }, { "docid": "48c78f28f286b6d148cd1863210d57a7", "score": "0.56987095", "text": "def _do_loop(self):\n # You should not to change this method!\n self._game_loop()\n # Set the timer to go off again\n self.__screen.update()\n self.__screen.ontimer(self._do_loop, 5)", "title": "" }, { "docid": "598c997977826e279b4fab15e697456c", "score": "0.5689138", "text": "def knocker():\n while not stop:\n r = requests.post(setup['server'] + '/api/knock', json=state)\n logger_wrap('got ' + str(r.status_code), 1)\n resp = r.json()\n last = int(resp['id'])\n if last > state['last']:\n r = requests.post(setup['server'] + '/api/fetch', json=state)\n resp = r.json()\n state['last'] = last\n task_queue(resp)\n sleep(setup['knock_timeout'])", "title": "" }, { "docid": "8a254dba854eba4e7d553ec7e29baefa", "score": "0.5681879", "text": "def run():\n finished = False\n while not finished:\n finished = tick()", "title": "" }, { "docid": "52f8b354dfa6b9b671d79c27ad80f947", "score": "0.5676041", "text": "def loop(self):\n\n while not self._stop_event.is_set():\n self.process_events()\n time.sleep(0)\n ref_time = time.time()\n if self.check_events():\n self._sleep(ref_time)\n self.read_events()", "title": "" }, { "docid": "4e86640af2207a99237181aa60b7bd23", "score": "0.56728786", "text": "def cancel(self):\n self.queue.cancel()", "title": "" }, { "docid": "cb2691b50ec80cb99baa5a9660a97ffe", "score": "0.5664245", "text": "def run(self):\n while not self.kill_received:\n # get a task from the queue\n try:\n value = self.work_queue.get_nowait()\n except Queue.Empty:\n break\n\n #do some calculations here\n result = self.calculate(value)\n \n # store the result\n self.result_queue.put(result)", "title": "" }, { "docid": "47779e7eec61258229a36bde719e5dfb", "score": "0.5661941", "text": "async def loop(self, ctx, times:int, *, command):\n\t\ttry:\n\t\t\tmsg = copy.copy(ctx.message)\n\t\t\tmsg.content = command\n\t\t\tfor i in range(times):\n\t\t\t\tawait self.bot.process_commands(msg, command.split()[0][1:], '.')\n\t\texcept Exception as e:\n\t\t\tawait self.bot.say(code.format(type(e).__name__ + ': ' + 
str(e)))", "title": "" }, { "docid": "d16f69f89ad141c027d876a653cbe7bc", "score": "0.56611943", "text": "def terminate(self): # pragma: no cover ; not tested / running over multiprocessing\n\n self.loop = False\n self._terminate()", "title": "" }, { "docid": "d16f69f89ad141c027d876a653cbe7bc", "score": "0.56611943", "text": "def terminate(self): # pragma: no cover ; not tested / running over multiprocessing\n\n self.loop = False\n self._terminate()", "title": "" }, { "docid": "7b00567cc19ce5cb741a1ead406a6f1c", "score": "0.5651427", "text": "def run_loop(self):\n while not rospy.is_shutdown():\n #functions to repeat until the node is closed\n if self.stop_asr == False:\n self.listener1_pub.publish(self.listen)\n time.sleep(self.duration*0.8)\n self.listener2_pub.publish(self.listen)\n time.sleep(self.duration*0.8)\n self.listener3_pub.publish(self.listen)\n time.sleep(self.duration*0.8)\n self.listener4_pub.publish(self.listen)\n time.sleep(self.duration*0.8)", "title": "" }, { "docid": "f71c9ecee006acd891a46d9ee4ea5eeb", "score": "0.56302637", "text": "def loop(self) -> Result:\n\n # run all the registered preloop hooks\n for func in self._preloop_hooks:\n func()\n\n # enter the command loop\n while True:\n if self.input_queue:\n # we have enqueued commands, use the first one\n line = self.input_queue.pop(0)\n else:\n try:\n line = input(self.render_prompt())\n except EOFError:\n result = self.eof()\n if result.stop:\n break\n else:\n continue\n\n if line == '':\n continue\n\n # Run the command along with all associated pre and post hooks\n try:\n result = self.do(line)\n if result.stop:\n break\n except CommandNotFound as err:\n self.werr(\"{}: command not found\\n\".format(err.statement.command))\n\n # run all the registered postloop hooks\n for func in self._postloop_hooks:\n func()\n\n return result", "title": "" }, { "docid": "a941f4f811f01191f109e1918062888f", "score": "0.56299806", "text": "async def _process_command_queue(self):\n\n assert self.command_queue\n\n while True:\n cmd = await self.command_queue.get()\n\n log_header = LOG_HEADER.format(cmd=cmd)\n\n if cmd.status != CommandStatus.READY:\n if cmd.status != CommandStatus.CANCELLED:\n can_log.error(\n f\"{log_header} command is not ready \"\n f\"(status={cmd.status.name!r})\"\n )\n cmd.cancel()\n continue\n\n try:\n self.send_messages(cmd)\n except EmptyPool:\n if cmd.positioner_ids == [0]:\n # We'll ignore this case silently since generally this happens only\n # with GET_FIRMWARE or GET_STATUS that can usually be delayed.\n loop = asyncio.get_event_loop()\n loop.call_later(1, self.command_queue.put_nowait, cmd)\n continue\n except jaeger.JaegerError as ee:\n can_log.error(f\"found error while getting messages: {ee}\")\n continue", "title": "" }, { "docid": "5e64526f69a710f459fa884f5bb9689a", "score": "0.5628153", "text": "async def run_forever(self):\n while True:\n await self.run()\n await asyncio.sleep(self.delay)", "title": "" }, { "docid": "84aec9b0555d13d14f862df4da0179eb", "score": "0.5626909", "text": "def send_loop(self, port, queue):\n \n # Grab messages off the queue as fast as possible.\n while True:\n congadata = queue.get()\n \n # Set up sending socket.\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.connect((congadata.address, port))\n \n # Send the data and close the socket.\n sock.send(congadata.data)\n sock.shutdown()\n sock.close()\n \n return", "title": "" }, { "docid": "162b31bcd1561ba26ddc43443f5dc2a5", "score": "0.56267095", "text": "def process_queue(self):\n while True:\n msg = 
self.queue.get()\n if msg == \"QUIT\":\n self.running = False\n break\n (filename, data) = msg\n storage.serialize_data(filename, data)", "title": "" }, { "docid": "2bbc1718d9c61670e25ad5f2bb7b10d6", "score": "0.5613198", "text": "def stop(self):\n self.loop.stop()", "title": "" }, { "docid": "ca6e4e6cf0245cd4008204e04bc4c806", "score": "0.56096846", "text": "def _actually_run_forever(self): # pragma: no cover\n self.loop.run_forever()", "title": "" }, { "docid": "d91808a6386294026e219526e706554d", "score": "0.5609507", "text": "def requeue(self):\n\n p = self.CPU.getProcess()\n down = self.CPU.bottomY()-20\n left = self.queue.backX() - 90\n up = self.queue.topY()\n right = self.queue.getEndPtr()\n\n # Move the process down out of the CPU\n if ((p.topY() < down) and (not self.movingUp)):\n if (p.bottomY() >= self.CPU.bottomY()):\n p.inCPU = False\n p.moveDown()\n\n # Move the process to the left after it has been removed from the queue\n if ((p.topY() >= down) and (not self.movingUp)):\n if (p.backX() > left):\n p.moveLeft()\n\n # Move process up inline with queue after moving it behind the queue\n if ((p.backX() <= left) and (not self.movingRight)):\n self.movingUp=True\n dist = p.topY() - up\n if (dist > p.stepSize):\n p.moveUp()\n else:\n p.moveUp(dist)\n\n if (p.topY() == up):\n self.movingRight = True\n dist = right - p.frontX()\n if (dist > p.stepSize):\n p.moveRight()\n else:\n p.moveRight(dist)\n self.queue.enqueue(p)\n self.CPU.setProcess(None)\n self.CPU.lock = False\n self.movingRight = False\n self.movingUp = False \n if (self.spawnProcess() == 0):\n self.state = \"dequeue\"", "title": "" }, { "docid": "087a7545daf61e9aa34ef088adea0692", "score": "0.5608679", "text": "def game_loop(self):\n while self.run:\n # cleanup any zombie threads\n self.cleanup_wait_thread()\n # get msgs from socket\n if not self.msgs:\n self.recv_msgs()\n msg = self.get_msg()\n # process msgs\n while msg:\n self.process_msg(msg)\n msg = self.get_msg()", "title": "" }, { "docid": "f2c866e5adea36b203eb221159e179e4", "score": "0.5595649", "text": "def _actor_loop(self):\r\n try:\r\n self.on_start()\r\n except Exception:\r\n self._handle_failure(*_sys.exc_info())\r\n\r\n while not self.actor_stopped.is_set():\r\n message = self.actor_inbox.get()\r\n reply_to = None\r\n try:\r\n reply_to = message.pop('pykka_reply_to', None)\r\n response = self._handle_receive(message)\r\n if reply_to:\r\n reply_to.set(response)\r\n except Exception:\r\n if reply_to:\r\n _logger.debug(\r\n 'Exception returned from %s to caller:' % self,\r\n exc_info=_sys.exc_info())\r\n reply_to.set_exception()\r\n else:\r\n self._handle_failure(*_sys.exc_info())\r\n try:\r\n self.on_failure(*_sys.exc_info())\r\n except Exception:\r\n self._handle_failure(*_sys.exc_info())\r\n except BaseException:\r\n exception_value = _sys.exc_info()[1]\r\n _logger.debug(\r\n '%s in %s. Stopping all actors.' 
%\r\n (repr(exception_value), self))\r\n self._stop()\r\n _ActorRegistry.stop_all()\r\n\r\n while not self.actor_inbox.empty():\r\n msg = self.actor_inbox.get()\r\n reply_to = msg.pop('pykka_reply_to', None)\r\n if reply_to:\r\n if msg.get('command') == 'pykka_stop':\r\n reply_to.set(None)\r\n else:\r\n reply_to.set_exception(_ActorDeadError(\r\n '%s stopped before handling the message' %\r\n self.actor_ref))", "title": "" }, { "docid": "51fb27b7fa78c40ba1267dd705fd83ba", "score": "0.5595124", "text": "def run_next( self ):\n cnt = 0\n while 1:\n uci_wrapper = self.queue.get()\n uci_state = uci_wrapper.get_uci_state()\n if uci_state is self.STOP_SIGNAL:\n return\n try:\n if uci_state==uci_states.NEW:\n self.create_uci( uci_wrapper )\n elif uci_state==uci_states.DELETING:\n self.delete_uci( uci_wrapper )\n elif uci_state==uci_states.SUBMITTED:\n self.start_uci( uci_wrapper )\n #self.dummy_start_uci( uci_wrapper )\n elif uci_state==uci_states.SHUTTING_DOWN:\n self.stop_uci( uci_wrapper )\n elif uci_state==uci_states.SNAPSHOT:\n self.snapshot_uci( uci_wrapper )\n elif uci_state==uci_states.ADD_STORAGE:\n self.add_storage_to_uci( uci_wrapper )\n except:\n log.exception( \"Uncaught exception executing cloud request.\" )\n cnt += 1", "title": "" }, { "docid": "2530f8f8ec8898c1d7eb1f3ce086777e", "score": "0.5587913", "text": "def run(self):\n\t\twhile True:\n\t\t\ttime.sleep(1)", "title": "" }, { "docid": "e2c58f1bbac1763031e83633e14fb936", "score": "0.55852646", "text": "async def _consume_loop():\n while True:\n message = await channel_layer.receive(channel)\n if dry_run:\n continue\n if message.get(\"type\", {}) == \"_resolwe_manager_quit\":\n break\n message.update(scope)\n await app.send_input(message)", "title": "" }, { "docid": "1ea8e46d13df94e04d6926e8c7108bd9", "score": "0.557806", "text": "def pop(self):\n self.bag=self.queue.pop()", "title": "" } ]
e0530609128272644ed30911328ec071
Segments the input frame using a trained SVM. This function requires that the SVM classifier is already available. Use the `ml.classifier.trainClassifiers()` function to create it.
[ { "docid": "7b5213ce699472c14495cebf02ccc3f6", "score": "0.5688241", "text": "def locateBricksSvm(self, frame, depth, hsv=False, scale=0.25):\n\n # check if the SVM classifier is available\n if self.svm_clf is None:\n self.logger.error('The SVM classifier is None; cannot do segmentation.')\n return None\n # check that the input frame has 3 channels (BGR)\n if len(frame.shape) != 3 or frame.shape[2] != 3: \n self.logger.error('The frame input does not contain 3 channels.')\n return None\n # make sure that the scaler is in the range (0,1]\n if 0 >= scale > 1:\n self.logger.error(('The argument `scale` is expected to be in the range'\n ' (0,1], but it was {}.').format(scale))\n return None\n # check that the depth frame has 1 channel\n if len(depth.shape) != 2:\n self.logger.error('The depth input does not contain a single channel.')\n return None\n \n # convert and scale colour input\n frame_hsv = cv.cvtColor(frame, cv.COLOR_BGR2HSV) if not hsv else frame\n frame_hsv = cv.resize(frame_hsv, (int(frame.shape[1]*scale),int(frame.shape[0]*scale)))\n frame_hsv = vision.imgproc.normalise(frame_hsv)\n # scale depth input\n depth_ = cv.resize(depth, (int(depth.shape[1]*scale),int(depth.shape[0]*scale)))\n depth_ = vision.imgproc.normalise(depth_,mode='depth')\n # combine colour and depth information\n features = np.hstack([frame_hsv.reshape(-1,3),depth_.reshape(-1,1)])\n\n # perform SVM based segmentation\n pred = self.svm_clf.predict(features) * 255\n pred = pred.reshape(frame_hsv.shape[:2]).astype(np.uint8)\n pred = cv.resize(pred, (frame.shape[1], frame.shape[0]))\n\n # morphology\n kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE, (3,3))\n closing = cv.morphologyEx(pred, cv.MORPH_CLOSE, kernel)\n \n # generate final mask based on connected component analysis\n mask = vision.imgproc.connectedComponentsBasedFilter(closing)\n\n return mask", "title": "" } ]
[ { "docid": "3e67222fc58ff68145d7ff5d86d4d6bc", "score": "0.6459603", "text": "def train_svm():\n\n X, y = load_training_set()\n linear_svm.fit(X, y.flatten())", "title": "" }, { "docid": "5921c2cdf30cfcbc2db0faa0a513742d", "score": "0.634443", "text": "def train_svm(X, y):\n svm = SVC(C=1000.0, gamma=0.0, kernel='rbf')\n svm.fit(X, y)\n return svm", "title": "" }, { "docid": "11bfc44971e34a78b24c5c58b053dd71", "score": "0.6318526", "text": "def train_svm(X, y):\n svm = SVC(C=5000.0, gamma=0.0, kernel='rbf')\n svm.fit(X, y)\n return svm", "title": "" }, { "docid": "043d7800189f397c5ed5b5168868daed", "score": "0.6309719", "text": "def train_svm(X, y):\n svm = SVC(C=1000000.0, gamma=0.0, kernel='rbf')\n svm.fit(X, y)\n return svm", "title": "" }, { "docid": "7445bf90219e37ef7ca81aed16fa6da8", "score": "0.6281207", "text": "def train_SVM(X_train: pd.DataFrame, y_train: pd.Series, parameters: dict) -> SVC:\n clf_SVM = SVC(**parameters[\"models\"][\"SVM\"])\n clf_SVM.fit(X_train, y_train)\n return clf_SVM", "title": "" }, { "docid": "9604983f9642b167b587e6aee093ee16", "score": "0.62616223", "text": "def train_svm(X, y):\n svm = SVC(C=1000000.0, gamma='auto', kernel='rbf')\n svm.fit(X, y)\n return svm", "title": "" }, { "docid": "d5a7405ec4103b7b5a4ff6715202b536", "score": "0.60065985", "text": "def svc(self, X, base=True):\r\n X_train, X_test, y_train, y_test = train_test_split(X, self.y, random_state=1, test_size=0.25)\r\n \r\n kwargs = {'max_iter':1500,'kernel':'rbf','tol':1e-3,'C':5.7,'decision_function_shape':'ovr', 'degree':1,'gamma':'scale','coef0':1, 'probability':True} if not base else {}\r\n \r\n classificador = SVC(**kwargs)\r\n \r\n classificador.fit(X_train, y_train.ravel())\r\n \r\n previsoes = classificador.predict(X_test)\r\n \r\n score = classificador.score(X_test, y_test)\r\n \r\n print(f'SVC Support Vector Classifier - Score = {score}')\r\n \r\n previsoes = classificador.predict(X_test)\r\n \r\n return classificador", "title": "" }, { "docid": "c5d1b0ddb4a3395105aece7e68a416ee", "score": "0.5991803", "text": "def run_SVM_baseline(word2vec_src):\n # Create a subplot with 1 row and 2 columns\n print(\"# word2vec:\", word2vec_src)\n clf = svm.SVC(kernel=\"rbf\", gamma=0.005)\n word2vec_model = gensim.models.Word2Vec.load(word2vec_src)\n data = PaperData(word2vec=word2vec_model)\n train_pd = load_vec(data, data.train_data, use_pkl=False)\n test_pd = load_vec(data, data.test_data, use_pkl=False)\n train_X = train_pd.loc[:, \"Output\"].tolist()\n train_Y = train_pd.loc[:, \"LinkTypeId\"].tolist()\n test_X = test_pd.loc[:, \"Output\"].tolist()\n test_Y = test_pd.loc[:, \"LinkTypeId\"].tolist()\n start = timeit.default_timer()\n clf.fit(train_X, train_Y)\n stop = timeit.default_timer()\n predicted = clf.predict(test_X)\n print(metrics.classification_report(test_Y, predicted,\n labels=[\"1\", \"2\", \"3\", \"4\"],\n digits=3))\n cm=metrics.confusion_matrix(test_Y, predicted, labels=[\"1\", \"2\", \"3\", \"4\"])\n print(\"accuracy \", get_acc(cm))\n print(\"Model training time: \", stop - start)", "title": "" }, { "docid": "4e6b6466d2107cd2d4b6ddbfdd9a138c", "score": "0.59420794", "text": "def train_classifier():\n features, labels = get_preprocessed_data()\n print(\"Number of samples: {}\".format(features.shape[0]))\n print(\"Number of features: {}\".format(features.shape[1]))\n\n train_features, test_features, train_labels, test_labels = train_test_split(features, labels,\n test_size=0.2,\n random_state=8)\n print(\"Training classifier...\")\n clf = SVC()\n clf.fit(train_features, 
train_labels)\n\n # Store trained classifier\n with open(\"clf.pkl\", \"wb\") as file:\n pickle.dump(clf, file, protocol=4)\n\n # Report score on test data\n score = clf.score(test_features, test_labels)\n print(\"Classifier score on test data: {:.4f}\".format(score))", "title": "" }, { "docid": "d30b00083851968a953e18c96be845ef", "score": "0.5814612", "text": "def train():\n features, labels = __init__.load_data('train')\n \n vectorizer = text.CountVectorizer(decode_error='ignore', stop_words='english')\n transformer = text.TfidfTransformer()\n \n classifier = svm.SVR(kernel='sigmoid', gamma='scale')\n \n # Serializes the processing steps that would be required of the above.\n text_clf = pipeline.Pipeline(steps=[('vect', vectorizer),\n ('tfidf', transformer),\n ('clf-svr', classifier)])\n \n start = time.time()\n text_clf.fit(features, labels)\n print 'Training time:\\t%1.4f seconds' % (time.time() - start)\n \n __init__.evaluate(text_clf, features, labels)\n\n return text_clf", "title": "" }, { "docid": "71629e2177999f105eb4bd896cf3a9e5", "score": "0.58077735", "text": "def train(self, inputs, targets):\n self.svm.fit(inputs, targets)", "title": "" }, { "docid": "fed48481b253ed46182d4444cd1a43b5", "score": "0.5768474", "text": "def train_svm(training_points, kernel_fn=dot_product, max_iter=500,\n show_graph=True, animate=True, animation_delay=0.5, manual_animation=False):\n # Define alias for kernel function, converting Points to coordinates just in case\n K = lambda p1, p2: kernel_fn(p1.coords, p2.coords)\n\n # Initialize SVM\n svm = SupportVectorMachine([0,0], 0, training_points, [])\n\n if show_graph:\n if not manual_animation:\n update_svm_plot = create_svm_graph(training_points)\n\n if (animate and animation_delay > 0) or manual_animation:\n # Keep track of last update time\n last_update_time = time()\n\n b = 0\n iteration = 0\n\n while iteration < max_iter:\n\n # Keep track of current alpha values\n old_alphas = map(lambda pt: pt.alpha, training_points)\n\n # Two nested summations, as in lecture\n for i in training_points:\n for j in training_points:\n\n # If the points are the same or have the same coordinates, skip this pair\n if i.name == j.name or i.coords == j.coords:\n continue\n\n # Compute lower and upper bounds on j.alpha\n if i.classification == j.classification:\n if i.alpha == 0 and j.alpha == 0:\n # Skip this pair, because they are both non-SVs of the same class,\n # so no need to update their alphas or make one become a SV\n continue\n lower_bound = 0\n upper_bound = i.alpha + j.alpha\n else:\n lower_bound = max(0, j.alpha - i.alpha)\n upper_bound = INF\n\n # Compute current error of alpha_i and alpha_j\n error_i = reduce(lambda total,pt: total + (pt.classification * pt.alpha * K(i,pt) + b), training_points, 0) - i.classification\n error_j = reduce(lambda total,pt: total + (pt.classification * pt.alpha * K(j,pt) + b), training_points, 0) - j.classification\n\n # Store old alpha values before updating\n old_alpha_i = i.alpha\n old_alpha_j = j.alpha\n\n # Update j.alpha, but keep it between lower and upper bounds\n n = 2 * K(i,j) - K(i,i) - K(j,j) # Note: if K is dot_product, n = -||i-j||^2\n j.alpha = old_alpha_j - ( j.classification * (error_i - error_j) / float(n) )\n if j.alpha > upper_bound:\n j.alpha = upper_bound\n elif j.alpha < lower_bound:\n j.alpha = lower_bound\n\n # If j.alpha hasn't changed *at all*, continue\n if j.alpha == old_alpha_j:\n continue\n\n # Update i.alpha, but ensure it stays non-negative\n i.alpha = max(0, old_alpha_i + (i.classification * 
j.classification) * (old_alpha_j - j.alpha))\n\n if show_graph and ((animate and animation_delay > 0) or manual_animation):\n # Store old values\n old_w = svm.w[:]\n old_support_vectors = svm.support_vectors[:]\n old_b = svm.b\n\n # Update SVM based on alphas\n update_svm_from_alphas(svm) # Note: kernel_fn is hardcoded as dot_product\n\n # Update b from SVM\n b = svm.b\n\n if show_graph and (animate or manual_animation):\n skip_update = False\n if animation_delay > 0 or manual_animation:\n # If values have not changed perceptibly, skip this update\n if 0 in [b, old_b] and b != old_b:\n pass\n elif (map(lambda sv: sv.name, svm.support_vectors) == map(lambda sv: sv.name, old_support_vectors)\n and ((b==0 and old_b==0 and list_approx_equal(svm.w, old_w, 0.001))\n or list_approx_equal(scalar_mult(1./b, svm.w), scalar_mult(1./old_b, old_w), 0.001))):\n skip_update=True\n\n if skip_update:\n # Set values back to old values as a baseline for whether to skip next update\n svm.w = old_w[:]\n svm.b = old_b\n svm.support_vectors = old_support_vectors[:]\n else:\n if manual_animation:\n # Recreate graph and block further execution\n create_svm_graph(training_points)(svm, True)\n # When user closes graph window, update timer to current time\n last_update_time = time()\n else:\n while time() - last_update_time < animation_delay:\n pass\n last_update_time = time()\n update_svm_plot(svm)\n\n iteration += 1\n\n # If alpha values have not changed *at all*, cease training\n if old_alphas == map(lambda pt: pt.alpha, training_points):\n break\n\n print '# iterations:', iteration\n\n # Compute final w, b, and SVs based on alphas\n update_svm_from_alphas(svm) # Note: kernel_fn is hardcoded as dot_product\n\n # Check training\n misclassified = misclassified_training_points(svm)\n print \"SVM with decision boundary %.3f*x + %.3f*y + %.3f >= 0 misclassified %i points.\" % (svm.w[0], svm.w[1], svm.b, len(misclassified))\n\n if show_graph:\n if manual_animation:\n print 'Displaying final graph for trained SVM'\n update_svm_plot = create_svm_graph(training_points)\n # Update graph with final values\n update_svm_plot(svm, final_update=True)\n\n # Return the trained SVM\n return svm", "title": "" }, { "docid": "b786f4ca1b32a706d5f57e178133c631", "score": "0.57190347", "text": "def svm_prediction(peptides, job_id,\n input_train=\"SVM_POS_NEG.fasta\"):\n\n print(\"Begin SVM\")\n\n # from methods import load_sqlite, store_sqlite\n\n global PATH\n global TMP_PATH\n\n # suppress SVM output\n devnull = open(os.devnull, 'w')\n sys.stdout, sys.stderr = devnull, devnull\n\n svm_scores = []\n # query the database\n # for peptide in peptides:\n # try:\n # score = load_sqlite(peptide, method=\"SVM\", unique=True)\n # svm_scores.append(score)\n # except:\n # pass\n\n if len(peptides) == len(svm_scores):\n pass\n else:\n\n # generate a svm input from the peptides\n rand = job_id\n input_svm = \"%s_svm.fasta\" % rand\n output_tmp = open(os.path.join(TMP_PATH, input_svm), \"w\")\n\n count = 0\n for peptide in peptides:\n count += 1\n output_tmp.write(\"> %i label=%s\\n%s\\n\" % (count, 1, peptide))\n for peptide in peptides:\n count += 1\n output_tmp.write(\"> %i label=%s\\n%s\\n\" % (count, -1, peptide))\n output_tmp.close()\n\n # outputs\n model_svm = \"%s_svm_model.txt\" % rand\n\n # train data\n train_data = SequenceData(os.path.join(PATH, input_train), mink=1, maxk=1, maxShift=0,\n headerHandler=svm_process_header)\n train_data.attachKernel('cosine')\n\n cval = 1\n s = SVM(C=cval)\n s.train(train_data)\n 
s.save(os.path.join(TMP_PATH, model_svm))\n\n # load trained SVM\n loaded_svm = loadSVM(os.path.join(TMP_PATH, model_svm), train_data)\n\n # test data\n test_data = SequenceData(os.path.join(TMP_PATH, input_svm), mink=1, maxk=1, maxShift=0,\n headerHandler=svm_process_header)\n test_data.attachKernel('cosine')\n results = loaded_svm.test(test_data)\n\n # print results out\n output_svm = \"%s_svm.txt\" % rand\n results.toFile(os.path.join(TMP_PATH, output_svm))\n\n # load results process output (positives + negatives)\n infile = open(os.path.join(TMP_PATH, output_svm), \"r\")\n inlines = infile.readlines()\n infile.close()\n scores = list()\n for line in inlines:\n line = line.rstrip(\"\\r\\n\")\n try:\n entry = int(line.split(\"\\t\")[0])\n score = float(line.split(\"\\t\")[1])\n label = int(line.split(\"\\t\")[3])\n if label != \"-1\":\n scores.append([entry, score])\n except:\n pass\n\n # order list\n sorted_scores = sorted(scores, key=lambda scores: scores[0])\n\n svm_scores = list()\n for score in sorted_scores:\n svm_score = score[1]\n svm_scores.append(svm_score)\n\n # remove the temporary model files and results\n try:\n os.remove(os.path.join(TMP_PATH, input_svm))\n os.remove(os.path.join(TMP_PATH, model_svm))\n os.remove(os.path.join(TMP_PATH, output_svm))\n except:\n pass\n\n # save the peptides in db\n # for peptide, score in zip(peptides, svm_scores):\n # store_sqlite(peptide, method=\"SVM\", information=score, save=True)\n\n # restore normal output\n sys.stdout = sys.__stdout__\n sys.stderr = sys.__stderr__\n\n print(\"End SVM\")\n return svm_scores", "title": "" }, { "docid": "96d677c737cc2d4b54c442e23280a5ba", "score": "0.569647", "text": "def train_svm_command(model_path: str, data_path: str, **params: dict):\n train_and_save_pipeline(model_path, data_path, LinearSVC, params)", "title": "" }, { "docid": "d82a5cebb56e18089090f6efd75fa8a9", "score": "0.566796", "text": "def train(self, classifier, input_x, input_y, vectorizer=None, model_directory=\"model\"):\n # Replace dataset labels by their corresponding indices\n y = numpy.array([self.class_index[label] for label in input_y])\n\n use_pipeline = isinstance(classifier, Pipeline)\n self.classifier = classifier\n\n if not use_pipeline:\n X = self._vectorize(input_x)\n logger.info(\"Fitting classifier [{}]...\".format(str(self.classifier.__class__.__name__)))\n self.classifier.fit(X, y)\n else:\n logger.info(\"Fitting classifier [{}]...\".format(str(self.classifier.__class__.__name__)))\n self.classifier.fit(input_x, y)\n\n self._save_model(classifier, model_directory)", "title": "" }, { "docid": "63bc968abc0a9fe230484d0a3935be88", "score": "0.5667067", "text": "def multi_class_svm(train_x, train_y, test_x):\n raise NotImplementedError", "title": "" }, { "docid": "3143aa7f934df01553a93b2836933328", "score": "0.5666001", "text": "def run_classification_model():\n # Read data, build features\n path_to_data = \"../data/raw_data.csv\"\n df = pd.read_csv(path_to_data)\n df[\"lemmatized_str\"] = df[\"tokenized_txt\"].apply(\n lambda row: get_clean_sentence(ast.literal_eval(row))\n )\n\n # Using ordinal encoding to transform authors to numeric categories\n encoder = OrdinalEncoder()\n authors = df.loc[:, [\"author\"]].values\n target = encoder.fit_transform(authors)\n target = np.array(list((itertools.chain(*target))))\n\n # Find vectorized sentences, split data\n vectorized_sentences = get_vectorized_sentences(df[\"lemmatized_str\"].values)\n x_train, x_test, y_train, y_test = train_test_split(vectorized_sentences,\n target,\n 
test_size=0.2,\n random_state=42\n )\n\n # Build SVM model w/ linear kernel\n clf = SVC(gamma=0.001, C=100., kernel=\"linear\", random_state=0)\n clf.fit(x_train, y_train)\n\n # Store data and model to disk\n model_data_dict = {\"x_train\": x_train,\n \"x_test\": x_test,\n \"y_train\": y_train,\n \"y_test\": y_test}\n joblib.dump(model_data_dict, \"../output/train_test_data.dat\")\n joblib.dump(clf, \"../output/svm_model.sav\")", "title": "" }, { "docid": "59c8802866c8a309e5a2679c98b7af6c", "score": "0.5663258", "text": "def SupportVectorMachine (self, sampleFeatureMatrix, sampleLabelCodes, classifier_source_folder, classifier_folder_name, SVMKernelType, DST_Enabled):\n \n #Hint: Shuffling training data is not needed for Support Vector Machines, since they are optimization-based methods!\n \n #Making the label vector a 1D array by raveling\n sampleLabelCodes_raveled = sampleLabelCodes.ravel()\n \n #setting the classifier configurations\n CacheSize = 5000\n useProbability = True\n SVMMultiClassComparisonMode = 'ovr'\n classWeight = 'balanced' # None\n SVMKernel = SVMKernelType\n \n #finding the optimal classifier parameters (C and Gamma)\n C_range = np.logspace(-2, 10, 13)\n gamma_range = np.logspace(-9, 3, 13)\n param_grid = dict(gamma = gamma_range, C = C_range)\n CrossValidationSettings = model_selection.KFold(n_splits = 4, shuffle = True)\n SVMToBeOptimized = svm.SVC(kernel = SVMKernel, decision_function_shape = SVMMultiClassComparisonMode, probability = useProbability, cache_size = CacheSize, class_weight = classWeight)\n gridOfClassifiers = model_selection.GridSearchCV(SVMToBeOptimized, param_grid = param_grid, cv = CrossValidationSettings, refit = True, n_jobs = -1, verbose = 1)\n gridOfClassifiers.fit(sampleFeatureMatrix, sampleLabelCodes_raveled)\n \n #defining the SVM classifier\n SVMClassifier = gridOfClassifiers.best_estimator_\n \n #saving the trained classifier\n if (DST_Enabled):\n fileAddress = classifier_source_folder + '/' + classifier_folder_name + '/Dempster-Shafer/SVM.pkl'\n else:\n fileAddress = classifier_source_folder + '/' + classifier_folder_name + '/SVM.pkl'\n \n SVMFile = open(fileAddress, \"wb+\")\n joblib.dump(SVMClassifier, SVMFile)\n \n return", "title": "" }, { "docid": "c4b6c098849a5e6e7c7d857721b1dc53", "score": "0.5634561", "text": "def train_classifier(segments, actual, output_filename):\n clf = svm.SVC()\n \n # specify parameters and distributions to sample from\n param_dist = {'C': expon(scale=100),\n 'gamma': expon(scale=.1),\n 'kernel': ['rbf'],\n 'class_weight':['balanced', None]}\n\n # run randomized search\n n_iter_search = 20\n random_search = RandomizedSearchCV(clf, param_distributions=param_dist,\n n_iter=n_iter_search)\n\n random_search.fit(segments, actual) # this may take time...\n pickle.dump(random_search, open(output_filename, \"wb\"))\n \n return clf", "title": "" }, { "docid": "4658572f6fb2a12c2b798d6c932b8a3a", "score": "0.5629895", "text": "def SVM_feature_extraction(X_train, y_train, X_test):\n clf = svm.LinearSVC()\n clf.fit(X_train, y_train)\n X_train_t = clf.decision_function(X_train)\n X_test_t = clf.decision_function(X_test)\n return (X_train_t,X_test_t)", "title": "" }, { "docid": "0f9fcaea03db89dbbe332dbd5f27d0eb", "score": "0.5602513", "text": "def for_svm(**kwargs):\n return wrap_classifier(svm.SVC, **kwargs)", "title": "" }, { "docid": "98882d618e393bfa8045a9471f4f7555", "score": "0.5578222", "text": "def do_segment(infile):\n complete_flag = False\n try_numbers = 0\n max_attempts = 10\n while not complete_flag:\n # if 
try_numbers == max_attempts:\n # return False\n print(bcolors.HEADER + 'Segmenter Attempt no: {0}'.format(try_numbers) + bcolors.ENDC)\n [tok_f, tag_f, par_f], complete_flag = do_preprocess(infile)\n # if not complete_flag and [tok_f, tag_f, par_f] == [-1, -1, -1]:\n # return False\n if [tok_f, tag_f, par_f] == [-2, -2, -2]:\n return False\n try_numbers += 1\n print (bcolors.OKBLUE + 'Iam not stuck, just extracting features.' + bcolors.ENDC)\n has_one_word = extract_features(tok_f, par_f, tag_f, \"tmp.feat\", \"tmp.words\", edufile=\"tmp.edu\")\n\n if has_one_word != -1:\n # train_model(\"train.seg\")\n pred = apply_model(\"tmp.feat\")\n processLROutput(\"tmp.tok\", \"tmp.words\", pred, \"tmp.edu\")\n\n os.unlink(\"tmp.words\")\n os.unlink(\"tmp.feat\")\n\n return True", "title": "" }, { "docid": "b31abbdcf8dfcd3c3c0bde403f28c91c", "score": "0.5577066", "text": "def svm_classification( madlib_schema, input_table, model_table, parallel, kernel_func, verbose=False, eta=0.1, nu=0.005):\n\n # Output error if model_table already exist\n # if __check_rel_exist(model_table):\n # plpy.error('Table ' + model_table + ' exists; please use a different model table or drop ' + model_table + ' before calling this function.');\n\n # plpy.execute('drop table if exists ' + model_table);\n plpy.execute('create table ' + model_table + ' ( id text, weight float8, sv float8[] ) m4_ifdef(`GREENPLUM', `distributed randomly')'); \n\n plpy.execute('create temp table svm_temp_result ( id text, model ' + madlib_schema + '.svm_model_rec ) m4_ifdef(`GREENPLUM', `distributed randomly')');\n\n if (verbose):\n plpy.info(\"Parameters:\");\n plpy.info(\" * input_table = \" + input_table);\n plpy.info(\" * model_table = \" + model_table);\n plpy.info(\" * parallel = \" + str(parallel));\n plpy.info(\" * eta = \" + str(eta));\n plpy.info(\" * nu = \" + str(nu));\n\n if (parallel) :\n # Learning multiple models in parallel \n\n # Start learning process\n sql = 'insert into svm_temp_result (select \\'' + model_table + '\\' || gp_segment_id, ' + madlib_schema + '.svm_cls_agg(ind, label,\\'' + kernel_func + '\\',' + str(eta) + ',' + str(nu) + ') from ' + input_table + ' group by gp_segment_id)';\n plpy.execute(sql);\n\n # Store the models learned\n numproc_t = plpy.execute('select count(distinct(gp_segment_id)) from ' + input_table);\n numproc = numproc_t[0]['count'];\n plpy.execute('select ' + madlib_schema + '.svm_store_model(\\'svm_temp_result\\', \\'' + model_table + '\\',\\'' + model_table + '\\', ' + str(numproc) + ')');\n\n else :\n # Learning a single model\n\n # Start learning process \n sql = 'insert into svm_temp_result (select \\'' + model_table + '\\', ' + madlib_schema + '.svm_cls_agg(ind, label,\\'' + kernel_func + '\\',' + str(eta) + ',' + str(nu) + ') from ' + input_table + ')';\n plpy.execute(sql);\n\n # Store the model learned\n plpy.execute('select ' + madlib_schema + '.svm_store_model(\\'svm_temp_result\\', \\'' + model_table + '\\', \\'' + model_table + '\\')');\n\n # Retrieve and return the summary for each model learned \n if parallel:\n where_cond = \"position('\" + model_table + \"' in id) > 0 AND '\" + model_table + \"' <> id\";\n else:\n where_cond = \"id = '\" + model_table + \"'\";\n\n summary = plpy.execute(\"select id, (model).inds, (model).cum_err, (model).rho, (model).b, (model).nsvs from svm_temp_result where \" + where_cond);\n\n result = [];\n for i in range(0,summary.nrows()):\n result = result + [(model_table, summary[i]['id'], summary[i]['inds'], summary[i]['cum_err'], summary[i]['rho'], 
summary[i]['b'], summary[i]['nsvs'])];\n\n # Clean up temp storage of models\n plpy.execute('drop table svm_temp_result');\n\n return result;", "title": "" }, { "docid": "6c3788a93fb4508e1e7662236591d2dc", "score": "0.55765706", "text": "def train_SVM(gk, train_graphs, test_graphs, train_labels, test_labels, kernel=RandomWalk):\n\n print(\"Computing training kernel\")\n K_train = gk.fit_transform(train_graphs)\n\n print(\"Computing test kernel\")\n K_test = gk.transform(test_graphs)\n\n print(\"Classifying molecules\")\n clf = SVC(kernel='precomputed')\n\n # Fit on the train Kernel\n clf.fit(K_train, train_labels)\n\n # Predict and test\n y_pred = clf.predict(K_test)\n\n return accuracy_score(test_labels, y_pred)", "title": "" }, { "docid": "23b914f8aabba77bbcfc4c28c20fb55b", "score": "0.5543146", "text": "def SVM(self):\n try:\n return SVC(C = self.C, kernel = self.kernel, gamma = self.gamma, degree = self.degree, coef0 = self.coef0)\n except ValueError:\n return SVC(C = self.C, kernel = self.kernel, gamma = \"auto\", degree = self.degree, coef0 = self.coef0)", "title": "" }, { "docid": "0b6f3037660613b8f0a04ef23451166a", "score": "0.55274874", "text": "def svm_process(self):\n self.ob = self.fit(self.x_train, self.y_train)\n self.y_pred = self.ob.predict(self.x_test)\n self.score = self.ob.score(self.x_test, self.y_test)\n return self.ob", "title": "" }, { "docid": "5642f261cf287439f4fb65264de281cd", "score": "0.5524375", "text": "def fitKernelSVM(self):\n \n self.fitSVM(ker=\"rbf\")", "title": "" }, { "docid": "544ade3a9859addb1e61ada5540dfab0", "score": "0.551118", "text": "def SVM_reg(X, y, kaggle_data):\n print(\"--------------- SVM --------------------\") \n #Splitting the data into test and train sets with ratio (80:20) \n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2) \n #normalizing the data \n normalizer = StandardScaler().fit(X_train) \n X_train_normalized = normalizer.transform(X_train) \n X_test_normalized = normalizer.transform(X_test) \n\n #cross validation process \n svm_rbf = SVR(kernel= 'rbf') \n svm_1 = SVR(kernel= 'linear') \n svm_2 = SVR(kernel='poly', degree=2)\n rbf_score = cross_val_score(svm_rbf, X_train_normalized, y_train, cv=5, scoring=\"neg_mean_absolute_error\") \n svm_1_score = cross_val_score(svm_1, X_train_normalized, y_train, cv=5, scoring=\"neg_mean_absolute_error\") \n svm_2_score = cross_val_score(svm_2, X_train_normalized, y_train, cv=5, scoring=\"neg_mean_absolute_error\") \n print(\"SVM rbf -- out of sample error: \" + str(abs(rbf_score.mean())))\n print(\"SVM degree 1 -- out of sample error: \" + str(abs(svm_1_score.mean())))\n print(\"SVM degree 2 -- out of sample error: \" + str(abs(svm_2_score.mean())))\n \n #Training the best performing model on the training set, 80% of the total data \n svm_regressor = SVR(kernel=\"linear\") \n svm_regressor.fit(X_train_normalized, y_train) \n #Making prediction on the test set, 20% of the total data \n predicted_values_svm = svm_regressor.predict(X_test_normalized)\n MAE_svm = compute_error(predicted_values_svm, y_test) \n print(\"MAE svm_linear: \" + str(MAE_svm))\n \n #for kagggle \n nor = StandardScaler().fit(X) \n X = nor.transform(X) \n kaggle_data = nor.transform(kaggle_data) \n kaggle_svm = SVR(kernel=\"linear\")\n kaggle_svm.fit(X, y) \n predicted_y = kaggle_svm.predict(kaggle_data)\n # Output file location\n file_name = '../Predictions/best.csv'\n # Writing output in Kaggle format\n print('Writing output to ', file_name)\n kaggle.kaggleize(predicted_y, file_name)", "title": 
"" }, { "docid": "e08930ac317a4aa54fdba6735fb9e567", "score": "0.5502803", "text": "def fit(self, X, y):\n assert self.loss in SVMperf.valid_losses, \\\n f'unsupported loss {self.loss}, valid ones are {list(SVMperf.valid_losses.keys())}'\n\n self.svmperf_learn = join(self.svmperf_base, 'svm_perf_learn')\n self.svmperf_classify = join(self.svmperf_base, 'svm_perf_classify')\n self.loss_cmd = '-w 3 -l ' + str(self.valid_losses[self.loss])\n self.c_cmd = '-c ' + str(self.C)\n\n self.classes_ = sorted(np.unique(y))\n self.n_classes_ = len(self.classes_)\n\n local_random = random.Random()\n # this would allow to run parallel instances of predict\n random_code = 'svmperfprocess'+'-'.join(str(local_random.randint(0, 1000000)) for _ in range(5))\n if self.host_folder is None:\n # tmp dir are removed after the fit terminates in multiprocessing...\n self.tmpdir = tempfile.TemporaryDirectory(suffix=random_code).name\n else:\n self.tmpdir = join(self.host_folder, '.' + random_code)\n makedirs(self.tmpdir, exist_ok=True)\n\n self.model = join(self.tmpdir, 'model-'+random_code)\n traindat = join(self.tmpdir, f'train-{random_code}.dat')\n\n dump_svmlight_file(X, y, traindat, zero_based=False)\n\n cmd = ' '.join([self.svmperf_learn, self.c_cmd, self.loss_cmd, traindat, self.model])\n if self.verbose:\n print('[Running]', cmd)\n p = subprocess.run(cmd.split(), stdout=PIPE, stderr=STDOUT)\n if not exists(self.model):\n print(p.stderr.decode('utf-8'))\n remove(traindat)\n\n if self.verbose:\n print(p.stdout.decode('utf-8'))\n\n return self", "title": "" }, { "docid": "a466cbe45d72d315353dc5cbda42d8bc", "score": "0.54911685", "text": "def multi_class_svm(train_x, train_y, test_x):\n clf = LinearSVC(C=0.1, random_state=0)\n\n clf.fit(train_x, train_y)\n\n pred_test_y = clf.predict(test_x)\n\n return pred_test_y", "title": "" }, { "docid": "f24ff7f8edd5d0e2e526fafa57d75b03", "score": "0.54733276", "text": "def svmClassifier(x_train, x_test, y_train, y_test):\n clf = svm.SVC(kernel='rbf') \n clf.fit(x_train, y_train) \n pred = clf.predict(x_test)\n return accuracy_score(y_test, pred)", "title": "" }, { "docid": "96ca19ec00d3d8373fb9d9aa0843b266", "score": "0.5463917", "text": "def train_SVM(self):\n self.method = 'SVM'\n \n parameters = {\n \"kernel\": [\"rbf\"],\n \"C\": [10, 100, 500],\n \"gamma\": [1e-6, 1e-5, 1e-4]\n }\n \n self.svm = GridSearchCV(SVR(), parameters, cv=5, verbose=1, n_jobs=-1)\n self.svm.fit(self.X_train, self.y_train)\n print(self.method, 'best params: ', self.svm.best_params_)\n pred = self.svm.predict(self.X_test)\n test_pred = self.svm.predict(self.df_test.drop(['reduction', 'type'], axis=1))\n \n self.pred[self.method] = test_pred\n \n return pred", "title": "" }, { "docid": "69b41204c011a0fef445edcd45cf3b85", "score": "0.544239", "text": "def optimized_svm_pipeline(self, x_training_data, y_training_data):\n text_clf = skPipeline([('vect', CountVectorizer(ngram_range=(1, 2), lowercase=False, token_pattern=u'(?u)\\\\b\\\\w+\\\\b')),\n ('tfidf', TfidfTransformer(use_idf=True,\n sublinear_tf=True,\n smooth_idf=False)),\n ('clf', SGDClassifier(loss='perceptron', penalty='l2',\n alpha=1e-3, n_iter=5, random_state=42))])\n _ = text_clf.fit(x_training_data, y_training_data)\n\n return text_clf", "title": "" }, { "docid": "983395e058c38c85a69521930dade144", "score": "0.54281694", "text": "def svm(x_train_standard, x_test_standard, y_train, y_test):\n svm = SVC(kernel = \"linear\", random_state = 0, gamma = 1, C = 1) \n \n svm.fit(x_train_standard, y_train)\n y_pred = 
svm.predict(x_test_standard)\n \n accuracy_list, misclassified_list, dc_precision, dc_recall, dc_f_score = evaluateClassifier(y_test, y_pred)\n \n \n weight_array = svm.fit(x_train_standard, y_train).coef_\n intercept = svm.fit(x_train_standard, y_train).intercept_\n\n return accuracy_list, misclassified_list, dc_precision, dc_recall, dc_f_score, y_pred, weight_array, intercept", "title": "" }, { "docid": "ee48c25d7c4cfd637b4693c11831ce27", "score": "0.53843063", "text": "def fitSVM(self, ker=\"linear\",rs=0):\n self.classifier=SVC(kernel=ker,random_state=rs)\n self.classifier.fit(self.X_train,self.y_train)", "title": "" }, { "docid": "dc885990c6cc7487155712d38b0eca34", "score": "0.5351943", "text": "def train_classifier(x, y, clf):\n ss = StandardScaler()\n\n # make sure y is set up correctly\n y = np.array(y).astype(int)\n\n # stitch together preprocessing and model\n model = Pipeline([('scale', ss), ('classify', clf)])\n\n # train\n model.fit(x, y)\n\n return model", "title": "" }, { "docid": "75dc865bd6718329851355aba833d8aa", "score": "0.53381896", "text": "def forward(self, x, scale_factors):\n x0 = self.num_segments_list[0]\n x1 = x0 + self.num_segments_list[1]\n num_segments = x1 + self.num_segments_list[2]\n feat_dim = x.size(1)\n x = x.view(-1, num_segments, feat_dim)\n num_samples = x.size(0)\n scale_factors = scale_factors.view(-1, 2)\n stage_stpp_feats = []\n stage_stpp_feats.extend(self._extract_stage_feature(x[:, :x0, :], self.stpp_stages[0], self.multiplier_list[0], scale_factors[:, 0], num_samples))\n stage_stpp_feats.extend(self._extract_stage_feature(x[:, x0:x1, :], self.stpp_stages[1], self.multiplier_list[1], None, num_samples))\n stage_stpp_feats.extend(self._extract_stage_feature(x[:, x1:, :], self.stpp_stages[2], self.multiplier_list[2], scale_factors[:, 1], num_samples))\n stpp_feat = torch.cat(stage_stpp_feats, dim=1)\n course_feat = x[:, x0:x1, :].mean(dim=1)\n return course_feat, stpp_feat", "title": "" }, { "docid": "64c2582a4c5718de20c9856db20dbe45", "score": "0.5337759", "text": "def perform_svm(self, kernel = 'linear'):\n self.lin_svm = SVC(kernel = kernel, C=1, random_state=0)\n \"\"\"fits three of our species with there respective classes\"\"\"\n x = self.sertosa + self.versicolor + self.virginica\n z = self.class1 + self.class2 + self.class3\n \"\"\"fit or train our data\"\"\"\n \"\"\"to predict a class, type 'self.lin_svm.predict([[2, 2]])' into console\"\"\"\n self.lin_svm.fit(x,z) \n \n \"\"\"begin getting our contoured graph ready\"\"\"\n \"\"\"get the max and min of our x1 and x2 directions\"\"\"\n min_x1, max_x1 = self.ax1.get_xlim() \n min_x2, max_x2 = self.ax1.get_ylim() \n\n X1 = np.linspace(min_x1, max_x1, 20)\n X2 = np.linspace(min_x2, max_x2, 20)\n \n \"\"\"create a meshgird of our X1 and X2 coordinates\"\"\"\n x1mesh, x2mesh = np.meshgrid(X1, X2)\n \n \"\"\"ravel method turns array of arrays into one long, single array\"\"\"\n \"\"\"vstack turns two seperate arrays into 1\"\"\" \n x1_x2 = np.vstack([x1mesh.ravel(), x2mesh.ravel()] )\n \n \"\"\"transpose in order to turn the x1 and x2 arrays into x1,x2 pairs\"\"\"\n x1_x2 = x1_x2.T\n \n # I'm going to need to rehape this back into a the same dimension as the acutal plot\n \"\"\"predict whether all of coordinate pairs are a 0, 1, or a 2\"\"\"\n self.class_values = self.lin_svm.predict(x1_x2)\n \n self.class_values = self.class_values.reshape(x1mesh.shape)\n \"\"\"create empty plot\"\"\"\n self.figure = plt.figure()\n \"\"\"gets current axis\"\"\"\n self.axes = self.figure.gca()\n \"\"\"plot the 
class_values based on x1mesh and x2mesh\"\"\"\n self.axes.contour(x1mesh, x2mesh, self.class_values, levels = [0,1,2])\n \"\"\"plot the scatters for each species\"\"\"\n self.axes.scatter(self.sertosa_df.T[1], self.sertosa_df.T[2], s = 40, c='b', marker = 'D', label='first')\n self.axes.scatter(self.versicolor_df.T[1], self.versicolor_df.T[2], s = 40, c='r', marker = 'o', label='first')\n self.axes.scatter(self.virginica_df.T[1], self.virginica_df.T[2], s = 40, c='k', marker = 'x', label='third')", "title": "" }, { "docid": "1a103a9ba71acda11c4f2e5fa0b69352", "score": "0.5325735", "text": "def svmClassifier(X, y):\n\n temp_cls_ = SVC()\n\n parameters = {\n 'kernel': ('linear', 'rbf'),\n 'C': [1,2,3,4],\n 'degree': [2,3,4],\n 'gamma': ['auto', 'scale'],\n 'random_state' : [0]\n }\n\n param_tuner_ = GridSearchCV(temp_cls_, parameters)\n param_tuner_.fit(X, y)\n cls = SVC(**param_tuner_.best_params_).fit(X, y)\n return cls", "title": "" }, { "docid": "a9cabc082400205eb060015c55ffff5b", "score": "0.53239083", "text": "def sc_linear_svc(input_dict):\n from sklearn.svm import LinearSVC\n\n classifier = LinearSVC(C=float(input_dict[\"C\"]),\n loss=input_dict[\"loss\"],\n penalty=input_dict[\"penalty\"],\n multi_class=input_dict[\"multi_class\"])\n\n return {'classifier': classifier}", "title": "" }, { "docid": "bd8c886397c6db510e13f21e2e83bf37", "score": "0.5297197", "text": "def fit(self, X):\n self.X_train = X\n self.oneclass = OneClassSVM( kernel='rbf', gamma=self.gamma0, nu=self.nu )\n self.oneclass.fit( self.X_train )\n p = self.oneclass.predict( self.X_train )\n \n # Determine which class has more points\n idx = self.__densest_cloud__( self.X_train, p )\n self.main_class, self.outlier_class = idx[0], idx[1]\n \n # Put some of the outliers in the main class of points\n if self.knn > 0:\n p = self.__knn_move_outliers__(p, self.knn, knn_iters=self.knn_iters)\n # Fit a new svm to the labels learned by the one-class SVM and k-nearest neighbors\n # Now fit an SVM to the labels learned by the one-class SVM\n self.svm = SVC( kernel='rbf', C=self.C, gamma=self.gamma1 )\n self.svm.fit( self.X_train, p )\n else:\n self.svm = self.oneclass", "title": "" }, { "docid": "619b5be0b7411c668fff25cf1da117b0", "score": "0.52948725", "text": "def one_vs_rest_svm(train_x, train_y, test_x):\n raise NotImplementedError", "title": "" }, { "docid": "aa607fdf3fcd0aeddbb3d98247a46371", "score": "0.52911794", "text": "def train(self, X, y):\n self.clf.fit(X, y)", "title": "" }, { "docid": "2f0f5087e1144b41f2764d0f7677aba4", "score": "0.52794695", "text": "def for_linear_svm(**kwargs):\n return wrap_classifier(svm.LinearSVC, **kwargs)", "title": "" }, { "docid": "3f379eec934a0f8032e44a26e02e4ebf", "score": "0.52703243", "text": "def one_vs_rest_svm(train_x, train_y, test_x):\n\n clf = LinearSVC(C=0.1, random_state=0)\n\n clf.fit(train_x, train_y)\n\n pred_test_y = clf.predict(test_x)\n\n return pred_test_y", "title": "" }, { "docid": "d5fe78b5050e0021db58a27153c5ac5f", "score": "0.5269472", "text": "def segment(dataset, model_type, batch_size, set_name=None, should_saveimg=True, is_verbose=True):\n assert(dataset in ['ADP', 'VOC2012', 'DeepGlobe_train75', 'DeepGlobe_train37.5'])\n assert(model_type in ['M1', 'M2', 'M3', 'M4', 'M5', 'M6', 'M7', 'X1.7', 'M7', 'M7bg', 'VGG16', 'VGG16bg'])\n assert(os.path.exists(os.path.exists(os.path.join(MODEL_CNN_ROOT, dataset + '_' + model_type))))\n assert(batch_size > 0)\n assert(set_name in [None, 'tuning', 'segtest'])\n assert(type(should_saveimg) is bool)\n assert(type(is_verbose) 
is bool)\n if model_type in ['VGG16', 'VGG16bg']:\n img_size = 321\n else:\n img_size = 224\n sess_id = dataset + '_' + model_type\n model_dir = os.path.join(MODEL_CNN_ROOT, sess_id)\n\n if is_verbose:\n print('Predict: dataset=' + dataset + ', model=' + model_type)\n\n database_dir = os.path.join(os.path.dirname(os.getcwd()), 'database')\n if dataset == 'ADP':\n segment_adp(sess_id, model_type, batch_size, img_size, set_name, should_saveimg, is_verbose)\n return\n elif dataset == 'VOC2012':\n devkit_dir = os.path.join(database_dir, 'VOCdevkit', 'VOC2012')\n fgbg_modes = ['fg', 'bg']\n OVERLAY_R = 0.75\n elif 'DeepGlobe' in dataset:\n devkit_dir = os.path.join(database_dir, 'DGdevkit')\n fgbg_modes = ['fg']\n OVERLAY_R = 0.25\n img_dir = os.path.join(devkit_dir, 'JPEGImages')\n gt_dir = os.path.join(devkit_dir, 'SegmentationClassAug')\n\n out_dir = os.path.join('./out', sess_id)\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n eval_dir = os.path.join('./eval', sess_id)\n if not os.path.exists(eval_dir):\n os.makedirs(eval_dir)\n\n # Load network and thresholds\n mdl = {}\n thresholds = {}\n alpha = {}\n final_layer = {}\n for fgbg_mode in fgbg_modes:\n mdl[fgbg_mode] = build_model(model_dir, sess_id)\n thresholds[fgbg_mode] = load_thresholds(model_dir, sess_id)\n thresholds[fgbg_mode] = np.maximum(np.minimum(thresholds[fgbg_mode], 0), 1 / 3)\n alpha[fgbg_mode], final_layer[fgbg_mode] = get_grad_cam_weights(mdl[fgbg_mode],\n np.zeros((1, img_size, img_size, 3)))\n\n # Load data and classes\n ds = Dataset(data_type=dataset, size=img_size, batch_size=batch_size)\n class_names, seg_class_names = load_classes(dataset)\n colours = get_colours(dataset)\n if 'DeepGlobe' in dataset:\n colours = colours[:-1]\n gen_curr = ds.set_gens[ds.sets[ds.is_evals.index(True)]]\n\n # Process images in batches\n intersects = np.zeros((len(colours)))\n unions = np.zeros((len(colours)))\n confusion_matrix = np.zeros((len(colours), len(colours)))\n gt_count = np.zeros((len(colours)))\n n_batches = len(gen_curr.filenames) // batch_size + 1\n for iter_batch in range(n_batches):\n batch_start_time = time.time()\n if is_verbose:\n print('\\tBatch #%d of %d' % (iter_batch + 1, n_batches))\n start_idx = iter_batch * batch_size\n end_idx = min(start_idx + batch_size - 1, len(gen_curr.filenames) - 1)\n cur_batch_sz = end_idx - start_idx + 1\n\n # Image reading\n start_time = time.time()\n img_batch_norm, img_batch = read_batch(gen_curr.directory, gen_curr.filenames[start_idx:end_idx + 1],\n cur_batch_sz, (img_size, img_size), dataset)\n if is_verbose:\n print('\\t\\tImage read time: %0.5f seconds (%0.5f seconds / image)' % (time.time() - start_time,\n (time.time() - start_time) / cur_batch_sz))\n\n # Generate patch confidence scores\n start_time = time.time()\n predicted_scores = {}\n is_pass_threshold = {}\n for fgbg_mode in fgbg_modes:\n predicted_scores[fgbg_mode] = mdl[fgbg_mode].predict(img_batch_norm)\n is_pass_threshold[fgbg_mode] = np.greater_equal(predicted_scores[fgbg_mode], thresholds[fgbg_mode])\n if is_verbose:\n print('\\t\\tGenerating patch confidence scores time: %0.5f seconds (%0.5f seconds / image)' %\n (time.time() - start_time, (time.time() - start_time) / cur_batch_sz))\n\n # Generate Grad-CAM\n start_time = time.time()\n H = {}\n for fgbg_mode in fgbg_modes:\n H[fgbg_mode] = grad_cam(mdl[fgbg_mode], alpha[fgbg_mode], img_batch_norm, is_pass_threshold[fgbg_mode],\n final_layer[fgbg_mode], predicted_scores[fgbg_mode], orig_sz=[img_size, img_size],\n should_upsample=True)\n H[fgbg_mode] = 
np.transpose(H[fgbg_mode], (0, 3, 1, 2))\n if is_verbose:\n print('\\t\\tGenerating Grad-CAM time: %0.5f seconds (%0.5f seconds / image)' % (time.time() - start_time,\n (time.time() - start_time) / cur_batch_sz))\n\n # Modify fg Grad-CAM with bg activation\n start_time = time.time()\n if dataset == 'VOC2012':\n Y_gradcam = np.zeros((cur_batch_sz, len(seg_class_names), img_size, img_size))\n mode = 'mult'\n if mode == 'mult':\n X_bg = np.sum(H['bg'], axis=1)\n Y_gradcam[:, 0] = 0.15 * scipy.special.expit(np.max(X_bg) - X_bg)\n Y_gradcam[:, 1:] = H['fg']\n elif 'DeepGlobe' in dataset:\n Y_gradcam = H['fg'][:, :-1, :, :]\n if is_verbose:\n print('\\t\\tFg/Bg modifications time: %0.5f seconds (%0.5f seconds / image)' % (time.time() - start_time,\n (time.time() - start_time) / cur_batch_sz))\n\n # FC-CRF\n start_time = time.time()\n if dataset == 'VOC2012':\n dcrf_config = np.array([3 / 4, 3, 80 / 4, 13, 10, 10]) # test (since 2448 / 500 = 4.896 ~= 4)\n elif 'DeepGlobe' in dataset:\n dcrf_config = np.array([3, 3, 80, 13, 10, 10]) # test\n Y_crf = dcrf_process(Y_gradcam, img_batch, dcrf_config)\n if is_verbose:\n print('\\t\\tCRF time: %0.5f seconds (%0.5f seconds / image)' % (time.time() - start_time,\n (time.time() - start_time) / cur_batch_sz))\n elapsed_time = time.time() - batch_start_time\n if is_verbose:\n print('\\t\\tElapsed time: %0.5f seconds (%0.5f seconds / image)' % (elapsed_time, elapsed_time / cur_batch_sz))\n\n if dataset == 'VOC2012':\n for iter_file, filename in enumerate(gen_curr.filenames[start_idx:end_idx + 1]):\n # Load GT segmentation\n gt_filepath = os.path.join(gt_dir, filename.replace('.jpg', '.png'))\n gt_idx = cv2.cvtColor(cv2.imread(gt_filepath), cv2.COLOR_BGR2RGB)[:, :, 0]\n # Load predicted segmentation\n pred_idx = cv2.resize(np.uint8(Y_crf[iter_file]), (gt_idx.shape[1], gt_idx.shape[0]),\n interpolation=cv2.INTER_NEAREST)\n pred_segmask = np.zeros((gt_idx.shape[0], gt_idx.shape[1], 3))\n # Evaluate predicted segmentation\n for k in range(len(colours)):\n intersects[k] += np.sum((gt_idx == k) & (pred_idx == k))\n unions[k] += np.sum((gt_idx == k) | (pred_idx == k))\n confusion_matrix[k, :] += np.bincount(pred_idx[gt_idx == k], minlength=len(colours))\n pred_segmask += np.expand_dims(pred_idx == k, axis=2) * \\\n np.expand_dims(np.expand_dims(colours[k], axis=0), axis=0)\n gt_count[k] += np.sum(gt_idx == k)\n # Save outputted segmentation to file\n if should_saveimg:\n orig_filepath = os.path.join(img_dir, filename)\n orig_img = cv2.cvtColor(cv2.imread(orig_filepath), cv2.COLOR_BGR2RGB)\n imgio.imsave(os.path.join(out_dir, filename.replace('.jpg', '') + '.png'), pred_segmask / 256.0)\n imgio.imsave(os.path.join(out_dir, filename.replace('.jpg', '') + '_overlay.png'),\n (1 - OVERLAY_R) * orig_img / 256.0 +\n OVERLAY_R * pred_segmask / 256.0)\n elif 'DeepGlobe' in dataset:\n for iter_file, filename in enumerate(gen_curr.filenames[start_idx:end_idx + 1]):\n # Load GT segmentation\n gt_filepath = os.path.join(gt_dir, filename.replace('.jpg', '.png'))\n gt_curr = cv2.cvtColor(cv2.imread(gt_filepath), cv2.COLOR_BGR2RGB)\n gt_r = gt_curr[:, :, 0]\n gt_g = gt_curr[:, :, 1]\n gt_b = gt_curr[:, :, 2]\n # Load predicted segmentation\n pred_idx = cv2.resize(np.uint8(Y_crf[iter_file]), (gt_curr.shape[1], gt_curr.shape[0]),\n interpolation=cv2.INTER_NEAREST)\n pred_segmask = np.zeros((gt_curr.shape[0], gt_curr.shape[1], 3))\n # Evaluate predicted segmentation\n for k, gt_colour in enumerate(colours):\n gt_mask = (gt_r == gt_colour[0]) & (gt_g == gt_colour[1]) & (gt_b == 
gt_colour[2])\n pred_mask = pred_idx == k\n intersects[k] += np.sum(gt_mask & pred_mask)\n unions[k] += np.sum(gt_mask | pred_mask)\n confusion_matrix[k, :] += np.bincount(pred_idx[gt_mask], minlength=len(colours))\n pred_segmask += np.expand_dims(pred_mask, axis=2) * \\\n np.expand_dims(np.expand_dims(colours[k], axis=0), axis=0)\n gt_count[k] += np.sum(gt_mask)\n # Save outputted segmentation to file\n if should_saveimg:\n orig_filepath = os.path.join(img_dir, filename)\n orig_img = cv2.cvtColor(cv2.imread(orig_filepath), cv2.COLOR_BGR2RGB)\n orig_img = cv2.resize(orig_img, (orig_img.shape[0] // 4, orig_img.shape[1] // 4))\n pred_segmask = cv2.resize(pred_segmask, (pred_segmask.shape[0] // 4, pred_segmask.shape[1] // 4),\n interpolation=cv2.INTER_NEAREST)\n imgio.imsave(os.path.join(out_dir, filename.replace('.jpg', '') + '.png'), pred_segmask / 256.0)\n imgio.imsave(os.path.join(out_dir, filename.replace('.jpg', '') + '_overlay.png'),\n (1 - OVERLAY_R) * orig_img / 256.0 + OVERLAY_R * pred_segmask / 256.0)\n # Evaluate mIoU and write to .xlsx file\n mIoU = np.mean(intersects / (unions + 1e-7))\n df = pd.DataFrame({'Class': seg_class_names + ['Mean'], 'IoU': list(intersects / (unions + 1e-7)) + [mIoU]},\n columns=['Class', 'IoU'])\n xlsx_path = os.path.join(eval_dir, 'metrics_' + sess_id + '.xlsx')\n df.to_excel(xlsx_path)\n\n # Generate confusion matrix for all classes and write to .png file\n count_mat = np.transpose(np.matlib.repmat(gt_count, len(colours), 1))\n title = \"Confusion matrix\\n\"\n xlabel = 'Prediction' # \"Labels\"\n ylabel = 'Ground-Truth' # \"Labels\"\n xticklabels = seg_class_names\n yticklabels = seg_class_names\n heatmap(confusion_matrix / (count_mat + 1e-7), title, xlabel, ylabel, xticklabels, yticklabels,\n rot_angle=45)\n plt.savefig(os.path.join(eval_dir, 'confusion_' + sess_id + '.png'), dpi=96,\n format='png', bbox_inches='tight')\n\n # Generate confusion matrix for only foreground classes and write to .png file\n title = \"Confusion matrix\\n\"\n xlabel = 'Prediction' # \"Labels\"\n ylabel = 'Ground-Truth' # \"Labels\"\n if dataset == 'VOC2012':\n xticklabels = seg_class_names[1:]\n yticklabels = seg_class_names[1:]\n heatmap(confusion_matrix[1:, 1:] / (count_mat[1:, 1:] + 1e-7), title, xlabel, ylabel, xticklabels, yticklabels,\n rot_angle=45)\n elif 'DeepGlobe' in dataset:\n xticklabels = seg_class_names[:-2]\n yticklabels = seg_class_names[:-1]\n heatmap(confusion_matrix[:-1, :-1] / (count_mat[:-1, :-1] + 1e-7), title, xlabel, ylabel, xticklabels,\n yticklabels,\n rot_angle=45)\n plt.savefig(os.path.join(eval_dir, 'confusion_fore_' + sess_id + '.png'), dpi=96,\n format='png', bbox_inches='tight')\n plt.close()", "title": "" }, { "docid": "96a218eb5d3bc772373885dfbdc8e137", "score": "0.5247434", "text": "def _train(self, train_data, param):\n \n # init container \n svms = {}\n\n # concatenate data\n data = PreparedMultitaskData(train_data, shuffle=False)\n \n # create svm\n svm = shogun_factory.create_initialized_svm(param, data.examples, data.labels)\n \n print \"starting training procedure\" \n \n # train SVM\n svm.train()\n \n print \"training done\"\n \n # use a reference to the same svm several times\n for task_id in train_data.keys():\n svms[task_id] = svm\n \n return svms", "title": "" }, { "docid": "445df76150920329bb59d3787574afca", "score": "0.5245567", "text": "def train_svm(training_data,answer):\n \n # C is the cost to the SVM when it mis-classifies one of your training examples. 
If you increase it, \n # the SVM will try very hard to fit all your data, which may be good if you strongly trust your data.\n # http://www.svms.org/parameters/\n clf = svm.SVC(C=1,probability=True,cache_size=1000) #http://scikit-learn.org/stable/modules/svm.html\n \n time_start = time.time()\n \n clf.fit(training_data,answer)\n \n #2313 seconds to train C=1, cache_size=200\n #2108 seconds to train C=1, cache_size=1000\n #3319 seconds to train C=10, cache_size=1000\n #3300 seconds to train C=100, cache_size=1000\n #14224 seconds to train C=1, cache_size=1000, probability=True\n print 'It took ', time.time()-time_start, ' seconds to train the svm!'\n \n #Save the trained model\n save_model(clf, r'svm')", "title": "" }, { "docid": "07a571fcfc07615ce4c9cdac4773c905", "score": "0.52309364", "text": "def predict_svm(faces, feature_type, transformed=False):\n predict_dataset = list()\n train_file = \"trained_data_files/%s_svm.yml\" % feature_type.lower()\n print(train_file)\n svm = cv2.ml.SVM_load(train_file)\n\n if feature_type == \"HOG\":\n\n if not transformed:\n for image in faces:\n predict_dataset.append(image_transform(image, feature_type))\n\n return svm.predict(np.float32(predict_dataset))[1].ravel()\n\n else:\n return svm.predict(np.float32(faces))[1].ravel()\n\n elif feature_type == \"SURF\":\n if not transformed:\n bow_extract = cv2.BOWImgDescriptorExtractor(surf, matcher)\n\n with open('trained_data_files/surf_svm_bow_pickle.pickle', 'rb') as f:\n dictionary = pickle.load(f)\n\n bow_extract.setVocabulary(dictionary)\n\n for face in faces:\n image_transformed = image_transform(face, feature_type)\n surf_kp = surf.detect(image_transformed)\n bow_sig = bow_extract.compute(image_transformed, surf_kp)\n predict_dataset.extend(bow_sig)\n\n else:\n predict_dataset = faces\n\n return list(map(int, svm.predict(np.float32(predict_dataset))[1].ravel()))\n\n else:\n raise Exception(\"Missing feature type\")", "title": "" }, { "docid": "c312e7bf1f8ca8f0dc19001762d90ac4", "score": "0.5229045", "text": "def _cli(ctx, primary_images, nuclei, output):\n print('Segmenting ...')\n ctx.obj = dict(\n component=Segmentation,\n output=output,\n primary_images=primary_images,\n nuclei=nuclei,\n )", "title": "" }, { "docid": "efb6c5cc32e64c6e9847d7ed411e7c34", "score": "0.5226402", "text": "def detect(dataPath, clf):\r\n # Begin your code (Part 4)\r\n raise NotImplementedError(\"To be implemented\")\r\n # End your code (Part 4)\r", "title": "" }, { "docid": "890e7709224e01433c7891344b5631b9", "score": "0.5210237", "text": "def precompute(self):\n self.subIndex = np.random.permutation(np.arange(self.n_labels))[0:self.n_subsample + 1]\n Xs_sub_l = []\n for k, d in enumerate(self.Xtrains_l):\n Xs_sub_l.append(d[self.subIndex, :])\n\n Y_sub = Y[self.subIndex]\n self.models_single = []\n self.flag_pre_train = False\n ifExists = True\n if self.kern_name == 'rbf':\n svc_kern_name = 'rbf'\n elif self.kern_name == 'linear':\n svc_kern_name = 'linear'\n elif self.kern_name == 'poly':\n svc_kern_name = 'poly'\n else:\n ifExists = False\n print('Initialization:')\n for k, ds in enumerate(Xs_sub_l):\n if self.kern_name == 'rbf' or self.kern_name == 'poly':\n gamma = self.kern_param['gamma']\n if self.kern_name == 'poly':\n try:\n degree = self.kern_param['degree']\n except KeyError:\n degree = 3\n\n svc_model = SVC(kernel=svc_kern_name, gamma=gamma, degree=degree)\n else:\n svc_model = SVC(kernel=svc_kern_name, gamma=gamma)\n elif self.kern_name == 'linear':\n svc_model = LinearSVC()\n else:\n svc_model = 
SVC(kernel=self.kern)\n svc_model.fit(ds, Y_sub)\n self.models_single.append(svc_model)\n\n self.flag_pre_train = True", "title": "" }, { "docid": "2bdd5f9abe8563c1730e2d52f4d691ee", "score": "0.5172296", "text": "def svm_train(X, y, b, alpha, n_samples, n_features, learner, loop, eta,\n max_iter=100, step_probability=0.5):\n if isinstance(X, bstring):\n if n_features is None:\n n_features = 2**17 # the default in sofia-ml TODO: parse file to see\n w = _sofia_ml.train(X, n_features, alpha, max_iter, False,\n learner.value, loop.value, eta.value, step_probability)\n elif isinstance(X, np.ndarray):\n if n_features is None:\n n_features = X.shape[1]\n\n if n_samples is None:\n n_samples = X.shape[0]\n\n w = _sofia_ml.train_fast(np.float64(X), np.float64(y), n_samples, n_features, alpha, max_iter, False,\n learner.value, loop.value, eta.value, step_probability)\n else:\n if n_features is None:\n n_features = X.shape[1]\n\n with tempfile.NamedTemporaryFile() as f:\n datasets.dump_svmlight_file(X, y, f.name, query_id=b)\n w = _sofia_ml.train(f.name, n_features, alpha, max_iter, False,\n learner.value, loop.value, eta.value, step_probability)\n return w", "title": "" }, { "docid": "c93b2e5511c8330e59e56a5ec12b3156", "score": "0.5162297", "text": "def forward(self, x):\n features = self.encoder(x)\n clf = self.classification_head(features[-1])\n decoder_output = self.decoder(features)\n\n if self.deep_supervision:\n final_output = decoder_output[0]\n deep_outputs = decoder_output[1]\n final_mask = self.segmentation_head(final_output)\n masks = []\n for feature, segmentation_head in zip(deep_outputs, self.deep_segmentation_head):\n masks.append(segmentation_head(feature))\n\n if self.clf_head:\n return final_mask, masks, clf\n return final_mask, masks\n else:\n final_output = decoder_output\n mask = self.segmentation_head(decoder_output)\n\n if self.clf_head:\n return mask, clf\n return mask", "title": "" }, { "docid": "0be4de6021dcf15502b430c8a85e6fae", "score": "0.51544774", "text": "def __init__(self):\n data = numpy.loadtxt(open('result.csv', 'rb'), delimiter=',', dtype='str')\n #Support_Vector_Machine\n #self.svm = svm.SVC()\n # Decision tree\n self.svm = tree.DecisionTreeClassifier()\n self.svm.fit(data[:, 0:3], data[:, 3])", "title": "" }, { "docid": "036689cd24e41d0a7fa9ad08811dba20", "score": "0.5148325", "text": "def train_svm_classifer(features, labels, model_output_path):\n\t# save 20% of data for performance evaluation\n\tX_train, X_test, y_train, y_test = cross_validation.train_test_split(features, labels, test_size = 0.2)\n\n\tparam = [\n\t\t{\n\t\t\t\"kernel\": [\"linear\"],\n\t\t\t\"C\": [1, 10, 100, 1000]\n\t\t},\n\t\t{\n\t\t\t\"kernel\": [\"rbf\"],\n\t\t\t\"C\": [1, 10, 100, 1000],\n\t\t\t\"gamma\": [1e-2, 1e-3, 1e-4, 1e-5]\n\t\t}\n\t]\n\n\t# request probability estimation\n\tsvm = SVC(probability = True)\n\n\t# 10-fold cross validation, use 4 thread as each fold and each parameter set can be train in parallel\n\tclf = grid_search.GridSearchCV(svm, param,\n\t cv = 10, n_jobs = 4, verbose = 3)\n\n\tclf.fit(X_train, y_train)\n\n\tif os.path.exists(model_output_path):\n\t\tjoblib.dump(clf.best_estimator_, model_output_path)\n\telse:\n\t\tprint(\"Cannot save trained svm model to {0}.\".format(model_output_path))\n\n\tprint(\"\\nBest parameters set:\")\n\tprint(clf.best_params_)\n\n\ty_predict = clf.predict(X_test)\n\n\tlabels = sorted(list(set(labels)))\n\tprint(\"\\nConfusion matrix:\")\n\tprint(\"Labels: {0}\\n\".format(\",\".join(labels)))\n\tprint(confusion_matrix(y_test, 
y_predict, labels = labels))\n\n\tprint(\"\\nClassification report:\")\n\tprint(classification_report(y_test, y_predict))", "title": "" }, { "docid": "5ab8cb3b70b053405bc3be1d8d55f036", "score": "0.5131946", "text": "def predict_SVC(self, dataset_X=None):\n # Check that run_SVC() has already been called\n if self.classifier_SVC is None:\n print(\"The SVC model seems to be missing. Have you called\",\n \"run_SVC() yet?\")\n return None\n\n # Try to make the prediction\n # Handle exception if dataset_X isn't a valid input\n try:\n y_prediction = self.classifier_SVC.predict(dataset_X)\n except Exception as e:\n print(\"The SVC model failed to run.\",\n \"Check your inputs and try again.\")\n print(\"Here is the exception message:\")\n print(e)\n return None\n\n print(\"\\nSVC Predictions:\\n\", y_prediction, \"\\n\")\n return y_prediction", "title": "" }, { "docid": "90c4d35638be94d396fc16b097aeb56a", "score": "0.50985163", "text": "def Train(dataRep, labels, params, type):\r\n c = params['C']\r\n if type == 'Linear':\r\n model = LinearSVC(dual=False, C=c, multi_class='ovr')\r\n model.fit(dataRep, labels)\r\n else:\r\n model = m_class_SVM_train(dataRep, labels, params, type)\r\n return model", "title": "" }, { "docid": "04f9a28e0ff6c5740a3673d65c7f55a9", "score": "0.5091695", "text": "def segmentDetectorFinal(input_img, dataset=None, lineWidth=2):\n if not dataset is None: # particular dataset used\n if dataset == 'sudoku':\n img_edges = ed.canny_median_blur(input_img, downsize=False)\n lines, img_segment, img_points = LSD.lsd_alg(input_img,\n line_width=lineWidth,\n fuse=True,\n dTheta=1 / 360 * np.pi * 2,\n dRho=8,\n maxL=4)\n lines = lines.reshape((lines.shape[0], 1, lines.shape[1]))\n\n # Add segment detected to the edges image\n img_edges_segment = cv2.cvtColor(img_edges, cv2.COLOR_GRAY2BGR)\n if lines is not None:\n for i in range(0, len(lines)):\n line = lines[i][0]\n cv2.line(img_edges_segment, (line[0], line[1]),\n (line[2], line[3]), (0, 0, 255), lineWidth)\n\n return img_edges, lines, img_edges_segment, img_segment\n\n if dataset == 'pcb':\n img_edges = ed.canny_gaussian_blur_downsize(input_img,\n lo_thresh=150,\n hi_thresh=200,\n sobel_size=3,\n i_gaus_kernel_size=5,\n gauss_center=1)\n\n lines, img_edges_segment, img_segment = hough(img_edges, 1,\n np.pi / 180,\n thresh=10,\n minLineLen=7,\n maxLineGap=3,\n fuse=True,\n dTheta=3 / 360 * np.pi * 2,\n dRho=3, maxL=3,\n lineWidth=lineWidth)\n\n return img_edges, lines, img_edges_segment, img_segment\n\n if dataset == 'soccer':\n img_edges = ed.canny_median_blur(input_img, downsize=False)\n hsv = cv2.cvtColor(input_img, cv2.COLOR_BGR2HSV)\n low = np.array([30, 0, 150])\n upp = np.array([90, 70, 255])\n mask = cv2.inRange(hsv, low, upp)\n img_mask = cv2.bitwise_and(input_img, input_img, mask=mask)\n ret = cv2.cvtColor(img_mask, cv2.COLOR_HSV2BGR)\n # LSD\n lines, img_segment, img_points = LSD.lsd_alg(ret)\n lines = lines.reshape((lines.shape[0], 1, lines.shape[1]))\n lines = np.around(lines).astype(int)\n\n # Add segment detected to the edges image\n img_edges_segment = cv2.cvtColor(img_edges, cv2.COLOR_GRAY2BGR)\n if lines is not None:\n for i in range(0, len(lines)):\n line = lines[i][0]\n cv2.line(img_edges_segment, (line[0], line[1]),\n (line[2], line[3]), (0, 0, 255), lineWidth)\n\n return img_edges, lines, img_edges_segment, img_segment\n\n if dataset == 'road': \n img_edges = ed.edgesDetectionFinal(input_img)\n lines, img_segment, img_points = LSD.lsd_alg(input_img,\n line_width=lineWidth,\n fuse=True,\n dTheta=1 / 360 * np.pi 
* 2,\n dRho=5,\n maxL=4)\n lines = lines.reshape((lines.shape[0], 1, lines.shape[1]))\n lines = np.around(lines).astype(int)\n\n # Add segment detected to the edges image\n img_edges_segment = cv2.cvtColor(img_edges, cv2.COLOR_GRAY2BGR)\n if lines is not None:\n for i in range(0, len(lines)):\n line = lines[i][0]\n cv2.line(img_edges_segment, (line[0], line[1]),\n (line[2], line[3]), (0, 0, 255), lineWidth)\n\n return img_edges, lines, img_edges_segment, img_segment\n \n if dataset == 'building':\n img_edges = ed.edgesDetectionFinal(input_img)\n lines, img_segment, img_points = LSD.lsd_alg(input_img,\n line_width=lineWidth,\n fuse=True,\n dTheta=1 / 360 * np.pi * 2,\n dRho=2,\n maxL=4)\n lines = lines.reshape((lines.shape[0], 1, lines.shape[1]))\n lines = np.around(lines).astype(int)\n\n # Add segment detected to the edges image\n img_edges_segment = cv2.cvtColor(img_edges, cv2.COLOR_GRAY2BGR)\n if lines is not None:\n for i in range(0, len(lines)):\n line = lines[i][0]\n cv2.line(img_edges_segment, (line[0], line[1]),\n (line[2], line[3]), (0, 0, 255), lineWidth)\n\n return img_edges, lines, img_edges_segment, img_segment\n\n return segHough(input_img, ed.edgesDetectionFinal, lineWidth=lineWidth)", "title": "" }, { "docid": "9c5a4a5fac631fbdf90feeb4b2f48f7f", "score": "0.5073276", "text": "def svm_regression( madlib_schema, input_table, model_table, parallel, kernel_func, verbose = False, eta = 0.1, nu = 0.005, slambda = 0.2):\n\n # Output error if model_table already exist\n # if __check_rel_exist(model_table):\n # plpy.error('Table ' + model_table + ' exists; please use a different model table or drop ' + model_table + ' before calling this function.');\n\n # plpy.execute('drop table if exists ' + model_table);\n plpy.execute('create table ' + model_table + ' ( id text, weight float8, sv float8[] ) m4_ifdef(`GREENPLUM', `distributed randomly')'); \n\n plpy.execute('create temp table svm_temp_result ( id text, model ' + madlib_schema + '.svm_model_rec ) m4_ifdef(`GREENPLUM', `distributed randomly')');\n\n if (verbose):\n plpy.info(\"Parameters:\");\n plpy.info(\" * input_table = %s\" % input_table);\n plpy.info(\" * model_table = \" + model_table);\n plpy.info(\" * parallel = \" + str(parallel));\n plpy.info(\" * kernel_func = \" + kernel_func);\n plpy.info(\" * eta = \" + str(eta));\n plpy.info(\" * nu = \" + str(nu));\n plpy.info(\" * slambda = \" + str(slambda));\n\n if (parallel) :\n # Learning multiple models in parallel \n\n # Start learning process\n sql = 'insert into svm_temp_result (select \\'' + model_table + '\\' || gp_segment_id, ' + madlib_schema + '.svm_reg_agg(ind, label,\\'' + kernel_func + '\\',' + str(eta) + ',' + str(nu) + ',' + str(slambda) + ') from ' + input_table + ' group by gp_segment_id)';\n plpy.execute(sql);\n\n # Store the models learned\n numproc_t = plpy.execute('select count(distinct(gp_segment_id)) from ' + input_table);\n numproc = numproc_t[0]['count'];\n plpy.execute('select ' + madlib_schema + '.svm_store_model(\\'svm_temp_result\\', \\'' + model_table + '\\',\\'' + model_table + '\\', ' + str(numproc) + ')'); \n\n else :\n # Learning a single model\n\n # Start learning process \n sql = 'insert into svm_temp_result (select \\'' + model_table + '\\', ' + madlib_schema + '.svm_reg_agg(ind, label,\\'' + kernel_func + '\\',' + str(eta) + ',' + str(nu) + ',' + str(slambda) + ') from ' + input_table + ')';\n plpy.execute(sql);\n # Store the model learned\n plpy.execute('select ' + madlib_schema + '.svm_store_model(\\'svm_temp_result\\', \\'' + model_table 
+ '\\', \\'' + model_table + '\\')');\n\n # Retrieve and return the summary for each model learned \n if parallel:\n where_cond = \"position('\" + model_table + \"' in id) > 0 AND '\" + model_table + \"' <> id\";\n else:\n where_cond = \"id = '\" + model_table + \"'\";\n\n summary = plpy.execute(\"select id, (model).inds, (model).cum_err, (model).epsilon, (model).b, (model).nsvs from svm_temp_result where \" + where_cond);\n\n result = [];\n for i in range(0,summary.nrows()):\n result = result + [(model_table, summary[i]['id'], summary[i]['inds'], summary[i]['cum_err'], summary[i]['epsilon'], summary[i]['b'], summary[i]['nsvs'])];\n\n # Clean up temp storage of models\n plpy.execute('drop table svm_temp_result'); \n\n return result;", "title": "" }, { "docid": "22236089c14795e263ac56464ecd6ab3", "score": "0.5065037", "text": "def segmentation_train_gen(cls,\n hyper_params: Dict = None,\n load_hyper_parameters: bool = False,\n progress_callback: Callable[[str, int], Callable[[None], None]] = lambda string, length: lambda: None,\n message: Callable[[str], None] = lambda *args, **kwargs: None,\n output: str = 'model',\n spec: str = default_specs.SEGMENTATION_SPEC,\n load: Optional[str] = None,\n device: str = 'cpu',\n training_data: Sequence[Dict] = None,\n evaluation_data: Sequence[Dict] = None,\n threads: int = 1,\n force_binarization: bool = False,\n format_type: str = 'path',\n suppress_regions: bool = False,\n suppress_baselines: bool = False,\n valid_regions: Optional[Sequence[str]] = None,\n valid_baselines: Optional[Sequence[str]] = None,\n merge_regions: Optional[Dict[str, str]] = None,\n merge_baselines: Optional[Dict[str, str]] = None,\n bounding_regions: Optional[Sequence[str]] = None,\n resize: str = 'fail',\n augment: bool = False,\n topline: Union[bool, None] = False):\n # load model if given. 
if a new model has to be created we need to do that\n # after data set initialization, otherwise to output size is still unknown.\n nn = None\n\n hyper_params_ = default_specs.SEGMENTATION_HYPER_PARAMS\n\n if load:\n nn, hp = cls.load_model(load,\n load_hyper_parameters=load_hyper_parameters,\n message=message)\n hyper_params_.update(hp)\n batch, channels, height, width = nn.input\n else:\n # preparse input sizes from vgsl string to seed ground truth data set\n # sizes and dimension ordering.\n spec = spec.strip()\n if spec[0] != '[' or spec[-1] != ']':\n logger.error(f'VGSL spec \"{spec}\" not bracketed')\n return None\n blocks = spec[1:-1].split(' ')\n m = re.match(r'(\\d+),(\\d+),(\\d+),(\\d+)', blocks[0])\n if not m:\n logger.error(f'Invalid input spec {blocks[0]}')\n return None\n batch, height, width, channels = [int(x) for x in m.groups()]\n\n if hyper_params:\n hyper_params_.update(hyper_params)\n\n validate_hyper_parameters(hyper_params_)\n\n hyper_params = hyper_params_\n\n transforms = generate_input_transforms(batch, height, width, channels, 0, valid_norm=False)\n\n # set multiprocessing tensor sharing strategy\n if 'file_system' in torch.multiprocessing.get_all_sharing_strategies():\n logger.debug('Setting multiprocessing tensor sharing strategy to file_system')\n torch.multiprocessing.set_sharing_strategy('file_system')\n\n if not valid_regions:\n valid_regions = None\n if not valid_baselines:\n valid_baselines = None\n\n if suppress_regions:\n valid_regions = []\n merge_regions = None\n if suppress_baselines:\n valid_baselines = []\n merge_baselines = None\n\n gt_set = BaselineSet(training_data,\n line_width=hyper_params['line_width'],\n im_transforms=transforms,\n mode=format_type,\n augmentation=hyper_params['augment'],\n valid_baselines=valid_baselines,\n merge_baselines=merge_baselines,\n valid_regions=valid_regions,\n merge_regions=merge_regions)\n val_set = BaselineSet(evaluation_data,\n line_width=hyper_params['line_width'],\n im_transforms=transforms,\n mode=format_type,\n augmentation=hyper_params['augment'],\n valid_baselines=valid_baselines,\n merge_baselines=merge_baselines,\n valid_regions=valid_regions,\n merge_regions=merge_regions)\n\n if format_type is None:\n for page in training_data:\n gt_set.add(**page)\n for page in evaluation_data:\n val_set.add(**page)\n\n # overwrite class mapping in validation set\n val_set.num_classes = gt_set.num_classes\n val_set.class_mapping = gt_set.class_mapping\n\n if not load:\n spec = f'[{spec[1:-1]} O2l{gt_set.num_classes}]'\n message(f'Creating model {spec} with {gt_set.num_classes} outputs ', nl=False)\n nn = vgsl.TorchVGSLModel(spec)\n message('\\u2713', fg='green')\n if bounding_regions is not None:\n nn.user_metadata['bounding_regions'] = bounding_regions\n nn.user_metadata['topline'] = topline\n else:\n if gt_set.class_mapping['baselines'].keys() != nn.user_metadata['class_mapping']['baselines'].keys() or \\\n gt_set.class_mapping['regions'].keys() != nn.user_metadata['class_mapping']['regions'].keys():\n\n bl_diff = set(gt_set.class_mapping['baselines'].keys()).symmetric_difference(\n set(nn.user_metadata['class_mapping']['baselines'].keys()))\n regions_diff = set(gt_set.class_mapping['regions'].keys()).symmetric_difference(\n set(nn.user_metadata['class_mapping']['regions'].keys()))\n\n if resize == 'fail':\n logger.error(f'Training data and model class mapping differ (bl: {bl_diff}, regions: {regions_diff}')\n raise KrakenInputException(\n f'Training data and model class mapping differ (bl: {bl_diff}, regions: 
{regions_diff}')\n elif resize == 'add':\n new_bls = gt_set.class_mapping['baselines'].keys() - nn.user_metadata['class_mapping']['baselines'].keys()\n new_regions = gt_set.class_mapping['regions'].keys() - nn.user_metadata['class_mapping']['regions'].keys()\n cls_idx = max(max(nn.user_metadata['class_mapping']['baselines'].values()) if nn.user_metadata['class_mapping']['baselines'] else -1,\n max(nn.user_metadata['class_mapping']['regions'].values()) if nn.user_metadata['class_mapping']['regions'] else -1)\n message(f'Adding {len(new_bls) + len(new_regions)} missing types to network output layer ', nl=False)\n nn.resize_output(cls_idx + len(new_bls) + len(new_regions) + 1)\n for c in new_bls:\n cls_idx += 1\n nn.user_metadata['class_mapping']['baselines'][c] = cls_idx\n for c in new_regions:\n cls_idx += 1\n nn.user_metadata['class_mapping']['regions'][c] = cls_idx\n message('\\u2713', fg='green')\n elif resize == 'both':\n message('Fitting network exactly to training set ', nl=False)\n new_bls = gt_set.class_mapping['baselines'].keys() - nn.user_metadata['class_mapping']['baselines'].keys()\n new_regions = gt_set.class_mapping['regions'].keys() - nn.user_metadata['class_mapping']['regions'].keys()\n del_bls = nn.user_metadata['class_mapping']['baselines'].keys() - gt_set.class_mapping['baselines'].keys()\n del_regions = nn.user_metadata['class_mapping']['regions'].keys() - gt_set.class_mapping['regions'].keys()\n\n message(f'Adding {len(new_bls) + len(new_regions)} missing '\n f'types and removing {len(del_bls) + len(del_regions)} to network output layer ',\n nl=False)\n cls_idx = max(max(nn.user_metadata['class_mapping']['baselines'].values()) if nn.user_metadata['class_mapping']['baselines'] else -1,\n max(nn.user_metadata['class_mapping']['regions'].values()) if nn.user_metadata['class_mapping']['regions'] else -1)\n\n del_indices = [nn.user_metadata['class_mapping']['baselines'][x] for x in del_bls]\n del_indices.extend(nn.user_metadata['class_mapping']['regions'][x] for x in del_regions)\n nn.resize_output(cls_idx + len(new_bls) + len(new_regions) -\n len(del_bls) - len(del_regions) + 1, del_indices)\n\n # delete old baseline/region types\n cls_idx = min(min(nn.user_metadata['class_mapping']['baselines'].values()) if nn.user_metadata['class_mapping']['baselines'] else np.inf,\n min(nn.user_metadata['class_mapping']['regions'].values()) if nn.user_metadata['class_mapping']['regions'] else np.inf)\n\n bls = {}\n for k, v in sorted(nn.user_metadata['class_mapping']['baselines'].items(), key=lambda item: item[1]):\n if k not in del_bls:\n bls[k] = cls_idx\n cls_idx += 1\n\n regions = {}\n for k, v in sorted(nn.user_metadata['class_mapping']['regions'].items(), key=lambda item: item[1]):\n if k not in del_regions:\n regions[k] = cls_idx\n cls_idx += 1\n\n nn.user_metadata['class_mapping']['baselines'] = bls\n nn.user_metadata['class_mapping']['regions'] = regions\n\n # add new baseline/region types\n cls_idx -= 1\n for c in new_bls:\n cls_idx += 1\n nn.user_metadata['class_mapping']['baselines'][c] = cls_idx\n for c in new_regions:\n cls_idx += 1\n nn.user_metadata['class_mapping']['regions'][c] = cls_idx\n message('\\u2713', fg='green')\n else:\n logger.error(f'invalid resize parameter value {resize}')\n raise KrakenInputException(f'invalid resize parameter value {resize}')\n # backfill gt_set/val_set mapping if key-equal as the actual\n # numbering in the gt_set might be different\n gt_set.class_mapping = nn.user_metadata['class_mapping']\n val_set.class_mapping = 
nn.user_metadata['class_mapping']\n\n # updates model's hyper params with users defined\n nn.hyper_params = hyper_params\n\n # change topline/baseline switch\n loc = {None: 'centerline',\n True: 'topline',\n False: 'baseline'}\n\n if 'topline' not in nn.user_metadata:\n logger.warning(f'Setting baseline location to {loc[topline]} from unset model.')\n elif nn.user_metadata['topline'] != topline:\n from_loc = loc[nn.user_metadata[\"topline\"]]\n logger.warning(f'Changing baseline location from {from_loc} to {loc[topline]}.')\n nn.user_metadata['topline'] = topline\n\n message('Training line types:')\n for k, v in gt_set.class_mapping['baselines'].items():\n message(f' {k}\\t{v}\\t{gt_set.class_stats[\"baselines\"][k]}')\n message('Training region types:')\n for k, v in gt_set.class_mapping['regions'].items():\n message(f' {k}\\t{v}\\t{gt_set.class_stats[\"regions\"][k]}')\n\n if len(gt_set.imgs) == 0:\n logger.error('No valid training data was provided to the train command. Please add valid XML data.')\n return None\n\n if device == 'cpu':\n loader_threads = threads // 2\n else:\n loader_threads = threads\n\n train_loader = InfiniteDataLoader(gt_set, batch_size=1, shuffle=True, num_workers=loader_threads, pin_memory=True)\n val_loader = DataLoader(val_set, batch_size=1, shuffle=True, num_workers=loader_threads, pin_memory=True)\n threads = max((threads - loader_threads, 1))\n\n # set model type metadata field and dump class_mapping\n nn.model_type = 'segmentation'\n nn.user_metadata['class_mapping'] = val_set.class_mapping\n\n # set mode to training\n nn.train()\n\n logger.debug(f'Set OpenMP threads to {threads}')\n nn.set_num_threads(threads)\n\n if hyper_params['optimizer'] == 'Adam':\n optim = torch.optim.Adam(nn.nn.parameters(), lr=hyper_params['lrate'], weight_decay=hyper_params['weight_decay'])\n else:\n optim = getattr(torch.optim, hyper_params['optimizer'])(nn.nn.parameters(),\n lr=hyper_params['lrate'],\n momentum=hyper_params['momentum'],\n weight_decay=hyper_params['weight_decay'])\n\n tr_it = TrainScheduler(optim)\n tr_it = TrainScheduler(optim)\n if hyper_params['schedule'] == '1cycle':\n annealing_one = partial(annealing_onecycle,\n max_lr=hyper_params['lrate'],\n epochs=hyper_params['epochs'],\n steps_per_epoch=len(gt_set))\n tr_it.add_phase(int(len(gt_set) * hyper_params['epochs']),\n annealing_one)\n elif hyper_params['schedule'] == 'exponential':\n annealing_exp = partial(annealing_exponential,\n step_size=hyper_params['step_size'],\n gamma=hyper_params['gamma'])\n tr_it.add_phase(int(len(gt_set) * hyper_params['epochs']),\n annealing_exp)\n elif hyper_params['schedule'] == 'step':\n annealing_step_p = partial(annealing_step,\n step_size=hyper_params['step_size'],\n gamma=hyper_params['gamma'])\n tr_it.add_phase(int(len(gt_set) * hyper_params['epochs']),\n annealing_step_p)\n elif hyper_params['schedule'] == 'reduceonplateau':\n annealing_red = partial(annealing_reduceonplateau,\n patience=hyper_params['rop_patience'],\n factor=hyper_params['gamma'])\n tr_it.add_phase(int(len(gt_set) * hyper_params['epochs']),\n annealing_red)\n elif hyper_params['schedule'] == 'cosine':\n annealing_cos = partial(annealing_cosine, t_max=hyper_params['cos_t_max'])\n tr_it.add_phase(int(len(gt_set) * hyper_params['epochs']),\n annealing_cos)\n else:\n # constant learning rate scheduler\n tr_it.add_phase(int(len(gt_set) * hyper_params['epochs']),\n annealing_const)\n\n if hyper_params['quit'] == 'early':\n st_it = EarlyStopping(hyper_params['min_delta'], hyper_params['lag'])\n elif 
hyper_params['quit'] == 'dumb':\n st_it = EpochStopping(hyper_params['epochs'] - hyper_params['completed_epochs'])\n else:\n logger.error(f'Invalid training interruption scheme {quit}')\n return None\n\n trainer = cls(model=nn,\n optimizer=optim,\n device=device,\n filename_prefix=output,\n event_frequency=hyper_params['freq'],\n train_set=train_loader,\n val_set=val_loader,\n stopper=st_it,\n loss_fn=baseline_label_loss_fn,\n evaluator=baseline_label_evaluator_fn)\n\n trainer.add_lr_scheduler(tr_it)\n\n return trainer", "title": "" }, { "docid": "02a6db041107a68ce5ff294e17e051e0", "score": "0.5049561", "text": "def train_svm_classifer(features, labels, save_dir, label_names):\n # save 20% of data for performance evaluation\n X_train, X_test, y_train, y_test = cross_validation.train_test_split(features, labels, test_size=0.1)\n \n param = [\n {\n \"kernel\": [\"linear\"],\n \"C\": [1, 10, 100, 1000]\n },\n {\n \"kernel\": [\"rbf\"],\n \"C\": [1, 10, 100, 1000],\n \"gamma\": [1e-2, 1e-3, 1e-4, 1e-5]\n }\n ]\n \n #param = [{\"kernel\": [\"linear\"], \"C\": [1]}]\n # request probability estimation\n # loss defaults to squared_hinge, penalty defaults to l2\n # multiclass = ovr (one verse rest)\n #svm = LinearSVC()\n svm = SVC(probability=True)\n \n # 5-fold cross validation, use 4 thread as each fold and each parameter set can be train in parallel\n clf = grid_search.GridSearchCV(svm, param,\n cv=5, n_jobs=5, verbose=2)\n \n clf.fit(X_train, y_train)\n \n with open(os.path.join(save_dir, 'best_svm.pkl'), 'wb') as f:\n pickle.dump(clf.best_estimator_, f)\n\n f1=open(os.path.join(save_dir, 'train_svm.txt'), 'w+')\n\n print(\"\\nBest parameters set:\")\n f1.write(\"\\nBest parameters set:\\n\")\n print(clf.best_params_)\n f1.write(str(clf.best_params_)+\"\\n\")\n y_predict=clf.predict(X_test)\n \n labels=sorted(list(set(labels)))\n\n print(\"\\nConfusion matrix:\")\n f1.write(\"\\nConfusion matrix:\\n\")\n l = \"Labels: {0}\".format(\",\".join([str(x) for x in labels]))\n print(l)\n f1.write(l+\"\\n\")\n l = \"Label names: {0}\\n\".format(\",\".join(label_names))\n print(l)\n f1.write(l+\"\\n\")\n l = confusion_matrix(y_test, y_predict, labels=labels)\n print(l)\n f1.write(str(l)+\"\\n\")\n l = \"\\nClassification report:\"\n print(l)\n f1.write(l+\"\\n\")\n l = classification_report(y_test, y_predict)\n print(l)\n f1.write(l+\"\\n\")\n l = \"Accuracy score: {0}\\n\".format(accuracy_score(y_test, y_predict))\n print(l)\n f1.write(l+\"\\n\")", "title": "" }, { "docid": "88d26c2fa21dd378d1d8caa5f4b02a47", "score": "0.50476253", "text": "def build_svm_model(*args):\n model = SVC()\n features = FeatureStacker([feat for feat in args]) #make list of features to be applied\n pipeline = Pipeline([('feat', features), ('svm', model)])\n return pipeline", "title": "" }, { "docid": "92951cbb028b2ff67fd2b5b6a0b0e42f", "score": "0.5040914", "text": "def SVM (dico_descriptors, list_descriptors, path_directory, file_out, path_file_global_descriptor, name_dataset, train_test_option = 1) :\n \n path_filout = path_directory + file_out\n\n # train and test\n if train_test_option == 1 : \n \n path_files_train_test = writeFiles.specificDescriptorbyData (dico_descriptors, list_descriptors, path_filout)\n \n ###############\n # RUN LDA #\n ###############\n # run SVM train, test and LOO\n runOtherProg.SVM (path_filin1 = path_files_train_test [0], path_filin2 = path_files_train_test [1], path_filout = path_filout)\n else :\n \n ##############\n # Write file #\n ##############\n if list_descriptors == \"global\" :\n 
path_file_every_pocket = writeFiles.globalDescriptors(dico_descriptors, path_filout + \"_loo\")\n else : \n path_file_every_pocket = writeFiles.specificDescriptor(dico_descriptors, list_descriptors, path_filout + \"_loo\" )\n \n ###########\n # Run SVM #\n ###########\n runOtherProg.SVM (path_filin1 = path_file_every_pocket, path_filin2 = \"0\", path_filout = path_filout)", "title": "" }, { "docid": "2cfca370ea97660c2b48eba6c758493d", "score": "0.5031259", "text": "def infer_training_data(clf, scaler=None):\n\n training_data_ = training_data.get_training_data_dict()\n\n for sample in training_data_['vec']:\n print(infer(sample.reshape(1, -1), clf, scaler=scaler))\n\n pass", "title": "" }, { "docid": "ff05c2dca52f156f5ecc7ea05c79bbcd", "score": "0.5022708", "text": "def update_svm_plot(svm, final_update=False):\n w, b, support_vectors = svm.w, svm.b, svm.support_vectors\n\n # Circle support vectors\n sv_circles.set_xdata(map(get_x, support_vectors))\n sv_circles.set_ydata(map(get_y, support_vectors))\n\n # Create helper function for computing points on boundary and gutters\n def compute_y(x, c=0):\n \"Given x, returns y such that [x,y] is on the line w dot [x,y] + b = c\"\n return (-w[0] * x - b + c) / float(w[1])\n def compute_x(y, c=0):\n \"Given y, returns x such that [x,y] is on the line w dot [x,y] + b = c\"\n return (-w[1] * y - b + c) / float(w[0])\n\n def update_line(line, c=0):\n \"\"\"helper function for updating a pyplot line (specifically, the\n decision boundary or a gutter)\"\"\"\n try:\n line.set_xdata([x_min, x_max])\n line.set_ydata([compute_y(x_min, c), compute_y(x_max, c)])\n except ZeroDivisionError:\n # line is vertical\n line.set_xdata([compute_x(y_min, c), compute_x(y_max, c)])\n line.set_ydata([y_min, y_max])\n\n # Update decision boundary (w dot x + b = 0)\n update_line(boundary_line)\n\n # Update gutters (w dot x + b = +/-1)\n update_line(positive_gutter_line, 1)\n update_line(negative_gutter_line, -1)\n\n # Redraw graph\n pl.draw()\n\n if final_update:\n # Turn off interactive mode so that pl.show() will block\n pl.ioff()\n pl.show()", "title": "" }, { "docid": "074197619151ae01f123636304c4d840", "score": "0.5019403", "text": "def train_model(classifier, feature_vector_train, label, feature_vector_valid):\n # fit the training dataset on the classifier\n classifier.fit(feature_vector_train, label)\n\n # predict the labels on validation dataset\n predictions = classifier.predict(feature_vector_valid)\n return metrics.accuracy_score(predictions, y_test)", "title": "" }, { "docid": "3f1ee7c84aa7556f6c40f30a625dd249", "score": "0.50083536", "text": "def train_model(x_train, y_train, kernel, C, logGamma, degree, coef0):\n if kernel == 'linear':\n model = sklearn.svm.SVC(kernel=kernel, C=C)\n elif kernel == 'poly':\n model = sklearn.svm.SVC(kernel=kernel, C=C, degree=degree, coef0=coef0)\n elif kernel == 'rbf':\n model = sklearn.svm.SVC(kernel=kernel, C=C, gamma=10 ** logGamma)\n else:\n raise AttributeError(\"Unknown kernel function: %s\" % kernel)\n model.fit(x_train, y_train)\n return model", "title": "" }, { "docid": "d0f0a4222f0b0c88a6ae69e20f550f38", "score": "0.5007879", "text": "def create_svm_graph(training_points):\n\n # Use interactive mode for graph updates during training\n pl.ion()\n\n # Add gridlines\n pl.grid()\n\n training_point_x_vals = map(get_x, training_points)\n training_point_y_vals = map(get_y, training_points)\n\n # Define graph range\n x_min_actual, x_max_actual = min(training_point_x_vals), max(training_point_x_vals)\n x_diff = x_max_actual - 
x_min_actual\n x_min = x_min_actual - x_diff*0.2\n x_max = x_max_actual + x_diff*0.2\n pl.xlim([x_min, x_max])\n\n y_min_actual, y_max_actual = min(training_point_y_vals), max(training_point_y_vals)\n y_diff = y_max_actual - y_min_actual\n y_min = y_min_actual - y_diff*0.2\n y_max = y_max_actual + y_diff*0.2\n pl.ylim([y_min, y_max])\n\n # Partition training points into positive and negative\n positive_point_x_vals, positive_point_y_vals = [], []\n negative_point_x_vals, negative_point_y_vals = [], []\n for pt in training_points:\n if pt.classification == 1:\n positive_point_x_vals.append(pt.coords[0])\n positive_point_y_vals.append(pt.coords[1])\n else:\n negative_point_x_vals.append(pt.coords[0])\n negative_point_y_vals.append(pt.coords[1])\n\n # Plot training points as red (positive) and blue (negative) circles\n pl.plot(positive_point_x_vals, positive_point_y_vals, \"ro\")\n pl.plot(negative_point_x_vals, negative_point_y_vals, \"bo\")\n\n # Prepare to plot support vectors as open circles\n sv_circles, = pl.plot([], [], \"o\", markersize=14, mfc='none')\n\n # Prepare to plot boundary line (solid black) and gutters (dashed black)\n boundary_line, = pl.plot([], [], \"k\")\n positive_gutter_line, = pl.plot([], [], \"k--\")\n negative_gutter_line, = pl.plot([], [], \"k--\")\n\n pl.draw()\n\n\n # Create update function for this SVM\n def update_svm_plot(svm, final_update=False):\n \"\"\"Update the SVM's graph with the current decision boundary and\n gutters, as determined by w and b, and the current support vectors.\n If final_update is True, displays the graph statically after update,\n blocking execution until the graph is closed.\"\"\"\n w, b, support_vectors = svm.w, svm.b, svm.support_vectors\n\n # Circle support vectors\n sv_circles.set_xdata(map(get_x, support_vectors))\n sv_circles.set_ydata(map(get_y, support_vectors))\n\n # Create helper function for computing points on boundary and gutters\n def compute_y(x, c=0):\n \"Given x, returns y such that [x,y] is on the line w dot [x,y] + b = c\"\n return (-w[0] * x - b + c) / float(w[1])\n def compute_x(y, c=0):\n \"Given y, returns x such that [x,y] is on the line w dot [x,y] + b = c\"\n return (-w[1] * y - b + c) / float(w[0])\n\n def update_line(line, c=0):\n \"\"\"helper function for updating a pyplot line (specifically, the\n decision boundary or a gutter)\"\"\"\n try:\n line.set_xdata([x_min, x_max])\n line.set_ydata([compute_y(x_min, c), compute_y(x_max, c)])\n except ZeroDivisionError:\n # line is vertical\n line.set_xdata([compute_x(y_min, c), compute_x(y_max, c)])\n line.set_ydata([y_min, y_max])\n\n # Update decision boundary (w dot x + b = 0)\n update_line(boundary_line)\n\n # Update gutters (w dot x + b = +/-1)\n update_line(positive_gutter_line, 1)\n update_line(negative_gutter_line, -1)\n\n # Redraw graph\n pl.draw()\n\n if final_update:\n # Turn off interactive mode so that pl.show() will block\n pl.ioff()\n pl.show()\n\n # Return update function\n return update_svm_plot", "title": "" }, { "docid": "f4d3259808aba6a3459545ef2135496f", "score": "0.5002543", "text": "def _train(self, dataset):\n # Get selected feature ids\n selected_ids = self._get_selected_ids(dataset)\n # announce desired features to the underlying slice mapper\n self._safe_assign_slicearg(selected_ids)\n # and perform its own training\n super(SensitivityBasedFeatureSelection, self)._train(dataset)", "title": "" }, { "docid": "c142f745040a9724db368384bb8399fe", "score": "0.49915686", "text": "def predict(image, vocabulary, trained_svm):\n f = 
bow_features(image, vocabulary)\n p = trained_svm.predict(f)\n return p", "title": "" }, { "docid": "7472f7e647db7caad910204e53a2ede4", "score": "0.4987791", "text": "def sv_classification( input_table, modelname, parallel):\n\n if (parallel) :\n # Learning multiple models in parallel \n\n # Output error if models with the same modelname already exist\n sql = 'select count(*) from MADLIB_SCHEMA.sv_results where id = \\'' + modelname + '0\\'';\n seen = plpy.execute(sql);\n if (seen[0]['count'] > 0):\n plpy.error('model with name \\'' + modelname + '\\' already exists; please use a different model name or drop the model using drop_sv_model() function');\n\n # Start learning process\n sql = 'insert into MADLIB_SCHEMA.sv_results (select \\'' + modelname + '\\' || gp_segment_id, MADLIB_SCHEMA.online_sv_cl_agg(ind, label) from ' + input_table + ' group by gp_segment_id)';\n plpy.execute(sql);\n\n # Store the models learned\n numproc_t = plpy.execute('select count(distinct(gp_segment_id)) from ' + input_table);\n numproc = numproc_t[0]['count'];\n plpy.execute('select MADLIB_SCHEMA.storeModel(\\'' + modelname + '\\', ' + str(numproc) + ')'); \n else :\n # Learning a single model\n\n # Output error if a model with the same modelname already exists\n sql = 'select count(*) from MADLIB_SCHEMA.sv_results where id = \\'' + modelname + '\\'';\n seen = plpy.execute(sql);\n if (seen[0]['count'] > 0):\n plpy.error('model with name \\'' + modelname + '\\' already exists; please use a different model name or drop the model using drop_sv_model() function');\n \n # Start learning process \n sql = 'insert into MADLIB_SCHEMA.sv_results (select \\'' + modelname + '\\', MADLIB_SCHEMA.online_sv_cl_agg(ind, label) from ' + input_table + ')';\n plpy.execute(sql);\n\n # Store the model learned\n plpy.execute('select MADLIB_SCHEMA.storeModel(\\'' + modelname + '\\')');\n\n return '''Finished support vector classification learning on %s table. \n ''' % (input_table)", "title": "" }, { "docid": "62d7ed58bd4c2152ec9835865d7a57d3", "score": "0.4983208", "text": "def train(self):\n \n # zalogovani zpravy\n self.dataset.log_info(\"[INFO] Trenuje se klasifikator \")\n \n data = self.data\n labels = self.labels\n \n print \"[INFO] Trenuje se klasifikator... \",\n classifier = SVC(kernel=\"linear\", C = self.C, probability=True, random_state=42)\n classifier.fit(data, labels)\n print \"Hotovo\"\n \n # ulozi klasifikator do .cpickle souboru\n print \"[INFO] Uklada se klasifikator do souboru .cpickle ...\",\n f = open(self.classifier_path, \"w\")\n f.write(cPickle.dumps(classifier))\n f.close()\n print \"Hotovo\"\n \n # zalogovani zpravy \n self.dataset.log_info(\"... Hotovo.\")", "title": "" }, { "docid": "c84f372537fbdb5864e581ff0f5e332c", "score": "0.49678546", "text": "def train(self, train_feature, train_label, train_response):\n assert train_response.shape[1] == self.num_channels\n train_feature_cv = train_feature.astype(np.float32)\n if self.num_classes > 1:\n print('Training classifier')\n self.classifier.train(train_feature_cv, cv2.ml.ROW_SAMPLE, train_label)\n predicted_train = self.classifier.predict(train_feature_cv)[1].ravel()\n error_svm = accuracy_score(train_label, predicted_train)\n print('Classifier trained. 
Training accuracy: %f' % error_svm)\n # Split the training sample based on ground truth label.\n for cls_name, cls in self.class_map.items():\n feature_in_class = train_feature_cv[train_label == cls, :]\n target_in_class = train_response[train_label == cls, :]\n # Skip models in 'transition' mode.\n if cls_name == ignore_class:\n continue\n for chn in range(self.num_channels):\n rid = cls * self.num_channels + chn\n print('Training regressor for class %d, channel %d' % (cls, chn))\n self.regressors[rid].train(feature_in_class, cv2.ml.ROW_SAMPLE,\n target_in_class[:, chn].astype(np.float32))\n predicted = self.regressors[rid].predict(feature_in_class)[1]\n print('Regressor for class %d channel %d trained. Training error: %f(r2), %f(MSE)' %\n (cls, chn, r2_score(predicted, target_in_class[:, chn]),\n mean_squared_error(predicted, target_in_class[:, chn])))\n print('All done')", "title": "" }, { "docid": "ac6152560cc25d8351385b3729f2c128", "score": "0.4964343", "text": "def train_classifier(self, test_size=0.25):\n\n start = time.time()\n self.classifier = SupportVectorClassifier(self.vehicle_features, self.non_vehicle_features)\n score = self.classifier.train_and_score(test_size=test_size)\n if self.verbose:\n print(\"Finished training in\", round(time.time() - start),\n \"seconds with {:3.3f}% accuracy.\".format(score * 100.0))", "title": "" }, { "docid": "8a5de14d37756bd470e7a0df9dd17d9a", "score": "0.49640694", "text": "def train_model(images, mask_list, k_size, probability):\n logging.info('Calculating, normalizing feature vectors for %d image(s)', len(images))\n vectors_list = [calculate_features(x.image, x.fov_mask, mask_list, k_size) for x in images]\n truth_list = [x.truth for x in images]\n logging.info('Training model with %d image(s)', len(images))\n svm.train(vectors_list, truth_list, probability) # Train SVM, lengthy process", "title": "" }, { "docid": "84bf4d2544ab5f8485485f35b420dbfb", "score": "0.49614185", "text": "def predict(self, X):", "title": "" }, { "docid": "84bf4d2544ab5f8485485f35b420dbfb", "score": "0.49614185", "text": "def predict(self, X):", "title": "" }, { "docid": "f42f3ac6060c62437ae03706756ba12a", "score": "0.49596742", "text": "def run_SVM (dataset01, dataset02, kernel='rbf', features='all_except_LBP', show_plot=\"yes\"): \n features_dic = {\n 'all_except_LBP': all_feat_no_LBP,\n 'all_with_LBP': all_LBP,\n 'geometrical': geom_feat,\n 'intensity': intens_feat,\n 'intensity_no_GLCM': noGLCM_feat,\n 'lbp': lbp_feat,\n }\n\n # Get the function from features dictionary\n func = features_dic.get(features)\n #func, dataset01_data, dataset02_data = features_dic.get(features, dataset01_data, dataset02_data)\n # Execute the function\n svclassifier,dataset01_data, dataset02_data = func(kernel, dataset01, dataset02)\n\n #dataset01_data = dataset01[:,:-1]\n dataset01_labels = dataset01[:,-1]\n #dataset02_data = dataset02[:,:-1]\n dataset02_labels = dataset02[:,-1]\n\n # Trains classifier in DataSet01 and tests in DataSet02\n svclassifier.fit(dataset01_data, dataset01_labels)\n prob1 = svclassifier.predict_proba(dataset02_data)\n print(prob1.shape)\n print(dataset02.shape)\n prob1 = np.column_stack((prob1,dataset02_labels))\n\n # Trains classifier in DataSet02 and tests in DataSet01\n svclassifier.fit(dataset02_data, dataset02_labels)\n prob2 = svclassifier.predict_proba(dataset01_data)\n prob2 = np.column_stack((prob2,dataset01_labels))\n\n # Calculate the probabilities taking both tests into account\n full_probabilities = np.concatenate((prob1,prob2),axis=0)\n 
false_positive_rate, true_positive_rate, thresholds = roc_curve(full_probabilities[:,-1], full_probabilities[:,1], pos_label=1, drop_intermediate=True)\n full_auc = auc(false_positive_rate, true_positive_rate)\n\n if \"yes\" == show_plot:\n partial_auc, FROC_fpr, FROC_tpr = ROC_to_FROC(full_probabilities, false_positive_rate, true_positive_rate,\n full_auc)\n elif \"no\" == show_plot:\n partial_auc, FROC_fpr, FROC_tpr = ROC_to_FROC(full_probabilities, false_positive_rate, true_positive_rate,\n full_auc, \"no\")\n\n return full_probabilities, full_auc, partial_auc, FROC_fpr, FROC_tpr", "title": "" }, { "docid": "2a44d35fd79b4e7549a1089319ee874b", "score": "0.4956659", "text": "def setSvmTrainedFile(self, svmFileName):\n self.svm = cv2.ml.SVM_load(svmFileName)", "title": "" }, { "docid": "42b9650de0e775d3c1fac9cc9c3c08eb", "score": "0.49538165", "text": "def compute_prediction(clf, verbose=True):\n\n df = pd.read_csv(STAGE1_SAMPLE_SUBMISSION)\n #x = np.array([np.mean(np.load(FEATURE_FOLDER + '/%s.npy' % str(id)), axis=0) for id in df['id'].tolist()])\n # x = np.array([np.load(FEATURE_FOLDER + '/%s.npy' % str(id))[:30].flatten()\n # for id in df['id'].tolist()])[:, FEATURE]\n x = np.array([np.r_[np.mean(np.load(FEATURE_FOLDER + '/%s.npy' % str(id)), axis=0)]\n for id in df['id'].tolist()])\n \"\"\"\n x2 = np.array([np.r_[np.mean(np.load(FEATURE_FOLDER_2 + '/%s.npy' % str(id)), axis=0)]\n for id in df['id'].tolist()])\n x = np.c_[x, x2]\n \"\"\"\n pred = clf.predict(x)\n df['cancer'] = pred\n return df", "title": "" }, { "docid": "631cd66ea029424e1f421e6c36d725c7", "score": "0.49444875", "text": "def __init__(self, classifier_name='svm_classifier', random_seed=None,\n **svm_params):\n super().__init__(classifier_name=classifier_name,\n random_seed=random_seed, **svm_params)\n\n # Important and necessary SVM parameters\n kernel = svm_params.pop('kernel', 'rbf')\n\n unoptimised_classifier = sklearn.svm.SVC(kernel=kernel,\n probability=self.prob,\n random_state=self._rs,\n **svm_params)\n self.classifier = unoptimised_classifier\n # Store the unoptimised classifier\n self.unoptimised_classifier = unoptimised_classifier\n print(f'Created classifier of type: {self.classifier}.\\n')\n\n # Good defaulf ranges for these parameters\n self.param_grid_default = {'C': np.logspace(-2, 5, 5),\n 'gamma': np.logspace(-8, 3, 5)}", "title": "" }, { "docid": "b33e4e577b897a8ac7ce920a6e35cef9", "score": "0.49291986", "text": "def get_svc(d, features_list):\n # Keep only the values from features_list\n data = featureFormat(d, features_list, sort_keys=True)\n # Split between labels (poi) and the rest of features\n labels, features = targetFeatureSplit(data)\n\n svm = Pipeline([('scaler', StandardScaler()), ('svm', SVC())])\n\n param_grid = ([{'svm__C': [50],\n 'svm__gamma': [0.1],\n 'svm__degree': [2],\n 'svm__kernel': ['poly'],\n 'svm__max_iter': [100]}])\n\n svm_clf = GridSearchCV(svm,\n param_grid,\n scoring='f1').fit(\n features, labels).best_estimator_\n\n return svm_clf", "title": "" }, { "docid": "90199e56ff2addf17380be1443f3aa7c", "score": "0.49255335", "text": "def trainClassifiers(self):\n \n self.which = {0:vsm(),1:nb(),2:svm()}\n for i in self.which.keys():\n self.which[i].fit()", "title": "" }, { "docid": "6957f98bce74b92011529a07a6e25966", "score": "0.4923072", "text": "def train(self,trainData, trainLabels, testData = None, testLabels=None):\n\t\t# Computes Gram matrix\n\t\ty = trainLabels.flatten()\n\t\tself.gram = self._computeGram_(trainData)\n\n\t\tdim = len(trainData)\n\n\t\tprint(\"\\tSolve SVM 
Equations\", end = '\\r')\n\n\t\t\"\"\"\n\t\tmin 1/2 x^T P x + q^T x\n\t\tsubject to\tGx < h\n\t\t\t\t\tAx = b\n\t\t\"\"\"\n\t\t# P is gram matrix\n\t\t# q is a vector of labels\n\t\tP = cvxopt.matrix(np.outer(y,y) * self.gram)\n\t\tq = cvxopt.matrix(np.ones(dim) * -1)\n\n\t\t# A = 1T\n\t\tA = cvxopt.matrix(y, (1,dim), 'd')\n\t\tb = cvxopt.matrix(0.0)\n\n\t\t# G = diag(d)\n\t\t# h = gamma*1\n\t\tG_std = np.diag(np.ones(dim) * -1)\n\t\th_std = np.zeros(dim)\n\n\t\tG_slack = np.identity(dim)\n\t\th_slack = np.ones(dim) * 0.01 \n\n\t\tG = cvxopt.matrix(np.vstack((G_std, G_slack)))\n\t\th = cvxopt.matrix(np.hstack((h_std, h_slack)))\n\n\t\t# solve QP problem\n\t\tsolution = cvxopt.solvers.qp(P, q, G, h, A, b)\n\n\n\t\tprint(\"\\tExtract Support Vectors\", end = '\\r')\n\t\t# Lagrange multipliers\n\t\ta = np.ravel(solution['x'])\n\n\t\t# Support vectors have non zero lagrange multipliers\n\t\tsupportVector = a > 1e-8\n\t\tind = np.arange(len(a))[supportVector]\n\t\tself.alpha = a[supportVector]\n\t\tself.supportVector = trainData[supportVector]\n\t\tself.supportVectorLabels = y[supportVector]\n\n\t\t# Bias\n\t\tself.bias = 0\n\t\tfor n in range(len(self.alpha)):\n\t\t\tself.bias += self.supportVectorLabels[n]\n\t\t\tself.bias -= np.sum(self.alpha * self.supportVectorLabels * self.gram[ind[n],supportVector])\n\t\tself.bias /= len(self.alpha)\n\n\t\tself.test(trainData, trainLabels)", "title": "" }, { "docid": "1a0ad2716b4a1588e23cf5c322dcb252", "score": "0.49111462", "text": "def fit(self, X, y):\n #### ----------\n # #### ----------\n\n self.trained = True\n \n if self.clf_type == 'svm':\n if self.clf_args is not None:\n clf = SVC(**self.clf_args)\n else:\n clf = SVC()\n\n elif self.clf_type == 'rf':\n if self.clf_args is not None:\n clf = RandomForestClassifier(**self.clf_args)\n #clf = RandomForestClassifier(**self.clf_args, warm_start=True)\n else:\n clf = RandomForestClassifier()\n\n n_average = self.n_average\n \n predict_score = []\n training_score = []\n clf_list = []\n xtrain_scaler_list = []\n\n n_sample = X.shape[0]\n zero_eps = 1e-6\n\n y_unique = np.unique(y) # different labels\n assert len(y_unique)>1, \"Cluster provided only has a unique label, can't classify !\"\n\n n_sample = X.shape[0]\n idx = np.arange(n_sample)\n yu_pos = {yu : idx[(y == yu)] for yu in y_unique}\n n_class = len(y_unique)\n import time \n\n dt=0.0\n\n import pickle\n\n for _ in range(n_average):\n while True:\n ytrain, ytest, xtrain, xtest = train_test_split(y, X, test_size=self.test_size)\n if len(np.unique(ytrain)) > 1: # could create a bug otherwise\n break\n\n #print(\"train size, test size:\", len(ytrain),len(ytest),sep='\\t')\n \n std = np.std(xtrain, axis = 0) \n std[std < zero_eps] = 1.0 # get rid of zero variance data.\n mu, inv_sigma = np.mean(xtrain, axis=0), 1./std\n\n xtrain = (xtrain - mu)*inv_sigma # zscoring the data \n xtest = (xtest - mu)*inv_sigma\n pickle.dump([xtrain, ytrain], open('test.pkl','wb'))\n s=time.time()\n print(len(xtrain))\n clf.fit(xtrain, ytrain)\n dt += (time.time() - s)\n\n t_score = clf.score(xtrain, ytrain) # predict on test set\n training_score.append(t_score)\n \n p_score = clf.score(xtest, ytest) # predict on test set\n #print(t_score,'\\t',p_score)\n predict_score.append(p_score)\n\n clf_list.append(clf)\n xtrain_scaler_list.append([mu,inv_sigma])\n print(\"TRAINING ONLY\\t\",dt)\n\n self.scaler_list = xtrain_scaler_list # scaling transformations (zero mean, unit std)\n self.cv_score = np.mean(predict_score)\n self.cv_score_std = np.std(predict_score) \n 
self.mean_train_score = np.mean(training_score)\n self.std_train_score = np.std(training_score)\n self.clf_list = clf_list # classifier list for majority voting !\n self._n_sample = len(y)\n\n return self", "title": "" }, { "docid": "139e93cef189112f634b50f5d1a098f5", "score": "0.48979208", "text": "def data_preprocess_segmentation(input_path, img_height, img_width, batch_size, test_batch_size, tfrecord=True,\n augment=False):\n\n if tfrecord:\n dataset, info = tfds.load(input_path, with_info=True)\n\n train_image_count = info.splits['train'].num_examples\n test_image_count = info.splits['test'].num_examples\n\n num_training_steps = train_image_count // batch_size\n num_test_steps = test_image_count // test_batch_size\n\n else:\n # Class names based on the directory structure\n class_names = os.listdir(input_path)\n\n # o nome das pastas não precisa ser esses : image e segmentation_mask\n train_images_dir = pathlib.Path(os.path.join(input_path, 'train_images', 'image'))\n train_masks_dir = pathlib.Path(os.path.join(input_path, 'train_images', 'segmentation_mask'))\n test_images_dir = pathlib.Path(os.path.join(input_path, 'test_images', 'image'))\n test_masks_dir = pathlib.Path(os.path.join(input_path, 'test_images', 'segmentation_mask'))\n # verificar se as classes se separam por pasta mesmo + adicionar a possibilidade de imagens .png\n train_image_count = len(list(train_images_dir.glob('*/*.jpg')))\n test_image_count = len(list(test_images_dir.glob('*/*.jpg')))\n\n num_training_steps = train_image_count // batch_size\n num_test_steps = test_image_count // test_batch_size\n\n # ainda falta criar outro dicionário pras separçaões\n train_images = tf.data.Dataset.list_files(str(train_images_dir / '*/*'))\n train_masks = tf.data.Dataset.list_files(str(train_masks_dir / '*/*'))\n train_ds = {'image': train_images, 'segmentation_mask': train_masks}\n\n test_images = tf.data.Dataset.list_files(str(test_images_dir / '*/*'))\n test_masks = tf.data.Dataset.list_files(str(test_masks_dir / '*/*'))\n test_ds = {'image': test_images, 'segmentation_mask': test_masks}\n\n dataset = {'train': train_ds, 'test': test_ds}\n\n train = dataset['train'].map(lambda input_images: load_images_segmentation(input_images, img_height, img_width),\n num_parallel_calls=AUTOTUNE)\n test = dataset['test'].map(lambda input_images: load_images_segmentation(input_images, img_height, img_width),\n num_parallel_calls=AUTOTUNE)\n\n train_batches = prepare_for_training(train, batch_size, augment)\n test_batches = test.batch(test_batch_size)\n\n return train_batches, test_batches, num_training_steps, num_test_steps", "title": "" }, { "docid": "129c09fbe3474ab9ed8588c60fec835d", "score": "0.48974913", "text": "def svc_tune(d, features_list, scaler=True):\n # Keep only the values from features_list\n data = featureFormat(d, features_list, sort_keys=True)\n # Split between labels (poi) and the rest of features\n labels, features = targetFeatureSplit(data)\n\n svm = Pipeline([('scaler', StandardScaler()), ('svm', SVC())])\n\n param_grid = ([{'svm__C': [1, 50, 100, 1000],\n 'svm__gamma': [0.5, 0.1, 0.01],\n 'svm__degree': [1, 2],\n 'svm__kernel': ['rbf', 'poly', 'linear'],\n 'svm__max_iter': [1, 100, 1000]}])\n\n svm_clf = GridSearchCV(svm,\n param_grid,\n scoring='f1').fit(\n features, labels).best_estimator_\n\n tester.test_classifier(svm_clf, d, features_list)\n\n return", "title": "" }, { "docid": "559c1ea37fcff96bd2fb3afbca2772b5", "score": "0.48924017", "text": "def predict(X):", "title": "" }, { "docid": 
"d0ef824c9e4fb051986758ecbdb285f1", "score": "0.48915452", "text": "def train(self, data, labels):\n # the scikit learn pipeline for vectorizing, normalizing and classifying text\n text_clf = Pipeline([('vect', HashingVectorizer()), ('clf', SGDClassifier(loss=\"log\", max_iter=3))])\n\n if DEBUG:\n parameters = {}\n else:\n parameters = {'clf__alpha': (10. ** sp.arange(-6, -4, 1.)).tolist()}\n\n # perform gridsearch to get the best regularizer\n gs_clf = GridSearchCV(text_clf, parameters, n_jobs=1, verbose=4)\n gs_clf.fit(data, labels)\n # dump classifier to pickle\n joblib.dump(gs_clf.best_estimator_, self.clf_path)\n\n self.clf = gs_clf.best_estimator_", "title": "" }, { "docid": "c7df89cb5d38ca05fcf9418b60e46eac", "score": "0.4891124", "text": "def ddos_svm_rbf_data(self):\n self._logging.debug(\"Preparing data for SVM (RBF kernel) \"\n \"DDoS attack classifier.\")\n features = [\"totalSourceBytes\", \"totalSourcePackets\",\n \"startDateTime\", \"stopDateTime\"]\n selected_data = self._return_features(self._raw_data, features)\n transformed_data = []\n for flow in selected_data:\n new_entry = []\n src_bytes = 0\n try:\n src_bytes = math.log(float(flow[0]))\n except ValueError:\n # Log (base 10) could not be evaluated, so set it to 0.\n # This has arisen as the number of source bytes is 0.\n # If the number of source bytes as listed in the\n # dataset is not 0, then something is wrong with the\n # data.\n pass\n new_entry.append(src_bytes)\n new_entry.append(flow[1]) # copy in totalSourcePackets\n start_dt = datetime.strptime(flow[2], \"%Y-%m-%dT%H:%M:%S\")\n stop_dt = datetime.strptime(flow[3], \"%Y-%m-%dT%H:%M:%S\")\n duration = (stop_dt-start_dt).seconds\n new_entry.append(duration)\n transformed_data.append(new_entry)\n return (np_array.array(transformed_data).astype(float32),\n np_array.array(self._raw_labels).astype(float32))", "title": "" }, { "docid": "ecb1e7e48357bd859c7c58bc99ab3dfa", "score": "0.48898882", "text": "def svc(task=None,x=None,w=None):\n if not isinstance(task,str):\n out = prmapping(svc,task,x)\n return out\n if (task=='untrained'):\n # just return the name, and hyperparameters\n if x is None:\n kernel = 'rbf'\n x = 1.\n C = 1.\n else:\n kernel = x[0]\n C = x[2]\n x = x[1]\n if (kernel=='linear') or (kernel=='l'):\n clf = svm.SVC(kernel='linear',degree=x,C=C,probability=True)\n elif (kernel=='poly') or (kernel=='p'):\n clf = svm.SVC(kernel='poly',degree=x,gamma='auto',coef0=1.,C=C,probability=True)\n #clf = svm.SVC(kernel='poly',gamma=x,C=C,probability=True)\n else:\n #print(\"Supplied kernel is unknown, use RBF instead.\")\n clf = svm.SVC(kernel='rbf',gamma=1./(x*x),C=C,probability=True)\n return 'Support vector classifier', clf\n elif (task==\"train\"):\n # we are going to train the mapping\n X = +x\n y = numpy.ravel(x.targets)\n clf = copy.deepcopy(w)\n clf.fit(X,y)\n return clf,x.lablist()\n elif (task==\"eval\"):\n # we are applying to new data\n clf = w.data\n pred = clf.decision_function(+x)\n if (len(pred.shape)==1): # oh boy oh boy, we are in trouble\n pred = pred[:,numpy.newaxis]\n pred = numpy.hstack((-pred,pred)) # sigh\n return pred\n else:\n print(task)\n raise ValueError('This task is *not* defined for svc.')", "title": "" }, { "docid": "6ab69fdb5fa17254ac8da6b68bebf82d", "score": "0.48808974", "text": "def analysis(X_train, X_test, y_train, y_test,Tfidf=False):\n\n clf = train_model(X_train, y_train, trainer=\"MultinomialNB\")\n predicted= clf.predict(X_test)\n print(\"MultinomialNB Accuracy:\",metrics.accuracy_score(y_test, predicted))\n 
print(\"MultinomialNB f1:\",metrics.f1_score(y_test,predicted))\n\n \n linear=train_model(X_train, y_train, trainer=\"LinearSVC\")\n y_label=linear.predict(X_test)\n \n acc=metrics.accuracy_score(y_test,y_label,normalize=True)\n f1=metrics.f1_score(y_test,y_label)\n print(\"linear SVC Accuracy: \",acc)\n print(\"linear SVC f1: \",f1)", "title": "" }, { "docid": "56276e140939cc2305101d5f82b348ce", "score": "0.48766634", "text": "def svm_predict( input_table, data_col, id_col, model_table, output_table, parallel, kernel_func):\n\n plpy.execute('drop table if exists ' + output_table);\n plpy.execute('create table ' + output_table + ' ( id int, prediction float8 ) m4_ifdef(`GREENPLUM', `distributed by (id)')');\n\n if (parallel) :\n num_models_t = plpy.execute('SELECT COUNT(DISTINCT(id)) n FROM ' + model_table + ' WHERE position(\\'' + model_table + '\\' in id) > 0 AND \\'' + model_table + '\\' <> id;');\n num_models = num_models_t[0]['n'];\n\n sql = 'insert into ' + output_table + '(select t.' + id_col + ', sum(weight * ' + kernel_func + '(m.sv, t.' + data_col + ')) / ' + str(num_models) + ' from ' + model_table + ' m, ' + input_table + ' t where position(\\'' + model_table + '\\' in m.id) > 0 AND \\'' + model_table + '\\' <> m.id group by 1)';\n plpy.execute(sql);\n\n else :\n sql = 'insert into ' + output_table + '(select t.' + id_col + ', sum(weight * ' + kernel_func + '(m.sv, t.' + data_col + ')) from ' + model_table + ' m, ' + input_table + ' t where m.id = \\'' + model_table + '\\' group by 1)';\n plpy.execute(sql);\n\n return '''Finished processing data points in %s table; results are stored in %s table. \n ''' % (input_table,output_table)", "title": "" }, { "docid": "32fe883d0ec5b91c9a4ec8820b7f38f6", "score": "0.48744503", "text": "def predict(model, segments):\n predictions = model.predict(segments)\n\n return predictions", "title": "" }, { "docid": "d96180afab0bb77b8226bfb56d874fc9", "score": "0.48728478", "text": "def train():\n features, labels = __init__.load_data('train')\n \n vectorizer = text.CountVectorizer(decode_error='ignore', stop_words='english')\n transformer = text.TfidfTransformer()\n \n classifier = linear_model.LogisticRegression(solver='lbfgs')\n \n # Serializes the processing steps that would be required of the above.\n text_clf = pipeline.Pipeline(steps=[('vect', vectorizer),\n ('tfidf', transformer),\n ('clf-lr', classifier)])\n \n start = time.time()\n text_clf.fit(features, labels)\n print 'Training time:\\t%1.4f seconds' % (time.time() - start)\n \n __init__.evaluate(text_clf, features, labels)\n\n return text_clf", "title": "" }, { "docid": "24881660521eac987be10f4b3878ba6c", "score": "0.48674807", "text": "def classify(training, training_classes, testing, c=1.0, kernel='rbf', degree=3, gamma='auto', coef0=0.0,\n shrinking=True, probability=False, tol=0.001, cache_size=200, class_weight=None, verbose=False,\n max_iter=-1, decision_function_shape=None, random_state=RANDOM_SEED):\n\n clf = SVC(C=c, kernel=kernel, degree=degree, gamma=gamma, coef0=coef0, shrinking=shrinking, probability=probability,\n tol=tol, cache_size=cache_size, class_weight=class_weight, verbose=verbose, max_iter=max_iter,\n decision_function_shape=decision_function_shape, random_state=random_state)\n clf.fit(asarray(training), asarray(training_classes))\n return clf.predict(asarray(testing))", "title": "" }, { "docid": "73c985eb0342a23d24d7291309568a9b", "score": "0.48616675", "text": "def runLinearSVC(a,b,c,d):\n model = LinearSVC()\n model.fit(a,b)\n kfold = 
model_selection.KFold(n_splits=10)\n accuracy = model_selection.cross_val_score(model, a,b, cv=kfold, scoring='accuracy')\n mean = accuracy.mean() \n stdev = accuracy.std()\n print('LinearSVC - Training set accuracy: %s (%s)' % (mean, stdev))\n print('')", "title": "" }, { "docid": "b1f7e41ed131a028971653247f44af52", "score": "0.48612747", "text": "def fit_svm(train_y, train_x, test_x, c=None, gamma=None):\n # input validation\n if c is not None:\n if not isinstance(c, list):\n raise TypeError(\"c should be a list\")\n if gamma is not None:\n if not isinstance(gamma, list):\n raise TypeError(\"gamma should be a list\")\n # creat svm model\n scaler = StandardScaler()\n train_x = scaler.fit_transform(train_x)\n Cs = c\n Gammas = gamma\n if c is None:\n Cs = list(np.logspace(-6, -1, 10))\n if gamma is None:\n Gammas = list(np.linspace(0.0001, 0.15, 10))\n svc = svm.SVC()\n clf = GridSearchCV(estimator=svc, param_grid=dict(C=Cs, gamma=Gammas),\n n_jobs=-1)\n clf.fit(train_x, train_y)\n clf = clf.best_estimator_\n # fit the best model\n clf.fit(train_x, train_y)\n # predict the testing data and convert to data frame\n prediction = clf.predict(scaler.fit_transform((test_x)))\n prediction = pd.DataFrame(prediction)\n prediction.columns = ['predict_strain']\n print('The best SVM Model is:')\n print(clf)\n return prediction", "title": "" }, { "docid": "8b382c064b329340dbd624a3653073a9", "score": "0.4854759", "text": "def fit(self, X, y):\n m, n = np.shape(X)\n y = y[:, np.newaxis] * 1.\n X_dash = y * X\n\n # define the quadratic optimization problem\n # (the dual problem from the lecture slides but in terms of minimization)\n # by constructing the appropriate matrices P, q, A, b...\n P = cvxopt.matrix(np.dot(X_dash , X_dash.T) * 1.)\n q = cvxopt.matrix(-np.ones((m, 1)))\n A = cvxopt.matrix(y.T)\n b = cvxopt.matrix(np.zeros(1))\n\n if not self.C:\n # the case when C=0 (Hard-margin SVM)\n G = cvxopt.matrix(-np.eye(m))\n h = cvxopt.matrix(np.zeros(m))\n else:\n # the case when C>0 (Soft-margin SVM)\n G_max = np.eye(m) * -1\n G_min = np.eye(m)\n G = cvxopt.matrix(np.vstack((G_max, G_min)))\n h_max = cvxopt.matrix(np.zeros(m))\n h_min = cvxopt.matrix(np.ones(m) * self.C)\n h = cvxopt.matrix(np.vstack((h_max, h_min)))\n\n # solve the quadratic optimization problem using cvxopt\n minimization = cvxopt.solvers.qp(P, q, G, h, A, b)\n\n # lagrange multipliers (denoted by alphas in the lecture slides)\n alphas = np.ravel(minimization['x'])\n\n # first get indexes of non-zero lagr. multipiers\n idx = alphas > 1e-7\n\n # get the corresponding lagr. multipliers (non-zero alphas)\n self.alphas = alphas[idx]\n\n # get the support vectors\n self.support_vectors = X[idx]\n\n # get the corresponding labels\n self.support_vector_labels = y[idx]\n\n # calculate w using the alphas, support_vectors and\n # the corresponding labels\n self.w = ((self.alphas[:, np.newaxis] * self.support_vector_labels).T @ self.support_vectors)[0]\n\n # calculate t using w and the first support vector\n self.t = np.dot(self.w, self.support_vectors[0]) - self.support_vector_labels[0]", "title": "" } ]
d2944facf207d685092d2fdb00226d73
This API has never heard of any patron.
[ { "docid": "8214a506e3ee07e5063cf26ec01e8faf", "score": "0.6529164", "text": "def patron_remote_identifier_lookup(self, patron):\n return None", "title": "" } ]
[ { "docid": "fde88ffd4ca41b2f91383e063f307889", "score": "0.56945896", "text": "def Pattern(self) -> str:", "title": "" }, { "docid": "fde88ffd4ca41b2f91383e063f307889", "score": "0.56945896", "text": "def Pattern(self) -> str:", "title": "" }, { "docid": "d5763184627af06538e9c4dd0d7d78e1", "score": "0.5521969", "text": "def pattern(self):\n raise NotImplementedError()", "title": "" }, { "docid": "aac328ece2029d184aa5284bf3a1a485", "score": "0.5310218", "text": "def __call__(self, pattern):\n return _MagicPattern(self, pattern)", "title": "" }, { "docid": "b1108e31df77f87a91c53909708ef5c9", "score": "0.51216394", "text": "def test_register_patron(self):\n\t\t\n\t\tself.helper_user_type_all('patron', 'Patron Info', 'patron_info', 'I am a patron')", "title": "" }, { "docid": "d57f25d8f2956ce4d3f3a6e40a186509", "score": "0.5029252", "text": "def test_create_patron(self):\n patron = self.default_patron\n self._set_notification_address(patron.library)\n\n # If the patron already has an account, a\n # RemotePatronCreationFailedException is raised.\n datastr, datadict = self.api.get_data(\"response_patron_create_fail_already_exists.json\")\n self.api.queue_response(status_code=409, content=datastr)\n assert_raises_regexp(\n RemotePatronCreationFailedException, 'create_patron: http=409, response={\"message\":\"A patron account with the specified username, email address, or card number already exists for this library.\"}',\n self.api.create_patron, patron\n )\n\n # Otherwise, the account is created.\n datastr, datadict = self.api.get_data(\n \"response_patron_create_success.json\"\n )\n self.api.queue_response(status_code=201, content=datastr)\n patron_rbdigital_id = self.api.create_patron(patron)\n\n # The patron's remote account ID is returned.\n eq_(940000, patron_rbdigital_id)\n\n # The data sent to RBdigital is based on the patron's\n # identifier-to-remote-service.\n remote = patron.identifier_to_remote_service(\n DataSource.RB_DIGITAL\n )\n\n form_data = json.loads(self.api.requests[-1][-1]['data'])\n\n # No identifying information was sent to RBdigital, only information\n # based on the RBdigital-specific identifier.\n eq_(self.api.library_id, form_data['libraryId'])\n eq_(remote, form_data['libraryCardNumber'])\n eq_(\"username\" + (remote.replace(\"-\", '')), form_data['userName'])\n eq_(\"genericemail+rbdigital-%[email protected]\" % remote, form_data['email'])\n eq_(\"Patron\", form_data['firstName'])\n eq_(\"Reader\", form_data['lastName'])", "title": "" }, { "docid": "048fef6f518a165efbf7e116789abc2e", "score": "0.5029075", "text": "def forDns(self, rule):\n pat = rule[2]\n if pat == 'any':\n pat = '*'\n if pat[-1] != '*':\n pat += '.'\n self.pattern = pat", "title": "" }, { "docid": "81af2f665fcb48ba80ee72a233ff749a", "score": "0.4927902", "text": "def pattern ( self ):\n return self.__pattern", "title": "" }, { "docid": "d8c7ba8850c7c7575b2f0ef7218a7315", "score": "0.4915374", "text": "def test_regex(self) -> None:\n # Some of the undocumented endpoints which are very similar to\n # some of the documented endpoints.\n assert find_openapi_endpoint(\"/users/me/presence\") is None\n assert find_openapi_endpoint(\"/users/me/subscriptions/23\") is None\n assert find_openapi_endpoint(\"/users/iago/subscriptions/23\") is None\n assert find_openapi_endpoint(\"/messages/matches_narrow\") is None\n # Making sure documented endpoints are matched correctly.\n assert (\n find_openapi_endpoint(\"/users/23/subscriptions/21\")\n == \"/users/{user_id}/subscriptions/{stream_id}\"\n )\n assert (\n 
find_openapi_endpoint(\"/users/[email protected]/presence\")\n == \"/users/{user_id_or_email}/presence\"\n )\n assert find_openapi_endpoint(\"/users/[email protected]\") == \"/users/{email}\"\n assert find_openapi_endpoint(\"/messages/23\") == \"/messages/{message_id}\"\n assert find_openapi_endpoint(\"/realm/emoji/realm_emoji_1\") == \"/realm/emoji/{emoji_name}\"", "title": "" }, { "docid": "9233a357131da3637fe9a5465a757829", "score": "0.48865253", "text": "def Pattern(*args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "62ee7387de93b2bd275e7b12d9171321", "score": "0.4874527", "text": "def get_for_pattern(self, pattern):", "title": "" }, { "docid": "bc63a48fcc73fc7b35c7f471ef38e2c2", "score": "0.48446167", "text": "def patron_from_authdata_lookup(self, authdata):\n credential = Credential.lookup_by_token(\n self._db, self.data_source, self.AUTHDATA_TOKEN_TYPE, \n authdata, allow_persistent_token=True\n )\n if not credential:\n return None\n return credential.patron", "title": "" }, { "docid": "5037164137229061d96a2a2818c54997", "score": "0.48109016", "text": "def setup_regex(self):", "title": "" }, { "docid": "bae22e0c47b1474925309ff46ab250a0", "score": "0.47920665", "text": "def magic(self):", "title": "" }, { "docid": "bae22e0c47b1474925309ff46ab250a0", "score": "0.47920665", "text": "def magic(self):", "title": "" }, { "docid": "d96ae02b50a996e9e1f10d96cbcf14ac", "score": "0.47277647", "text": "def __init__(self):\n super().__init__()\n self.trailing_slash = '/?'", "title": "" }, { "docid": "ddbbb3308e0220d5f5796ed8b9b8c0bc", "score": "0.4706952", "text": "def test_parse_patron_list(self):\n l1 = self._library()\n l2 = self._library()\n p1 = self._patron()\n p1.authorization_identifier = self._str\n p1.library_id = l1.id\n p2 = self._patron()\n p2.username = self._str\n p2.library_id = l1.id\n p3 = self._patron()\n p3.external_identifier = self._str\n p3.library_id = l1.id\n p4 = self._patron()\n p4.external_identifier = self._str\n p4.library_id = l2.id\n args = [p1.authorization_identifier, 'no-such-patron',\n '', p2.username, p3.external_identifier]\n patrons = PatronInputScript.parse_patron_list(\n self._db, l1, args\n )\n assert [p1, p2, p3] == patrons\n assert [] == PatronInputScript.parse_patron_list(self._db, l1, [])\n assert [p1] == PatronInputScript.parse_patron_list(\n self._db, l1, [p1.external_identifier, p4.external_identifier])\n assert [p4] == PatronInputScript.parse_patron_list(\n self._db, l2, [p1.external_identifier, p4.external_identifier])", "title": "" }, { "docid": "fe5599b7d7e84f5900ef2593054465fb", "score": "0.46559978", "text": "def generate_regex(self):\n ido_regex = MergeRecords.key_check(key='@pattern', source=self.ido)\n if self.resource in ido_regex:\n self.is_prefixed = True \n return ido_regex", "title": "" }, { "docid": "c0aba17eb766dbfeb384992377b5138f", "score": "0.46511632", "text": "def default_open_pattern(self):\n return None", "title": "" }, { "docid": "47ec8e0a1748b3c6acca06f47a9904b7", "score": "0.46484736", "text": "def crons():\n # TODO: generate yaml rendering.\n cron_descriptor = _cron_descriptor()\n print 'cron_descriptor:', cron_descriptor", "title": "" }, { "docid": "c4818cfbc7f0a06bcebd53622c9bbce3", "score": "0.4623885", "text": "def phone(self):", "title": "" }, { "docid": "a2871dde0077e74e60f5aa8c8c3b4238", "score": "0.4599122", "text": "def REGEX(self): # noqa - this is a property\n raise NotImplementedError()", "title": "" }, { "docid": "931ebf8699d0efe474ddde24f149d180", "score": "0.45961466", "text": 
"def __init__(__self__,\n resource_name: str,\n args: RegexPatternSetArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "title": "" }, { "docid": "dee477dcabc55faa864cda0540a3ba2b", "score": "0.45863923", "text": "def _cron_descriptor(build_target=opts.proj.dirs.base):\n return descriptor.cron(build_target)", "title": "" }, { "docid": "629503630840c513c0b085f324bbdf67", "score": "0.45587248", "text": "def test_wineregions_get(self):\n pass", "title": "" }, { "docid": "f45b6ffb81928364af5bb5e200105186", "score": "0.45584705", "text": "def test_patrol(self):\n mysite = self.get_site()\n\n rc = list(mysite.recentchanges(total=1))\n if not rc:\n self.skipTest('no recent changes to patrol')\n\n rc = rc[0]\n\n # site.patrol() needs params\n with self.assertRaises(Error):\n list(mysite.patrol())\n try:\n result = list(mysite.patrol(rcid=rc['rcid']))\n except APIError as error:\n if error.code == 'permissiondenied':\n self.skipTest(error)\n raise\n\n if hasattr(mysite, '_patroldisabled') and mysite._patroldisabled:\n self.skipTest(f'Patrolling is disabled on {mysite} wiki.')\n\n result = result[0]\n self.assertIsInstance(result, dict)\n\n params = {'rcid': 0, 'revid': [0, 1]}\n\n raised = False\n try:\n # no such rcid, revid or too old revid\n list(mysite.patrol(**params))\n except APIError as error:\n if error.code == 'badtoken':\n self.skipTest(error)\n except Error:\n # expected result\n raised = True\n self.assertTrue(raised, msg='pywikibot.exceptions.Error not raised')", "title": "" }, { "docid": "c71c93979a57d7de5fad81df6d9f35a9", "score": "0.45526978", "text": "def test_patron_exists(users):\n test_patron = User.query.all()[0]\n assert patron_exists(test_patron.id)\n assert not patron_exists('not-existing-patron-pid')", "title": "" }, { "docid": "67f63876f989b39e426552d7d2435f11", "score": "0.45354557", "text": "def GetSelfRaceAbiliti(self):\r\n print(\"/\")", "title": "" }, { "docid": "07bb5b48020d075bfc219ce3eb943dac", "score": "0.45329458", "text": "def pattern(self):\n if self._pattern is None:\n self._resolve_pattern()\n return self._pattern", "title": "" }, { "docid": "67a2d2edcd61ea8a55e436dce0a79999", "score": "0.45329276", "text": "def test_administrativeregions_get(self):\n pass", "title": "" }, { "docid": "4f81d40eced6f1ad1c8d9d22eddcb534", "score": "0.45277438", "text": "def uuid_and_label(self, patron):\n if not patron:\n return None, None\n \n # First, find or create a Credential containing the patron's\n # anonymized key into the DelegatedPatronIdentifier database.\n adobe_account_id_patron_identifier_credential = self.get_or_create_patron_identifier_credential(\n patron\n )\n\n # Look up a Credential containing the patron's Adobe account\n # ID created under the old system. We don't use\n # Credential.lookup because we don't want to create a\n # Credential if it doesn't exist.\n old_style_adobe_account_id_credential = get_one(\n self._db, Credential, patron=patron, data_source=self.data_source,\n type=self.VENDOR_ID_UUID_TOKEN_TYPE\n )\n \n if old_style_adobe_account_id_credential:\n # The value of the old-style credential will become the\n # default value of the DelegatedPatronIdentifier, assuming\n # we have to create one.\n def new_value():\n return old_style_adobe_account_id_credential.credential\n else:\n # There is no old-style credential. 
If we have to create a\n # new DelegatedPatronIdentifier we will give it a value\n # using the default mechanism.\n new_value = None\n\n # Look up or create a DelegatedPatronIdentifier using the \n # anonymized patron identifier we just looked up or created.\n utility = AuthdataUtility.from_config(patron.library, self._db)\n return self.to_delegated_patron_identifier_uuid(\n utility.library_uri, adobe_account_id_patron_identifier_credential.credential,\n value_generator=new_value\n )", "title": "" }, { "docid": "24922e498bf2ee3dc26a8854486be8a5", "score": "0.4502412", "text": "def get_shutdown_error_regex(self):\r\n pass", "title": "" }, { "docid": "acc50c2c1c751919fe2f0f6b47f42f13", "score": "0.44961512", "text": "def path(self):\n ...", "title": "" }, { "docid": "f36ae20559350798bdf1a0b502857df2", "score": "0.44673547", "text": "def re_string(self):\r\n pass", "title": "" }, { "docid": "f36ae20559350798bdf1a0b502857df2", "score": "0.44673547", "text": "def re_string(self):\r\n pass", "title": "" }, { "docid": "eb7a6e528f798d6d9bfa6dcfff47ddac", "score": "0.44541135", "text": "def test_cron_accessibility():\n\n if not OS == \"Linux\" and not OS == \"Darwin\":\n raise UnsupportedOSError(\"This must be a Linux or Macintosh machine to \" \\\n + \"test if cron is running.\")\n\n cron_allow_path = None\n cron_deny_path = None\n cron_accessibility_paths = [(\"/etc/cron.allow\",\"/etc/cron.deny\"),\n (\"/var/spool/cron/allow\",\"/var/spool/cron/deny\"),\n (\"/usr/lib/cron/cron.allow\",\n \"/usr/lib/cron/cron.allow\"),\n (\"/etc/cron.d/cron.allow\",\n \"/etc/cron.d/cron.deny\"),\n (\"/var/cron/allow\",\"/var/cron/deny\")]\n\n # Try to figure out the location of the cron accessibility files.\n for (possible_allow_path,possible_deny_path) in cron_accessibility_paths:\n if os.path.exists(possible_allow_path) \\\n or os.path.exists(possible_deny_path):\n cron_allow_path = possible_allow_path\n cron_deny_path = possible_deny_path\n break\n\n \n if cron_allow_path == None and cron_deny_path == None:\n # The cron accessibility files do not exist.\n raise CronAccessibilityFilesNotFoundError(\"Unable to detect existing \" \\\n + \"cron.allow and \" \\\n + \"cron.deny files.\")\n else:\n try:\n # Get the user name.\n user_name = getpass.getuser()\n except Exception,e:\n # The user name cannot be determined, and thus the cron.allow and/or the\n # cron.deny files cannot be checked to see if the username appears in\n # them.\n raise DetectUserError(\"At least one of the cron accessibility files \" \\\n + \"were found, but they could not be searched \" \\\n + \"because the user name could not be \" \\\n + \"determined.\")\n\n # If cron.allow exists, then the user MUST be listed therein in order to use\n # cron.\n if os.path.exists(cron_allow_path):\n try:\n found_in_allow = find_substring_in_a_file_line(cron_allow_path,\n user_name)\n except Exception,e:\n raise CronAccessibilityFilesPermissionDeniedError(cron_allow_path)\n else:\n return (found_in_allow,None)\n\n # If cron.deny exists AND cron.allow does not exist, then the user must NOT\n # be listed therein in order to use cron.\n elif os.path.exists(cron_deny_path):\n try:\n found_in_deny = find_substring_in_a_file_line(cron_deny_path,user_name)\n except Exception,e:\n raise CronAccessibilityFilesPermissionDeniedError(cron_deny_path)\n else:\n return (not found_in_deny,cron_deny_path)", "title": "" }, { "docid": "4750ea2ba33798cc3f053d1f83d51cea", "score": "0.4452363", "text": "def slash_matcher(date: str) -> bool:\n slash_date = 
r'\\d?\\d/\\d?\\d/(\\d{1,2})?1(7|8)'\n z = re.search(slash_date, date)\n if z: \n return True\n return False", "title": "" }, { "docid": "0e148759268e87d2b3b4973adff6b7a8", "score": "0.44353184", "text": "def pdayR(self):\n return self.patterns.day", "title": "" }, { "docid": "27e404b5b94c9aee19089d18b031e391", "score": "0.44253668", "text": "def get_search_pattern(self):", "title": "" }, { "docid": "81f687a796887e723d7989c348169d0c", "score": "0.44250932", "text": "def is_wildcard(self):\n return self.minor == '*'", "title": "" }, { "docid": "db9037ac788162088cfe66d504d36404", "score": "0.44152147", "text": "def is_wildcard(self) -> bool:\n return self.name.endswith(\"::*\")", "title": "" }, { "docid": "65cb9bc918c3f30aa8a73c6e01c6e3d6", "score": "0.44098815", "text": "def sdgfsdg23245():", "title": "" }, { "docid": "8a45f517be48c1639ec6d10bbbf99fe3", "score": "0.43779388", "text": "def test_nessus_rhel_07_021100(host):\n\n content = host.file('/etc/rsyslog.conf').content\n if host.file('/etc/cron.allow').exists:\n assert host.file('/etc/cron.allow').user == 'root'\n assert host.file('/etc/cron.allow').group == 'root'\n\n assert bool(re.search(\"[\\s]*cron\\.\\*\\s+/var/log/cron\\s*\", content))", "title": "" }, { "docid": "9c72f9659a4aef4b2c89225d5b506b9e", "score": "0.4375041", "text": "def create_patron(email=None):\n email = \"patron-email\" if email is None else email\n return Patron.objects.create(name=\"patron-name\", email=email, comments=\"\")", "title": "" }, { "docid": "7c59018516257275c11943b100777f8a", "score": "0.4367793", "text": "def build_pattern(self):\n raise NotImplemented()", "title": "" }, { "docid": "2f45414c7a570a018507da473d9b7a53", "score": "0.4367019", "text": "def GetAllayRaceAbiliti(self):\r\n print(\"/\")", "title": "" }, { "docid": "b80824503474459792b7806f2ccfd1b0", "score": "0.43651477", "text": "def get_modem(self): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "966d67d89c3614a89f996a4155feb39d", "score": "0.43569723", "text": "def createPatternString(input: unicode, allowGlobbing: bool) -> unicode:\n ...", "title": "" }, { "docid": "296449de6630cc794ed7a8e48e99aa61", "score": "0.4356291", "text": "def GetRegularExpression(self) -> \"char const *\":\n return _ITKIOImageBaseBasePython.itkRegularExpressionSeriesFileNames_GetRegularExpression(self)", "title": "" }, { "docid": "2be4a186fe2ee4fabbc9b15b2a4c8070", "score": "0.4354597", "text": "def real_regex():\n return '|'.join(a.regex.pattern for a in AccountType.register.itervalues() if not a.deposit) or \"a^\"", "title": "" }, { "docid": "6e9c844aa80f38bbb6743b2d1d3aa7ee", "score": "0.43461168", "text": "def __init__(self, pattern):\r\n\r\n self.regexp = None\r\n self.pattern = pattern\r\n \r\n # Is pattern a regular expression?\r\n if len(pattern)>=2 and pattern[0]==\"/\" and pattern[-1]==\"/\":\r\n self.regexp = re.compile(pattern[1:-1])", "title": "" }, { "docid": "66654a4a9f9dae93ea8ebd17bb6521f7", "score": "0.43438208", "text": "def test__matches_regex_disabled(self):\r\n pass", "title": "" }, { "docid": "9354ddb03f538b8efc32be1623139e31", "score": "0.43411666", "text": "def head(self, pattern):\n return self.route(pattern, \"head\")", "title": "" }, { "docid": "015dd598e8cb0c88ba6d3472bd890728", "score": "0.43320435", "text": "def get_subdomain_patterns(cls):\n raise NotImplementedError()", "title": "" }, { "docid": "e20f8022c430c4ac36398d980b74c23f", "score": "0.4329081", "text": "def createPattern(input: unicode, allowGlobbing: bool, options: int) -> 
java.util.regex.Pattern:\n ...", "title": "" }, { "docid": "d43981778f5c901988239eee7b06b33e", "score": "0.43274385", "text": "def get_url_extension(self):", "title": "" }, { "docid": "ab4f44f4b3a8742eb1353abe8b6acdeb", "score": "0.43273535", "text": "def test_get_crontab_list(self):\n crontab = get_crontab_list(['printf', r'%s\\n%s\\n', 'fakecron line1', 'fakecron line2'])\n self.assertEqual(crontab, ['fakecron line1', 'fakecron line2'])", "title": "" }, { "docid": "ae6e026697dfc215d2c46e095d992e05", "score": "0.43196595", "text": "def __init__(self, pattern: str, children: NodeList = None) -> None:\n super().__init__(children)\n self.pattern = pattern", "title": "" }, { "docid": "a86f063c69d9a885e183b5d10d80263a", "score": "0.43100595", "text": "def get_pattern(self):\n return self._pattern", "title": "" }, { "docid": "a86f063c69d9a885e183b5d10d80263a", "score": "0.43100595", "text": "def get_pattern(self):\n return self._pattern", "title": "" }, { "docid": "a0dde23526c612a5c68fc280456dddee", "score": "0.43053478", "text": "def _match_star(self) -> str:\n match = regex.match(regex.escape(self.name + self.oarg) + r\"(\\*)\", self.data)\n if match:\n return match.group(1)\n return \"\"", "title": "" }, { "docid": "929268ded09fc082720e9f63223b4ebf", "score": "0.43018955", "text": "def is_regex(self):\n return self._permission_is_regex", "title": "" }, { "docid": "1bdd2c9b7b0b0b491d390cf09dead585", "score": "0.43015054", "text": "def test_regex(self):\n self.assertTrue(re.match(regex.DOMAIN, \"xn--69aa8bzb.xn--y9a3aq\"))", "title": "" }, { "docid": "b56debc6b251aa8e29f81250c98c7fc8", "score": "0.42963177", "text": "def pr(_):", "title": "" }, { "docid": "ec243fd35716b25609596f8922ee2e2e", "score": "0.4294411", "text": "def test_simple_glob_double_asterisk(self):\n assert self.Select.glob_get_normal_sf(\"**\", 1)", "title": "" }, { "docid": "e853e22baf5a86533b503fde1b963bb2", "score": "0.4290959", "text": "def get_modem_simple(self): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "685c8e28c6a47b81db16cf263b5a560a", "score": "0.42826498", "text": "def increment_number_served(self, patrons):\n self.number_served += patrons", "title": "" }, { "docid": "e760197209186835eedc55b6068d85d4", "score": "0.42724574", "text": "def getLinePattern(self):\n return _coin.SoCallbackAction_getLinePattern(self)", "title": "" }, { "docid": "f7b54f91b5c79b9a932bdbe145845482", "score": "0.42677718", "text": "def getDataTypePattern(self, nickname):\n res = self.serv.getDataTypePattern(nickname)\n return res", "title": "" }, { "docid": "a0944d11248e9c3cb09eb95971cb661d", "score": "0.42672172", "text": "def get_main_cal_bans():\n return ['^/(\\?|feed\\.[\\w]+$|$)', '^/events/(\\?|feed\\.[\\w]+$|$)', '^/this-(week|month|year)/', '^/events/this-(week|month|year)/', '^/upcoming/', '^/events/upcoming/', '^/week-of/', '^/events/week-of/', '^/[0-9]{4}/', '^/events/[0-9]{4}/']", "title": "" }, { "docid": "08a80205476a8c5da718c9733e5a75f8", "score": "0.4264879", "text": "def crls(self):\n raise NotImplementedError", "title": "" }, { "docid": "521b08b07c345795ce4714cfb8c3c1ef", "score": "0.42632672", "text": "def _request_handler(self, patron):\n if not patron:\n return INVALID_CREDENTIALS.detailed(_(\"No authenticated patron\"))\n\n credential = AdobeVendorIDModel.get_or_create_patron_identifier_credential(\n patron\n )\n return DeviceManagementRequestHandler(credential)", "title": "" }, { "docid": "c8609800227888387923b0a4448fddc0", "score": "0.4261918", "text": "def 
test_router_urls_with_custom_lookup_regex(self):\n # Create a model and viewset with a special lookup field.\n class PhonyModelIV(models.Model):\n class Meta:\n app_label = 'tests'\n\n class PhonyViewSetIV(viewsets.ModelViewSet):\n model = PhonyModelIV\n lookup_regex = '[0123456789]+'\n\n @base_action({ 'POST' })\n def special(self, request):\n pass\n\n # Create the router and register our viewset.\n with mock.patch('drf_toolbox.routers.ModelSerializer'):\n router = routers.Router()\n router.register('phony', PhonyViewSetIV)\n\n # Attempt to establish that we got back what we expected.\n for urlpattern in router.urls:\n pattern = urlpattern.regex.pattern\n if '<pk>' in pattern:\n self.assertIn('(?P<pk>[0123456789]+)', pattern)\n if '<format>' in urlpattern.regex.pattern:\n self.assertFalse(pattern.endswith(r'/\\.(?P<format>[a-z]+)$'))", "title": "" }, { "docid": "1034952e563f8758feb15e1175caa7d4", "score": "0.4256063", "text": "def discover(self):", "title": "" }, { "docid": "612ba396f183b669af5e8f5b797fac20", "score": "0.42507243", "text": "def regexes(self) -> List[str]:\n routes = []\n for route in self.routes():\n if isinstance(route, str):\n routes.append(route)\n elif isinstance(route, tuple):\n routes.append(route[0])\n else:\n raise ValueError('Invalid route type')\n return routes", "title": "" }, { "docid": "effc060799c33544c6ec5d1024eed773", "score": "0.42504838", "text": "def glob(self, pattern):\n err_msg = 'glob() is not available for %r scheme.'\n raise NotImplementedError(err_msg % self.scheme)", "title": "" }, { "docid": "e93331e9c1df579e24d0d9cb163769dd", "score": "0.4247451", "text": "def subdDuplicateAndConnect():\n pass", "title": "" }, { "docid": "41e550fd5aa2f729ecc408be3faf3a3f", "score": "0.42447075", "text": "def test_spw_id_pattern(self):\n pass", "title": "" }, { "docid": "69ec593a8a0cda9b0efcec510d700e65", "score": "0.42446935", "text": "def test_repr_walk(client, walk): \n\n walk_repr = walk.__repr__()\n assert '2020-05-17 00:00:00' in walk_repr", "title": "" }, { "docid": "c0dda216ad8e3d6ca0f0041b00743d6e", "score": "0.42343652", "text": "def non_service_method(self):", "title": "" }, { "docid": "2dcc70d0f8099974c53ada06a96745b1", "score": "0.423104", "text": "def pattern(self, pattern):\n\n self._pattern = pattern", "title": "" }, { "docid": "08a158bdaa23101877db1676a59a53b5", "score": "0.42308158", "text": "def info(self, pattern, verbose):\n pass", "title": "" }, { "docid": "3e9bc12863fce8dd656da5d6a95b5004", "score": "0.42307395", "text": "def check_date_user_pattern(line, re_datetimeuser):\n if re_datetimeuser.search(line):\n print('user separator recognized')\n else:\n raise ValueError('pattern not recgonized')", "title": "" }, { "docid": "a3990342100b2c5105969bc7de153267", "score": "0.42287657", "text": "def routes():\n pass", "title": "" }, { "docid": "da998b30f34631780b7e0953502105c8", "score": "0.42257735", "text": "def isSymlink(self):\r\n # TODO: but how? 
Look for a l in the dir list?\r\n raise UnsupportedOperationError(\"Not yet supported\")", "title": "" }, { "docid": "99f9f0102bc92ece075640f27a199a6f", "score": "0.42230704", "text": "def getSymlinkPath(self):\r\n # TODO: but how?\r\n raise UnsupportedOperationError(\"Not yet supported\")", "title": "" }, { "docid": "5336769d06cb9c30c1d51e9633474f83", "score": "0.42197546", "text": "def Path(self) -> str:", "title": "" }, { "docid": "d5ccf7731126bf1edf8c5d3ced305f79", "score": "0.4218137", "text": "def test_pi18_fullcommand_piri(self):\n protocol = pi()\n result = protocol.get_full_command(\"PIRI\")\n expected = b\"^P007PIRI\\xee8\\r\"\n # print(result)\n self.assertEqual(result, expected)", "title": "" }, { "docid": "57dcde38436a426083df9b4140b666e1", "score": "0.4216543", "text": "def get_timeout_error_regex(self):\r\n pass", "title": "" }, { "docid": "c16adee9bc4f3b933e9f16f7d45f492e", "score": "0.4213634", "text": "def getPathname(self) -> unicode:\n ...", "title": "" }, { "docid": "4f5abe2350a1072fb3f118460badd8f9", "score": "0.42102462", "text": "def patron_exists(patron_pid):\n return User.query.filter_by(id=patron_pid).first() is not None", "title": "" }, { "docid": "0e80fd575c0b1608d863c0fa72dfb7e0", "score": "0.42100373", "text": "def test_patterns(self):\n for prefix, entry in self.registry.items():\n pattern = entry.pattern\n if pattern is None:\n continue\n with self.subTest(prefix=prefix):\n self.assertTrue(\n pattern.startswith(\"^\"), msg=f\"{prefix} pattern {pattern} should start with ^\"\n )\n self.assertTrue(\n pattern.endswith(\"$\"), msg=f\"{prefix} pattern {pattern} should end with $\"\n )\n\n # Check that it's the same as external definitions\n # for key in ('miriam', 'wikidata'):\n # external_pattern = get_external(prefix, key).get('pattern')\n # if external_pattern:\n # self.assertEqual(pattern, external_pattern, msg=f'{prefix}: {key} pattern not same')", "title": "" }, { "docid": "617ff6d523474a2cfcbdb354cc944dec", "score": "0.42073867", "text": "def test_remove_paths_from_cron(self):\n crontab = [\n '# Some comment',\n '[email protected]',\n '* * * * * /removed/path/something',\n \"* * * * * '/removed/path/something else'\",\n \"* * * * * '/removed/path not/something else'\",\n '* * * * * /non/removed/path/something',\n ''\n ]\n expected = [\n '# Some comment',\n '[email protected]',\n \"* * * * * '/removed/path not/something else'\",\n '* * * * * /non/removed/path/something',\n ''\n ]\n self.assertListEqual(remove_paths_from_cron(crontab, [Path('/removed/path')]), expected)", "title": "" }, { "docid": "13bed750f536c3dd20b2d3fad95a27cf", "score": "0.4202331", "text": "def pat_angle(self, angle):\n self._pat_angle(angle)", "title": "" }, { "docid": "07cc99538b76c1d1a5a9403adede3cd2", "score": "0.41986582", "text": "def _get_dot11r_method(self):\n return self.__dot11r_method", "title": "" }, { "docid": "07cc99538b76c1d1a5a9403adede3cd2", "score": "0.41986582", "text": "def _get_dot11r_method(self):\n return self.__dot11r_method", "title": "" }, { "docid": "07cc99538b76c1d1a5a9403adede3cd2", "score": "0.41986582", "text": "def _get_dot11r_method(self):\n return self.__dot11r_method", "title": "" }, { "docid": "07cc99538b76c1d1a5a9403adede3cd2", "score": "0.41986582", "text": "def _get_dot11r_method(self):\n return self.__dot11r_method", "title": "" }, { "docid": "b1361bfbe2c262fd6af693ffee36fe7a", "score": "0.4192437", "text": "def onSTAR(self,mfrom,mtime,param):\n raise NotImplementedError", "title": "" }, { "docid": "984bb7983f8a22fc3c73a3d369246017", "score": 
"0.41923735", "text": "def regex(self) -> Optional[str]:\n return pulumi.get(self, \"regex\")", "title": "" } ]
50cb75941cf794fc96b9dbd7f9cf2e89
Returns the number of bikes currently at the station.
[ { "docid": "2a2be470d151c40a4873acb78b349c31", "score": "0.739501", "text": "def bikes(self):\n bikes = [1 if dock.bike else 0 for dock in self.docks]\n return sum(bikes)", "title": "" } ]
[ { "docid": "816080d3a0ea44c12fb05843b733ad49", "score": "0.7562642", "text": "def _get_bikes_available(sta):\n # 'num_ebikes_available\" is not part of the GBFS spec, but it appears\n # in the Divvy API response\n return sta['num_bikes_available'] + sta.get('num_ebikes_available', 0)", "title": "" }, { "docid": "4496f42ab5d6b8b0fcfb97ef3c510da9", "score": "0.75365305", "text": "def num_spikes(self):\n return self._num_spikes", "title": "" }, { "docid": "50cd8ebb069242534989da8266ad6aef", "score": "0.71774304", "text": "def n_spikes(self):\n return self.model.n_spikes", "title": "" }, { "docid": "3588aec80347d6f79b714a22c095e5f7", "score": "0.7093625", "text": "def get_num_of_baskets(self):\n return self.num_of_baskets", "title": "" }, { "docid": "ad261f6212060c80d01aca99cec5e0cd", "score": "0.68277735", "text": "def num_stations(self) -> int:\n return self._num_stations", "title": "" }, { "docid": "e19732bff1111325807172e59f4fbd4a", "score": "0.65048575", "text": "def count_spikes(spk):\n shspk = numpy.shape(spk)\n if len(shspk) == 0:\n nspk = 0\n elif shspk[0] == 0:\n nspk = 0\n else:\n nspk = shspk[0]\n return(nspk)", "title": "" }, { "docid": "ebfb0883af56dceaa1e55c2c390fae05", "score": "0.64711446", "text": "def number_of_bells(self) -> int:\n return self._tower.number_of_bells", "title": "" }, { "docid": "ca33c3b07cf4a64dab26366fc0b5876b", "score": "0.6451008", "text": "def _get_spikes(self):\n return self._spikes", "title": "" }, { "docid": "fc208ee7422fd3873dc4440be5e22342", "score": "0.6414368", "text": "def getNbStations(self) :\n return len(self._stations)", "title": "" }, { "docid": "90dd2b6a47ef054a6d6daa45e893c638", "score": "0.62876534", "text": "def get_bikes_for_week(cls, dbsession, station_id):\n station = [(\"Day\", \"Available Bikes\")]\n station_data = dbsession.query(func.weekday(cls.last_update),\n func.avg(cls.available_bikes)) \\\n .filter(cls.station_id == station_id) \\\n .group_by(func.weekday(cls.last_update)) \\\n .all()\n\n # this section parses the query return into a readable list.\n # from docs:extend() appends the contents of seq to list.\n if station_data:\n station.extend([(days[a], float(b)) for a, b in station_data])\n else:\n station.extend([(0,0)])\n\n return station", "title": "" }, { "docid": "564d945f01a1e835a43b09946b5a9512", "score": "0.6222599", "text": "def total_num_bonds(self):\n return self.GetNumberOfBonds()", "title": "" }, { "docid": "fd5519063374f4cdcf684e5108e5478e", "score": "0.62162757", "text": "def total_bids(self):\n return Bid.objects.filter(bid_busket=self).count()", "title": "" }, { "docid": "4042cbe22c03260f483529b69e2a9756", "score": "0.61959684", "text": "def get_total_bets(self) -> int:\n return self._total_bet_count.get() + self._daily_bet_count.get()", "title": "" }, { "docid": "ac90256df674b266019d9e3a9cbad7d7", "score": "0.61344844", "text": "def shared_nb(self):\n return self.bbsitting_set.count() + self.booked.count()", "title": "" }, { "docid": "6c5415e49fb6b00a313f9580c3a0a61d", "score": "0.60800654", "text": "def get_numStocks(self):\n return len(self.DoS)", "title": "" }, { "docid": "04480102cf7c3be0f60eacb5905d4568", "score": "0.60536623", "text": "def getNrStations(self):\n return len(self.stationData)", "title": "" }, { "docid": "05f5726cde6bda90b3dc6bcb2c0932da", "score": "0.60189337", "text": "def check_bikes(intent, session):\n stations = location.get_stations(config.bikes_api)\n try:\n sta = _station_from_intent(intent, stations)\n except location.AmbiguousStationError as err:\n return 
reply.build(str(err), is_end=True)\n except: # NOQA\n log.exception('Failed to get a station.')\n return reply.build(\"I'm sorry, I didn't understand that. Try again?\",\n persist=session.get('attributes', {}),\n is_end=False)\n\n if not sta['is_renting']:\n postamble = \", but the station isn't renting right now.\"\n else:\n postamble = \".\"\n\n n_bike = _get_bikes_available(sta)\n n_dock = _get_docks_available(sta)\n b_or_d = intent['slots']['bikes_or_docks']['value']\n n_things = n_bike if b_or_d == 'bikes' else n_dock\n\n verb = 'is' if n_things == 1 else 'are'\n b_or_d = b_or_d[:-1] if n_things == 1 else b_or_d # singular?\n text = (\"There %s %d %s available at the %s station%s\"\n % (verb, n_things, b_or_d,\n location.text_to_speech(sta['name']),\n postamble))\n return reply.build(text, is_end=True)", "title": "" }, { "docid": "f941cba019a43600fc3e1810116bac7a", "score": "0.6013025", "text": "def number_of_beds(self):\n return self._number_of_beds", "title": "" }, { "docid": "46f0ec75f085bfce11fa7fd566696d44", "score": "0.60097414", "text": "def bookmarks_count(self) -> int:\n return pulumi.get(self, \"bookmarks_count\")", "title": "" }, { "docid": "714695bf314213c14792447d6546e5a5", "score": "0.60019034", "text": "def count_bonds(self):\n n = 0\n for bond in self.iter_bonds():\n n += 1\n return n", "title": "" }, { "docid": "714695bf314213c14792447d6546e5a5", "score": "0.60019034", "text": "def count_bonds(self):\n n = 0\n for bond in self.iter_bonds():\n n += 1\n return n", "title": "" }, { "docid": "fb1dee4f78e338e2848663110b591dea", "score": "0.5990266", "text": "def get_num_gratings(self):\r\n msg = struct.pack('>2B', 56, 13)\r\n response = self.query(msg)\r\n return response[1]", "title": "" }, { "docid": "a3093ce485a5c5702b308ba1f033bfc7", "score": "0.598069", "text": "def count(self) -> float:\n return pulumi.get(self, \"count\")", "title": "" }, { "docid": "1a2d3197766cbe48816d6534de16a8e7", "score": "0.5964005", "text": "def get_bikes_for_weekday(cls, dbsession, weekday, station_id):\n station = [(\"Time\", \"Available Bikes\", \"Available Stands\")]\n\n station_data = dbsession.query(func.hour(cls.last_update),\n func.avg(cls.available_bikes),\n func.avg(cls.available_bike_stands)) \\\n .filter(cls.station_id == station_id,\n func.weekday(cls.last_update) == weekday) \\\n .group_by(func.hour(cls.last_update)) \\\n .all()\n\n # this section parses the query return into a readable list.\n # from docs:extend() appends the contents of seq to list.\n if station_data:\n station.extend([(a, float(b), float(c)) for a, b, c in station_data])\n else:\n station.extend([(0,0,0)])\n return station", "title": "" }, { "docid": "6e32f140d5241e5e8007c6d00ee210fa", "score": "0.59480816", "text": "def count(self) -> int:\n return pulumi.get(self, \"count\")", "title": "" }, { "docid": "45412fdd491ed1f2702e2b4f1630b9f6", "score": "0.5933959", "text": "def num_carns(self):\n return self._num_carns", "title": "" }, { "docid": "e87ecbae6bcbd4f6d809ad89254c3c9a", "score": "0.5899363", "text": "def buses_count(self):\n\n count = 0\n for line in self.__bus_dict.values():\n # for item in buses:\n count += len(line)\n return count", "title": "" }, { "docid": "03bc3b38080d2dbd9d0dc83263c226d9", "score": "0.5881064", "text": "def get_block_count(self, ticker: str) -> int:\n result = self.rpc_getblockcount()\n return int(result)", "title": "" }, { "docid": "78a38cd7aa0cd2e9057f9c471afe48b8", "score": "0.5865352", "text": "def get_count(self):\r\n return self.count", "title": "" }, { "docid": 
"b8bbb2aaf12a04aa9e9f24ed9ad2224c", "score": "0.5858812", "text": "def get_count(self):\n return self.count", "title": "" }, { "docid": "b8bbb2aaf12a04aa9e9f24ed9ad2224c", "score": "0.5858812", "text": "def get_count(self):\n return self.count", "title": "" }, { "docid": "7c1d9b736cc99e61b0fca04528041e16", "score": "0.5855683", "text": "def count(self):\n return self.get_count()", "title": "" }, { "docid": "30156e8f4cbc4515d6f0aae43826fd74", "score": "0.58556664", "text": "def get_count(self):\n\n\t\treturn self.__count", "title": "" }, { "docid": "a22fa33e6a4fa24bace8dead166949ec", "score": "0.58455414", "text": "def get_num_stations(add):\r\n name=get_zipcode_names(add)\r\n engine = get_sql_engine()\r\n station_stats = text(\r\n \"\"\"\r\n SELECT\r\n count(v.*) as num_stations\r\n FROM indego_rt1130 as v\r\n JOIN philly_zipcode as n\r\n ON ST_Intersects(v.geom, n.geom)\r\n WHERE n.code = :name\r\n \"\"\"\r\n )\r\n resp = engine.execute(station_stats, name=name).fetchone()\r\n return resp[\"num_stations\"]", "title": "" }, { "docid": "a8ab6732f24f4ab3372ef5155251c32b", "score": "0.5842171", "text": "def count_buy(self):\n return Library.functions.count_buy(self._book)", "title": "" }, { "docid": "f15d224bc8c9d5da3db2948a00550139", "score": "0.5779811", "text": "def count(self) -> int:\n return self._adapter.count()", "title": "" }, { "docid": "f406882454f02555c9dc83e3d1aaa02b", "score": "0.5778971", "text": "def number_of_seats(self):\n return self._number_of_seats", "title": "" }, { "docid": "c3ae276e8042e3eae3d469021e67c7bd", "score": "0.5766069", "text": "def get_num_goats(self) -> int:\n return len(self.get_all_goat_positions())", "title": "" }, { "docid": "6797c78c601bb2c7b5b29a76f174f9d0", "score": "0.57225686", "text": "def price_count(self):\n return self.price_set.count()", "title": "" }, { "docid": "071225f84dfb14738e210634eb2e6f53", "score": "0.571687", "text": "def get_num_boosting_rounds(self):\n return self.n_estimators", "title": "" }, { "docid": "e870c839c7b4bf5f8f70f950b64428c3", "score": "0.57085633", "text": "def count(self):\n return len(self.deck)", "title": "" }, { "docid": "ad1d1b54cf2cb25945a695503193b24c", "score": "0.5705528", "text": "def get_bag_count(self):\n # b.get_bag_count() + 1 because get_bag_count does not count itself\n # A bag does not contain itself for our purposes.\n return sum([(b.get_bag_count() + 1) * n for b, n in self.bags])", "title": "" }, { "docid": "25e95e35928a4c888dbbc6bcde9d7590", "score": "0.5697921", "text": "def get_count(self):\n return self._count", "title": "" }, { "docid": "8f3c6096ab1ae70c8e8382f84b399bfe", "score": "0.56936765", "text": "def tally(self):\n return self.count", "title": "" }, { "docid": "9729155713126dd66d7527afa812350d", "score": "0.568478", "text": "def count(self) -> Optional[float]:\n return pulumi.get(self, \"count\")", "title": "" }, { "docid": "dbaffb9ec18789ef16824409a0ad7dfe", "score": "0.5683851", "text": "def count(self) -> int:\n return self._count", "title": "" }, { "docid": "dbaffb9ec18789ef16824409a0ad7dfe", "score": "0.5683851", "text": "def count(self) -> int:\n return self._count", "title": "" }, { "docid": "dbaffb9ec18789ef16824409a0ad7dfe", "score": "0.5683851", "text": "def count(self) -> int:\n return self._count", "title": "" }, { "docid": "8602405da21a1ad8021b7f43a0f165ab", "score": "0.5676688", "text": "def rack_count(self) -> str:\n return self.__rack_count", "title": "" }, { "docid": "87f51f11090c03f43687cca4b9670697", "score": "0.5670309", "text": "def GetCount(self):\n return 
self._server.get_count()", "title": "" }, { "docid": "1379c4c99f5435adcbda41bdc2b35eb2", "score": "0.5669792", "text": "def find_total_numbeats(nb1, nb2, nb3, nb4, nb5, nb6):\n numbeats = nb1 + nb2 + nb3 + nb4 + nb5 + nb6\n\n logging.info('Calculated total number of beats: %s', numbeats)\n return numbeats", "title": "" }, { "docid": "dcdc21964bae62e1c837b6c50731dc9d", "score": "0.5663188", "text": "def GetCount(self):\n return(self.count)", "title": "" }, { "docid": "0922a66f3f42145a0bd4773613cce8c8", "score": "0.56498027", "text": "def get_num_markets(add):\r\n name=get_zipcode_names(add)\r\n engine = get_sql_engine()\r\n number_markets = text(\r\n \"\"\"\r\n SELECT COUNT(\"NAME\") AS num_markets\r\n FROM farmers_markets\r\n WHERE \"ZIP\" = :name\r\n \"\"\"\r\n )\r\n resp = engine.execute(number_markets, name=name).fetchone()\r\n return resp[\"num_markets\"]", "title": "" }, { "docid": "0fb3694c37304500926868578c1fdb5c", "score": "0.5649028", "text": "def seats_count(self) -> int:\n return self.__seats_count", "title": "" }, { "docid": "ab8ba7a844a710dbca697cca22003037", "score": "0.5643979", "text": "def ball_num(self):\n counter = 0\n for i in range(0, 100):\n if self.cells[i].is_ball:\n counter += 1\n return int(counter)", "title": "" }, { "docid": "1be6ea4a53becad795f2210366f3eebf", "score": "0.5641779", "text": "def count(self):\n return Library.functions.count(self._book)", "title": "" }, { "docid": "85805fac3efdf0759ebf369aec797b64", "score": "0.564145", "text": "def count(self) -> int:\n return self.__count", "title": "" }, { "docid": "0213fecb787aa8404d833e3ae6479e92", "score": "0.56325096", "text": "def getNumberOfTraces(self) -> int:\n\n if not self.debug:\n self.myFieldFox.write(\"CALC:PAR:COUN?\")\n ret = self.myFieldFox.read()\n else:\n ret = 4\n return ret", "title": "" }, { "docid": "1264b9a45480f09ee89facbac3154e1d", "score": "0.5629439", "text": "def get_num_bags():\n with open('adventofcode/twentytwenty/static_data/day7.txt', 'r') as f:\n lines = f.readlines()\n\n rules, bags = format_data(lines)\n\n total = 0\n included_bags, num_bags = run_recursion([BAG_TYPE], bags, rules, total)\n # print(included_bags)\n\n return num_bags", "title": "" }, { "docid": "f1c9d62b94a652688df6713b916b0149", "score": "0.561586", "text": "def get_shuttles_count():\n return Shuttle.objects.count()", "title": "" }, { "docid": "f2cdc81e205905bd45cbbf16c13c0244", "score": "0.5607586", "text": "def spine_switch_count(self):\n spines = GetSwitchDetails().get_spine_switch_ip()\n totalSpines = len(spines)\n return totalSpines", "title": "" }, { "docid": "1a7dbcd5142f8332534f0bca9816cfde", "score": "0.56029", "text": "def count(self):\n # TODO not implemented yet\n return 0", "title": "" }, { "docid": "138ae0ed51e8daee9cedd12cd6b0ca12", "score": "0.5596941", "text": "def getNeuronCount(self):\n\t\treturn self.loader.getNeuronCount()", "title": "" }, { "docid": "55bd55e54f50e715c534079684f4a805", "score": "0.5587393", "text": "def count_balls(self, **kwargs):\n return 0", "title": "" }, { "docid": "2638db285d898ddf6c762003fe9029f5", "score": "0.55749077", "text": "def getCount(self):\n return self.count", "title": "" }, { "docid": "5ca3bbec0eefed48735dfe6424359b03", "score": "0.55728596", "text": "def get_num_joysticks(self) -> int:\n return self._pygame.joystick.get_count()", "title": "" }, { "docid": "e95b8c83bc08f7e7a923545b26e0e4d9", "score": "0.55705875", "text": "def approximate_neighbors_count(self) -> Optional[int]:\n return pulumi.get(self, \"approximate_neighbors_count\")", "title": "" }, 
{ "docid": "4c68c0eafd9f645b1af25dcd8aa686d4", "score": "0.5560871", "text": "def num_herbs(self):\n return self._num_herbs", "title": "" }, { "docid": "ce0c1c8bae908f7276683a53f2d4fefa", "score": "0.55560106", "text": "def get_num_streets(market):\r\n return len(market)", "title": "" }, { "docid": "b3d7494c847ad777410dd2a8f73ee9bf", "score": "0.5550524", "text": "def count(self):\n return self.ming_cursor.count()", "title": "" }, { "docid": "9dc4e027b38fc71a80090f8f99433aae", "score": "0.55501866", "text": "def get_nb_results(self):\n return self.nb_results", "title": "" }, { "docid": "0f18b3cfe541b5fd848beb2a1bb1067a", "score": "0.5532381", "text": "def number_bites_accessed(self) -> int:\r\n accessed_bites = {\r\n row['bite']\r\n for row in self.rows\r\n }\r\n\r\n return len(accessed_bites)", "title": "" }, { "docid": "441779719e0d96eb07eb5bebe30d8d6a", "score": "0.5530852", "text": "def well_count(self):\n return(len(self.wells))", "title": "" }, { "docid": "2822a0fea3e121d7e9d3a0f168537ff7", "score": "0.5527861", "text": "def count_waters(self):\n n = 0\n for frag in self.iter_waters():\n n += 1\n return n", "title": "" }, { "docid": "aa83c58a419033f214783dee3e36dfdb", "score": "0.5527838", "text": "def number_of_containing_bags(self) -> int:\n\n bag_count = 0\n for sub_bag_count, sub_bag_color in self.containing_bags:\n bag_count += sub_bag_count\n bag_count += (\n sub_bag_count * bag_registry[sub_bag_color].number_of_containing_bags\n )\n return bag_count", "title": "" }, { "docid": "0a88a261fba819b50a9db2f566a870a2", "score": "0.5521703", "text": "def get_total_brokers(self):\n return len(self.client.cluster.brokers())", "title": "" }, { "docid": "be97a25652966cfe70ff89348e1fa65f", "score": "0.5504576", "text": "def _get_docks_available(sta):\n return sta['num_docks_available']", "title": "" }, { "docid": "92bb80c117efad5dee4f7cd5a2bd0d10", "score": "0.5504223", "text": "def Count(self):\r\n\t\treturn self._get_attribute('count')", "title": "" }, { "docid": "92bb80c117efad5dee4f7cd5a2bd0d10", "score": "0.5504223", "text": "def Count(self):\r\n\t\treturn self._get_attribute('count')", "title": "" }, { "docid": "b84275c7a3f120c4e2a4b81e67fbb4ed", "score": "0.54978895", "text": "def num_animals(self):\n return self._num_herbs + self._num_carns", "title": "" }, { "docid": "34a8030df4e5b893624abed201c1aeb2", "score": "0.5496219", "text": "def count_sell(self):\n return Library.functions.count_sell(self._book)", "title": "" }, { "docid": "b85d7343b04b1171667a534c94c6bf29", "score": "0.54961455", "text": "def count(self) -> pulumi.Input[float]:\n return pulumi.get(self, \"count\")", "title": "" }, { "docid": "b85d7343b04b1171667a534c94c6bf29", "score": "0.54961455", "text": "def count(self) -> pulumi.Input[float]:\n return pulumi.get(self, \"count\")", "title": "" }, { "docid": "a8285e261efd767da24e435200150a13", "score": "0.5495811", "text": "def leaf_switch_count(self):\n leafs = GetSwitchDetails().get_leaf_switch_ip()\n totalLeafs = len(leafs)\n return totalLeafs", "title": "" }, { "docid": "d4ca81e052f8a87b17402ac7e8b97de8", "score": "0.54953814", "text": "def get_num_tigers(self) -> int:\n return len(self.get_all_tiger_positions())", "title": "" }, { "docid": "201bbba12d251175b770d3f4c1e75290", "score": "0.5486259", "text": "def depth_bids(self):\n return len(self.bid)", "title": "" }, { "docid": "6a32c011e7b843c186f1ccf69d1e0d6c", "score": "0.54855293", "text": "def num_wells(self):\n return self.info_wells['well'].nunique()", "title": "" }, { "docid": 
"659670b3f4630896d64f210f280c2d75", "score": "0.5484404", "text": "def get_likes_count(self, instance):\n return instance.likes.count()", "title": "" }, { "docid": "659670b3f4630896d64f210f280c2d75", "score": "0.5484404", "text": "def get_likes_count(self, instance):\n return instance.likes.count()", "title": "" }, { "docid": "659670b3f4630896d64f210f280c2d75", "score": "0.5484404", "text": "def get_likes_count(self, instance):\n return instance.likes.count()", "title": "" }, { "docid": "f5e9158a3865880857b84f79a896ee6b", "score": "0.54830426", "text": "def count(self):\n return self._count", "title": "" }, { "docid": "f5e9158a3865880857b84f79a896ee6b", "score": "0.54830426", "text": "def count(self):\n return self._count", "title": "" }, { "docid": "f5e9158a3865880857b84f79a896ee6b", "score": "0.54830426", "text": "def count(self):\n return self._count", "title": "" }, { "docid": "f5e9158a3865880857b84f79a896ee6b", "score": "0.54830426", "text": "def count(self):\n return self._count", "title": "" }, { "docid": "f5e9158a3865880857b84f79a896ee6b", "score": "0.54830426", "text": "def count(self):\n return self._count", "title": "" }, { "docid": "f5e9158a3865880857b84f79a896ee6b", "score": "0.54830426", "text": "def count(self):\n return self._count", "title": "" }, { "docid": "f5e9158a3865880857b84f79a896ee6b", "score": "0.54830426", "text": "def count(self):\n return self._count", "title": "" }, { "docid": "f5e9158a3865880857b84f79a896ee6b", "score": "0.54830426", "text": "def count(self):\n return self._count", "title": "" }, { "docid": "f5e9158a3865880857b84f79a896ee6b", "score": "0.54830426", "text": "def count(self):\n return self._count", "title": "" }, { "docid": "f5e9158a3865880857b84f79a896ee6b", "score": "0.54830426", "text": "def count(self):\n return self._count", "title": "" } ]
e8a33131373fbef4487cf5835a7feadb
Update any single hyper parameter or group of parameters ``params`` with ``values``.
[ { "docid": "d05d2b6d5ed40fdac465c9f72cbc14e3", "score": "0.77267337", "text": "def update_hyper(self, params, values):\n self.hyper_parameters.update(params, values)", "title": "" } ]
[ { "docid": "e4a668246ffbf52c77360299cfa41a1f", "score": "0.7678789", "text": "def update_params(self, params, grads):\n updates = self._get_updates(grads)\n for param, update in zip((p for p in params), updates):\n param += update", "title": "" }, { "docid": "140209df4436cfda0407eb3627aecf1c", "score": "0.76501244", "text": "def update_params(self, params):\n for k, v in params.items():\n self.__dict__[k] = v", "title": "" }, { "docid": "ab3ee0681cbfeddee1bc8425cb475d3b", "score": "0.7521838", "text": "def update_params(self, **params):\n self._params.update(params)", "title": "" }, { "docid": "eff82be51795ecd047ec589f6292d464", "score": "0.7417516", "text": "def update_params(self, params):\n self.params.update(params)\n return self", "title": "" }, { "docid": "2ce6e2cb4c701a5b76f3ddcaed3c190d", "score": "0.73828346", "text": "def _update_params(self, params: dict):\n for name, value in params.items():\n # WARN: this might be potentially risky\n inner_name = '_{}__{}'.format(type(self).__name__, name)\n assert inner_name in self.__dict__\n assert isinstance(value, type(self.__dict__[inner_name]))\n setattr(self, inner_name, value)", "title": "" }, { "docid": "f3915e30259db54a5f8a1785e97fbf53", "score": "0.73419183", "text": "def update_params(self, params: dict) -> None:\n self.params = params", "title": "" }, { "docid": "f3915e30259db54a5f8a1785e97fbf53", "score": "0.73419183", "text": "def update_params(self, params: dict) -> None:\n self.params = params", "title": "" }, { "docid": "8aa65f03c45c8b51538abceb76050326", "score": "0.72550863", "text": "def update_params(self, *args, **kwargs):\n raise NotImplementedError", "title": "" }, { "docid": "b8fa6ea5b8615b3f48bd562963d9bb18", "score": "0.71449184", "text": "def update_params(params_base, params_update):\n d = params_base\n d.update(params_update)\n return copy(d)", "title": "" }, { "docid": "ad86156e29c993bbeef71dee9fec6bdd", "score": "0.7137348", "text": "def update_parameters(self, parameters):\n\n for key in parameters.keys():\n self.par[key] = parameters[key]", "title": "" }, { "docid": "333c9ff385285bc3ee5456521749c8b5", "score": "0.7114364", "text": "def updateParameters(self, parameters):", "title": "" }, { "docid": "c7c64745b3e68a96a76ba3a0881a32a8", "score": "0.7065126", "text": "def update_params(self, params, newparams):\n for k, v in vars(newparams).items():\n setattr(params, k, v)\n\n return params", "title": "" }, { "docid": "b208ed32a4662e01d4cd1bab396599d4", "score": "0.7064473", "text": "def update_params(self):", "title": "" }, { "docid": "b8cf1c4769211cd318027ef23ee56876", "score": "0.70406246", "text": "def set_params(self, params):\n self.alg.set_params(params, gpu_id=self.gpu_id)", "title": "" }, { "docid": "fa0e0fa85ec455643c781d54859da480", "score": "0.7034255", "text": "def set_all_values(self, params):\n return self.params_table.set_all_values(params)", "title": "" }, { "docid": "a21c4b0672e1688a78d5e5bfe1c0ab8a", "score": "0.7033472", "text": "def update(self, params, values):\n return super(State, self).update(params, values)", "title": "" }, { "docid": "85e32fbf46d95aeaa323901b8fdf9c0e", "score": "0.6962653", "text": "def update(self, params, **kwargs):\n pass", "title": "" }, { "docid": "1cb9ab63ec1d3a245d2003a51e44fd77", "score": "0.6947696", "text": "def update_params(self, **kwargs):\n # check arguments\n all_keys = self.get_params().keys()\n # update the marginal parameters\n for key_indexed, value in kwargs.items():\n if key_indexed not in all_keys:\n raise ValueError('Unrecognized keyword 
argument ' + key_indexed)\n key_split = key_indexed.split('_')\n key, index = '_'.join(key_split[:-1]), key_split[-1]\n if index == 'c':\n self.copula.params[key] = value\n else:\n self.marginals[int(index)].params[key] = value", "title": "" }, { "docid": "56bacedb676deb525165517689557640", "score": "0.6919931", "text": "def update_parameters(params: dict, grads: dict, learning_rate: float) -> dict:\n\n W = params[\"W\"]\n b = params[\"b\"]\n\n dW = grads[\"dW\"]\n db = grads[\"db\"]\n\n W = W - learning_rate * dW # update weights\n b = b - learning_rate * db # update bias\n\n params[\"W\"] = W\n params[\"b\"] = b\n\n return params", "title": "" }, { "docid": "4c533f2a695f6889b50b0ba10ca000f4", "score": "0.6887849", "text": "def updateParameters(self, parameters):\n\n return", "title": "" }, { "docid": "67db64b5edbea422c58f1b9a81e2cba8", "score": "0.6876087", "text": "def updateParameters(self, parameters):\n return", "title": "" }, { "docid": "67db64b5edbea422c58f1b9a81e2cba8", "score": "0.6876087", "text": "def updateParameters(self, parameters):\n return", "title": "" }, { "docid": "67db64b5edbea422c58f1b9a81e2cba8", "score": "0.6876087", "text": "def updateParameters(self, parameters):\n return", "title": "" }, { "docid": "67db64b5edbea422c58f1b9a81e2cba8", "score": "0.6876087", "text": "def updateParameters(self, parameters):\n return", "title": "" }, { "docid": "67db64b5edbea422c58f1b9a81e2cba8", "score": "0.6876087", "text": "def updateParameters(self, parameters):\n return", "title": "" }, { "docid": "67db64b5edbea422c58f1b9a81e2cba8", "score": "0.6876087", "text": "def updateParameters(self, parameters):\n return", "title": "" }, { "docid": "67db64b5edbea422c58f1b9a81e2cba8", "score": "0.6876087", "text": "def updateParameters(self, parameters):\n return", "title": "" }, { "docid": "67db64b5edbea422c58f1b9a81e2cba8", "score": "0.6876087", "text": "def updateParameters(self, parameters):\n return", "title": "" }, { "docid": "67db64b5edbea422c58f1b9a81e2cba8", "score": "0.6876087", "text": "def updateParameters(self, parameters):\n return", "title": "" }, { "docid": "67db64b5edbea422c58f1b9a81e2cba8", "score": "0.6876087", "text": "def updateParameters(self, parameters):\n return", "title": "" }, { "docid": "67db64b5edbea422c58f1b9a81e2cba8", "score": "0.6876087", "text": "def updateParameters(self, parameters):\n return", "title": "" }, { "docid": "67db64b5edbea422c58f1b9a81e2cba8", "score": "0.6876087", "text": "def updateParameters(self, parameters):\n return", "title": "" }, { "docid": "67db64b5edbea422c58f1b9a81e2cba8", "score": "0.6876087", "text": "def updateParameters(self, parameters):\n return", "title": "" }, { "docid": "67db64b5edbea422c58f1b9a81e2cba8", "score": "0.6876087", "text": "def updateParameters(self, parameters):\n return", "title": "" }, { "docid": "67db64b5edbea422c58f1b9a81e2cba8", "score": "0.6876087", "text": "def updateParameters(self, parameters):\n return", "title": "" }, { "docid": "67db64b5edbea422c58f1b9a81e2cba8", "score": "0.6876087", "text": "def updateParameters(self, parameters):\n return", "title": "" }, { "docid": "67db64b5edbea422c58f1b9a81e2cba8", "score": "0.6876087", "text": "def updateParameters(self, parameters):\n return", "title": "" }, { "docid": "67db64b5edbea422c58f1b9a81e2cba8", "score": "0.6876087", "text": "def updateParameters(self, parameters):\n return", "title": "" }, { "docid": "67db64b5edbea422c58f1b9a81e2cba8", "score": "0.6876087", "text": "def updateParameters(self, parameters):\n return", "title": "" }, { "docid": 
"8fc9613fcba7ac6684f79a625a09f0f3", "score": "0.68645686", "text": "def updateParameters(self, parameters):\r\n return", "title": "" }, { "docid": "8fc9613fcba7ac6684f79a625a09f0f3", "score": "0.68645686", "text": "def updateParameters(self, parameters):\r\n return", "title": "" }, { "docid": "8fc9613fcba7ac6684f79a625a09f0f3", "score": "0.68645686", "text": "def updateParameters(self, parameters):\r\n return", "title": "" }, { "docid": "8fc9613fcba7ac6684f79a625a09f0f3", "score": "0.68645686", "text": "def updateParameters(self, parameters):\r\n return", "title": "" }, { "docid": "ceb8621510de8a9efa82d5a48c69226d", "score": "0.6856631", "text": "def update(self, lr=None, values=None):\n if values is None:\n for l in self.layers:\n l.update(lr)\n else:\n assert len(values) == len(self.param()), \"Values should be length {}\".format(\n len(self.param()))\n\n init_p = 0\n for l in self.layers:\n len_p = len(l.param())\n if len_p:\n e_p = values[init_p:init_p + len_p]\n l.update(lr, e_p)\n init_p += len_p\n else:\n l.update(lr)", "title": "" }, { "docid": "0a5c8e7c8dd05fe92a2dd7ae59736b22", "score": "0.68181366", "text": "def _iparam_update(self, param_slice: slice = None, layer_names: List[str] = None) -> None:\n # for non-blocking, only one dp optimizer is allowed\n dp_optimizer = self._dp_optimizers[0]\n # perform update on the whole model\n if param_slice is None:\n param_slice = slice(len(dp_optimizer.params_ref))\n if layer_names is None:\n layer_names = list(self._layer_wait_handles.keys())\n # update params that are visible for the optimizer\n dp_optimizer.torch_optimizer.param_groups[0][\"params\"] = dp_optimizer.params_ref[\n param_slice\n ]\n # iterate over layers\n for layer_name in reversed(layer_names):\n # only perform update, if all given layers hold unfinalized wait handles (important for layer reuse)\n if layer_name not in self._active_layers:\n return\n # iterate over layer's parameters/associated wait handles\n for param_name, wait_handle, dtp, tens in self._layer_wait_handles[layer_name]:\n # get internal index of selected parameter\n param_idx = self._param_indices[param_name]\n # synchronize, get parameter's global gradient\n wait_handle.wait()\n # check if shapes are matching\n if (\n dp_optimizer.params_ref[param_idx].grad.data.shape != tens.shape\n ): # wait_handle.tensor.shape:\n raise ValueError(\"Shapes must be equal.\")\n # accumulate parameter's global gradient\n dp_optimizer.params_ref[param_idx].grad.data += tens.to(dtp) # wait_handle.tensor\n # remove layer from set of active layers, if present\n self._active_layers.discard(layer_name)\n # if desired, perform actual parameter update\n if dp_optimizer.update_next:\n dp_optimizer.torch_optimizer.step()", "title": "" }, { "docid": "5fb495fd37879478f0de1ec4bef256a0", "score": "0.68095005", "text": "def updateParams(self,gradients):\n for i in xrange(len(self.params)):\n self.params[i].set_value(self.params[i].get_value()-gradients[i]/(1/self.learning_rate+self.iterations))", "title": "" }, { "docid": "7dc82ba6363be93787b627b2216ab048", "score": "0.68032795", "text": "def update_params(self, **kwargs):\n # check arguments\n all_keys = self.get_params().keys()\n # update the marginal parameters\n for key_indexed, value in kwargs.items():\n if key_indexed not in all_keys:\n raise ValueError('Unrecognized keyword argument ' + key_indexed)\n key_split = key_indexed.split('_')\n key, index = '_'.join(key_split[:-1]), int(key_split[-1])\n self.marginals[index].params[key] = value", "title": "" }, { "docid": 
"0b34bea9fd383a71110dd3737d34cf3b", "score": "0.67997146", "text": "def update_params(self, **kwargs):\n for key in kwargs.keys():\n if key not in self.get_params().keys():\n raise ValueError('Wrong parameter name.')\n self.params[key] = kwargs[key]", "title": "" }, { "docid": "9b4f540087c711e3e25e6ffb4ed0c0c7", "score": "0.67886066", "text": "def update_parameters(value):\n common_parameters = value.get('parameters')\n if common_parameters is None:\n return value\n\n common_parameters = parameters_dict(common_parameters)\n\n for http_method in HTTP_METHODS:\n operation = value.get(http_method)\n if operation is None:\n continue\n\n parameters = operation.get('parameters')\n if parameters is None:\n operation.properties['parameters'] = common_parameters.values()\n\n else:\n operation['parameters'] = compat.ChainMap(\n parameters_dict(parameters), common_parameters).values()\n\n del value['parameters']\n\n return value", "title": "" }, { "docid": "f3a5b15ac6b8ee9a9598c48c1bbf65b9", "score": "0.67483455", "text": "def SetParams(self, params):\n \n for net in self.model.get_calcs():\n for name in params.keys():\n try:\n net.set_var_val(name, params.get(name))\n except:\n e = 1", "title": "" }, { "docid": "841f57e412b513196194257a62bfc586", "score": "0.66192156", "text": "def update_parameters(self, parameters, verbose=True):\n for key in parameters:\n self.properties[\"parameters\"][key].update(parameters[key])\n self.generate_parameter_objects(verbose=verbose)", "title": "" }, { "docid": "2d2bbbe534481f170b9e97c7098647e5", "score": "0.66139776", "text": "def update_api_params(self, **kwargs):\n updating = kwargs.keys()\n print('doing an update on {}'.format(updating))\n for _ in updating:\n self.update_param(_, kwargs[_])\n print(self._api_params)", "title": "" }, { "docid": "6a04f4da92650ab23824c88cc06568b6", "score": "0.6610181", "text": "def update_params(params):\r\n \r\n Jungle.params = params\r\n Jungle._food = Jungle.params['fmax']", "title": "" }, { "docid": "fb1ab4fd1b50f5aecf0ceefeab74a720", "score": "0.65953386", "text": "def set_params(self, **params):\n self._model.set_params(**params)", "title": "" }, { "docid": "f80cc986b41ff3cbd6ca174918b79b2b", "score": "0.65850806", "text": "def update_params(params):\r\n \r\n Savannah.params = params\r\n Savannah._food = Savannah.params['fmax']", "title": "" }, { "docid": "4bf21ecc3e3a817352b3494037bbfb76", "score": "0.65583175", "text": "def update_model_parameters(self, new_params):\n self.args = (new_params.copy(),) + self.args[1:]\n self.params = self.args[0]", "title": "" }, { "docid": "805e11512282a22235fb374895426319", "score": "0.6479197", "text": "def setParams(self, params):", "title": "" }, { "docid": "920ffbef43af97c98c9aa2ec69ba7bdf", "score": "0.6477679", "text": "def set_params (params):\n raise NotImplementedError()", "title": "" }, { "docid": "09d33ba13b2ed763da300248a9658eb1", "score": "0.6451106", "text": "def set_params(self, params):\r\n i = 0\r\n with torch.no_grad():\r\n for p in self.parameters():\r\n p.copy_(torch.tensor(params[i]))\r\n i += 1\r\n \r\n # set mu, sigma\r\n for bn in self.bn_layers:\r\n bn.running_mean.copy_(torch.tensor(params[i]))\r\n bn.running_var.copy_(torch.tensor(params[i+1]))\r\n i += 2", "title": "" }, { "docid": "34e5b6ac1cdf2c55cf5574f4b56670f4", "score": "0.64372396", "text": "def set_params(self, params):\n self.params = params", "title": "" }, { "docid": "75e8bb35733101503905d1a26addc8e6", "score": "0.6408763", "text": "def model_parameters(self, values: LdaParamsTuple):\n 
self._set_parameters(values)", "title": "" }, { "docid": "75e8bb35733101503905d1a26addc8e6", "score": "0.6408763", "text": "def model_parameters(self, values: LdaParamsTuple):\n self._set_parameters(values)", "title": "" }, { "docid": "5195f08a605c73914d2d37ec19b442f9", "score": "0.64077497", "text": "def update_param(\n param,\n grad,\n op_type,\n op_handle,\n lr_mult=1,\n decay_mult=1,\n):\n return _functions.ParamUpdate \\\n .instantiate(\n param.device,\n op_type=op_type,\n op_handle=op_handle,\n lr_mult=lr_mult,\n decay_mult=decay_mult,\n ).apply(param, grad)", "title": "" }, { "docid": "2706609e7beb2eeaa8491dc0e600f683", "score": "0.63972867", "text": "def set_params(self, **params):\r\n if not params:\r\n # Simple optimization to gain speed (inspect is slow)\r\n return self\r\n valid_params = self.get_params(deep=True)\r\n\r\n nested_params = defaultdict(dict) # grouped by prefix\r\n for key, value in params.items():\r\n key, delim, sub_key = key.partition('__')\r\n if key not in valid_params:\r\n raise ValueError('Invalid parameter %s for estimator %s. '\r\n 'Check the list of available parameters '\r\n 'with `estimator.get_params().keys()`.' %\r\n (key, self))\r\n\r\n if delim:\r\n nested_params[key][sub_key] = value\r\n else:\r\n setattr(self, key, value)\r\n valid_params[key] = value\r\n\r\n for key, sub_params in nested_params.items():\r\n valid_params[key].set_params(**sub_params)\r\n\r\n return self", "title": "" }, { "docid": "20dd0452f19b0967239190e9fd24b3a6", "score": "0.6394374", "text": "def _update_params(params, new_params, prior, prefix=\"\"):\n for name, item in params.items():\n flatten_name = \".\".join([prefix, name]) if prefix else name\n if isinstance(item, dict):\n assert not isinstance(prior, dict) or flatten_name not in prior\n new_item = new_params[name]\n _update_params(item, new_item, prior, prefix=flatten_name)\n elif (not isinstance(prior, dict)) or flatten_name in prior:\n if isinstance(params[name], ParamShape):\n param_shape = params[name].shape\n else:\n param_shape = jnp.shape(params[name])\n params[name] = ParamShape(param_shape)\n if isinstance(prior, dict):\n d = prior[flatten_name]\n elif callable(prior) and not isinstance(prior, dist.Distribution):\n d = prior(flatten_name, param_shape)\n else:\n d = prior\n param_batch_shape = param_shape[: len(param_shape) - d.event_dim]\n # XXX: here we set all dimensions of prior to event dimensions.\n new_params[name] = numpyro.sample(\n flatten_name, d.expand(param_batch_shape).to_event()\n )", "title": "" }, { "docid": "24b5f0e2e19bd5bcd253cc6a063b317a", "score": "0.6393143", "text": "def params(self, params):\n\n self._params = params", "title": "" }, { "docid": "24b5f0e2e19bd5bcd253cc6a063b317a", "score": "0.6393143", "text": "def params(self, params):\n\n self._params = params", "title": "" }, { "docid": "4f1f8bf87798573d8b6319aaed45b839", "score": "0.6392711", "text": "def update_param(self, lr):\n for i in range(len(self.layers)):\n self.layers[i].update_param(lr)", "title": "" }, { "docid": "ce9935ca9d804a83460720a20458647e", "score": "0.63868576", "text": "def update_params(self, actions, states, values, n_step_returns, lr, include_summaries, **kwargs):\n feed_dict = self._update_feed_dict(actions, states, values, n_step_returns, lr)\n # Update the parameters\n if include_summaries:\n _, summaries = self.session.run([self.minimize, self.merged_summaries], feed_dict=feed_dict)\n return summaries\n else:\n self.session.run(self.minimize, feed_dict=feed_dict)", "title": "" }, { "docid": 
"9e915c94460843f6dd595175da9ade91", "score": "0.63835776", "text": "def set_params(self, **params):\n if not params:\n # Simple optimization to gain speed (inspect is slow)\n return self\n for key, value in params.items():\n datatype, key = key.split('__')\n valid_params = self.base_estimators[datatype].get_params(deep=True)\n\n nested_params = defaultdict(dict) # grouped by prefix\n key, delim, sub_key = key.partition('__')\n if key in valid_params:\n if delim:\n nested_params[key][sub_key] = value\n else:\n setattr(self.base_estimators[datatype], key, value)\n valid_params[key] = value\n\n for key, sub_params in nested_params.items():\n valid_params[key].set_params(**sub_params)\n\n return self", "title": "" }, { "docid": "b6109b18b724aa5e0663f564d5ce8187", "score": "0.63733363", "text": "def add_to_params(self, parameters, value):\n pass", "title": "" }, { "docid": "edb9926fcf753e5cac93fa558a2c8351", "score": "0.6372178", "text": "def update_parameters(parameters, grads, learning_rate):\n\n L = len(parameters) // 2 # number of layers in the neural network\n\n # Update rule for each parameter. Use a for loop.\n for l in range(L):\n parameters[\"W\" + str(l + 1)] = parameters[\"W\" + str(l + 1)] - learning_rate * grads[\"dW\" + str(l + 1)]\n parameters[\"b\" + str(l + 1)] = parameters[\"b\" + str(l + 1)] - learning_rate * grads[\"db\" + str(l + 1)]\n\n return parameters", "title": "" }, { "docid": "edb9926fcf753e5cac93fa558a2c8351", "score": "0.6372178", "text": "def update_parameters(parameters, grads, learning_rate):\n\n L = len(parameters) // 2 # number of layers in the neural network\n\n # Update rule for each parameter. Use a for loop.\n for l in range(L):\n parameters[\"W\" + str(l + 1)] = parameters[\"W\" + str(l + 1)] - learning_rate * grads[\"dW\" + str(l + 1)]\n parameters[\"b\" + str(l + 1)] = parameters[\"b\" + str(l + 1)] - learning_rate * grads[\"db\" + str(l + 1)]\n\n return parameters", "title": "" }, { "docid": "77015adb08c029275c39a69d66279dc6", "score": "0.6354679", "text": "def paramSetter(block, vals, paramNames):\n for paramName, val in zip(paramNames, vals):\n # Skip setting None values.\n if val is None:\n continue\n\n if isinstance(val, (tuple, list, numpy.ndarray)):\n ParamMapper._arrayParamSetter(block, [val], [paramName])\n else:\n ParamMapper._scalarParamSetter(block, [val], [paramName])", "title": "" }, { "docid": "7f5cefe4d221aa8be789780046e8a7bf", "score": "0.6349075", "text": "def update_parameters(parameters, grads, learning_rate):\n\t\n\tL = len(parameters) // 2\n\n\t# Update rule for each parameter\n\tfor l in range(L):\n\t\tparameters[\"W\" + str(l+1)] = parameters[\"W\" + str(l+1)] - learning_rate * grads[\"dW\"+ str(l+1)]\n\t\tparameters[\"b\" + str(l+1)] = parameters[\"b\" + str(l+1)] - learning_rate * grads[\"db\"+ str(l+1)]\n\t\t\n\t\t#print(parameters[\"W\" + str(l+1)])\n\t\t\n\treturn parameters", "title": "" }, { "docid": "a9a5fa8b3baf31df52f8a3c0a6794cd0", "score": "0.6339032", "text": "def set_params(self, parameters):\n for parameter, value in parameters.items():\n setattr(self, parameter, value)", "title": "" }, { "docid": "b2f68660e35a5b7bc483e4ba28b7ca79", "score": "0.63187724", "text": "def set_params(self,param_dict):\n\t\tfor i,v in param_dict.items():\n\t\t\tif i in self.params.keys():\n\t\t\t\tself.params[i]=v", "title": "" }, { "docid": "e61880eb4a692ef5b5a71efe606933d6", "score": "0.6306993", "text": "def set_params(self, new_params: torch.Tensor) -> None:\n assert new_params.size() == self.net.get_params().size()\n progress = 0\n 
for pp in list(self.net.parameters()):\n cand_params = new_params[progress: progress +\n torch.tensor(pp.size()).prod()].view(pp.size())\n progress += torch.tensor(pp.size()).prod()\n pp.data = cand_params", "title": "" }, { "docid": "bf898062915b10acfba49433067fdd59", "score": "0.62997496", "text": "def update_parameters(parameters, grads, learning_rate):\r\n \r\n L = len(parameters) // 2 # number of layers in the neural network\r\n\r\n # Update rule for each parameter. Use a for loop.\r\n for l in range(L):\r\n #print \"befor: \",parameters[\"W\" + str(l + 1)]\r\n parameters[\"W\" + str(l + 1)] = parameters[\"W\" + str(l + 1)] - learning_rate * grads[\"dW\" + str(l + 1)]\r\n #parameters[\"b\" + str(l + 1)] = parameters[\"b\" + str(l + 1)] - learning_rate * grads[\"db\" + str(l + 1)] \r\n #print \"after: \",parameters[\"W\" + str(l + 1)] \r\n return parameters", "title": "" }, { "docid": "d78549a0f80572ad86eee43c06a9ff8b", "score": "0.6299007", "text": "def update_parameters(parameters,grads,learning_rate):\r\n L=len(parameters)//2\r\n for i in range(L):\r\n parameters[\"W\" + str(i + 1)] = parameters[\"W\" + str(i + 1)] - learning_rate * grads[\"dW\" + str(i+ 1)]\r\n parameters[\"b\" + str(i + 1)] = parameters[\"b\" + str(i + 1)] - learning_rate * grads[\"db\" + str(i + 1)]\r\n return parameters", "title": "" }, { "docid": "4be704728cd61635f423104de6726f10", "score": "0.6295762", "text": "def set_parameters(cls, new_params):\n testparams = [\"omega\", \"mu\", \"phi_age\", \"phi_weight\", \"beta\", \"gamma\"]\n\n for par in new_params:\n if new_params[par] < 0:\n raise ValueError('Given value \"{}\" for parameter \"{}\" is '\n 'negative!'.format(new_params[par], par))\n\n for parameter in testparams:\n if parameter in new_params:\n if new_params[parameter] > 1:\n raise ValueError('Given value for \"{}\" '\n 'must be: [0 <= {} <= 1]'\n .format(parameter, parameter))\n cls.params.update(new_params)", "title": "" }, { "docid": "2853c0a59a4d30279acb2758870a5794", "score": "0.6287522", "text": "def update_param(self, lr):\n no_layers = len(self.layers)\n for i in range(0, no_layers):\n self.layers[i].update_param(lr)", "title": "" }, { "docid": "547712b4929395a0cf57885885548e9b", "score": "0.6284055", "text": "def set_parameters(self, params):\n\n return", "title": "" }, { "docid": "d0425652d7738cc1de22256a117400f5", "score": "0.6283165", "text": "def update_parameters(self):\n L = len(self.parameters) // 2\n \n for l in range(L):\n self.parameters[\"W\" + str(l+1)] = self.parameters[\"W\" + str(l+1)] - (self.learning_rate * self.grads[\"dW\" +str(l+1)])\n self.parameters[\"b\" + str(l+1)] = self.parameters[\"b\" + str(l+1)] - (self.learning_rate * self.grads[\"db\" +str(l+1)])", "title": "" }, { "docid": "b9cbb1c86072f8df2e24a29057539a7d", "score": "0.62822527", "text": "def update_values(self):\n self.update(update_vals=True)", "title": "" }, { "docid": "9ca890bc1d41e0b999c9da0571e57094", "score": "0.62790686", "text": "def set_params(self, **params):\n pass", "title": "" }, { "docid": "84f2930fcbc6493f8af51084745d4111", "score": "0.62397003", "text": "def parameter_update(grads, layer_params, learning_rate):\r\n ### YOUR CODE HERE\r\n layer_params[\"W1\"] = layer_params[\"W1\"] - learning_rate * grads[\"W1\"]\r\n layer_params[\"b1\"] = layer_params[\"b1\"] - learning_rate * grads[\"b1\"]\r\n layer_params[\"W2\"] = layer_params[\"W2\"] - learning_rate * grads[\"W2\"]\r\n layer_params[\"b2\"] = layer_params[\"b2\"] - learning_rate * grads[\"b2\"]\r\n ### END YOUR CODE\r\n return 
layer_params", "title": "" }, { "docid": "5a7e9df10b4f2a34ef0aa1ce6514a541", "score": "0.62356657", "text": "def set_model_params(self, params):\n self.model_params.update(params)", "title": "" }, { "docid": "47e7775b6c3faee32dffebc100deeed6", "score": "0.6233503", "text": "def set_params(self, **params):\n for key, value in list(params.items()):\n setattr(self, key, value)\n return self", "title": "" }, { "docid": "9853e619fbb14170d098bcf768fcae17", "score": "0.6229599", "text": "def update(self):\n self.oldparams = self.params\n self.params = self.result.params", "title": "" }, { "docid": "867f5430fa9a2b67ae08394b25a66325", "score": "0.6228839", "text": "def update_parameters(parameters, grads, learning_rate):\n\n L = len(parameters) // 2 # number of layers in the neural network\n\n # Update rule for each parameter. Use a for loop.\n # START CODE HERE ### (≈ 3 lines of code)\n for l in range(L):\n parameters[\"W\" + str(l + 1)] = parameters[\"W\" + str(l + 1)] - \\\n learning_rate * grads[\"dW\" + str(l + 1)]\n parameters[\"b\" + str(l + 1)] = parameters[\"b\" + str(l + 1)] - \\\n learning_rate * grads[\"db\" + str(l + 1)]\n ### END CODE HERE ###\n return parameters", "title": "" }, { "docid": "695f8d1de64762a54f3be7858cd76532", "score": "0.6224326", "text": "def update_parameters(parameters, grads, learning_rate):\n \n L = len(parameters) // 2 # number of layers in the neural network\n\n # Update rule for each parameter. Use a for loop.\n ### START CODE HERE ### (≈ 3 lines of code)\n for l in range(L):\n parameters[\"W\" + str(l + 1)] = parameters[\"W\" + str(l + 1)] - learning_rate * grads[\"dW\" + str(l + 1)]\n parameters[\"b\" + str(l + 1)] = parameters[\"b\" + str(l + 1)] - learning_rate * grads[\"db\" + str(l + 1)]\n\n ### END CODE HERE ###\n return parameters", "title": "" }, { "docid": "5b7cdb14feb2cb2aa5c2800df2be2026", "score": "0.6221199", "text": "def update_params(self, learning_rate):\n #######################################################################\n # ** START OF YOUR CODE **\n #######################################################################\n\n for layer in self._layers:\n if isinstance(layer, LinearLayer):\n layer.update_params(learning_rate)\n\n #######################################################################\n # ** END OF YOUR CODE **\n #######################################################################", "title": "" }, { "docid": "b842a3f6b4462aebbfd28e3ee5f0fe4b", "score": "0.6203779", "text": "def set_parameters(self, params, batch_dim):\n if (self.amortized != 'none'):\n self.v = params[:, :]\n if (self.amortized != 'input'):\n self.v = self.v.repeat(batch_dim, 1)", "title": "" }, { "docid": "90ddbef8f8e8bab67e4a546276e3cc4a", "score": "0.6202632", "text": "def set_params(self, **params):\n if not params:\n # Simple optimisation to gain speed (inspect is slow)\n return self\n valid_params = self.get_params(deep=True)\n for key, value in params.iteritems():\n split = key.split('__', 1)\n if len(split) > 1:\n # nested objects case\n name, sub_name = split\n if not name in valid_params:\n raise ValueError('Invalid parameter %s for estimator %s' %\n (name, self))\n sub_object = valid_params[name]\n sub_object.set_params(**{sub_name: value})\n else:\n # simple objects case\n if not key in valid_params:\n raise ValueError('Invalid parameter %s ' 'for estimator %s'\n % (key, self.__class__.__name__))\n setattr(self, key, value)\n return self", "title": "" }, { "docid": "c33d2958d245bd50685462dedde61908", "score": "0.6193509", "text": "def 
update_param_inst(param_inst, param_esti_group_key, param_val):\n\n param_shortname, param_type, param_name = param_type_param_name(param_group_key=param_esti_group_key)\n\n '''\n 1. get param inst component\n '''\n if (param_type == 'esti_type'):\n param_inst_group = param_inst.esti_param\n if (param_type == 'grid_type'):\n param_inst_group = param_inst.grid_param\n if (param_type == 'interpolant_type'):\n param_inst_group = param_inst.interpolant\n if (param_type == 'dist_type'):\n param_inst_group = param_inst.dist_param\n if (param_type == 'data_type'):\n param_inst_group = param_inst.data_param\n\n '''\n 2. update dictionary \n '''\n if (len(param_inst_group) > 0):\n param_name_list = param_name.split('.')\n if (len(param_name_list) == 1):\n param_inst_group[param_name] = param_val\n elif (len(param_name_list) == 2):\n param_inst_group[param_name_list[0]][param_name_list[1]] = param_val\n elif (len(param_name_list) == 3):\n param_inst_group[param_name_list[0]][param_name_list[1]][param_name_list[2]] = param_val\n else:\n message = 'param_key_list:' + str(param_name_list) + ', too many dots, > 3, too nested'\n logger.debug(message)\n raise ValueError(message)\n else:\n '''\n if dist_type = {} for example, do not try to fill it\n only none for: data__A_params_mu_ce9901', 'data__A_params_sd_ce9901\n these parameters when estimating without integration do not exist \n '''\n param_val = None\n\n '''\n 3. update param_inst\n '''\n if (param_type == 'esti_type'):\n param_inst.esti_param = param_inst_group\n if (param_type == 'grid_type'):\n param_inst.grid_param = param_inst_group\n if (param_type == 'interpolant_type'):\n param_inst.interpolant = param_inst_group\n if (param_type == 'dist_type'):\n param_inst.dist_param = param_inst_group\n if (param_type == 'data_type'):\n param_inst.data_param = param_inst_group\n\n return param_inst", "title": "" }, { "docid": "d6e68d7a257b959dfbb7b663bf98341d", "score": "0.6193333", "text": "def set_params(self, params):\n self.params = params\n return self", "title": "" }, { "docid": "ebf3e8d7b81e9808e2b7433824fd0a67", "score": "0.6170792", "text": "def update_parameters(cls, new_par_dict):\n for par in new_par_dict.keys():\n if par not in cls.param:\n raise ValueError(\n f\"Invalid input: {par} is not a key in \"\n f\"class parameters\"\n )\n if (\n new_par_dict[par] <= 0\n and par == \"DeltaPhiMax\"\n and cls.__name__ == \"Carnivore\"\n ):\n raise ValueError(f\"{par} must be strictly positive\")\n elif new_par_dict[par] < 0 and par != \"DeltaPhiMax\":\n raise ValueError(f\"{par} must be positive\")\n elif new_par_dict[par] > 1 and (par == \"eta\" or par == \"p_sick\"):\n raise ValueError(f\"{par} must be less or equal to 1\")\n\n cls.param.update(new_par_dict)", "title": "" }, { "docid": "e3b5bea434ccb30219af6374a8d82253", "score": "0.6169946", "text": "def set_value(self):\n \n for option in self.col:\n if self.header[option][\"update\"] is False:\n self.var[self.col.index(option)]=self.header[option][\"value\"]\n \n cur_params=list(itertools.product(*self.var))\n #print (\"Current Params: \",cur_params)\n for i in cur_params:\n self.params.append(list(i))", "title": "" }, { "docid": "7fa6ae580eb1b884e9fdd594eb01ddf6", "score": "0.6169303", "text": "def set_parameters(self, **parameters):\n for (name, value) in parameters.items():\n self._vertex.set_parameter_values(name, value, self.id)", "title": "" } ]
2885d4cb316da36680a9f9014f740ba0
Given a mapping, return an instance of this wff.
[ { "docid": "5499a8b6d0b49c5ca79eebea683f22a4", "score": "0.51883656", "text": "def makeInstance( self, aMapping ):\n assert isinstance( aMapping, dict )\n\n assert isinstance( self._premiseFormSet, FormSet )\n assert isinstance( self._conclusionFormSet, FormSet )\n\n premiseSetInst = self._premiseFormSet.makeInstance( aMapping )\n conclusionSetInst = self._conclusionFormSet.makeInstance( aMapping )\n return Sequent( premiseSetInst, conclusionSetInst )", "title": "" } ]
[ { "docid": "f09b2d07dcb0f3bb276b3001b9dc68a9", "score": "0.65558213", "text": "def makeInstance( self, aMapping ):\n assert isinstance( aMapping, dict )\n\n assert isinstance( self._operator, str )\n assert isinstance( self._operands, list ) and (len(self._operands) in (1,2))\n\n if len(self._operands) == 1:\n return StructuredWFF( self._operator, self._operands[0].makeInstance( aMapping ) )\n\n else:\n return StructuredWFF( self._operator, self._operands[0].makeInstance( aMapping ),\n self._operands[1].makeInstance( aMapping ) )", "title": "" }, { "docid": "238545911d05626e6917922e211c55fc", "score": "0.64412886", "text": "def makeInstance( self, aMapping ):\n raise NotImplementedError", "title": "" }, { "docid": "e6798e18bdcbe0fdad388d2a55d7ac0a", "score": "0.6005069", "text": "def makeInstance( self, aMapping ):\n assert isinstance( aMapping, dict )\n\n assert isinstance( self._sym, str )\n\n return copy.deepcopy( aMapping[ self._sym ] )", "title": "" }, { "docid": "9303d2df3083555e208e560812a459cc", "score": "0.59024256", "text": "def _mapTo( self, other, aMapping ):\n assert isinstance( other, WFF )\n assert isinstance( aMapping, dict )\n\n assert isinstance( self._sym, str )\n\n if self._sym not in aMapping:\n mapCopy = copy.copy( aMapping )\n mapCopy[ self._sym ] = other\n return mapCopy\n elif aMapping[ self._sym ] == other:\n return aMapping\n else:\n return {}", "title": "" }, { "docid": "0447a84e7acf5f08be92effa8ab57dab", "score": "0.5828514", "text": "def mapping(self, mapping) :\n\t\ttry :\n\t\t\tself._mapping = mapping\n\t\texcept Exception as e:\n\t\t\traise e", "title": "" }, { "docid": "b065af3e77b3a6e616f935e3c142c7ba", "score": "0.5738215", "text": "def newSpectrumMapping(self, **attrlinks):\n return SpectrumMapping(self, **attrlinks)", "title": "" }, { "docid": "021389582e74c572a79d56b33c53d16f", "score": "0.556509", "text": "def map(self, f):\n ret = MappedDataset(self, f)\n return ret", "title": "" }, { "docid": "781e9b5102518c55a8d23f0fa4effe30", "score": "0.5471244", "text": "def _mapTo( self, other, aMapping ):\n assert isinstance( other, Form )\n assert isinstance( aMapping, dict )\n\n raise NotImplementedError", "title": "" }, { "docid": "982e47eb0409fe20be3485e38884c050", "score": "0.5408524", "text": "def mapTo( self, other, aMapping=None ):\n assert isinstance( other, Form )\n assert isinstance( aMapping, dict ) or ( aMapping is None )\n\n if aMapping is None:\n aMapping={ }\n\n return self._mapTo( other, aMapping )", "title": "" }, { "docid": "bbfe3ef11b21999345bb54819ab6728c", "score": "0.5376762", "text": "def mapping():\n create_mapping()", "title": "" }, { "docid": "aa808660fa3f2af6e884c2c5c80304f4", "score": "0.5363168", "text": "def substitute(self, mapping):\n\n return mapping(self)", "title": "" }, { "docid": "fbf1e0f2e63049d2976425d8d2ce1154", "score": "0.5334537", "text": "def newMap():\n\tnewFactorioMap()", "title": "" }, { "docid": "137194fb0837a901663e6aa62a9e797c", "score": "0.532372", "text": "def _mapTo( self, other, aMapping ):\n assert isinstance( other, WFF )\n assert isinstance( aMapping, dict )\n\n assert isinstance( self._operator, str )\n assert isinstance( self._operands, list ) and (len(self._operands) in (1,2))\n\n if (not isinstance(other, StructuredWFF)) or (self._operator != other._operator) or (len(self._operands) != len(other._operands)):\n return { }\n\n if len(self._operands) == 1:\n # Unary Operation\n return self._operands[0]._mapTo( other._operands[0], aMapping )\n\n else:\n # Binary Operation\n subMap = 
self._operands[0]._mapTo( other._operands[0], aMapping )\n if subMap != { }:\n return self._operands[1]._mapTo( other._operands[1], subMap )\n else:\n return { }", "title": "" }, { "docid": "ef07304052d7d1adaf5330364bcd33cc", "score": "0.52282137", "text": "def map(self, f):\n return Just(f(self.value)) if self.defined else self", "title": "" }, { "docid": "8b1645683efacbfa3a376de8092ac8db", "score": "0.52134436", "text": "def mapping(self) :\n\t\ttry :\n\t\t\treturn self._mapping\n\t\texcept Exception as e:\n\t\t\traise e", "title": "" }, { "docid": "04c49a1abc6547ea5e970bb9beb1c4cc", "score": "0.5200445", "text": "def __init__(self, by=None, over=None):\n super(Map, self).__init__(\"map\")\n self.args = [KWArg(\"by\", by), KWArg(\"over\", over)]", "title": "" }, { "docid": "f60dde2241ae5c66bcdcc9aec713e1ba", "score": "0.5184408", "text": "def buildSingleSubstSubtable(mapping):\n if not mapping:\n return None\n self = ot.SingleSubst()\n self.mapping = dict(mapping)\n return self", "title": "" }, { "docid": "14f9de637b0acbbef8f895e34e376dfa", "score": "0.51530796", "text": "def newAxisMapping(self, **attrlinks):\n return AxisMapping(self, **attrlinks)", "title": "" }, { "docid": "44db8566b812f74426e5d1410ccc933c", "score": "0.5146997", "text": "def __init__(self, mapping={}):\n self._keys = ()\n self.update(mapping)", "title": "" }, { "docid": "1f9849afefb1d84ac31a326417c2de43", "score": "0.51469404", "text": "def __init__(self, doc):\n this = _fitz.new_Graftmap(doc)\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "title": "" }, { "docid": "9cc2b7cca913363e6513a09b939deee2", "score": "0.513791", "text": "def __init__(self, mapping={}):\n if isinstance(mapping, Mapping):\n self._mapping = {arg: _make_rational(value)\n for arg, value in mapping.iteritems()}\n else:\n raise TypeError(\"specify a mapping\")", "title": "" }, { "docid": "2d6ad580bf9856ec3663df6808f3b158", "score": "0.5113967", "text": "def __init__(self, mapping=None,**kwargs):\n if mapping is not None:\n #make a lookup table out of the dictionary (its faster)\n self.mapping_dict = dlist_to_dict(mapping)\n else:\n self.mapping_dict = None\n\n AEXClient.__init__(self, **kwargs)", "title": "" }, { "docid": "9bb73c422d211f0b0d22278e87459f14", "score": "0.51103866", "text": "def wrap(cls, obj):\n mp, created = Mapping.objects.get_or_create(type=Mapping.get_type(obj),\n ident=obj.id, defaults={'name': normalize_to_ascii(obj.get_name())})\n return mp", "title": "" }, { "docid": "1141a261d739c26e9f9d20bfb400a06f", "score": "0.51096994", "text": "def from_mapping(cls, m: Mapping[Any, Any]) -> \"FlatMapping\":\n if isinstance(m, FlatMapping):\n return cls(m.flatten())\n return cls(deconstruct(m))", "title": "" }, { "docid": "330aed2aae68fd65f2b2d63360cd6489", "score": "0.51013047", "text": "def binary_map(self):\n if self.pat_obj.data_name == 'LTRC':\n _ = self.LTRC_fats()\n elif self.pat_obj.data_name == 'UMM':\n _ = self.UMM_fats()\n elif self.pat_obj.data_name == 'UKSH':\n _ = self.UKSH_fats()\n return self", "title": "" }, { "docid": "71c60916ba5901f5cb3620e187c9e7d2", "score": "0.507238", "text": "def compose(self, mapping):\n items = [f.compose(mapping) for f in self._items]\n return self.__class__(items, self.shape, self.ftype)", "title": "" }, { "docid": "3d0306e11baa8b81d9862a3383829655", "score": "0.4930538", "text": "def mapping(self):\n return self.__mappings.copy()", "title": "" }, { "docid": "9d987d40b59782aa2bb6795ed5b2ad9a", "score": "0.4899019", "text": "def from_mmap(cls, fname):\n 
memmaped = joblib.load(fname, mmap_mode='r+')\n train = cls.read_hidden_layer(fname)\n return cls(vocab=memmaped.vocab, vectors=memmaped.vectors, train=train)", "title": "" }, { "docid": "a4b6fe03178af6907e99fa9921b6d0fb", "score": "0.48903614", "text": "def project_to_object(self, mapping):\r\n\t\treturn ProjectElementToObjectQuery(self, mapping)", "title": "" }, { "docid": "7da4ef38a2d80e5da7c318a9e7436ef8", "score": "0.48776278", "text": "def set(self, mapping):\n self.params.updateall(mapping)\n return self", "title": "" }, { "docid": "f97227812009b68f4cb1550c3a44cff8", "score": "0.48722222", "text": "def _generate_mapping(self):\n mapping = Mapping()\n rules = self._get_rules()\n mapping.mapping[\"rules\"] = rules\n return mapping", "title": "" }, { "docid": "acd3a2a434650fec8af6b702c3f78460", "score": "0.4866872", "text": "def __init__(self, doc):\n _fitz.Graftmap_swiginit(self, _fitz.new_Graftmap(doc))", "title": "" }, { "docid": "af0b8eb20c2c02ea271701f5ae45a6ca", "score": "0.48515648", "text": "def construct_wd_item(self, mapping, data_files=None):\n self.wd_item = {}\n self.wd_item[\"statements\"] = {}\n self.wd_item[\"labels\"] = {}\n self.wd_item[\"aliases\"] = {}\n self.wd_item[\"descriptions\"] = {}\n self.wd_item[\"disambiguators\"] = {}\n self.wd_item[\"wd-item\"] = None\n self.mapping = mapping\n self.create_source()\n self.set_heritage_id()\n self.set_heritage()\n self.set_country()\n self.set_adm_location()\n self.set_coords()\n self.set_is()\n self.set_street_address()\n self.update_descriptions()\n self.update_labels()\n self.set_wd_item(self.get_matching_existing())", "title": "" }, { "docid": "b0154273cf29864e24c9ec37777b0335", "score": "0.48471454", "text": "def set_dio_mapping_2(self, mapping):\n assert mapping & 0b00001110 == 0\n self.dio_mapping = self.dio_mapping[0:4] + [mapping>>6 & 0x03, mapping>>4 & 0x03]\n return mapping", "title": "" }, { "docid": "91df10e78462fe6d9504e60673e30149", "score": "0.48292074", "text": "def set_dio_mapping(self, mapping):\n mapping_1 = (mapping[0] & 0x03) << 6 | (mapping[1] & 0x03) << 4 | (mapping[2] & 0x3) << 2 | mapping[3] & 0x3\n mapping_2 = (mapping[4] & 0x03) << 6 | (mapping[5] & 0x03) << 4\n self.set_dio_mapping_1(mapping_1)\n return self.set_dio_mapping_2(mapping_2)", "title": "" }, { "docid": "db7e74004280ec4420ca97125d30d400", "score": "0.48196474", "text": "def disc_forwardmap(self, x, W, b):\n return self.f(x, W, b)", "title": "" }, { "docid": "e094579c46c12f6f1ae7e04a95dcb55d", "score": "0.48176345", "text": "def __init__(self, map):\n self.name = 'Map Built Event'\n self.map = map", "title": "" }, { "docid": "be2b8869bbdcc38665f5abaea40166e7", "score": "0.4808399", "text": "def get_dio_mapping_2(self, mapping):\n self.dio_mapping = self.dio_mapping[0:4] + [mapping>>6 & 0x03, mapping>>4 & 0x03]\n return self.dio_mapping", "title": "" }, { "docid": "3247ec0a579e27209954e07cfd4fe18c", "score": "0.48075378", "text": "def map(self, f):\n pass", "title": "" }, { "docid": "31b31a37e612ea01cb842fdc3b45a84b", "score": "0.47808006", "text": "def asMapping(self):\n self.interface = Interface.MAPPING", "title": "" }, { "docid": "012262873590d551969bda70b10c34fb", "score": "0.4763644", "text": "def weld_udf(weld_template, mapping):\n weld_obj = create_empty_weld_object()\n\n for k, v in mapping.items():\n if isinstance(v, (np.ndarray, WeldObject)):\n obj_id = get_weld_obj_id(weld_obj, v)\n mapping.update({k: obj_id})\n\n weld_obj.weld_code = weld_template.format(**mapping)\n\n return weld_obj", "title": "" }, { "docid": 
"9bc6090baee0b35e9ab59b84694ea229", "score": "0.47623616", "text": "def __init__(self, map=None, maker=None):\n if map and hasattr(map, 'keys') and hasattr(map, 'create_fx'):\n self.fx_map = map\n if maker and hasattr(maker, 'anim_txt'):\n self.fx_maker = maker\n return", "title": "" }, { "docid": "30993cdcbcb464c7d8f2c72087bea803", "score": "0.4749373", "text": "def __init__(self, normalizer=None, mapping=None):\n self.normalizer = normalizer\n self.mapping = mapping", "title": "" }, { "docid": "02b4a42c158e49df6c476fc8069f720a", "score": "0.47493452", "text": "def buildAlternateSubstSubtable(mapping):\n if not mapping:\n return None\n self = ot.AlternateSubst()\n self.alternates = dict(mapping)\n return self", "title": "" }, { "docid": "5456a10b49cea3750adb1f3116532599", "score": "0.47276133", "text": "def __copy__(self):\n\n # we do not want to keep the same reference on the super class map, only the existing content\n other = Map(self._inner, existing=self)\n # other._calls = self._calls # drop current call queue.\n return other", "title": "" }, { "docid": "b4c9e2377d2684f35cb767b50388404c", "score": "0.47226706", "text": "def newChainMapping(self, **attrlinks):\n return ChainMapping(self, **attrlinks)", "title": "" }, { "docid": "02b423feb94321e32fd58ccf2aff9e2f", "score": "0.47177505", "text": "def get_map(map_id):\n pass", "title": "" }, { "docid": "c6aa9d148cdc159822ef967d0de72263", "score": "0.47019303", "text": "def flat_map(self, f):\n return f(self.value) if self.defined else self", "title": "" }, { "docid": "47579cbaf7e5e2b30c3f2d8d23ff04a8", "score": "0.46934503", "text": "def instance(data):\n return FieldMapRCPerMin(data)", "title": "" }, { "docid": "a25a11ca0c74964ec3c3ee9738ba5d0d", "score": "0.46863535", "text": "def new_feature_map(self, device):\n raise NotImplementedError()", "title": "" }, { "docid": "736a69135fe8935fc89198c82e0e6ac9", "score": "0.46759808", "text": "def __mul__(self, other):\n return WordMorphism(dict((key, self(w)) for (key, w) in other._morph.iteritems()), codomain=self.codomain())", "title": "" }, { "docid": "c74e91b4a9bc6484f7c28284cf6e481f", "score": "0.46693775", "text": "def get_mapping(mapping_name):\n #read in mapping\n if type(mapping_name)==str or type(mapping_name)==unicode:\n if mapping_name.split(\".\")[-1] == \"mst\": \n mapping, obj = create_map_from_reformulation(mapping_name)\n elif mapping_name.split(\".\")[-1] == \"txt\":\n mapping = create_map_from_txt(mapping_name)\n return mapping\n else:\n return mapping_name", "title": "" }, { "docid": "6bfb632090546faaec1f4a1bf8bb1e39", "score": "0.4668573", "text": "def _merge(self, mapping):\n for key in mapping.keys():\n self[key] = mapping[key]", "title": "" }, { "docid": "a000ecb66b8499b537f189832e52c9a1", "score": "0.46623516", "text": "def makeConclusionSetInstance( self, aMapping ):\n assert isinstance( aMapping, dict )\n\n assert isinstance( self._premiseFormSet, FormSet )\n assert isinstance( self._conclusionFormSet, FormSet )\n\n return self._conclusionFormSet.makeInstance( aMapping )", "title": "" }, { "docid": "8d7c5a6466b01c18fc81f574248c51c7", "score": "0.46566683", "text": "def __load__(cls, mapping):\n args = []\n for field in cls.__fields__:\n if field.key not in mapping:\n if field.required:\n raise ValueError(\"missing field %r\" % field.key)\n arg = field.default\n else:\n arg = mapping.pop(field.key)\n if field.check is not None and not isinstance(arg, field.check):\n raise ValueError(\"invalid field %r: expected %s, got %r\"\n % (field.key, field.check.__name__, 
arg))\n args.append(arg)\n if mapping:\n key = sorted(mapping)[0]\n raise ValueError(\"unknown field %r\" % key)\n return cls(*args)", "title": "" }, { "docid": "04eeabb5bfecb341dae2458adb786891", "score": "0.46444923", "text": "def LensMe( mymap, phimap, psi=0.0 ):\n assert( mymap.Compatible(phimap) )\n\n nx, ny = phimap.nx, phimap.ny\n dx, dy = phimap.dx, phimap.dy\n\n # lx, ly = np.meshgrid( np.fft.fftfreq( nx, dx )[0:nx/2+1]*2.*np.pi, np.fft.fftfreq( ny, dy )*2.*np.pi )\n lx, ly = np.meshgrid( np.fft.fftfreq( nx, dx )*2.*np.pi, np.fft.fftfreq( ny, dy )*2.*np.pi )\n\n pfft = np.fft.fft2(phimap.map)\n\n # deflection field\n x, y = np.meshgrid( np.arange(0,nx)*dx, np.arange(0,ny)*dy )\n gpx = np.fft.ifft2( pfft * lx * -1.j)# * np.sqrt( (nx*ny)/(dx*dy) ) )\n gpy = np.fft.ifft2( pfft * ly * -1.j)# * np.sqrt( (nx*ny)/(dx*dy) ) )\n\n if psi != 0.0:\n gp = (gpx + 1.j*gpy)*np.exp(1.j*psi)\n gpx = gp.real\n gpy = gp.imag\n\n lxs = (x+gpx).flatten(); del x, gpx\n lys = (y+gpy).flatten(); del y, gpy\n\n # interpolate\n mymap_lensed = scipy.interpolate.RectBivariateSpline( np.arange(0,ny)*dy, np.arange(0,nx)*dx, mymap.map ).ev(lys, lxs).reshape([ny,nx])\n\n return mymap_lensed", "title": "" }, { "docid": "db2885e9a526520f55b063a5f17589ac", "score": "0.46441907", "text": "def set_mapping(self, mapping=False):\n if isinstance(mapping, bool):\n self.configure(\"mapping\", mapping)\n elif isinstance(mapping, float) and mapping <= 0.5 and mapping >= 0.0:\n self.configure(\"mapping\", mapping)\n elif mapping == \"function\":\n self.configure(\"mapping\", mapping)\n else:\n raise exceptions.MakerError((\"Mapping should be a boolean, \"\n \"'function' (str) or a float between \"\n \"0.0 and 1.0\"))\n logger.info(\"Mapping is set to '{}'\".format(mapping))", "title": "" }, { "docid": "1554e7028bb5fe03533536b4f928e649", "score": "0.46269515", "text": "def __init__(self, fighters, map):\n self.fighters = fighters\n self.map = map", "title": "" }, { "docid": "b3fa36026c350153f0e9cc69725ae0e2", "score": "0.46242103", "text": "def __init__(self):\n self.map = {}", "title": "" }, { "docid": "e8a77ca4008805a295a922bdeecfc8bc", "score": "0.46207017", "text": "def _mapTo( self, anInst, aMapping ):\n assert isinstance( self._premiseFormSet, FormSet )\n assert isinstance( self._conclusionFormSet, FormSet )\n\n assert isinstance( anInst, Sequent )\n assert isinstance( aMapping, dict )\n\n subMap = self.mapPremisesTo( anInst._premiseFormSet, aMapping )\n\n # Map to the conclusion\n subMap = self._conclusionFormSet.mapTo( anInst._conclusionFormSet, subMap )\n if subMap == { }:\n return { }\n\n return subMap", "title": "" }, { "docid": "8088029a3087a63e6f45e578f8d390d0", "score": "0.46179044", "text": "def __init__(self, mapping: pd.DataFrame):\n cols = mapping.columns\n if len(cols) == 4:\n self.colours = np.array(mapping[cols[1:4]], dtype=np.uint8)\n elif len(cols) == 2:\n self.colours = np.array([\n ColourMapping.__asrgb(x)\n for x in mapping[cols[1]]], dtype=np.uint8)\n else:\n raise TypeError(\"Mapping must be an (N,2) or (N,4) array\")\n self.minerals = list(mapping[cols[0]])", "title": "" }, { "docid": "2725a11bde58ce6126c3f9c1f8b37a22", "score": "0.46095604", "text": "def new_instance(self):\n inst = MVCCMappingStorage(name=self.__name__)\n # All instances share the same OID data, transaction log, commit lock,\n # and OID sequence.\n inst._data = self._data\n inst._transactions = self._transactions\n inst._commit_lock = self._commit_lock\n inst.new_oid = self.new_oid\n inst.pack = self.pack\n inst.loadBefore = 
self.loadBefore\n inst._ltid = self._ltid\n inst._main_lock = self._lock\n return inst", "title": "" }, { "docid": "8b9adc5c2e027e4a4eb54c92b5f79152", "score": "0.46038094", "text": "def set_dio_mapping_1(self, mapping):\n self.dio_mapping = [mapping>>6 & 0x03, mapping>>4 & 0x03, mapping>>2 & 0x03, mapping>>0 & 0x03] \\\n + self.dio_mapping[4:6]\n return mapping", "title": "" }, { "docid": "5b98fafe158ea2dd1ff430d1e6f4fc7f", "score": "0.459754", "text": "def __init__(self, mapping_type):\n return super(S, self).__init__(mapping_type)", "title": "" }, { "docid": "da266da1d7118c7eddf910cb5bb20c27", "score": "0.4591292", "text": "def new_measurement_map(self):\n return MeasurementMap(self.measure_to_view_map)", "title": "" }, { "docid": "cbb3ec4da4742f6780897146d07c0fd7", "score": "0.45907634", "text": "def __init__(self,\n label_map,\n max_seq_length,\n do_lower_case,\n converter,\n use_open_vocab,\n vocab_file = None,\n converter_insertion = None,\n special_glue_string_for_sources = None):\n self.label_map = label_map\n inverse_label_map = {}\n for label, label_id in label_map.items():\n if label_id in inverse_label_map:\n raise ValueError(\n 'Multiple labels with the same ID: {}'.format(label_id))\n inverse_label_map[label_id] = label\n self._inverse_label_map = frozendict.frozendict(inverse_label_map)\n self.tokenizer = tokenization.FullTokenizer(\n vocab_file, do_lower_case=do_lower_case)\n self._max_seq_length = max_seq_length\n self._converter = converter\n self._pad_id = self._get_pad_id()\n self._do_lower_case = do_lower_case\n self._use_open_vocab = use_open_vocab\n self._converter_insertion = converter_insertion\n if special_glue_string_for_sources is not None:\n self._special_glue_string_for_sources = special_glue_string_for_sources\n else:\n self._special_glue_string_for_sources = ' '", "title": "" }, { "docid": "9e4886ccdad6fd64c2af5071dd528bb1", "score": "0.45883942", "text": "def inverse_mapping(mapping):\n return mapping.__class__(map(reversed, mapping.items()))", "title": "" }, { "docid": "25cdddab5b15a45bbbd3fe9fdac43109", "score": "0.45838305", "text": "def build_mapping(self):\n\t\treturn self.mapping_args", "title": "" }, { "docid": "b9d0072d95f11a1baea6a1f9b221a990", "score": "0.45836902", "text": "def mapped(self, f, *args, **kwargs):\n return L(map(partial(f,*args,**kwargs), self))", "title": "" }, { "docid": "d3e05e1248b7d1be557916db198e59c0", "score": "0.45798346", "text": "def map(cls, auth, resource, resource_mapping):\n if not resource in resource_mapping:\n return None\n return cls(auth=auth, children=resource_mapping[resource])", "title": "" }, { "docid": "2c3240a1981eb806737196b7db038cd8", "score": "0.45610112", "text": "def newResidueMapping(self, **attrlinks):\n return ResidueMapping(self, **attrlinks)", "title": "" }, { "docid": "8118874a06bd1f7fee1fb44c2f87f461", "score": "0.45606297", "text": "def instance(data):\n return FieldMapRCPerMinStd(data)", "title": "" }, { "docid": "4873cd9f788c4eee2b5e989ed4184b6a", "score": "0.45594597", "text": "def setMap(self, map):\n\n self.width = map.width\n self.height = map.height\n self.map = map\n self.populate(self.N, self.initAngle, probability=1./self.N)\n self.check_relevance()", "title": "" }, { "docid": "6f588a0a6b8cb582e9f41a8a8b54798e", "score": "0.45587167", "text": "def instantiate(self, mapping, parent=None):\n if not isinstance(mapping, dict):\n mapping = dict((param.name, c) for (param, c) in zip(self.args, mapping))\n Scope.instantiate(self, mapping, parent)", "title": "" }, { "docid": 
"63203a5e964db323487a566da1b4f485", "score": "0.4557118", "text": "def map(self):\n return self.__map", "title": "" }, { "docid": "dfbeb4bfab0ed3a57c2ef0882d7e7ca2", "score": "0.45566216", "text": "def makeInstance( self, aMap ):\n assert isinstance( aMap, dict )\n\n assert isinstance( self._set, list )\n\n instSet = FormSet( )\n\n for prop in self:\n instSet.append( prop.makeInstance( aMap ) )\n\n return instSet", "title": "" }, { "docid": "fb2018bc71ccfb8c3f6a5ef57742d336", "score": "0.45545876", "text": "def __init__(self,mapfn,domain=(0,1),vector=False):\n\n self.mapfn = mapfn\n self.domain = domain\n self.vector = vector", "title": "" }, { "docid": "fdee7db553314c53a9eea88647edb78f", "score": "0.45540717", "text": "def __init__(\n self,\n mapping: ColourMapping,\n fields: Optional[Dict[str, Field]] = None):\n self.__neighbours = NearestNeighbors(n_neighbors=1)\n self.__neighbours.fit(mapping.colours)\n self.minerals = list(mapping.minerals)\n self.fields = fields or {}", "title": "" }, { "docid": "9cae5c881ec36bf5c0f68f6afcf37571", "score": "0.4551245", "text": "def get_dio_mapping_1(self, mapping):\n self.dio_mapping = [mapping>>6 & 0x03, mapping>>4 & 0x03, mapping>>2 & 0x03, mapping>>0 & 0x03] \\\n + self.dio_mapping[4:6]\n return self.dio_mapping", "title": "" }, { "docid": "b2b94ad1f1de5db0d71b737116c4cd34", "score": "0.45485887", "text": "def __init__(self):\n self.maps = BetterMap(2)\n self.num = 0", "title": "" }, { "docid": "b2b94ad1f1de5db0d71b737116c4cd34", "score": "0.45485887", "text": "def __init__(self):\n self.maps = BetterMap(2)\n self.num = 0", "title": "" }, { "docid": "00ede858b700de5a76a383b06379127d", "score": "0.4540548", "text": "def from_map(name, inobj):\n obj = EventTrigger(\n name, inobj.pop('description', None), inobj.pop('owner', None),\n inobj.pop('event', None), inobj.pop('procedure', None),\n inobj.pop('enabled', False), inobj.pop('tags', None))\n obj.set_oldname(inobj)\n return obj", "title": "" }, { "docid": "57894bee6675adca7f18edc678c95987", "score": "0.4537845", "text": "def __init__(\n self,\n phoneme_dict=None,\n word_tokenize_func=lambda x: x,\n apply_to_oov_word=None,\n mapping_file: Optional[str] = None,\n ):\n self.phoneme_dict = phoneme_dict\n self.word_tokenize_func = word_tokenize_func\n self.apply_to_oov_word = apply_to_oov_word\n self.mapping_file = mapping_file\n self.heteronym_model = None # heteronym classification model", "title": "" }, { "docid": "51b9f6cdaec6a5a058c02e5335bc5895", "score": "0.4537208", "text": "def create_mapping(self, kind, ig):\n from sfepy.fem.mappings import VolumeMapping, SurfaceMapping\n from sfepy.fem.fe_surface import FESurface\n\n coors = self.domain.get_mesh_coors()\n if kind == 's':\n coors = coors[self.all_vertices]\n\n gel = self.domain.groups[ig].gel\n conn = self.domain.groups[ig].conn\n\n if kind == 'v':\n cells = self.cells[ig]\n\n mapping = VolumeMapping(coors, conn[cells], gel=gel)\n\n elif kind == 's':\n aux = FESurface('aux', self, gel.get_surface_entities(),\n conn , ig)\n mapping = SurfaceMapping(coors, aux.leconn, gel=gel.surface_facet)\n\n return mapping", "title": "" }, { "docid": "83ed4b8ea8a102b21555015ed7af0999", "score": "0.45141146", "text": "def build_accessor_map(mapping):\n return dict( ( k, DA(k, mapping[k]) ) for k in mapping )", "title": "" }, { "docid": "f34c3ae6f7bb487795f1e4becb6d1727", "score": "0.4511679", "text": "def addMapping(name, obj):", "title": "" }, { "docid": "d399c707a5fd94c0e0ce3dde7d023462", "score": "0.4509916", "text": "def __init__(self):\n 
self.list_of_mappings = []", "title": "" }, { "docid": "4e08569acdc38a05d0cfce1c30e60475", "score": "0.45024487", "text": "def map (self, func):\n\t\treturn channel.create(map(func, self))", "title": "" }, { "docid": "9a7b5236d8e19c18e55915bbf0c90ec1", "score": "0.44895798", "text": "def set_mappings(self,mappings):\n self.mapper = Mapper(mappings)", "title": "" }, { "docid": "9a7b5236d8e19c18e55915bbf0c90ec1", "score": "0.44895798", "text": "def set_mappings(self,mappings):\n self.mapper = Mapper(mappings)", "title": "" }, { "docid": "4fa42617bf340e2635523a5ea5d8059d", "score": "0.44888553", "text": "def mapping(self) -> typing.Optional[\"Node\"]:\n return self._mapping", "title": "" }, { "docid": "c98350c46c44eb2c0af5fa16a5a3b34a", "score": "0.44800687", "text": "def _get_map(self):\n # create a mapscript map from scratch\n map = mapscript.mapObj()\n # Configure the map\n # NAME\n map.name = \"BCCVLMap\"\n # EXTENT ... in projection units (we use epsg:4326) WGS84\n map.extent = mapscript.rectObj(-180.0, -90.0, 180.0, 90.0)\n # map.extent = mapscript.rectObj(-20026376.39, -20048966.10, 20026376.39, 20048966.10)\n # UNITS ... in projection units\n map.units = mapscript.MS_DD\n # SIZE\n map.setSize(256, 256)\n # PROJECTION ... WGS84\n map.setProjection(\"init=epsg:4326\")\n # IMAGETYPE\n map.selectOutputFormat(\"PNG24\") # PNG, PNG24, JPEG\n map.outputformat.imagemode = mapscript.MS_IMAGEMODE_RGBA\n map.outputformat.transparent = mapscript.MS_ON\n\n # TRANSPARENT ON\n map.transparent = mapscript.MS_ON\n # IMAGECOLOR 255 255 255\n # map.imagecolor = mapscript.colorObj(255, 255, 255) ... initial color if transparent is on\n # SYMBOLSET (needed to draw circles for CSV points)\n self._update_symbol_set(map)\n # metadata: wms_feature_info_mime_type text/htm/ application/json\n # WEB\n # TODO: check return value of setMetaData MS_SUCCESS/MS_FAILURE\n map.setMetaData(\"wms_enable_request\", \"*\")\n map.setMetaData(\"wms_title\", \"BCCVL WMS Server\")\n # allow reprojection to Web Mercator\n map.setMetaData(\"wms_srs\", \"EPSG:4326 EPSG:3857\")\n # wms_enable_request enough?\n map.setMetaData(\"ows_enable_request\", \"*\")\n onlineresource = urlparse.urlunsplit(\n (self.request.scheme,\n \"{}:{}\".format(self.request.host, self.request.host_port),\n self.request.path,\n urllib.urlencode((('DATA_URL', self.request.params.get('DATA_URL').encode('utf-8')), )),\n \"\"))\n map.setMetaData(\"wms_onlineresource\", onlineresource)\n # TODO: metadata\n # title, author, xmp_dc_title\n # wms_onlineresource ... help to generate GetCapabilities request\n # ows_http_max_age ... WMS client caching hints http://www.mnot.net/cache_docs/#CACHE-CONTROL\n # ows_lsd_enabled ... if false ignore SLD and SLD_BODY\n # wms_attribution_xxx ... 
do we want attribution metadata?\n\n # SCALEBAR\n if False:\n # LABEL\n sbl = mapscript.labelObj()\n sbl.color = mapscript.colorObj(0, 0, 0)\n sbl.antialias = mapscript.MS_TRUE\n sbl.size = mapscript.MS_LARGE\n sb = mapscript.scalebarObj()\n sb.label = sbl\n sb.status = mapscript.MS_ON\n map.scalebar = sb\n # LEGEND\n if False:\n # LABEL\n legl = mapscript.labelObj()\n legl.color = mapscript.colorObj(64, 64, 64)\n legl.antialias = mapscript.MS_TRUE\n legl.offsetx = -23\n legl.offsety = -1\n leg = mapscript.legendObj()\n leg.keysizex = 32\n leg.keysizey = 10\n leg.keyspacingx = 5\n leg.keyspacingy = -2\n leg.status = mapscript.MS_ON\n map.legend = leg\n return map", "title": "" }, { "docid": "c6d2ef83b275a99419b99266c9a840be", "score": "0.44787076", "text": "def make_map(self):\n\n kwargs=dict()\n kwargs['eps_val'] = self.par.item()\n kwargs['t']=1\n kwargs['delta']=1e-8\n kwargs['var']=0.95\n\n #### NOTE Take advantage of the fact that self.data.x*self.weight is a temp object\n self.dmap = diffuse.diffuse(self.data.x*self.weight,**kwargs)\n self.neigen = self.dmap['neigen']", "title": "" }, { "docid": "a55f9c00b2740d2bb2aaa2588929c33c", "score": "0.4471752", "text": "def get_map(self, x, class_idx):\n pass", "title": "" }, { "docid": "36463471cdca0a6b1ca1b458e7f9deea", "score": "0.44651142", "text": "def map_create(dic):\n map = folium.Map()\n fg = folium.FeatureGroup(name=\"friend_location\")\n for key, value in dic.items():\n try:\n if value != \" *No location found\":\n loc = geo(value)\n fg.add_child(folium.Marker(location=loc,\n popup=str(key).replace(\"'\", \"\"),\n icon=folium.Icon()))\n except:\n continue\n\n map.add_child(fg)\n map.add_child(folium.LayerControl())\n return map.get_root().render()", "title": "" }, { "docid": "43c265dffe5bdad6be4ed6daa6291e0d", "score": "0.44572568", "text": "def map(self):\n return self._map", "title": "" }, { "docid": "11df3ac4b6187c62bf7dfd62fb992d5e", "score": "0.44475576", "text": "def _apply_mapping(self, mapping):\n self._POST[\"P0100LDR__\"] = mapping[0]\n self._POST[\"P0200FMT__\"] = mapping[1]\n self._POST[\"P0300BAS__a\"] = mapping[2]\n self._POST[\"P07022001_b\"] = mapping[3]\n self._POST[\"P1501IST1_a\"] = mapping[4]", "title": "" }, { "docid": "ef7cb5f89a52e8d1520f0a97be58cb6e", "score": "0.4447032", "text": "def get_mapping(cls):\n doc_type = cls.get_mapping_type_name()\n\n mapping = {\n doc_type: {\n 'properties': {\n 'id': {'type': 'long'},\n 'app': {'type': 'long'},\n 'background_color': cls.string_not_analyzed(),\n 'color': cls.string_not_analyzed(),\n 'created': {'type': 'date', 'format': 'dateOptionalTime'},\n 'image_hash': cls.string_not_analyzed(),\n 'item_type': cls.string_not_analyzed(),\n 'preview': {'type': 'object', 'dynamic': 'true'},\n 'pullquote_attribution': cls.string_not_analyzed(),\n 'pullquote_rating': {'type': 'short'},\n 'pullquote_text': {'type': 'string',\n 'analyzer': 'default_icu'},\n 'search_names': {'type': 'string',\n 'analyzer': 'default_icu'},\n 'slug': get_slug_multifield(),\n 'type': cls.string_not_analyzed(),\n }\n }\n }\n\n return cls.attach_translation_mappings(mapping, ('description',))", "title": "" }, { "docid": "c194dda46c3a0ebf934b3bbafbeb2996", "score": "0.44454628", "text": "def createS2Map(self):\n pass", "title": "" }, { "docid": "4a4b4b6fbf0b6ee23e2b3c013dfaba5a", "score": "0.4438958", "text": "def buildLigatureSubstSubtable(mapping):\n\n if not mapping:\n return None\n self = ot.LigatureSubst()\n # The following single line can replace the rest of this function\n # with fontTools >= 
3.1:\n # self.ligatures = dict(mapping)\n self.ligatures = {}\n for components in sorted(mapping.keys(), key=_getLigatureKey):\n ligature = ot.Ligature()\n ligature.Component = components[1:]\n ligature.CompCount = len(ligature.Component) + 1\n ligature.LigGlyph = mapping[components]\n firstGlyph = components[0]\n self.ligatures.setdefault(firstGlyph, []).append(ligature)\n return self", "title": "" } ]
7125395a73ffefee23e546967d285002
Defines whether manager email notifications are disabled
[ { "docid": "5c31e01435d151dd270a8668fb24a20e", "score": "0.84846765", "text": "def disable_manager_email_notification(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"disable_manager_email_notification\")", "title": "" } ]
[ { "docid": "61f5b4c8dcc3ca35df9d2c861dee6770", "score": "0.7824334", "text": "def disable_email_notification(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"disable_email_notification\")", "title": "" }, { "docid": "61f5b4c8dcc3ca35df9d2c861dee6770", "score": "0.7824334", "text": "def disable_email_notification(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"disable_email_notification\")", "title": "" }, { "docid": "e1c68f642eae84d6dcc392e360a8a0b2", "score": "0.77685374", "text": "def disable_email_notification(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"disable_email_notification\")", "title": "" }, { "docid": "fff997b110e9c6d8321f7931333daf88", "score": "0.7394228", "text": "def disabled(*args, **kwargs):\n return ManagerNotificationWrapper(ACTIONS.disabled, *args, **kwargs)", "title": "" }, { "docid": "128618e79cda32ca2784284d764abc48", "score": "0.7140762", "text": "def disable_owner_email_notification(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"disable_owner_email_notification\")", "title": "" }, { "docid": "128618e79cda32ca2784284d764abc48", "score": "0.7140762", "text": "def disable_owner_email_notification(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"disable_owner_email_notification\")", "title": "" }, { "docid": "55aa8a6c83a86c401cf9e603c946620a", "score": "0.6883329", "text": "def __init__(__self__, *,\n disable_manager_email_notification: Optional[pulumi.Input[bool]] = None,\n disable_owner_email_notification: Optional[pulumi.Input[bool]] = None):\n if disable_manager_email_notification is not None:\n pulumi.set(__self__, \"disable_manager_email_notification\", disable_manager_email_notification)\n if disable_owner_email_notification is not None:\n pulumi.set(__self__, \"disable_owner_email_notification\", disable_owner_email_notification)", "title": "" }, { "docid": "55aa8a6c83a86c401cf9e603c946620a", "score": "0.6883329", "text": "def __init__(__self__, *,\n disable_manager_email_notification: Optional[pulumi.Input[bool]] = None,\n disable_owner_email_notification: Optional[pulumi.Input[bool]] = None):\n if disable_manager_email_notification is not None:\n pulumi.set(__self__, \"disable_manager_email_notification\", disable_manager_email_notification)\n if disable_owner_email_notification is not None:\n pulumi.set(__self__, \"disable_owner_email_notification\", disable_owner_email_notification)", "title": "" }, { "docid": "12a7fc650ff0f4cd35c31e7291f8dd0d", "score": "0.665553", "text": "def do_not_send_mail(self):\n return self._do_not_send_mail", "title": "" }, { "docid": "25547f53fcd1785d0dddbb53d0709893", "score": "0.6440922", "text": "def anti_malware_setting_event_email_enabled(self, anti_malware_setting_event_email_enabled):\n\n self._anti_malware_setting_event_email_enabled = anti_malware_setting_event_email_enabled", "title": "" }, { "docid": "0469b4a8f2d98bbd8b827a0357325f89", "score": "0.6435385", "text": "def deactivateNotification(self):\n\t\treturn True", "title": "" }, { "docid": "d9860da92a3173f65ce4923e63ca45a9", "score": "0.64042675", "text": "def disabled():\n return current_app.config[\"COMMUNITIES_ADMINISTRATION_DISABLED\"]", "title": "" }, { "docid": "34a3a555aea423d0fd733ae81c763fec", "score": "0.60177696", "text": "def enable_email(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"enable_email\")", "title": "" }, { "docid": "058dff107655f75fbcaea49cf058d0c9", "score": "0.59724575", "text": "def on_disable(self):\n pass", "title": 
"" }, { "docid": "34e71b2e186ce0977a98e98459c93a42", "score": "0.59722453", "text": "def do_not_send_mail(self, do_not_send_mail):\n allowed_values = [\"Y\", \"N\"]\n if do_not_send_mail not in allowed_values:\n raise ValueError(\n \"Invalid value for `do_not_send_mail`, must be one of {0}\"\n .format(allowed_values)\n )\n self._do_not_send_mail = do_not_send_mail", "title": "" }, { "docid": "f727cebfa664a7eeb04ba6df4856165a", "score": "0.5967442", "text": "def emailable(self):\n return self._get_attribute(\"_emailable\")", "title": "" }, { "docid": "02dd585fac29123e76e5d815d450972b", "score": "0.58404434", "text": "async def anonymous(self, ctx):\n\t\tif self.s.find_one(server_id=ctx.message.guild.id) is None:\n\t\t\tawait ctx.send(\"Use {}setupmodmail to setup modmail first!\".format(ctx.prefix))\n\t\t\treturn\n\t\t\n\t\ttry:\n\t\t\tif self.s.find_one(server_id=ctx.message.guild.id)[\"anonymous\"] == True:\n\t\t\t\tself.s.update(dict(server_id=ctx.message.guild.id, anonymous=False), [\"server_id\"])\n\t\t\t\tawait ctx.send(\"Succesfully disabled anonymous mode for modmail\")\n\t\t\telse:\n\t\t\t\tself.s.update(dict(server_id=ctx.message.guild.id, anonymous=True), [\"server_id\"])\n\t\t\t\tawait ctx.send(\"Succesfully enabled anonymous mode for modmail\")\n\t\texcept:\n\t\t\tawait ctx.send(\"Failed to change value.\")", "title": "" }, { "docid": "8ab9a9cb3104feb36e795ca882f35da4", "score": "0.5832753", "text": "def disabled(self) -> bool:\n return pulumi.get(self, \"disabled\")", "title": "" }, { "docid": "8ab9a9cb3104feb36e795ca882f35da4", "score": "0.5832753", "text": "def disabled(self) -> bool:\n return pulumi.get(self, \"disabled\")", "title": "" }, { "docid": "8ab9a9cb3104feb36e795ca882f35da4", "score": "0.5832753", "text": "def disabled(self) -> bool:\n return pulumi.get(self, \"disabled\")", "title": "" }, { "docid": "43199eaf435d66136f54bf24ce277504", "score": "0.58103865", "text": "def on_disable():\n pass", "title": "" }, { "docid": "43199eaf435d66136f54bf24ce277504", "score": "0.58103865", "text": "def on_disable():\n pass", "title": "" }, { "docid": "1a4896acb4dd7b24777593bc2abfda62", "score": "0.5752156", "text": "def testInboundEmailDisabled(self):\n self.project.process_inbound_email = False\n email_task = notify_helpers._MakeEmailWorkItem(\n notify_reasons.AddrPerm(\n True, '[email protected]', self.member, REPLY_MAY_UPDATE,\n user_pb2.UserPrefs()),\n ['reason'], self.issue,\n 'body link-only', 'body non', 'body mem',\n self.project, 'example.com', self.commenter_view, self.detail_url)\n self.assertEqual(emailfmt.NoReplyAddress(), email_task['reply_to'])", "title": "" }, { "docid": "bb9f0238795673601e97d48d365af1a8", "score": "0.57519287", "text": "async def toggle(self, ctx):\n\t\tif self.s.find_one(server_id=ctx.message.guild.id) is None:\n\t\t\tawait ctx.send(\"Use {}setupmodmail to setup modmail first!\".format(ctx.prefix))\n\t\t\treturn\n\t\t\n\t\ttry:\n\t\t\tif self.s.find_one(server_id=ctx.message.guild.id)[\"enabled\"] == True:\n\t\t\t\tself.s.update(dict(server_id=ctx.message.guild.id, enabled=False), [\"server_id\"])\n\t\t\t\tawait ctx.send(\"Succesfully disabled modmail\")\n\t\t\telse:\n\t\t\t\tself.s.update(dict(server_id=ctx.message.guild.id, enabled=True), [\"server_id\"])\n\t\t\t\tawait ctx.send(\"Succesfully enabled modmail\")\n\t\texcept:\n\t\t\tawait ctx.send(\"Failed to change value.\")", "title": "" }, { "docid": "d43f4b45ed6f17912066ad6f7914be4e", "score": "0.57329535", "text": "def is_notifications_enabled(self, is_notifications_enabled):\n\n 
self._is_notifications_enabled = is_notifications_enabled", "title": "" }, { "docid": "0051f1ef0ccb80a99591cbc3b78ffb3e", "score": "0.56970125", "text": "def disable(self) -> Optional[bool]:\n return self.__disable", "title": "" }, { "docid": "739223c564b73efac73ca96fba19ed77", "score": "0.56827825", "text": "def disabled(self) -> Optional[List[str]]:\n return self.__disabled", "title": "" }, { "docid": "0e30fc8425257b8b557ac6725808a037", "score": "0.5675956", "text": "def _check_email_non_default(self) -> Result:\n if self.email_config != EmailConfig.default_email_config:\n return Result(True, f\"Emails will be sent from: {self.sender()}\")\n else:\n return Result(False, f\"ERROR: Email has not been configured.\\n\"\n f\" Please update {paths.EMAIL_CONFIG} with the correct details\")", "title": "" }, { "docid": "9a3a0c00b14b0e39fb909e6b90c71e62", "score": "0.56483406", "text": "def disable(self):\n return True", "title": "" }, { "docid": "c6d9c3431b61d349901319d2e7778971", "score": "0.56455153", "text": "def disabled(self) -> \"bool\":\n return self._attrs.get(\"disabled\")", "title": "" }, { "docid": "c6d9c3431b61d349901319d2e7778971", "score": "0.56455153", "text": "def disabled(self) -> \"bool\":\n return self._attrs.get(\"disabled\")", "title": "" }, { "docid": "c6d9c3431b61d349901319d2e7778971", "score": "0.56455153", "text": "def disabled(self) -> \"bool\":\n return self._attrs.get(\"disabled\")", "title": "" }, { "docid": "c6d9c3431b61d349901319d2e7778971", "score": "0.56455153", "text": "def disabled(self) -> \"bool\":\n return self._attrs.get(\"disabled\")", "title": "" }, { "docid": "4abbf31f5132a0134a07243cf08f56bd", "score": "0.5624627", "text": "def disabled(self) -> bool:\n return self.__disabled", "title": "" }, { "docid": "89c04ad8a32b47d9da72bf445861ad20", "score": "0.5615506", "text": "def CheckDisabledByDefault(self):\n self.assertEqual(get_lock_manager(), None)", "title": "" }, { "docid": "808d64711aa46d3d91e3796e45aaae0b", "score": "0.5613287", "text": "def testViewReminderMailsSettingAndSendReminderEmail(self):\n self.login('[email protected]')\n self.request_fetcher.get('/update_settings?reminder_email=no')\n\n # The control group :-)\n self.login('[email protected]')\n self.request_fetcher.get('/update_settings?reminder_email=yes')\n\n self.request_fetcher.get('/admin/send_reminder_email')\n self.assertEmailNotSentTo('[email protected]')\n self.assertEmailSentTo('[email protected]')", "title": "" }, { "docid": "b2ddae35e76b86605e8c8efbc0916e2b", "score": "0.56001663", "text": "def disable_receipt_settings(database):\n settings = [\n 'runPurchaseActionsOnManualOrders',\n 'emailinvoiceflag',\n 'invoicepayments'\n ]\n for setting in settings:\n value = database.get_app_setting(setting)\n if value == '1':\n database.update_app_setting(setting, 0)", "title": "" }, { "docid": "b3733a6684dd1f01faaef09ef1a7d897", "score": "0.55947495", "text": "def user_toggle_notifications(session, context):\n user = context.user\n user.notifications = not user.notifications\n session.commit()\n update_settings(context)", "title": "" }, { "docid": "044e601b7fcec68d1de9d44301c303dc", "score": "0.5585977", "text": "def isDisabled( self ):\n raise NotImplementedError(\"Not implemented yet.\")", "title": "" }, { "docid": "13fd48c4ee83588fb39772693fbf04db", "score": "0.55758667", "text": "def Disable(self):\n self.manager.SetEnabled(False)", "title": "" }, { "docid": "88d739caab0d9b60359117eee36b69ee", "score": "0.5568771", "text": "def disable_notifications(token,\n customerid=None,\n 
accountsupressall=False):\n\n url = _utils.create_url(token, \"{0}?accountsupressall={1}\".format(\n API_URL, accountsupressall), customerid)\n\n return _query_nodeping_api.put(url)", "title": "" }, { "docid": "20a0c40145ba0c87089127863241d941", "score": "0.5560075", "text": "def disabled(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"disabled\")", "title": "" }, { "docid": "20a0c40145ba0c87089127863241d941", "score": "0.5560075", "text": "def disabled(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"disabled\")", "title": "" }, { "docid": "11b8f7ee41289301add614d692aaa30a", "score": "0.5554809", "text": "def _update_is_disabled(self):\n self.is_disabled = bool(\n self._view.settings().get('wrap_as_you_type_disabled'))", "title": "" }, { "docid": "edd912a32bf416dc339c0cbc88009c8f", "score": "0.55442", "text": "def ask_for_email_reason(self, is_required=None):", "title": "" }, { "docid": "729e14915384d294e82f29ede77c47f8", "score": "0.5539547", "text": "def disabled(self, disabled: \"bool\"):\n self._attrs[\"disabled\"] = disabled", "title": "" }, { "docid": "729e14915384d294e82f29ede77c47f8", "score": "0.5539547", "text": "def disabled(self, disabled: \"bool\"):\n self._attrs[\"disabled\"] = disabled", "title": "" }, { "docid": "729e14915384d294e82f29ede77c47f8", "score": "0.5539547", "text": "def disabled(self, disabled: \"bool\"):\n self._attrs[\"disabled\"] = disabled", "title": "" }, { "docid": "729e14915384d294e82f29ede77c47f8", "score": "0.5539547", "text": "def disabled(self, disabled: \"bool\"):\n self._attrs[\"disabled\"] = disabled", "title": "" }, { "docid": "7735be70e7e16e0823b2b28128a0b4d9", "score": "0.5536697", "text": "def is_on(self):\n return self._base.api.email_state", "title": "" }, { "docid": "0d1213ba9b854b1c6197a7284a413050", "score": "0.5536061", "text": "def anti_malware_setting_event_email_recipients(self, anti_malware_setting_event_email_recipients):\n\n self._anti_malware_setting_event_email_recipients = anti_malware_setting_event_email_recipients", "title": "" }, { "docid": "090c3b9439f32288c2963da07545a184", "score": "0.55349356", "text": "def disable(self):\n self.disabled = True", "title": "" }, { "docid": "090c3b9439f32288c2963da07545a184", "score": "0.55349356", "text": "def disable(self):\n self.disabled = True", "title": "" }, { "docid": "b70703da358e9875806b55b5a7bcaaee", "score": "0.55344075", "text": "async def async_turn_off(self, **kwargs):\n await self._base.api.set_email(False)\n await self.request_refresh()", "title": "" }, { "docid": "c3b27b19183005b6cd6b3813b330d443", "score": "0.55080324", "text": "def send_deny_notification(self):\n if self.uploader.email:\n link = \"\".join([\"http://\", Site.objects.get_current().domain, self.get_absolute_url()])\n message = render_to_string('email/video_denied.txt', {\n 'video': self,\n 'link': link,\n 'user': self.uploader\n })\n subject = \"Video denied\"\n self.uploader.email_user(subject, message)", "title": "" }, { "docid": "c7f3ec502baa5820a4536ff484ac30ef", "score": "0.55073476", "text": "def disabled(self, flag):\n if flag:\n self.config(state='disabled')\n else:\n self.config(state='normal')", "title": "" }, { "docid": "d34d3af147700514ee9d0faec23cd25f", "score": "0.55047023", "text": "def disabled_alerts(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"disabled_alerts\")", "title": "" }, { "docid": "f8fb0ce07e0a474baf7ee67185e0558e", "score": "0.54719627", "text": "def invoice_vendor_deny(self, cr, uid, ids, 
context=None):\n if context is None:\n context = {}\n\n # make \"ids\" a list ids (required if using existing method in any model)\n ids = [int(ids)]\n recipient_ids = []\n partner_obj = self.pool.get('res.partner')\n admin_id = partner_obj.search(cr, uid, [('name', '=', 'SEPTA Admin')])\n admin = partner_obj.browse(cr, uid, admin_id, context=None)[0]\n child_ids = admin.child_ids\n for child in child_ids:\n if child.notification:\n recipient_ids.append(int(child.id))\n\n # cancel the current invoice\n canceled = self.action_cancel(cr, uid, ids, None)\n if canceled:\n\n # set invoice from canceled to vendor_denied\n res = self.write(cr, uid, ids, {'state': 'vendor_denied', 'comment': context['comment']}, None)\n wf_service = netsvc.LocalService(\"workflow\")\n for inv_id in ids:\n wf_service.trg_delete(uid, 'account.invoice', inv_id, cr)\n wf_service.trg_create(uid, 'account.invoice', inv_id, cr)\n\n # Send email if found recipient\n if res and len(recipient_ids) > 0:\n context['recipient_ids'] = recipient_ids\n template_obj = self.pool.get('email.template')\n template_id = template_obj.search(cr, uid, [('name', '=', 'Notification for Vendor Denied')])\n if template_id:\n mail = template_obj.send_mail(cr, uid, template_id[0], ids[0], True, context=context)\n else:\n raise osv.except_osv(_('Error!'), _(\n 'No Email Template Found, Please configure a email template under Email tab and named \"Notification for Vendor Denied\"'))\n return True", "title": "" }, { "docid": "7be0de3b80061429d3f4020acad0d03d", "score": "0.54704887", "text": "def test_timer_disabled_reenabled(self):\n self._client.publish(settings.set_auto_topic, settings.set_auto_disable_payload)\n self._client.publish(settings.set_auto_topic, settings.set_auto_enable_payload)\n\n off_hour, on_hour = self._get_on_off_hours()\n\n schedule_turn_off(\n client=self._client,\n when=hour_str_to_when_dict(off_hour)\n )\n schedule_turn_on(\n client=self._client,\n when=hour_str_to_when_dict(on_hour)\n )\n\n assert _received_two_messages_event.wait(timeout=6)\n\n off_message, off_datetime = next(\n m for m in _received_messages if m[0].payload.decode() == settings.off_payload\n )\n on_message, on_datetime = next(\n m for m in _received_messages if m[0].payload.decode() == settings.on_payload\n )", "title": "" }, { "docid": "ad83e8aef6dfe8dbd42a38465bbb194e", "score": "0.5462777", "text": "def disable():", "title": "" }, { "docid": "ad83e8aef6dfe8dbd42a38465bbb194e", "score": "0.5462777", "text": "def disable():", "title": "" }, { "docid": "2d850ee2f25bec70c3ce05cb4fd86b95", "score": "0.54577386", "text": "def _get_areAutodesk360NotificationsShown(self) -> \"bool\" :\n return _core.GeneralPreferences__get_areAutodesk360NotificationsShown(self)", "title": "" }, { "docid": "badf086333b333c35b91c608fe7349af", "score": "0.5442603", "text": "def is_disabled(self):\n raise Unimplemented('Unimplemented in dlkit.services')", "title": "" }, { "docid": "d7fab935121a5d634e1c91d835f6b76e", "score": "0.5437477", "text": "def disabled(self) -> \"bool\": # type: ignore\n return self.disabler is not None and self.disabler()", "title": "" }, { "docid": "db1ed3dc897bf3a3a3efd4c6cb7d4dcf", "score": "0.5428721", "text": "def imiodmsmail_settings_changed(event):\n if (IRecordModifiedEvent.providedBy(event) and event.record.interfaceName\n and event.record.interface not in (IImioDmsMailConfig, IImioDmsMailConfig2)):\n return\n if event.record.fieldName == 'mail_types':\n invalidate_cachekey_volatile_for('imio.dms.mail.vocabularies.IMMailTypesVocabulary')\n 
invalidate_cachekey_volatile_for('imio.dms.mail.vocabularies.IMActiveMailTypesVocabulary')\n if event.record.fieldName == 'omail_types':\n invalidate_cachekey_volatile_for('imio.dms.mail.vocabularies.OMMailTypesVocabulary')\n invalidate_cachekey_volatile_for('imio.dms.mail.vocabularies.OMActiveMailTypesVocabulary')\n if event.record.fieldName == 'omail_send_modes':\n invalidate_cachekey_volatile_for('imio.dms.mail.vocabularies.OMActiveSendModesVocabulary')\n if event.record.fieldName == 'assigned_user_check':\n update_transitions_auc_config('dmsincomingmail') # i_e ok\n n_plus_x = 'imio.dms.mail.wfadaptations.IMServiceValidation' in \\\n [adapt['adaptation'] for adapt in get_applied_adaptations()]\n snoi = False\n if event.newValue == u'no_check' or not n_plus_x:\n snoi = True\n portal = api.portal.get()\n folder = portal['incoming-mail']['mail-searches']\n if folder['to_treat_in_my_group'].showNumberOfItems != snoi:\n folder['to_treat_in_my_group'].showNumberOfItems = snoi # noqa\n folder['to_treat_in_my_group'].reindexObject()\n if event.record.fieldName in ('org_templates_encoder_can_edit', 'org_email_templates_encoder_can_edit'):\n folder_id = ('email' in event.record.fieldName) and 'oem' or 'om'\n portal = api.portal.get()\n main_folder = portal.templates[folder_id]\n s_orgs = get_registry_organizations()\n roles = ['Reader']\n all_roles = ['Reader', 'Contributor', 'Editor']\n if api.portal.get_registry_record(event.record.__name__):\n roles = list(all_roles)\n for uid in s_orgs:\n if uid not in main_folder:\n continue\n folder = main_folder[uid]\n groupname = '{}_encodeur'.format(uid)\n api.group.revoke_roles(groupname=groupname, roles=all_roles, obj=folder)\n api.group.grant_roles(groupname=groupname, roles=roles, obj=folder)\n\n if event.record.fieldName == 'imail_group_encoder':\n if api.portal.get_registry_record('imio.dms.mail.browser.settings.IImioDmsMailConfig.imail_group_encoder'):\n configure_group_encoder('imail_group_encoder')\n if event.record.fieldName == 'omail_group_encoder':\n if api.portal.get_registry_record('imio.dms.mail.browser.settings.IImioDmsMailConfig.omail_group_encoder'):\n # configure_group_encoder(['dmsoutgoingmail', 'dmsoutgoing_email'])\n configure_group_encoder('omail_group_encoder')\n if event.record.fieldName == 'contact_group_encoder':\n if api.portal.get_registry_record('imio.dms.mail.browser.settings.IImioDmsMailConfig.contact_group_encoder'):\n configure_group_encoder('contact_group_encoder', contacts_part=True)\n # set permission on contacts directory\n portal = api.portal.get()\n portal['contacts'].manage_permission('imio.dms.mail: Write mail base fields',\n ('Manager', 'Site Administrator', 'Contributor'), acquire=1)\n if event.record.fieldName == 'groups_hidden_in_dashboard_filter':\n invalidate_cachekey_volatile_for('imio.dms.mail.vocabularies.TreatingGroupsForFacetedFilterVocabulary')\n if event.record.fieldName == 'imail_folder_period' and event.newValue is not None:\n portal = api.portal.get()\n setattr(portal[MAIN_FOLDERS['dmsincomingmail']], 'folder_period', event.newValue)\n if event.record.fieldName == 'omail_folder_period' and event.newValue is not None:\n portal = api.portal.get()\n setattr(portal[MAIN_FOLDERS['dmsoutgoingmail']], 'folder_period', event.newValue)", "title": "" }, { "docid": "9d1a55394b4389ff37e62ab46d715854", "score": "0.54272723", "text": "def iipdisabledmipdisabled(self) :\n\t\ttry :\n\t\t\treturn self._iipdisabledmipdisabled\n\t\texcept Exception as e:\n\t\t\traise e", "title": "" }, { "docid": 
"4b33547aaea3ebe2910aa702ee99bcdc", "score": "0.5422161", "text": "def enable_moderation(self):\n return self.properties.get(\"EnableModeration\", None)", "title": "" }, { "docid": "a0b9c5caeb7f7ff028c1a78f44d94087", "score": "0.5420929", "text": "def setDisabled(self, disabled):\n self.disabled = disabled", "title": "" }, { "docid": "227ccc8c680c427306a317e1a7190c1c", "score": "0.5413193", "text": "def disabled(self, run_info):\n\n return self.get(\"disabled\", run_info) is not None", "title": "" }, { "docid": "1c29a6cc0516c842e83bc67952589cd7", "score": "0.54022765", "text": "def _is_disabled(self, name):\n conf = getattr(self.bot.config, self._resource_name)\n disabled = conf.get(\"disable\", [])\n enabled = conf.get(\"enable\", [])\n return name not in enabled and (disabled is True or name in disabled)", "title": "" }, { "docid": "c0055586d9c63a023f875a4e390e880c", "score": "0.5402192", "text": "def client_disabled_email(days=settings.MB_CLIENT_DISABLED_FIRST_EMAIL_DAYS):\n clients = Client.objects.get_disabled(days)\n for client in clients:\n with select_locale(client):\n mail_client(subject=_('we miss You!'),\n template='emails/client_disabled.html',\n data={\n 'order':\n client.orders.get_expired(('archived', )).first(),\n 'client':\n client,\n },\n client=client)", "title": "" }, { "docid": "8e46e47bba1061e7db21229c950acfd2", "score": "0.53965276", "text": "def can_send(self, user, nf_type_label):\n try:\n user.notifications_settings.get(notification_type__label=nf_type_label, disabled=False,\n channels__contains=self.channel_id)\n return True\n except ObjectDoesNotExist:\n return False", "title": "" }, { "docid": "85424aa45a948ce265e0a868bc7e6e24", "score": "0.53853595", "text": "def adminEmails():", "title": "" }, { "docid": "14bc113f7a4e75da6caf29d3a0b27159", "score": "0.5381358", "text": "def disable(self) -> None:\n ...", "title": "" }, { "docid": "7b896c251cf6f2ec7c54df6101146ec1", "score": "0.5377305", "text": "def supports_gradebook_notification(self):\n return # boolean", "title": "" }, { "docid": "3764e778778e743c59d199d6e11946b3", "score": "0.5376229", "text": "def is_disabled(self):\n return self.get_attribute('disabled')", "title": "" }, { "docid": "892d269879afb95c105682cf527aa4fe", "score": "0.537337", "text": "def stop_notifications(self):\n self.active = False", "title": "" }, { "docid": "9486befe4ea9532ce7593e9d28580085", "score": "0.53676754", "text": "def email_account_admins(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"email_account_admins\")", "title": "" }, { "docid": "4e57beb9129dc602fc330adcc79cd835", "score": "0.53659767", "text": "def disable(self):\n result = self.__enabled\n self.__enabled = False\n return result", "title": "" }, { "docid": "7cedaddf62e4f3ab236bbf1a5d1e7f1e", "score": "0.5363446", "text": "def _set_areAutodesk360NotificationsShown(self, *args) -> \"bool\" :\n return _core.GeneralPreferences__set_areAutodesk360NotificationsShown(self, *args)", "title": "" }, { "docid": "9fcc845c2c2406ec6ca440750253e787", "score": "0.5359751", "text": "def is_disabled(self):\n return envtobool('ENTRYPOINT_DISABLE_SERVICE', False)", "title": "" }, { "docid": "c193b15d53f9df703866548d00474115", "score": "0.5349972", "text": "def disable(self):\n self._is_enabled = False", "title": "" }, { "docid": "50b0d771f0db59cc831459df538e1ce9", "score": "0.534252", "text": "def unsubscribed_from_emails(self):\n return dict.get(self, 'unsubscribed_from_emails', None)", "title": "" }, { "docid": "ae19061f9da08045b2b9bbb464cd1983", "score": 
"0.5340227", "text": "def is_disabled(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"is_disabled\")", "title": "" }, { "docid": "4276c163cf37c65075ede8d41895326d", "score": "0.53348684", "text": "def disable(self):\n pass", "title": "" }, { "docid": "4276c163cf37c65075ede8d41895326d", "score": "0.53348684", "text": "def disable(self):\n pass", "title": "" }, { "docid": "4276c163cf37c65075ede8d41895326d", "score": "0.53348684", "text": "def disable(self):\n pass", "title": "" }, { "docid": "4276c163cf37c65075ede8d41895326d", "score": "0.53348684", "text": "def disable(self):\n pass", "title": "" }, { "docid": "e0e763bc035b2f5472be98c9e527db70", "score": "0.53334975", "text": "def disable(self, email):\n self.resource.projects().serviceAccounts().disable(\n name=f\"projects/-/serviceAccounts/{email}\"\n ).execute()\n return f\"Service account `{email}` disabled.\"", "title": "" }, { "docid": "c7e2a549ab4c3897a7f967bb4328d4bb", "score": "0.5323745", "text": "def test_enabled_disable(self) -> None:\n self.register_user(\"user\", \"pass\")\n token = self.login(\"user\", \"pass\")\n\n body = {\n \"conditions\": [\n {\"kind\": \"event_match\", \"key\": \"sender\", \"pattern\": \"@user2:hs\"}\n ],\n \"actions\": [\"notify\", {\"set_tweak\": \"highlight\"}],\n }\n\n # PUT a new rule\n channel = self.make_request(\n \"PUT\", \"/pushrules/global/override/best.friend\", body, access_token=token\n )\n self.assertEqual(channel.code, 200)\n\n # disable the rule\n channel = self.make_request(\n \"PUT\",\n \"/pushrules/global/override/best.friend/enabled\",\n {\"enabled\": False},\n access_token=token,\n )\n self.assertEqual(channel.code, 200)\n\n # check rule disabled\n channel = self.make_request(\n \"GET\", \"/pushrules/global/override/best.friend/enabled\", access_token=token\n )\n self.assertEqual(channel.code, 200)\n self.assertEqual(channel.json_body[\"enabled\"], False)\n\n # re-enable the rule\n channel = self.make_request(\n \"PUT\",\n \"/pushrules/global/override/best.friend/enabled\",\n {\"enabled\": True},\n access_token=token,\n )\n self.assertEqual(channel.code, 200)\n\n # check rule enabled\n channel = self.make_request(\n \"GET\", \"/pushrules/global/override/best.friend/enabled\", access_token=token\n )\n self.assertEqual(channel.code, 200)\n self.assertEqual(channel.json_body[\"enabled\"], True)", "title": "" }, { "docid": "79868576bcb282e22d86b3d40631e68f", "score": "0.5317762", "text": "async def off(message: discord.Message):\n moderate.data[message.server.id][name] = False\n moderate.save()\n await client.say(message, \"{} **disabled**.\".format(setting))", "title": "" }, { "docid": "86af3a86fa6eb6ac50443e5f716dc40e", "score": "0.53172785", "text": "def notify_admin(title, message=\"\", team=\"all\"):\n from mist.api.helpers import send_email\n email = config.NOTIFICATION_EMAIL.get(team, config.NOTIFICATION_EMAIL)\n if email:\n send_email(title, message,\n email)", "title": "" }, { "docid": "245adf80402a5ecdeacec0695b909005", "score": "0.53101707", "text": "def disabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"disabled\")", "title": "" }, { "docid": "245adf80402a5ecdeacec0695b909005", "score": "0.53101707", "text": "def disabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"disabled\")", "title": "" }, { "docid": "245adf80402a5ecdeacec0695b909005", "score": "0.53101707", "text": "def disabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"disabled\")", "title": "" }, { "docid": 
"245adf80402a5ecdeacec0695b909005", "score": "0.53101707", "text": "def disabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"disabled\")", "title": "" }, { "docid": "245adf80402a5ecdeacec0695b909005", "score": "0.53101707", "text": "def disabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"disabled\")", "title": "" }, { "docid": "701e79a1987d9c4109c4d6abd717d670", "score": "0.5306954", "text": "def is_email_required(self):\n return False", "title": "" }, { "docid": "cf52bc9849f7a29cc69d927e674e1f3c", "score": "0.52985764", "text": "def anonymous_is_enabled() -> bool:\n from ..core.config import config\n return config['general_system_enable_anonymous']", "title": "" } ]
752a38333d4fb5079420442a28da054b
Export the image shapes from the exported images in the target folder
[ { "docid": "17b50c9e141e3a6d954a0a85f1430b1e", "score": "0.692356", "text": "def export_shapes(self, *args):\n try:\n # default answer\n ans = list()\n # working variables\n coln = args[0]\n tfext = args[1]\n tsufix = args[2]\n\n gm = self.gallery\n ip = self.imgd_path\n\n # iterating over the index data\n for tid in self.getdata(coln):\n\n # config source and target folders\n tgtf = os.path.join(ip, tid)\n # recovering source images\n tgtfn = gm.get_srcimgs(tgtf, tfext)\n # exporting shapes\n tans = gm.export_shapes(tgtfn, tsufix)\n\n # compose answer\n tans = self.to_json(tans)\n ans.append(tans)\n time.sleep(DEFAULT_SHORT_SLEEP_TIME)\n\n # return answer list\n return ans\n\n # exception handling\n except Exception as exp:\n Err.reraise(exp, \"Controller: export_shapes\")", "title": "" } ]
[ { "docid": "1edd76a4bc85079ce0cea5930497783b", "score": "0.6876715", "text": "def export(self, img_files, targets, output_folder, filename_prefix=\"dataset\"):\n assert isinstance(img_files, (list, tuple)), \"Arguments `img_files` should be lists or tuples\"\n if targets is not None:\n assert isinstance(targets, (list, tuple)), \"Arguments `targets` should be lists or tuples\"\n assert len(img_files) == len(\n targets\n ), \"Number of input images should be equal to the number of input targets\"\n else:\n targets = [None] * len(img_files)\n\n output = Path(output_folder)\n if not output.exists():\n output.mkdir(parents=True)\n\n n_rows = max(min(int(np.ceil(len(img_files) / self.n_cols)), self.max_n_rows), 1)\n total_width = (self.max_output_img_size[0] + self.margins[0]) * self.n_cols\n total_height = (self.max_output_img_size[0] + self.margins[0]) * n_rows\n size = (total_width, total_height)\n n_images = len(img_files)\n max_counter = n_rows * self.n_cols\n\n with get_tqdm(total=n_images) as bar:\n for c in range(0, n_images, max_counter):\n total_img = Image.new(mode=\"RGB\", size=size, color=self.background_color)\n filepath = output / (filename_prefix + \"_part_{}.png\".format(c))\n for i, (f, t) in enumerate(zip(img_files[c : c + max_counter], targets[c : c + max_counter])):\n iy, ix = np.unravel_index(i, (n_rows, self.n_cols))\n x = ix * (self.max_output_img_size[0] + self.margins[0]) + self.margins[0] // 2\n y = iy * (self.max_output_img_size[1] + self.margins[1]) + self.margins[1] // 2\n\n raw_img = self.read_img_fn(f)\n image_id = self.img_id_fn(f)\n target = self.read_target_fn(t)\n img = render_datapoint(\n raw_img,\n target,\n image_id=image_id,\n output_size=self.max_output_img_size,\n text_color=self.text_color,\n text_size=self.text_size,\n geom_color=self.geom_color,\n blend_alpha=self.blend_alpha,\n )\n total_img.paste(img, (x, y))\n bar.update(1)\n total_img.save(filepath.as_posix())", "title": "" }, { "docid": "abdfec2e85421e3b50cdefa86fa3ac05", "score": "0.658521", "text": "def export_image(self, output_folder):\r\n dimwh = 16\r\n folder_path = os.path.join(output_folder, self.image_category)\r\n if not os.path.isdir(folder_path):\r\n os.mkdir(folder_path)\r\n final_image = Image.new(\"RGBA\", (dimwh, dimwh), color=self.background_color)\r\n if self.profile == \"square\":\r\n size = (dimwh, dimwh)\r\n offset = (0, 0)\r\n elif self.profile == \"portrait\":\r\n width, height = self.image.width, self.image.height\r\n ratio = dimwh / height\r\n size = (int(ratio * width), dimwh)\r\n offset= (int((dimwh - size[0])/2), 0)\r\n else:\r\n width, height = self.image.width, self.image.height\r\n ratio = dimwh / width\r\n size = (dimwh, int(ratio * height))\r\n offset= (0, int((dimwh - size[1])/2))\r\n if self.image_category == \"demoscene\":\r\n to_paste = self.image.resize(size, resample=Image.NEAREST)\r\n else:\r\n to_paste = self.image.resize(size, resample=Image.NEAREST)\r\n\r\n \r\n final_image.paste(to_paste, offset, to_paste)\r\n\r\n data = list(final_image.getdata())\r\n to_reshape = []\r\n image_01 = []\r\n for i in range(len(data)):\r\n is_background = sum(np.var([data[i], self.background_color], axis=0)) < (COLOR_THRESHOLD *2)\r\n to_reshape += [255, 255, 255, 255] if is_background else [0, 0, 0, 255]\r\n image_01.append(0 if is_background else 1)\r\n data = np.array(to_reshape)\r\n data = data.reshape(dimwh, dimwh, 4)\r\n \r\n final_image = Image.fromarray(data.astype(np.uint8))\r\n final_image.save(self.output_file_path)\r\n self.image_mask = image_01\r\n 
#final_image.save(self.output_file_path)\r", "title": "" }, { "docid": "dcdf7924242512dfdf20590db5858f0b", "score": "0.6375648", "text": "def save_images_to_file(self, ids, outroot='./'):\n pass", "title": "" }, { "docid": "c7c8b3ddbf6695c4cd15709eec7b4240", "score": "0.62452036", "text": "def create_images(connector_file, folder='ortho_views'):\n if not os.path.exists(folder):\n os.mkdir(folder)\n connector = cq.importers.importStep(connector_file).combine()\n\n image_filenames = []\n\n for view_name in VIEWS:\n v = VIEWS[view_name]\n svg = connector.toSvg(view_vector=v)\n svg = process_svg(svg)\n img_name = os.path.join(folder, connector_file.split(\".\")[0] + \"_\" + view_name + '.png')\n image_filenames.append(img_name)\n svg_blob = svg.encode('utf-8')\n with Image(blob=svg_blob, format='svg') as img:\n img.format = \"png\"\n img.trim()\n img.transform(resize='200x200')\n width, height = img.size\n height_border = (200 - height)/2\n width_border = (200 - width)/2\n img.border(Color('#FFFFFF'), width_border, height_border)\n img.sample(200, 200)\n\n img.save(filename=img_name)\n\n # Return the list of filenames\n return image_filenames", "title": "" }, { "docid": "42ce6ba98f42e0bbcde3b72cbe30e85c", "score": "0.6200433", "text": "def export_image(self, img_path):\n canvas = np.zeros((self.canvas_size, self.canvas_size))\n for obj in range(self.n_objects):\n canvas[self.x_pos[obj]:self.x_pos[obj] + self.x_sizes[obj],\n self.y_pos[obj]:self.y_pos[obj] + self.y_sizes[obj]] = 1.0\n np.save(img_path, canvas.T) # transpose b/c numpy indexing conventions", "title": "" }, { "docid": "fb200b1f295a2121633c4f7759c1680a", "score": "0.6162082", "text": "def export_paints(self, *args):\n try:\n # default answer\n ans = list()\n # working variables\n coln = args[0]\n sfext = args[1]\n tfext = args[2]\n tsufix = args[3]\n gm = self.gallery\n\n # iterating over the index data\n for tid in self.getdata(coln):\n # config source and target folders\n srcf = os.path.join(self.localg_path, tid)\n tgtf = os.path.join(self.imgd_path, tid)\n\n # recovering source images\n srcfn = gm.get_srcimgs(srcf, sfext)\n # setting target images\n tgtfn = gm.set_tgtimgs(srcfn, tgtf, tfext, tsufix)\n # exporting images\n tans = gm.export_imgs(srcfn, tgtfn, tsufix)\n\n # compose answer\n tans = self.to_json(tans)\n ans.append(tans)\n time.sleep(DEFAULT_SHORT_SLEEP_TIME)\n\n # return answer list\n return ans\n\n # exception handling\n except Exception as exp:\n Err.reraise(exp, \"Controller: export_paints\")", "title": "" }, { "docid": "788b57d9f65e929b60ba354738c4e800", "score": "0.61615205", "text": "def export_images(file_path, dataset_path, subset='train', cifar_10=True):\n # type: (str, str, str) -> None\n if cifar_10:\n label_key = 'labels'\n else:\n label_key = 'fine_labels'\n data_dict = unpickle(file_path)\n for label, file_name, data in zip(data_dict[label_key], data_dict['filenames'], data_dict['data']):\n image_path = os.path.join(dataset_path, subset, str(label), file_name)\n\n image = np.reshape(data, (3, 32, 32))\n image = np.rollaxis(image, 0, 3)[:, :, [2, 1, 0]]\n\n cv2.imwrite(image_path, image)", "title": "" }, { "docid": "d8dca08479bdf3397fb550276c5f042d", "score": "0.61293334", "text": "def burn_shapes(shapes, destination, meta):\n\twith rasterio.open(destination, 'w', **meta) as out:\n\t\tout_arr = out.read(1)\n\t\tburned = rasterio.features.rasterize(shapes=shapes, fill=0, out=out_arr, transform=out.transform)\n\t\tout.write_band(1, burned)", "title": "" }, { "docid": "104fe02fe97c7dcefce1414f20615302", 
"score": "0.6113365", "text": "def visualize_to_files(self, tile_shape, dir_path):\n \n print(\"Saving to file\")\n self.weights_for_visualization(tile_shape, dir_path, save=True)\n\n\n # -- visualize the positive data\n # -- visualize the negagive data\n \n img = Image.fromarray(utils.normalize_image(self.pos_data[0,0,:, :].numpy())).convert('L')\n filename = dir_path + \"/pos_data.jpg\"\n img.save(filename)\n \n img = Image.fromarray(utils.normalize_image(self.neg_data[0,0,:, :].numpy())).convert('L')\n filename = dir_path + \"/neg_data.jpg\"\n img.save(filename)\n \n filename = dir_path + \"/-poooling.jpg\"\n all_outputs = self.output_for_visualization(tile_shape, tile_spacing = (1,1))\n img = Image.fromarray(all_outputs).convert('L')\n img.save(filename)", "title": "" }, { "docid": "d9575c10c6750f6fcc0f8ad11437f4e1", "score": "0.6047934", "text": "def ExtractTargetAndSave(labels, images, path):\r\n i = 1\r\n for x in labels:\r\n index = x[0]-1\r\n crop = images[index][x[1]:x[1]+x[3], x[2]:x[2]+x[4]]\r\n face = path + '%d'%(i)+ '.jpg'\r\n io.imsave(face, crop) \r\n i = i+1", "title": "" }, { "docid": "a488f09ee81a114af54cdbe85a7256be", "score": "0.603128", "text": "def save_colorful_images(predictions, filenames, output_dir, palettes):\n for ind in range(len(filenames)):\n im = Image.fromarray(palettes[predictions[ind].squeeze()])\n fn = os.path.join(output_dir, filenames[ind][:-4] + '.png')\n out_dir = split(fn)[0]\n if not exists(out_dir):\n os.makedirs(out_dir)\n im.save(fn)", "title": "" }, { "docid": "18be10e56940fc0197f6b9dae575d269", "score": "0.6026711", "text": "def save_shapes(self,df,output_file):\n import os\n df.to_file(os.path.join(st.processed_data_directory,output_file), driver='ESRI Shapefile')", "title": "" }, { "docid": "8e7cab407617d49a430b5f44ceee7c2a", "score": "0.6017365", "text": "def GenerateImages(foreground, background, numGenerations):\n \n # try to open files\n try: \n background = Image.open(background)\n foreground = Image.open(foreground)\n except FileNotFoundError:\n print(\"Make sure that both files are in the folder.\")\n\n\n\n # folders for saving \n if not os.path.isdir(\"./GeneratedData\"):\n os.mkdir(\"./GeneratedData\")\n if not os.path.isdir(\"./GeneratedData/masks\"):\n os.mkdir(\"./GeneratedData/masks\") \n \n # initialize dictionaries and lists\n annotation = dict()\n annotation = basicInformation(annotation)\n annotatedImages = []\n infoImages = []\n \n # convert it to the RGBA space\n background = background.convert('RGBA')\n h, w = background.size[0], background.size[1]\n \n # init unique id\n annotationId = 1\n\n for num in range(numGenerations):\n # mask init \n mask = Image.new('RGB', (h, w), (0, 0, 0))\n \n # deep copy of bkg for each of 1000 synthetic images \n final = copy.deepcopy(background)\n\n # random from 0 to 4 objects\n numObj = random.randint(0, 4)\n \n if numObj == 0:\n\n # as it was asked to generate 0 foreground obj, store this type of annotations\n data = CreateAnnotation(num, annotationId, 0,0,0,0)\n annotatedImages.append(data)\n \n final = background\n \n # create masks of obj and save them\n mask = np.array(mask)\n mask[mask > 0] = 255\n \n cv2.imwrite(f\"./GeneratedData/masks/mask_image{num}.png\", mask) \n \n annotationId += 1\n \n for obj in range(numObj):\n \n # call a function to transform the foreground obj\n frgImage = TransformForeground(foreground)\n\n # choose a random x,y position for the foreground\n maxX = final.size[0] - frgImage.size[0]\n maxY = final.size[1] - frgImage.size[1]\n\n # generate a new 
postion for the object\n objPosition = (random.randint(0, maxX), random.randint(0, maxY))\n \n\n # create annotation, save the obj position\n data = CreateAnnotation(num, annotationId, objPosition[0], objPosition[1], frgImage.size[0], frgImage.size[1])\n annotatedImages.append(data)\n annotationId += 1 \n \n # place the foreground object on the background\n newFgObj = Image.new('RGBA', final.size, color = (0, 0, 0, 0))\n newFgObj.paste(frgImage, objPosition)\n\n # extract the alpha channel from the foreground and paste it into a new image the size of the composite\n alpha_mask = frgImage.getchannel(3)\n new_alpha_mask = Image.new('L', final.size, color = 0)\n new_alpha_mask.paste(alpha_mask, objPosition)\n final = Image.composite(newFgObj, final, new_alpha_mask)\n \n # create masks of obj\n mask.paste(alpha_mask,objPosition,alpha_mask)\n\n \n name = './GeneratedData/img_' + str(num) + '.png'\n final.save(name, 'PNG')\n \n #save masks\n mask = np.array(mask)\n mask[mask > 0] = 255\n cv2.imwrite(f\"./GeneratedData/masks/mask_image{num}.png\", mask) \n \n imgInfo = CreateImgInfo(num, h, w)\n infoImages.append(imgInfo)\n\n annotation[\"annotations\"] = annotatedImages\n annotation[\"images\"] = infoImages\n\n \n with open('./GeneratedData/data_annotations.json', 'w') as outfile:\n json.dump(annotation, outfile)", "title": "" }, { "docid": "2ed8bbda0631022572449982c1d4ff35", "score": "0.598285", "text": "def exporting_data():\n task = ee.batch.Export.image.toDrive(image=myImg,\n region=myRegion.getInfo()['coordinates'],\n description='myDescription',\n folder='myGDriveFolder',\n fileNamePrefix='myFilePrefix',\n scale=myScale,\n crs=myCRS)\n task.start()\n task.status()\n return", "title": "" }, { "docid": "34b6cbadea1d091c05b35578c6f5d8bd", "score": "0.5936668", "text": "def collectShape(self, \n shapeArray,\n targetName, \n targetShapeFile):\n self.repository = maya.cmds.createNode('transform', n=targetName)\n maya.cmds.addAttr(self.repository, ln='shapePaths', dt='stringArray')\n\n shapePathArray = []\n\n for shape in shapeArray:\n shapePath = str(maya.cmds.ls(shape, long=True)[0])\n isNotEmpty = maya.cmds.polyEvaluate(shapePath, v=True)\n\n if isNotEmpty == 0:\n continue\n\n shapePathArray.append(shapePath)\n maya.cmds.parent(shape, self.repository, add=True, shape=True)\n\n maya.cmds.setAttr(self.repository+'.shapePaths',\n len(shapePathArray),\n *(shapePathArray),\n type='stringArray')\n\n duplicateSet = maya.cmds.duplicate(self.repository)\n maya.cmds.delete(self.repository)\n\n self.repository = maya.cmds.rename(duplicateSet, targetName)\n\n exportList = maya.cmds.listRelatives(self.repository,\n s=True,\n fullPath=True)\n\n for shape in exportList:\n maya.cmds.setAttr(shape+'.intermediateObject', 0)\n\n ffdComponents = maya.cmds.lattice(self.repository, \n divisions=[2, 2, 2], \n objectCentered=False,\n ldv=[2, 2, 2],\n n='{}_Ffd1'.format(self.repository))\n\n maya.cmds.setAttr(ffdComponents[0]+'.outsideLattice', 1)\n\n maya.cmds.setKeyframe(ffdComponents[1]+\".tz\", v=0.0, t=1)\n maya.cmds.setKeyframe(ffdComponents[1]+\".tz\", v=1.0, t=2)\n\n exportCommand = \"-fr 1 2 -root |{node} -u {attribute} -file {targetFile}\"\n exportCommand = exportCommand.format(node=self.repository,\n attribute='shapePaths',\n targetFile=targetShapeFile)\n\n maya.cmds.AbcExport(verbose=True, j=exportCommand)\n\n maya.cmds.delete(self.repository, ch=True)\n maya.cmds.delete(self.repository)\n\n self.repository = maya.cmds.createNode('transform', n=targetName)\n\n importCommand = 'AbcImport -crt -ct 
\"|{shape}\" \"{inputFile}\";'\n importCommand = importCommand.format(shape=targetName,\n inputFile=targetShapeFile)\n\n self.alembicNode = maya.mel.eval(importCommand)\n\n maya.cmds.disconnectAttr('time1.outTime', '{}.time'.format(self.alembicNode))\n maya.cmds.setAttr('{}.time'.format(self.alembicNode), 1)\n\n for shapeIndex, shape in enumerate(shapePathArray):\n maya.cmds.connectAttr('{0}.outPolyMesh[{1}]'.format(self.alembicNode,\n shapeIndex),\n '{0}.inMesh'.format(shape),\n f=True)\n\n maya.cmds.delete(self.repository)", "title": "" }, { "docid": "8dd1be5ffeaaf459867bcf6bffdc828a", "score": "0.593417", "text": "def makeImages(self):\n # make layout\n self.makeLayout()\n self.setAgraph()\n # make function that accepts a mode, a sector\n # and nodes and edges True and False\n self.plotGraph()\n self.plotGraph(\"reversed\",filename=\"tgraphR.png\")\n agents=n.concatenate(self.np.sectorialized_agents__)\n for i, sector in enumerate(self.np.sectorialized_agents__):\n self.plotGraph(\"plain\", sector,\"sector{:02}.png\".format(i))\n self.plotGraph(\"reversed\",sector,\"sector{:02}R.png\".format(i))\n self.plotGraph(\"plain\", n.setdiff1d(agents,sector),\"sector{:02}N.png\".format(i))\n self.plotGraph(\"reversed\",n.setdiff1d(agents,sector),\"sector{:02}RN.png\".format(i))\n self.plotGraph(\"plain\", [],\"BLANK.png\")", "title": "" }, { "docid": "6c23370d40dc0fbf5927d1d313a14d3c", "score": "0.5933819", "text": "def generate_blended_images(self):\n\n files = get_file_list(self.inputDir)\n # pngFiles = os.listdir(self.pngPath)\n pngFiles, indexs = self.get_logo_files(self.pngPath)\n random.shuffle(files)\n\n for n, file in enumerate(files):\n if n == 100: break\n try:\n srcImg = Image.open(file)\n srcImg = srcImg.convert('RGBA')\n except:\n print(\"file {} can not open\".format(file))\n continue\n if 150 <= srcImg.size[0] and 150 <= srcImg.size[1]:\n print(f'file {file} is processing.')\n # logofiles = random.sample(pngFiles, self.logoSampleNumber)\n logofiles, kwargs = self.random_png(pngFiles, indexs, self.logoSampleNumber)\n logoImgs = [Image.open(os.path.join(self.pngPath, logoFile)) for logoFile in logofiles]\n for logo_id in range(len(logoImgs)):\n if logoImgs[logo_id].mode != \"RGBA\":\n logoImgs[logo_id] = logoImgs[logo_id].convert(\"RGBA\")\n\n logoImgAugs = []\n point_lists = []\n informations = []\n for (logoImg, logoName, kwarg) in zip(logoImgs, logofiles, kwargs):\n logoImgAug, info, point_list = self.ImgOPS(srcImg, logoImg, self.isResize, self.isaddNoise,\n self.isaddPerspective, self.isaddAffine,\n self.isVirtualEdge, iters=n, **kwarg)\n logoImgAugs.append(logoImgAug)\n point_lists.append(point_list)\n info.append(logoName)\n informations.append(info)\n outImgName, outTxtpath, outImgpath, logo_names = self.setPath(informations)\n self.ImgBlend(srcImg, logoImgAugs, outImgName, outTxtpath, outImgpath, point_lists, self.locations,\n logo_names, debug=False)", "title": "" }, { "docid": "ef950505ae0a2dd30f28043758c15d64", "score": "0.5909106", "text": "def stylegan_postprocess(eval_img_path, save_path):\n # take images with numbers 10-29\n image_paths = glob.glob(eval_img_path + \"/i[0-2][0-9].png\")\n os.mkdir(save_path)\n print(image_paths)\n for path in image_paths:\n crop_stylegan(save_path, path)", "title": "" }, { "docid": "cb257b92b3fbb44d8398d8849c5f9275", "score": "0.58983755", "text": "def save_images(data_loaders, train_labels):\n training_images_rgb_folder = os.path.join(os.path.abspath(__file__), '..', 'training_images_rgb')\n training_images_grayscale_folder = 
os.path.join(os.path.abspath(__file__), '..', 'training_images_grayscale')\n test_images_rgb_folder = os.path.join(os.path.abspath(__file__), '..', 'test_images_rgb')\n test_images_grayscale_folder = os.path.join(os.path.abspath(__file__), '..', 'test_images_grayscale')\n\n if not os.listdir(training_images_rgb_folder):\n index = 0\n for sample in data_loaders[\"train\"].dataset.imgs:\n image_name = f\"Image_{index}_covid{train_labels[index].numpy()}.png\"\n plt.imsave(os.path.join(training_images_rgb_folder, image_name), sample[0])\n plt.imsave(os.path.join(training_images_grayscale_folder, image_name), sample[0], cmap='gray')\n print(f\"Saved {image_name}\")\n index += 1\n\n if not os.listdir(test_images_rgb_folder):\n index = 0\n for sample in data_loaders[\"test\"].dataset.imgs:\n image_name = f\"Image_{index}.png\"\n plt.imsave(os.path.join(test_images_rgb_folder, image_name), sample[0])\n plt.imsave(os.path.join(test_images_grayscale_folder, image_name), sample[0], cmap='gray')\n print(f\"Saved {image_name}\")\n index += 1", "title": "" }, { "docid": "e6c678f6b9ad9370924c90661ad66806", "score": "0.58818066", "text": "def save_all_images(data,path):\n np.save(path,data)", "title": "" }, { "docid": "34d11655325135689ca1732dcc6beda4", "score": "0.58782864", "text": "def save_image_and_xml(data_dir, filename, output_dir):\n imagesFolder = os.path.join(data_dir,\"images\")\n print(imagesFolder)\n annotationsFolder = os.path.join(data_dir,\"annotations\")\n print(annotationsFolder)\n sourceImg = os.path.join(imagesFolder,filename+\".jpg\")\n sourceXml = os.path.join(annotationsFolder+'/voc_xmls/'+,filename+\".xml\")\n sourceTxt = os.path.join(annotationsFolder+'/yolo_labels/'+,filename+\".txt\")\n destinationImg = os.path.join(output_dir,filename+\".jpg\")\n destinationXml = os.path.join(output_dir, filename + \".xml\")\n destinationTxt = os.path.join(output_dir, filename + \".txt\")\n dest = shutil.copyfile(sourceImg, destinationImg)\n dest = shutil.copyfile(sourceXml, destinationXml)\n dest = shutil.copyfile(sourceTxt, destinationTxt)", "title": "" }, { "docid": "1d55fa201eed9024493c508b6314ee65", "score": "0.5847158", "text": "def extract(cfg, sess, img_path, output_dir):\n img = cv2.imread(img_path, cv2.IMREAD_COLOR)\n img_batches = image_to_batches(img)\n\n batches_out = sess.run('bob_vars_1/bob_eval_out:0',\n feed_dict={'img_in:0': img_batches})\n\n batches_to_file(batches_out, output_dir)", "title": "" }, { "docid": "2078ee2cd636697ad585a6d2272413a1", "score": "0.584625", "text": "def postprocess_images(result_folder, output_folder):\n image_names = []\n for img_name in os.listdir(result_folder):\n if img_name.endswith('.nrrd'):\n image_names.append(img_name)\n \n image_names.sort() \n \n if not os.path.exists(output_folder):\n os.makedirs(output_folder)\n \n for img_name in image_names:\n image = nrrd.read(f'{result_folder}/{img_name}')[0]\n image = _remove_small_false_positive_regions(image)\n nrrd.write(f'{output_folder}/{img_name}', image)", "title": "" }, { "docid": "24f2d83f49b341debc580c4e74210dd7", "score": "0.58450407", "text": "def saveImage(self):\r\n files = listdir(self.out_dir)\r\n filename = \"slicer-{}-output\".format(self.slice_mode)\r\n\r\n counter = 1\r\n while filename + self.props.extension in files:\r\n filename = \"slicer-\" + self.slice_mode + \"-output\" + str(counter)\r\n counter += 1\r\n\r\n fullname = path.join(self.out_dir, filename + self.props.extension)\r\n self.final_img.save(fullname)", "title": "" }, { "docid": "0b56b1c0af0b7b9befcfa33dba3310de", 
"score": "0.5829363", "text": "def fusionImages(pathBackground,pathOverlay,pathFusioned):\n til = Image.new(\"RGB\",(514,257))\n background = Image.open(pathBackground)\n overlay = Image.open(pathOverlay)\n til.paste(background)\n til.paste(overlay, mask=overlay)\n\n \n til.save(pathFusioned,\"PNG\")\n\n return til", "title": "" }, { "docid": "7dd68f579194380a5b88451c7c10065b", "score": "0.58156425", "text": "def save_output_images(predictions, filenames, output_dir):\n # pdb.set_trace()\n for ind in range(len(filenames)):\n im = Image.fromarray(predictions[ind].astype(np.uint8))\n fn = os.path.join(output_dir, filenames[ind][:-4] + '.png')\n out_dir = split(fn)[0]\n if not exists(out_dir):\n os.makedirs(out_dir)\n im.save(fn)", "title": "" }, { "docid": "f3ff592b418fe3f209a178254e084b55", "score": "0.5807087", "text": "def save_images(pickle_dict):\n for img_index in range(len(pickle_dict[b\"data\"])): # Gehe alle Bilder durch\n img = pickle_dict[b\"data\"][img_index] # extrahiere Bilddaten-Array\n filename = str(pickle_dict[b\"filenames\"][img_index], \"UTF-8\") # Hole Dateinamen (inkl. Dateiendung)\n label = pickle_dict[b\"labels\"][img_index] # Hole Klassennummer\n # Einzelne Kanäle (R,G,B) waren direkt hintereinander gespeichert, hole einzelne Kanäle einzeln heraus\n c1 = img[0:1024] # R\n c2 = img[1024:2048] # G\n c3 = img[2048:3072] # B\n # Arrays (1x1024) in 32x32 reshapen\n c1 = np.reshape(c1, (32,32))\n c2 = np.reshape(c2, (32,32))\n c3 = np.reshape(c3, (32,32))\n # Farbkanäle aufeinander stacken (3D-Array entsteht)\n rgb_img_arr = np.dstack((c1,c2,c3))\n # Bild aus 3D-Array erstellen\n rgb_img = Image.fromarray(rgb_img_arr)\n # und im Ordner passend zur Klasse abspeichern\n rgb_img.save(_OUTPUT_PATH / _LABEL_NAMES[label] / filename)", "title": "" }, { "docid": "42fec4143f799a5b1671c238f13a8b98", "score": "0.5806606", "text": "def generate_assets(file, scale, output):\n scale_str = sketch_scale(scale)\n cmd = \"sketchtool export artboards \\\"%s\\\" --overwriting \\\n--formats=png --scales=%s --output=%s\" % (file, scale_str, output)\n result = subprocess.check_output(cmd, shell=True)\n\n generated = []\n for line in result.split('\\n'):\n if re.search('^Exported *', line):\n efile = line.split('Exported ')[1]\n efile = strip_sketch_scale_suffix(output, efile)\n generated.append(efile)\n return generated", "title": "" }, { "docid": "d1fb99026b7cd030e43f331a688ccafd", "score": "0.5805083", "text": "def extract_layout(img_path, file_out_folder):\n print('Got task: Extract Layout')\n img = cv2.imread(img_path)\n try:\n res = layout_model.process(img_path)\n except:\n return ''\n #Variable to make sequences number.\n run = 0\n os.mkdir(file_out_folder)\n #print(file_out_folder)\n #Loop through all parts in image and imwrite() it\n print('Go in Extract Layout func')\n\n for i in res:\n loc = i['location']\n run += 1\n y1 = loc[0][1]\n y2 = loc[2][1]\n x1 = loc[0][0]\n x2 = loc[1][0]\n\n #crop original image with cordinate from the output of lib-layout\n cropped_img = img[y1:y2, x1:x2]\n\n #store a file with different identify-numbers (sequence number)\n path = os.path.join(file_out_folder, str(run) + '.jpg')\n \n if cv2.imwrite(path, cropped_img) == False:\n print('Saved Failed - Filepath: ' + path)\n return '' \n return file_out_folder", "title": "" }, { "docid": "92261bc9825c76adaaa47b0a933a5881", "score": "0.5802956", "text": "def save_img(self):\n #Move all metrics results output images to the created directory\n self.outpath_ = self.metric_filepath + self.viz_selection + '.png'\n 
return self.visualizer.show(outpath=self.outpath_, clear_figure=True)", "title": "" }, { "docid": "668a3176852b1388404d8bc477b7505b", "score": "0.57847726", "text": "def split_images(images_files, layout, output_dir):\n for image_file in tqdm(images_files, desc=\"Splitting images...\"):\n image_file_model = FileModel(image_file)\n image = PIL.Image.open(image_file)\n for image_segment in layout[\"layout\"]:\n layout_model = ImageLayoutModel(image_segment)\n segment = image.crop(layout_model.box)\n segment.save(\n Path(output_dir).joinpath(\n image_file_model.get_file_name_with_view_key(layout_model.key)\n )\n )", "title": "" }, { "docid": "c01df4368d2a46a4fcbd2fc0b08468de", "score": "0.57538694", "text": "def maybe_save_images(predict_images, images, filenames):\n\n if FLAGS.output_dir is not None:\n batch_size = predict_images.shape[0]\n for i in xrange(batch_size):\n image_array = predict_images[i, :]\n image_array1 = images[i, :, 1]\n print(image_array.shape, image_array1.shape)\n indexs = list(range(0, image_array.shape[0]))\n file_path = os.path.join(FLAGS.output_dir, filenames[i])\n ax = plt.subplot(211)\n plt.plot(indexs, image_array)\n plt.subplot(212, sharex=ax)\n plt.plot(indexs, image_array1)\n plt.savefig(file_path)\n plt.close()", "title": "" }, { "docid": "81ef4b55cecb19f9943c2a5b3730a120", "score": "0.5728503", "text": "def save_output_images(predictions, pre, pathes, output_dir, epoch, phase):\r\n for ind in range(len(pathes)):\r\n os.makedirs(output_dir, exist_ok=True)\r\n if phase == 'val':\r\n fn = os.path.join(output_dir, pathes[ind].split('/')[-1].replace('.png', '.jpg'))\r\n elif phase == 'test':\r\n fn = os.path.join(output_dir, pathes[ind].split('/')[-1])\r\n else:\r\n raise ValueError('No such phase,')\r\n\r\n save_image(predictions[ind], fn)", "title": "" }, { "docid": "d8181ece24721a5736d4bccc524683fd", "score": "0.5722296", "text": "def visualize_images(output_dir, images, labels, metadata):\n\n output_dir = output_dir/\"visualizations\"\n Path.mkdir(output_dir, parents=True)\n\n for image, (index, row), meta in zip(images, labels.iterrows(), metadata):\n for label, value in row.iteritems():\n if not Path.exists(output_dir/f\"{label}{value}\"):\n Path.mkdir(output_dir/f\"{label}{value}\")\n\n cv2.imwrite(str(output_dir/f\"{label}{value}\"/\n (meta[0]+\"_\"+meta[1]+\".png\")), image)", "title": "" }, { "docid": "b600f1d9755c334c25d4e76c3963c44a", "score": "0.5715523", "text": "def image_save(image, out_dir):\n for k in range(image.shape[2]):\n path = os.path.join(out_dir, '{:04d}.tif'.format(k))\n cv2.imwrite(path, image[:, :, k])", "title": "" }, { "docid": "b600f1d9755c334c25d4e76c3963c44a", "score": "0.5715523", "text": "def image_save(image, out_dir):\n for k in range(image.shape[2]):\n path = os.path.join(out_dir, '{:04d}.tif'.format(k))\n cv2.imwrite(path, image[:, :, k])", "title": "" }, { "docid": "048d57f389d1912dd86624f2c7270493", "score": "0.57120866", "text": "def folderToInOutImages(inSize, outSize, source, folder, bars=True):\n # direct the folder to the images folder\n folder += \" (train_data)\"\n folder = \"images/\" + folder + \"/\"\n\n # create the output folder, deleting it first if it already exists\n if isdir(folder):\n shutil.rmtree(folder)\n mkdir(folder)\n\n # create the paths for the folders\n colorPath = folder + \"colorOutput/\"\n grayPath = folder + \"grayInput/\"\n\n # if the folders do not already exist, create them\n if not isdir(colorPath):\n mkdir(colorPath)\n\n if not isdir(grayPath):\n mkdir(grayPath)\n\n # get all the files in 
the given folder\n if isinstance(source, list):\n images = source\n else:\n images = []\n files = [file for file in listdir(source) if isfile(join(source, file))]\n\n # for each image, create the color image, then save it and convert it to gray and save it as well\n for f in files:\n # create the color image\n img = Image.open(source + f)\n images.append(img)\n\n # process all images into the training data and save them\n cnt = 0\n for i in images:\n num = str(cnt)\n while len(num) < Settings.IMG_NUM_DIGITS:\n num = \"0\" + num\n\n # create the color image\n i = scaleImage(outSize[0], outSize[1], i)\n # save the color image\n i.save(colorPath + \"color\" + num + \".png\", \"PNG\")\n if Settings.IMG_PRINT_STATUS:\n print(\"saved color image: \" + str(cnt) + \" \" + str(i))\n\n # create the gray image\n i = convertGrayScale(i)\n # resize the image\n i = scaleImage(inSize[0], inSize[1], i, bars=bars)\n # save the gray image\n i.save(grayPath + \"gray\" + num + \".png\", \"PNG\")\n if Settings.IMG_PRINT_STATUS:\n print(\"saved gray image: \" + str(cnt) + \" \" + str(i))\n\n cnt += 1", "title": "" }, { "docid": "9ba37aac2d17f0ef99cf2bca1a790e3b", "score": "0.5710014", "text": "def process_test():\n\n test_entry = unpickle(test_file)\n test_dataset = test_entry[b'data']\n test_targets = test_entry[b'fine_labels']\n test_dataset = np.vstack(test_dataset).reshape(-1, 3, 32, 32)\n test_dataset = test_dataset.transpose((0, 2, 3, 1)) \n\n root_path = data_dir + '/cifar100/test/'\n for counter, item in enumerate(test_targets):\n make_dir_if_no_exist(root_path+str(item))\n # write data\n img = test_dataset[counter]\n #bgr_image = cv2.cvtColor(img.astype(np.uint8), cv2.COLOR_RGB2BGR)\n file_path = root_path+str(item)+'/'+\"test_img_{0}.jpg\".format(str(counter))\n #print(file_path)\n # something breaks here\n #cv2.imwrite(file_path, bgr_image)\n imageio.imwrite(file_path, img)", "title": "" }, { "docid": "680e0ef2d476e8fa106b97028bc12a90", "score": "0.56949043", "text": "def create_detection_results_files (images, targets, outputs, start_id, only_local= True):\n \n if only_local:\n class_names = (\"Kidney\", \"Cyst\", \"Pyramid\", \"Hydronephrosis\", \"Others\")\n else:\n class_names = (\"Healthy\", \"Cyst\", \"Pyramid\", \"Hydronephrosis\", \"Others\",\n \"PCD\", \"HC\") \n \n label_map = {class_name: i+1 for i, class_name in enumerate(class_names)} \n label_map[\"Background\"] = 0\n \n rev_label_map = {v: k for k, v in label_map.items()} # Inverse mapping\n\n if start_id == 0:\n \n mydir = '/home/mpostigo/Documentos/bbdd/mAP-master/input/ground-truth'\n filelist = [ f for f in os.listdir(mydir) if f.endswith(\".txt\") ]\n for f in filelist:\n os.remove(os.path.join(mydir, f))\n \n mydir = '/home/mpostigo/Documentos/bbdd/mAP-master/input/detection-results'\n filelist = [ f for f in os.listdir(mydir) if f.endswith(\".txt\") ]\n for f in filelist:\n os.remove(os.path.join(mydir, f)) \n \n mydir = '/home/mpostigo/Documentos/bbdd/mAP-master/input/images-optional'\n filelist = [ f for f in os.listdir(mydir) if f.endswith(\".jpg\") ]\n for f in filelist:\n os.remove(os.path.join(mydir, f)) \n \n output_path = '/home/mpostigo/Documentos/bbdd/mAP-master/input/ground-truth/'\n output_path2 = '/home/mpostigo/Documentos/bbdd/mAP-master/input/detection-results/'\n \n for i, (image, target, output) in enumerate(zip(images, targets, outputs)):\n \n batch_size = len(target)\n \n for b in range(batch_size):\n GT = []\n D = []\n idx = target[b]['image_id'].detach().cpu().numpy()\n idx = idx+start_id\n #CREATE GT 
files\n filename1 = output_path+'image_'+str(idx)+'.txt'\n labels = target[b]['labels']\n boxes = target[b]['boxes']\n \n #CREATE DETECTION FILES\n filename2 = output_path2+'image_'+str(idx)+'.txt'\n pred_labels = output[b]['labels']\n scores = output[b]['scores']\n pred_boxes = output[b]['boxes']\n \n scores = scores.numpy()\n pred_labels = pred_labels.numpy()\n pred_boxes = pred_boxes.numpy()\n \n #Clean healthy score\n healthy_indexes = np.where(pred_labels==1)\n sick_indexes = np.where(pred_labels!=1)\n if (np.asarray(sick_indexes).size !=0) :\n if(np.amax(scores[sick_indexes])>=0.7):\n scores = np.delete(scores,healthy_indexes) \n pred_labels = np.delete(pred_labels,healthy_indexes)\n pred_boxes = np.delete(pred_boxes,healthy_indexes, axis=0)\n \n # #Clean sick scores \n healthy_indexes = np.where(pred_labels==1) \n sick_indexes = np.where(pred_labels!=1)\n if np.asarray(healthy_indexes).size != 0 : \n if(np.amax(scores[healthy_indexes])>=0.9):\n scores = np.delete(scores,sick_indexes) \n pred_labels = np.delete(pred_labels,sick_indexes)\n pred_boxes = np.delete(pred_boxes,sick_indexes, axis=0)\n \n elif(np.amax(scores[healthy_indexes])<=0.7):\n scores = np.delete(scores,healthy_indexes) \n pred_labels = np.delete(pred_labels,healthy_indexes)\n pred_boxes = np.delete(pred_boxes,healthy_indexes, axis=0)\n \n #Clean low scores local\n local_indexes = np.where((pred_labels==2) | (pred_labels==3) | (pred_labels==4) | (pred_labels==5)| (pred_labels==7))\n low_scores_idx = np.where(scores<0.5)\n low_scores_idx_local = np.intersect1d(local_indexes, low_scores_idx)\n \n if np.asarray(low_scores_idx_local).size!=0:\n scores = np.delete(scores,low_scores_idx_local)\n pred_labels = np.delete(pred_labels,low_scores_idx_local)\n pred_boxes = np.delete(pred_boxes,low_scores_idx_local, axis=0)\n \n #clean low scores global\n global_indexes = np.where((pred_labels==6))\n low_scores_idx = np.where(scores<0.2)\n low_scores_idx_global = np.intersect1d(global_indexes, low_scores_idx)\n \n if np.asarray(low_scores_idx_global).size !=0:\n scores = np.delete(scores,low_scores_idx_global)\n pred_labels = np.delete(pred_labels,low_scores_idx_global)\n pred_boxes = np.delete(pred_boxes,low_scores_idx_global, axis=0)\n \n #CREATE IMAGES FILES\n img = image[b].cpu()\n img = img.numpy().transpose((1, 2, 0))\n# plt.imshow(img)\n # im_path = '/home/mpostigo/Documentos/bbdd/mAP-master/input/images-optional/image_'+str(idx)+'.jpg'\n # skio.imsave(im_path, img_as_float64(img/np.amax(np.absolute(img))))\n\n for l,b in zip (labels.numpy(), boxes.numpy()):\n \n str_label = rev_label_map.get(l).replace(' ','_')\n str_box = str(b).replace('[','').replace(']','').replace(',','')\n file_line = str_label+' '+str_box+'\\n'\n GT.append(' '.join(file_line.split()))\n \n for pl, s, pb in zip (pred_labels, scores, pred_boxes):\n \n str_plabel = rev_label_map.get(pl).replace(' ','_')\n str_score = str(s)\n str_pbox = str(pb).replace('[','').replace(']','').replace(',','')\n \n file_line2 = str_plabel+' '+str_score+' '+str_pbox+'\\n'\n \n D.append(' '.join(file_line2.split()))\n \n with open(filename1, 'w') as the_file:\n the_file.write('\\n'.join(GT))\n \n with open(filename2, 'w') as the_file2:\n the_file2.write('\\n'.join(D))\n \n the_file.close()\n the_file2.close()", "title": "" }, { "docid": "ddc68a7a85829edb0de8fcf458160f1d", "score": "0.5693125", "text": "def save_images(images, img_shape, img_name):\n h, w = img_shape[0], img_shape[1]\n image_frame_dim = int(math.ceil(images.shape[0] ** .5))\n img = np.zeros((h * 
image_frame_dim, w * image_frame_dim, 1))\n for idx, image in enumerate(images):\n #image = np.squeeze(image)\n i = idx % image_frame_dim\n j = idx // image_frame_dim\n img[j * h:j * h + h, i * w:i * w + w, :] = image*255.0\n\n img = np.squeeze(img)\n scipy.misc.imsave(img_name, img)", "title": "" }, { "docid": "4fa2b92ac5d854f4c4d5b47fe9f2d71f", "score": "0.5684987", "text": "def save_images(self):\n rgb_np = self.get_rgb_np()\n\n cropped_img = self.crop_center(rgb_np, 504, 342)\n \n fn_prefix, _ = os.path.splitext(self.flir_img_filename)\n\n cropped_img_filename = os.path.join('Visual_Spectrum_images/' + fn_prefix.split('/')[1] + \".jpg\")\n cropped_img_visual = Image.fromarray(cropped_img)\n \n if self.is_debug:\n print(\"DEBUG Saving cropped RGB image to:{}\".format(cropped_img_filename))\n\n cropped_img_visual.save(cropped_img_filename)", "title": "" }, { "docid": "b572df98aca16eeeb012c9af04236299", "score": "0.56744987", "text": "def postprocess():\n file_list = scan_dir(args.bin_path)\n for file_path in file_list:\n data = load_bin_file(args.bin_path + file_path, shape=(352, 352), dtype=\"float32\")\n data_shape = load_bin_file(args.shape_path + file_path, dtype=\"int64\")\n img = cv2.resize(data, (int(data_shape[1]), int(data_shape[0])))\n img = sigmoid(img)\n img = (img - img.min()) / (img.max() - img.min() + 1e-8)\n img = img * 255\n file_name = file_path.split(\".\")[0] + \".jpg\"\n outfile = os.path.join(args.target_path, file_name)\n cv2.imwrite(outfile, img)\n print(\"Successfully save image in \" + outfile)", "title": "" }, { "docid": "7694ae045c289e8ce1bfb537c0ccea7b", "score": "0.5673834", "text": "def export_brep(shapes: list, path_filename: str):\n\n dirname = os.path.dirname(path_filename)\n if not os.path.exists(dirname):\n os.makedirs(dirname, exist_ok=True)\n\n bldr = OCP.BOPAlgo.BOPAlgo_Splitter()\n\n for shape in shapes:\n # checks if solid is a compound as .val() is not needed for compunds\n if isinstance(shape, cq.occ_impl.shapes.Compound):\n bldr.AddArgument(shape.wrapped)\n else:\n bldr.AddArgument(shape.val().wrapped)\n\n bldr.SetNonDestructive(True)\n\n bldr.Perform()\n\n bldr.Images()\n\n merged = cq.Compound(bldr.Shape())\n\n merged.exportBrep(str(path_filename))", "title": "" }, { "docid": "11df113e00a0b4b509c8f98b73d3ea9d", "score": "0.56699365", "text": "def main():\n model_file = sys.argv[1]\n image_dir = sys.argv[2]\n out_dir = sys.argv[3]\n\n graph = tf.get_default_graph()\n segmenter = Segmenter(model_file, graph)\n\n filenames = os.listdir(image_dir)\n for fn in filenames:\n img = Image.open(opj(image_dir, fn))\n #img = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)\n img = preprocess.preprocess(np.array(img))\n img = segmenter.segmenter(img)\n img = Image.fromarray(np.uint8(img * 255))\n img = img.convert(\"L\")\n img.save(opj(out_dir, fn))", "title": "" }, { "docid": "ca17f297d682bd9a6c9ea6bba91d85f2", "score": "0.5667955", "text": "def output_images_to_new_folder(input_path = IMAGES_UNSORTED_PATH, output_path = IMAGES_SORTED_PATH, distance_threshold = DISTANCE_LIMIT, dist_calc = imex.get_distance_lat_long_haversine):\r\n #Get dictionary of reference points\r\n load_reference_points(REFERENCE_POINTS_PATH)\r\n \r\n #Get list of images to sort\r\n file_names = get_list_of_images(input_path)\r\n out_file_names = []\r\n \r\n for src in file_names: \r\n #GPS coordinate of image\r\n lat, lon = imex.get_location_from_image(src) \r\n #Datetime of image\r\n datetime = imex.get_datetime_from_image(src)\r\n #Nearest reference point to image\r\n location_name 
= get_nearest_reference_point(src, (lat, lon), distance_threshold, dist_calc)\r\n \r\n #extension of image\r\n file_ext = os.path.splitext(src)[1] \r\n \r\n #folder of output image\r\n dst_folder = output_path + \"\\\\\" + location_name \r\n make_folder(dst_folder)\r\n \r\n #full filename of image\r\n image_number = 1\r\n dst = dst_folder + \"\\\\\" + location_name + \"_\" + str(datetime) + \"_\" + str(image_number) + str(file_ext)\r\n \r\n #prevent filename collisions\r\n while os.path.exists(dst):\r\n image_number += 1\r\n dst = dst_folder + \"\\\\\" + location_name + \"_\" + str(datetime) + \"_\" + str(image_number) + str(file_ext)\r\n #forcibly prevent while loop nonsense\r\n if (image_number > 99999999): \r\n print(\"okay, that's really odd. what's going on here?\")\r\n break\r\n \r\n #copy the input file to the output folder\r\n out_file_names.append(dst)\r\n shutil.copy2(src,dst)\r\n \r\n print(\"\\n\".join(out_file_names))\r\n #Return a list of the files created\r\n return out_file_names", "title": "" }, { "docid": "86683d2c083727268b61463deaa8fc63", "score": "0.56667805", "text": "def img2classfication(input_json_path,input_file_path,outputs_folders_path):\n with open(input_json_path,'r') as f:\n data_dict = json.load(f)\n with tqdm(total = len(data_dict),unit= 'pic') as pbar:\n for data in data_dict:\n data_name = data['image_id']\n data_label = data['disease_class']\n create_folder(outputs_folders_path +\"/\"+str(data_label)) \n shutil.copy(input_file_path + \"/\" + data_name,outputs_folders_path + \"/\" + str(data_label) +\"/\" + data_name) \n pbar.update(1)", "title": "" }, { "docid": "36020986b88294c4e12ffcacd920f14a", "score": "0.5658101", "text": "def save_images(image, output_dir, cur_nimg):\n for name in ('name.png', 'name_%06d.png' % (cur_nimg >> 10)):\n with tf.gfile.Open(os.path.join(output_dir, name), 'wb') as f:\n f.write(image)", "title": "" }, { "docid": "c048ce31466039b22a8d76396de49af6", "score": "0.565615", "text": "def _extract(self):\n images_path = glob.glob(self._root + '/images/*')\n subs_path = []\n subs_dict = {}\n train_data = []\n train_labels = []\n test_data = []\n test_labels = []\n for image_path in images_path:\n sub_path = glob.glob(image_path + '/*')\n for sub in sub_path:\n subs_path.append(sub)\n \n for i,sub in enumerate(subs_path):\n for j,image in enumerate(glob.glob(sub + '/*')):\n image = PIL.Image.open(image)\n if image.getbands()[0] == 'L':\n image = image.convert('RGB')\n image_np = np.array(image)\n image.close()\n label = i\n if j%7 == 0:\n test_data.append(image_np)\n test_labels.append(label)\n else:\n train_data.append(image_np)\n train_labels.append(label)\n\n labels = sub.split('/')[-2]+'_'+sub.split('/')[-1]\n subs_dict[labels] = i\n print(subs_dict)\n print(len(test_labels))\n print(len(train_labels))\n \n pickle.dump((train_data, train_labels),\n open(os.path.join(self._root, 'processed/train.pkl'), 'wb'))\n pickle.dump((test_data, test_labels),\n open(os.path.join(self._root, 'processed/test.pkl'), 'wb'))", "title": "" }, { "docid": "c92ab6d6d7b673d7753748635e494019", "score": "0.56455225", "text": "def gdal_export_shapes_to_kml(outfile, inshape):\n # Check inputs\n outfile = _validation.output_file(outfile, 'kml')\n inshape = _validation.input_file(inshape, 'vector', True)\n # Create cmd\n cmd = ['saga_cmd', '-f=q', '-f=q', 'io_gdal', '5', '-SHAPES', inshape, '-FILE',\n outfile]\n # Run command\n flag = _env.run_command_logged(cmd)\n if not flag:\n raise EnvironmentError(_ERROR_TEXT.format(_sys._getframe().\n f_code.co_name, 
_env.errlog))", "title": "" }, { "docid": "d525ed333c177b77eee14e4258d0ca11", "score": "0.5635483", "text": "def save_images_with_name(images: Iterable, paths: Iterable, directory: Path) -> None:\n path: Path\n image: np.ndarray\n for image, path in zip(images, paths):\n new_path = str(directory / path.name)\n cv2.imwrite(new_path, image)\n logger.debug(f\"Saved '{path.name}' with shape {image.shape}\")\n\n logger.info(f\"Saved images to '{directory.absolute()}'\")", "title": "" }, { "docid": "2ca0f9af3ff8e1f0d6b719073d23f232", "score": "0.5627841", "text": "def organize():\n\n path = './data/original'\n all_samples = os.listdir(f'{path}')\n all_samples.sort()\n\n if '.DS_Store' in all_samples:\n all_samples.remove('.DS_Store')\n\n all_samples.remove('corners.csv')\n sample_count = int(len(all_samples)/2)\n\n if sample_count != 308:\n print(\"Incorrect input data was passed. Please unload the full content of the 'WashingtonOBRace' folder including \" \\\n \"'corner.csv', images and masks with names 'img_X.png' and 'mask_X.png' in the data/original folder.\")\n exit()\n\n try:\n os.stat(f'./data/testing')\n shutil.rmtree(f'./data/testing')\n except:\n pass\n try:\n os.stat(f'./data/training')\n shutil.rmtree(f'./data/training')\n except:\n pass\n\n os.makedirs(f'data/testing/images')\n os.makedirs(f'data/testing/masks')\n os.makedirs(f'data/training/images/gate')\n os.makedirs(f'data/training/masks/gate')\n\n idx = range(0, sample_count)\n testing_idx = random.sample(idx, round(0.1 * sample_count))\n\n for i in idx:\n\n source_im = f'{path}/{all_samples[i]}'\n source_mask = f'{path}/{all_samples[sample_count+i]}'\n\n if i in testing_idx:\n destination = 'data/testing/images/'\n shutil.copy(source_im, destination)\n\n destination = 'data/testing/masks/'\n shutil.copy(source_mask, destination)\n\n else:\n destination = 'data/training/images/gate/'\n shutil.copy(source_im, destination)\n\n destination = 'data/training/masks/gate/'\n shutil.copy(source_mask, destination)\n\n return", "title": "" }, { "docid": "6ae2541e61a582d243e5da4ee4dc9657", "score": "0.56256497", "text": "def _generate():\n output_dir = os.path.join(\n FLAGS.tfds_dir, \"testing\", \"test_data\", \"fake_examples\", \"shapes3d\"\n )\n test_utils.remake_dir(output_dir)\n\n images, values = _create_fake_samples()\n\n with h5py.File(os.path.join(output_dir, OUTPUT_NAME), \"w\") as f:\n img_dataset = f.create_dataset(\"images\", images.shape, \"|u1\")\n img_dataset.write_direct(images)\n values_dataset = f.create_dataset(\"labels\", values.shape, \"<f8\")\n values_dataset.write_direct(np.ascontiguousarray(values))", "title": "" }, { "docid": "08ac016b9bce8c4dfcaa3634d27bbd2b", "score": "0.5619609", "text": "def save_images(fetches, image_dir, step=None, only_output=False):\n\n filesets = []\n for i, in_path in enumerate(fetches[\"paths\"]):\n if not isinstance(in_path, str): in_path = in_path[0]\n name, _ = os.path.splitext(os.path.basename(in_path.decode(\"utf8\")))\n fileset = {\"name\": name, \"step\": step}\n for kind in [\"images\", \"outputs\", \"targets\", \"inpaintings\"]:\n if kind not in fetches.keys(): continue\n if only_output and kind == \"outputs\":\n filename = name + \".png\"\n else:\n filename = name + \"-\" + kind + \".png\"\n\n if step is not None:\n filename = \"%08d-%s\" % (step, filename)\n fileset[kind] = filename\n out_path = os.path.join(image_dir, filename)\n contents = fetches[kind][i]\n if only_output and kind != \"outputs\":\n pass\n else:\n with open(out_path, \"wb\") as f:\n f.write(contents)\n 
filesets.append(fileset)\n return filesets", "title": "" }, { "docid": "6a970f49bc2b2672200e82722c1d640a", "score": "0.5610731", "text": "def export_locations(self, outfilepath):\n fields = ['imgpath', 'id', 'x', 'y', 'width', 'height', 'label', 'is_contest', 'contest_id']\n try:\n csvfile = open(outfilepath, 'wb')\n dictwriter = csv.DictWriter(csvfile, fieldnames=fields)\n try:\n dictwriter.writeheader()\n except AttributeError:\n util_gui._dictwriter_writeheader(csvfile, fields)\n bounding_boxes = self.world.get_boxes(self.current_imgpath)\n if self.get_selected_boxes():\n bounding_boxes.extend(self.get_selected_boxes())\n # Let's avoid holes in ID's, which may cause headaches down the line\n bounding_boxes = sorted(bounding_boxes, key=lambda t: t.id)\n # Ensure that ID's are consecutive via use of 'enumerate'\n for id, t in enumerate(bounding_boxes):\n t.id = id\n row = {}\n row['imgpath'] = os.path.abspath(self.current_imgpath)\n row['id'] = id\n # Unscaling that has to be done\n w_img, h_img = self.img_pil.size\n row['x'] = int(round(t.x1 * w_img))\n row['y'] = int(round(t.y1 * h_img))\n width = int(round(abs(t.x1 - t.x2) * w_img))\n height = int(round(abs(t.y1 - t.y2) * h_img))\n row['width'] = width\n row['height'] = height\n # Replace commas with underscore to avoid problems with csv files\n row['label'] = t.label.replace(\",\", \"_\")\n row['is_contest'] = 1 if t.is_contest else 0\n row['contest_id'] = t.contest_id\n dictwriter.writerow(row)\n csvfile.close()\n except IOError as e:\n print \"Couldn't write to file:\", outfilepath", "title": "" }, { "docid": "d8de34ecfe53d1dfa64f4e924e16caaa", "score": "0.5604746", "text": "def save_cuts(all_cuts, grains_folder, direction='x'):\n # Border size in pixels\n border_size = 10\n # Number of cuts\n n_cuts = len(all_cuts)\n # Original shape of cuts\n shape = all_cuts[0].shape\n shape = np.array([shape[1], shape[0]])\n new_shape = shape\n # Shape of output image\n new_shape[1] = shape[1] + border_size\n new_shape[1] = new_shape[1] * n_cuts\n # Generate empty image with shape (shape + border_size) * n_cuts\n #img = np.ones(new_shape)\n img = np.ones(new_shape)\n print(\"Output image shape\",new_shape)\n for i, cut in enumerate(all_cuts):\n #print(i*(new_shape[0]+border_size), (i+1)*shape[0] + i*border_size)\n #img[:, i*(new_shape[0]):(i+1)*new_shape[0]] = cut[::-1].astype(int)\n #img[:, ]\n img[:, i*(new_shape[0]+border_size):(i+1)*shape[0] + i*border_size] = cut[::-1].astype(int)\n #plt.imshow(cut[::-1], cmap=cm.binary)\n #img = np.logical_not(img)\n fig = plt.figure(figsize=(n_cuts*10, 10))\n plt.imshow(img, cmap=cm.plasma)\n plt.axis(\"off\")\n #plt.subplots_adjust(top = 0, bottom = 0, right = 0, left = 0, hspace = 0, wspace = 0)\n plt.savefig(grains_folder+\"/%s.png\" % (direction), box_inches='tight', dpi=100,)\n plt.close()\n plt.clf()", "title": "" }, { "docid": "bd358176cc663ffd357733b377e6cdce", "score": "0.5598436", "text": "def main(prefix, imgList):\n imgCount = 0\n main_printRest(\"Origin\", len(imgList))\n for name in imgList:\n img = Image.open(name)\n # If the image isn't exist, save it!\n print name\n exist_, imgTuple = isExist(img)\n if exist_: \n main_printSkip(\"Origin\", name, imgTuple, len(imgList)-imgCount)\n else:\n img.save(\"./All/\" + str(prefix) + '.' 
+ str(imgCount) + \".jpg\")\n imgCount += 1\n print \"Finish Merge image !!\"\n rotate(prefix, imgList, imgCount)", "title": "" }, { "docid": "ad3b50298614cc6bac1e2c58463d3a11", "score": "0.55976886", "text": "def pil_save_detections(path, img, pred, colors):\n img_w, img_h = img.size\n\n draw = ImageDraw.Draw(img)\n font = ImageFont.truetype(\"old_stuff/arial.ttf\", int(0.025 * img_h))\n for x1, y1, x2, y2, obj, cls in pred:\n cls_idx = int(np.argmax(cls))\n\n # draw boxes and text\n draw.rectangle(((x1, y1), (x2, y2)),\n outline=colors[cls_idx],\n width=int(0.002 * img_h))\n draw.text((x1, y1), \"{:.2f} {:.2f}\".format(obj, cls),\n fill=(0, 255, 0, 128), font=font)\n\n # resize image down to width of 1024\n base_width = 1024\n wpercent = base_width / float(img_w)\n hsize = int(float(img_h) * float(wpercent))\n img = img.resize((base_width, hsize), Image.LANCZOS)\n img.save(os.path.normpath(path), quality=95)", "title": "" }, { "docid": "fc13bbe1be6a51b120dc1c3448f3e93c", "score": "0.55904657", "text": "def test2(name):\n filenames = [\n 'data/image_data/testing/0000/000000.png',\n 'data/image_data/testing/0000/000040.png',\n 'data/image_data/testing/0004/000000.png',\n 'data/image_data/testing/0005/000020.png',\n 'data/image_data/testing/0005/000240.png'\n ]\n shape = (len(filenames), 176, 608, 3)\n n, h, w, c = shape\n image_data = np.zeros((n, h, w, c))\n\n i = 0\n for f in filenames:\n image = normalize_img(imread(f)) # Fix brightness and convert to lab colorspace\n image_data[i, :, :, :] = image[:h*2:2, :w*2:2, :]\n '''plt.figure()\n plt.imshow(image_data[i])\n plt.figure()\n plt.imshow(lab2rgb(image_data[i]))\n plt.show()'''\n i += 1\n\n model = GenSeg(input_shape=[None, h, w, c], num_classes=num_classes, load_model=name)\n result = model.apply(image_data)\n result = np.argmax(result, axis=-1)\n\n '''for img in result:\n plt.figure()\n plt.imshow(img.astype(np.uint8))\n plt.show()'''\n\n colored = np.empty(shape)\n\n for (i, x, y), value in np.ndenumerate(result):\n colored[i, x, y] = get_color(label_to_original(value))\n\n i = 0\n for img in colored:\n img = img.astype(np.uint8, copy=False)\n imsave('%d.png' % i, img, 'png')\n '''plt.figure()\n plt.imshow(img)\n plt.show()'''\n i += 1", "title": "" }, { "docid": "be3c97d6d5edfdfb45c293745ee0d931", "score": "0.5583597", "text": "def hdf5_images(self, filename, output_filename, action, images=True):\n self.stop = False\n ycube = self.get_ycube(filename)\n print 'output_filename is', output_filename\n self.temp_hdf5 = h5py.File(output_filename +'temporary','w')\n self.read_into_temp_hdf5(self.temp_hdf5,\n ycube,\n action,\n images)\n if not self.stop:\n self.generate_output(filename, output_filename, self.temp_hdf5)\n self.temp_hdf5.close()\n os.remove(output_filename +'temporary')\n self.input_hdf5.close() \n self.close()", "title": "" }, { "docid": "af5bc2f8904bed3a334b0401c60a7855", "score": "0.5562589", "text": "def jpg_to_png(self) -> None:\n for img in os.listdir(self.img_path):\n if os.path.splitext(img)[1] == '.jpg':\n img_path = self.img_path + '/' + img\n frame = cv2.imread(img_path)\n index = int(img.replace('.jpg', ''))\n cv2.imwrite(f'{self.img_path}/{os.path.dirname(img)}/image_{index:05d}.png', frame)\n os.remove(img_path)", "title": "" }, { "docid": "4440b603bc1932e4e643e98acd9c1a9f", "score": "0.55542225", "text": "def save_image_dataset(images):\n if not os.path.exists(\"./images\"):\n os.makedirs(\"./images\")\n\n image_paths = [f\"./images/image_{i}.png\" for i in range(0, len(images))]\n 
np.savetxt(\"image_locations.txt\", image_paths, fmt=\"%s\")\n\n for i in range(0, len(images)):\n images[i].save(image_paths[i])", "title": "" }, { "docid": "17a8fa842938a4ef8c0409c29b6d85d5", "score": "0.554901", "text": "def save_outputs (targets, outputs, start_id, only_local=False):\n \n if only_local:\n class_names = (\"Kidney\", \"Cyst\", \"Pyramid\", \"Hydronephrosis\", \"Others\")\n else:\n class_names = (\"Healthy\", \"Cyst\", \"Pyramid\", \"Hydronephrosis\", \"Others\",\n \"PCD\", \"HC\") \n \n label_map = {class_name: i+1 for i, class_name in enumerate(class_names)} \n label_map[\"Background\"] = 0\n \n rev_label_map = {v: k for k, v in label_map.items()} # Inverse mapping\n\n if start_id == 0:\n \n mydir = '/home/mpostigo/Documentos/bbdd/output_results/targets'\n filelist = [ f for f in os.listdir(mydir) if f.endswith(\".txt\") ]\n for f in filelist:\n os.remove(os.path.join(mydir, f)) \n \n mydir = '/home/mpostigo/Documentos/bbdd/output_results/outputs'\n filelist = [ f for f in os.listdir(mydir) if f.endswith(\".txt\") ]\n for f in filelist:\n os.remove(os.path.join(mydir, f)) \n \n output_path = '/home/mpostigo/Documentos/bbdd/output_results/'\n \n for i, (target, output) in enumerate(zip(targets, outputs)):\n \n batch_size = len(target)\n \n for b in range(batch_size):\n GT=[]\n D=[]\n \n idx = target[b]['image_id'].detach().cpu().numpy()\n idx = idx+start_id\n \n #CREATE GT files\n# filename = output_path+filenames[idx]+'.txt'\n filename1 = output_path +'targets/'+'image_'+str(idx)+'.txt'\n labels = target[b]['labels']\n # scores = target['scores']\n boxes = target[b]['boxes']\n \n #CREATE DETECTION FILES\n filename2 = output_path+'outputs/'+'image_'+str(idx)+'.txt'\n pred_labels = output[b]['labels']\n scores = output[b]['scores']\n pred_boxes = output[b]['boxes']\n \n scores = scores.numpy()\n pred_labels = pred_labels.numpy()\n pred_boxes = pred_boxes.numpy()\n \n for l,b in zip (labels.numpy(), boxes.numpy()):\n \n str_label = str(l)\n str_box = str(b).replace('[','').replace(']','').replace(',','')\n file_line = str_label+' '+str_box+'\\n'\n \n GT.append(' '.join(file_line.split()))\n \n for pl, s, pb in zip (pred_labels, scores, pred_boxes):\n \n # if (s>0.2):#only write imgs with high confidence level\n str_plabel = str(pl)\n str_score = str(s)\n str_pbox = str(pb).replace('[','').replace(']','').replace(',','') \n \n file_line2 = str_plabel+' '+str_score+' '+str_pbox+'\\n'\n \n D.append(' '.join(file_line2.split()))\n \n with open(filename1, 'w') as the_file:\n# file_line.replace(' ', ' ')\n the_file.write('\\n'.join(GT))\n \n with open(filename2, 'w') as the_file2:\n# file_line2.replace(' ', ' ')\n the_file2.write('\\n'.join(D))\n \n the_file.close()\n the_file2.close()", "title": "" }, { "docid": "fe740e318b932be55e8944b6b7d697ed", "score": "0.55230576", "text": "def collectOriginShape(self, \n targetName, \n targetShapeFile):\n shapeArray = maya.cmds.ls(intermediateObjects=True, \n geometry=True)\n\n self.repository = maya.cmds.createNode('transform', n=targetName)\n maya.cmds.addAttr(self.repository, ln='shapePaths', dt='stringArray')\n\n shapePathArray = []\n\n for shape in shapeArray:\n shapePath = str(maya.cmds.ls(shape, long=True)[0])\n isNotEmpty = maya.cmds.polyEvaluate(shapePath, v=True)\n\n if isNotEmpty == 0:\n continue\n\n shapePathArray.append(shapePath)\n maya.cmds.parent(shape, self.repository, add=True, shape=True)\n\n maya.cmds.setAttr(self.repository+'.shapePaths',\n len(shapePathArray),\n *(shapePathArray),\n type='stringArray')\n\n 
duplicateSet = maya.cmds.duplicate(self.repository)\n maya.cmds.delete(self.repository)\n\n self.repository = maya.cmds.rename(duplicateSet, targetName)\n\n exportList = maya.cmds.listRelatives(self.repository,\n s=True,\n fullPath=True)\n\n for shape in exportList:\n maya.cmds.setAttr(shape+'.intermediateObject', 0)\n\n ffdComponents = maya.cmds.lattice(self.repository, \n divisions=[2, 2, 2], \n objectCentered=False,\n ldv=[2, 2, 2],\n n='{}_Ffd1'.format(self.repository))\n\n maya.cmds.setAttr(ffdComponents[0]+'.outsideLattice', 1)\n\n maya.cmds.setKeyframe(ffdComponents[1]+\".tz\", v=0.0, t=1)\n maya.cmds.setKeyframe(ffdComponents[1]+\".tz\", v=1.0, t=2)\n\n exportCommand = \"-fr 1 2 -root |{node} -u {attribute} -file {targetFile}\"\n exportCommand = exportCommand.format(node=self.repository,\n attribute='shapePaths',\n targetFile=targetShapeFile)\n\n maya.cmds.AbcExport(verbose=True, j=exportCommand)\n maya.cmds.delete(self.repository, ch=True)\n maya.cmds.delete(self.repository)\n\n self.repository = maya.cmds.createNode('transform', n=targetName)\n\n importCommand = 'AbcImport -crt -ct \"|{shape}\" \"{inputFile}\";'\n importCommand = importCommand.format(shape=targetName,\n inputFile=targetShapeFile)\n\n self.alembicNode = maya.mel.eval(importCommand)\n\n maya.cmds.disconnectAttr('time1.outTime', '{}.time'.format(self.alembicNode))\n maya.cmds.setAttr('{}.time'.format(self.alembicNode), 1)\n\n for shapeIndex, shape in enumerate(shapePathArray):\n outputGroupPart = maya.cmds.listConnections('{0}.worldMesh[0]'.format(shape))[0]\n\n maya.cmds.connectAttr('{0}.outPolyMesh[{1}]'.format(self.alembicNode, shapeIndex),\n '{0}.inputGeometry'.format(outputGroupPart),\n f=True)\n\n maya.cmds.delete(shapePathArray[shapeIndex])\n\n maya.cmds.delete(self.repository)", "title": "" }, { "docid": "b4bbdd84a799aa0034d31acc4648e66c", "score": "0.5521075", "text": "def save_splits(self, folder):\n for name in self.sets:\n img_set = getattr(self, name, None)\n if img_set is not None:\n filename = f\"{folder}/{self.prefix}_{name}.txt\"\n img_set.save_img_list(filename)", "title": "" }, { "docid": "0fd53586bb4e42e02599bb99b2ab41e0", "score": "0.5512331", "text": "def test_images(self, tmpdir):\n with convert_many([TEST_NOTEBOOKS['empty'], TEST_NOTEBOOKS['images']],\n {\n '--output': str(tmpdir),\n '--images': str(tmpdir),\n }):\n assert len(glob('{}/*.png'.format(tmpdir))) == 2", "title": "" }, { "docid": "ffdc52f9b91a716f1dea09c7c8da2124", "score": "0.5508765", "text": "def save_image(data_np,save_dir,name=\"test\"):\n plt.axis('off')\n # plt.imshow(np.squeeze(images_np), cmap='gray')\n img_to_save = str(name) + \".png\"\n img_path = os.path.join(save_dir,img_to_save)\n\n plt.imsave(img_path,np.squeeze(data_np), cmap='gray')", "title": "" }, { "docid": "9c98892f57879936704129ccfa3c3539", "score": "0.5506516", "text": "def source_images():\n FLAGS = tf.app.flags.FLAGS\n\n names = tf.gfile.ListDirectory(FLAGS.source_dir_path)\n\n for name in names:\n name, ext = os.path.splitext(name)\n\n if ext.lower() not in ['.png', '.jpg', '.jpeg']:\n continue\n\n source_sd_path = os.path.join(FLAGS.source_dir_path, name + ext)\n target_bq_path = os.path.join(FLAGS.target_dir_path, name + '_bq.png')\n target_sr_path = os.path.join(FLAGS.target_dir_path, name + '_sr.png')\n\n # NOTE: read the sd image and bicubic upscale it to 4x\n sd_image = scipy.misc.imread(tf.gfile.GFile(source_sd_path, 'rb'))\n bq_image = scipy.misc.imresize(sd_image, 400, 'bicubic')\n\n # NOTE: re-map pixels range from -1.0 to +1.0\n sd_image = 
sd_image.astype(np.float32) / 127.5 - 1.0\n bq_image = bq_image.astype(np.float32) / 127.5 - 1.0\n\n # NOTE: expand batch dimension\n sd_image = np.expand_dims(sd_image, axis=0)\n bq_image = np.expand_dims(bq_image, axis=0)\n\n yield {\n 'sd_image': {'image': sd_image},\n 'bq_image': {'image': bq_image, 'path': target_bq_path},\n 'sr_image': {'path': target_sr_path},\n }", "title": "" }, { "docid": "39aa578a0c3b1bd9509b0bc33b22e039", "score": "0.55053174", "text": "def saveMap(filename, paths, images, faces, years, places):\n f = open(filename, 'w+')\n nodes = list(set(cbook.flatten(paths)))\n pathInd = {} #easier form to work with here\n for i in range(len(paths)):\n for j in paths[i]:\n if j in pathInd.keys():\n pathInd[j].append(i+1)\n else:\n pathInd[j] = [i+1]\n strs = []\n\n # Write nodes\n f.write('{ \"nodes\": [\\n')\n for node in nodes:\n imgPath = 'images/' + str(node) + '.png'\n #misc.imsave(websitePath + imgPath, images[node]) #XXX suspect don't need this anymore\n s = '{\"id\": ' + str(node) + ', \"line\": ' + str(pathInd[node])\n s += ', \"faces\": [' + ','.join([str(x) for x in np.nonzero(faces[node])[0]]) + ']'\n p = np.nonzero(places[node])[0]\n s += ', \"time\": ' + str(years[node]) + ', \"place\": ' + str(p[0] if len(p) > 0 else -1)\n s += '}'\n strs.append(s)\n f.write(',\\n'.join(strs) + '],\\n\"links\": [\\n')\n strs = []\n\n # Write links\n for i in range(len(paths)):\n p = paths[i]\n for j in range(0, len(p)-1):\n strs.append('{\"source\": ' + str(nodes.index(p[j])) + ', \"target\": ' + str(nodes.index(p[j+1])) + ', \"line\": ' + str(i+1) + '}')\n f.write(',\\n'.join(strs) + ']}')\n f.close()", "title": "" }, { "docid": "0081d63be635a85c704a28279d4ab88f", "score": "0.5504456", "text": "def __filegen__(root, mode, save):\n\timages_path = os.path.join(root, 'leftImg8bit')\n\tgt_path = os.path.join(root, 'gtFine')\n\n\timg_paths, gt_paths = [], [] \n\n\tfor idx in sorted(os.listdir(os.path.join(images_path, mode))):\n\t\tfor jdx in sorted(os.listdir(os.path.join(images_path, mode + '/' + idx))):\n\t\t\timg_paths.append(os.path.join(images_path, mode + '/' + idx + '/' + jdx))\n\t\t\tgt_paths.append(os.path.join(gt_path, mode + '/' + idx + '/' + jdx.replace(jdx.split('_')[3], 'gtFine_labelIds.png')))\n\n\ttry:\n\t\twith open(os.path.join(save, mode + '.txt'), 'w') as f:\n\t\t\t[f.write(str(img_paths[index]) + '\\t' + str(gt_paths[index]) + '\\n') \n\t\t\tfor index in range(len(img_paths))]\n\t\t\tf.close()\n\texcept ValueError:\n\t\tprint(f'{mode} file missing!')\t\n\t'''\n\ttry:\n\t\twith open(os.path.join(save, mode + '.csv'), 'w') as f:\n\t\t\twriter = csv.writer(f, delimiter = '\\t')\n\t\t\twriter.writerows(zip(img_paths, gt_paths))\n\texcept ValueError:\n\t\tprint(f'{mode} file missing!')\t\n\t'''", "title": "" }, { "docid": "d7a9f563c6b11e80efa683f9ffa99c78", "score": "0.549876", "text": "def copy_image_files(img_ids, foldername):\n \n # Load all traffic related image ids:\n anns = load_anns(\"/Users/David/Repositories/cocoTraffic/annotations/21_coco_sub_all_traffic/\", \n 'instances_val2017Relabelled.json') \n \n # Filter for traffic lights:\n path = '../images/'\n count = 0\n\n for img in img_ids:\n filename = (str(img) + \".jpg\").zfill(16)\n\n try:\n copyfile(path+'val2017/'+filename, path+foldername+'/'+filename)\n count +=1\n except FileNotFoundError:\n print('Folder {} does not exist. 
Please create it and try again.'.format(path+foldername))\n return\n assert(count == len(img_ids))\n\n print('Copied {} images to {}'.format(count, path+foldername))", "title": "" }, { "docid": "c755b68fa9e230651d78982c26061085", "score": "0.54945385", "text": "def go(self):\n script_dir=files.get_script_dir(self._campaign)\n if not os.path.exists(script_dir):\n print(\"making dir:\",script_dir)\n os.makedirs(script_dir)\n\n flist = imagemaker.get_flist(self._campaign)\n\n tilenames={}\n for key in flist:\n tilename=key[0:-2]\n tilenames[tilename] = tilename\n \n for tilename in tilenames:\n\n doprint=False\n for type in self._types:\n image_file=files.get_output_file(\n self._campaign,\n tilename,\n ext=type,\n )\n if not os.path.exists(image_file):\n doprint=True\n break\n\n if doprint:\n self._write_script(tilename)\n self._write_batch(tilename)\n else:\n self._clear_batch(tilename)", "title": "" }, { "docid": "578c7a31fdd958da5b3b9c5eb13163b1", "score": "0.5490692", "text": "def r2_arrays_to_googleEarth(images_r3_ma, lons, lats, layer_name_prefix = 'layer', kmz_filename = 'ICs',\n out_folder = './'):\n import numpy as np\n import os\n import shutil\n import simplekml\n from pathlib import Path\n\n\n n_images = images_r3_ma.shape[0] \n if type(out_folder) == str: # this should really be a path, but it could easily be a string. \n out_folder = Path(out_folder) # if it is a string, conver it. \n # 0 temporary folder for intermediate pngs\n try:\n os.mkdir('./temp_kml') # make a temporay folder to save pngs\n except:\n print(\"Can't create a folder for temporary kmls. Trying to delete 'temp_kml' incase it exisits already... \", end = \"\")\n try:\n shutil.rmtree('./temp_kml') # try to remove folder\n os.mkdir('./temp_kml') # make a temporay folder to save pngs\n print(\"Done. 
\")\n except:\n raise Exception(\"Problem making a temporary directory to store intermediate pngs\" )\n\n # 1: Initiate the kml\n kml = simplekml.Kml()\n \n # 2 Begin to loop through each iamge\n for n_image in np.arange(n_images)[::-1]: # Reverse so that first IC is processed last and appears as visible\n layer_name = f\"{layer_name_prefix}_{str(n_image).zfill(3)}\" # get the name of a layer a sttring\n r2_array_to_png(images_r3_ma[n_image,], layer_name, './temp_kml/') # save as an intermediate .png\n \n ground = kml.newgroundoverlay(name= layer_name) # add the overlay to the kml file\n ground.icon.href = f\"./temp_kml/{layer_name}.png\" # and the actual image part\n \n ground.gxlatlonquad.coords = [(lons[-1,0], lats[-1,0]), (lons[-1,-1],lats[-1,-1]), # lon, lat of image south west, south east\n (lons[0,-1], lats[0,-1]), (lons[0,0],lats[0,0])] # north east, north west - order is anticlockwise around the square, startign in the lower left\n \n #3: Tidy up at the end\n kml.savekmz(out_folder / f\"{kmz_filename}.kmz\", format=False) # Saving as KMZ\n shutil.rmtree('./temp_kml')", "title": "" }, { "docid": "672705ef5f84b972e292f59eaf1676c5", "score": "0.54848605", "text": "def write_images_from_dict(images_dict, folder_path):\n\n for label in images_dict:\n for i, image in enumerate(images_dict[label]):\n image_path = os.path.join(folder_path, \"{} {}.png\".format(label, i))\n if not cv2.imwrite(image_path, image):\n print(\"Could not create image at: {}\".format(image_path))", "title": "" }, { "docid": "4148d19f5e951af39eba1afa17c78d4b", "score": "0.548366", "text": "def export_cut_objects(df_row, path_out, padding, use_mask=True, bg_color=None):\n annot, _ = tl_io.load_image_2d(df_row['path_1'])\n img, name = tl_io.load_image_2d(df_row['path_2'])\n assert annot.shape[:2] == img.shape[:2], \\\n 'image sizes not match %s vs %s' % (repr(annot.shape), repr(img.shape))\n\n uq_objects = np.unique(annot)\n if len(uq_objects) == 1:\n return\n\n for idx in uq_objects[1:]:\n img_new = cut_object(img, annot == idx, padding, use_mask, bg_color)\n path_img = os.path.join(path_out, '%s_%i.png' % (name, idx))\n logging.debug('saving image \"%s\"', path_img)\n Image.fromarray(img_new).save(path_img)", "title": "" }, { "docid": "d97006b3130e3c79a366a48ac68c6d58", "score": "0.5466746", "text": "def save_images(self, ids, kpts, kpts_gt, noisy_kpts, split, step: int = 0):\n # We only need to print first 4 images\n if ids.shape[0] > 4:\n ids = ids[:4]\n kpts = kpts[:4]\n kpts_gt = kpts_gt[:4]\n\n # Retrieve Image from dataset\n if self.dataset == \"itop\":\n if split == \"train\":\n name = [self.train_loader.dataset.ids[el] for el in ids]\n ids_label = self.ids_train\n imgs_data = h5py.File(f\"/nas/DriverMonitoring/Datasets/ITOP/ITOP_{self.side}_train_depth_map.h5\", 'r')['data']\n elif split == \"val\":\n name = [self.val_loader.dataset.ids[el] for el in ids]\n ids_label = self.ids_train\n imgs_data = h5py.File(f\"/nas/DriverMonitoring/Datasets/ITOP/ITOP_{self.side}_train_depth_map.h5\", 'r')['data']\n elif split == \"test\":\n name = [self.test_loader.dataset.ids[el] for el in ids]\n ids_label = self.ids_test\n imgs_data = h5py.File(f\"/nas/DriverMonitoring/Datasets/ITOP/ITOP_{self.side}_test_depth_map.h5\", 'r')['data']\n else:\n raise ValueError(\"Split: {} not recognized\".format(split))\n\n # Convert 3D annotations to 2D\n if self.configer.get(\"metrics\", \"kpts_type\").lower() == \"3d\":\n kpts = world_to_depth(kpts)\n kpts_gt = world_to_depth(kpts_gt)\n\n imgs = list()\n for i, name in 
enumerate(name):\n index = int(np.where(np.array(ids_label) == name)[0])\n imgs.append(imgs_data[index])\n else:\n raise NotImplementedError(\"Dataset: {} not implemented\".format(self.dataset))\n\n imgs_detection = np.array([point_on_image(k, el) for k, el in zip(kpts, imgs)])\n imgs_gt = np.array([point_on_image(k, el) for k, el in zip(kpts_gt, imgs)])\n\n grid_pred = torchvision.utils.make_grid(torch.from_numpy(imgs_detection).permute(0, 3, 1, 2).float() / 255,\n nrow=int(imgs_detection.shape[0] ** 0.5), padding=2,\n normalize=False)\n grid_gt = torchvision.utils.make_grid(torch.from_numpy(imgs_gt).permute(0, 3, 1, 2).float() / 255,\n nrow=int(imgs_gt.shape[0] ** 0.5), padding=2,\n normalize=False)\n\n split = \"validation\" if split == \"val\" else split\n self.loss_summary.add_image(split + '_prediction', grid_pred, global_step=step)\n self.loss_summary.add_image(split + '_gt', grid_gt, global_step=step)\n\n if noisy_kpts is not None:\n if self.dataset == \"itop\":\n if noisy_kpts.shape[-1] == 3:\n noisy_kpts = world_to_depth(noisy_kpts / 1000)\n imgs_noise = np.array([point_on_image(k, el) for k, el in zip(noisy_kpts, imgs)])\n grid_noise = torchvision.utils.make_grid(torch.from_numpy(imgs_noise).permute(0, 3, 1, 2).float() / 255,\n nrow=int(imgs_detection.shape[0] ** 0.5), padding=2,\n normalize=False)\n self.loss_summary.add_image(split + '_input', grid_noise, global_step=step)\n\n return grid_gt, grid_pred", "title": "" }, { "docid": "a98364f50d67cb903ce37fc3d5d9321d", "score": "0.5465751", "text": "def export_bitmap(self,filename):\n self.bitmap.save(filename)", "title": "" }, { "docid": "73d797464c59c77177e70c05d60044b3", "score": "0.5458655", "text": "def extract_faces(filepath,endpath,start_folder=0):\r\n\r\n \"\"\"Filepath is the initial location of the images. 
For the datastructure of the stored images look at the example.\"\"\"\r\n\r\n sub_folders = os.listdir(filepath)\r\n for j in range(start_folder,len(sub_folders)) :\r\n os.makedirs(endpath+\"/training-data/\"+sub_folders[j],exist_ok=True)\r\n os.makedirs(endpath+\"/test-data/\" + sub_folders[j], exist_ok=True)\r\n for i,image in enumerate(os.listdir(os.path.join(filepath,sub_folders[j]))):\r\n image_path = filepath + \"/\" + sub_folders[j] + \"/\" + image\r\n img = cv2.imread(image_path)\r\n try:\r\n # x1= face_detection(img).left()\r\n # x2 = face_detection(img).right()\r\n # y1 = face_detection(img).top()\r\n # y2 = face_detection(img).bottom()\r\n coods = face_detection(img)\r\n # x,y,w,h=face_detection(img)\r\n if i < 10: # Change this number to change the number of images in training set and test set\r\n cv2.imwrite(endpath+\"/training-data/\" + sub_folders[j] + \"/\" + sub_folders[j] + \"_\" + str(\r\n i + 1) + \".jpg\", img[coods[1]:coods[3],coods[0]:coods[2],])\r\n # im = cv2.imread(endpath+\"training-data/\" + sub_folders[j] + \"/\" + sub_folders[j] + \"_\" + str(i + 1) + \".jpg\")\r\n # cv2.imshow(\"Cropped\",im)\r\n # cv2.waitKey()\r\n # cv2.destroyAllWindows()\r\n else:\r\n cv2.imwrite(\r\n endpath+\"/test-data/\" + sub_folders[j] + \"/\" + sub_folders[j] + \"_\" + str(i + 1) + \".jpg\",\r\n img[coods[1]:coods[3], coods[0]:coods[2],])\r\n except IndexError:\r\n\r\n sys.exit(\"Please provide another image.\\nImage error :\" + filepath+\"/\"+sub_folders[j]+\"/\"+image)", "title": "" }, { "docid": "6d6112ddf5145c2d8f3d9733c9daaaf4", "score": "0.5457488", "text": "def create_image_masks(images_withFixations, output_path,bw=30,test_ratio = 0.5,seed=42):\n sns.set_style(\"darkgrid\", {\"axes.facecolor\": \".9\",\n 'figure.facecolor': 'white',\n 'axes.spines.bottom': False,\n 'axes.spines.left': False,\n 'axes.spines.right': False,\n 'axes.spines.top': False,\n })\n images_names_list = [int(re.search(r'\\d+', os.path.basename(images[1])).group())\n for images in images_withFixations]\n unque_images_length = len(np.unique(images_names_list))\n splitting_imageindex = int(unque_images_length * (1- test_ratio))\n\n np.random.seed(seed)\n np.random.shuffle(images_names_list)\n\n train_list = images_names_list[:splitting_imageindex]\n validate_list = images_names_list[splitting_imageindex:]\n\n #os.chdir(output_path)\n if os.path.exists(output_path+\"/training/images\"):\n shutil.rmtree(output_path+\"/training/images\")\n os.makedirs(output_path+\"/training/images\")\n\n if os.path.exists(output_path+\"/training/masks\"):\n shutil.rmtree(output_path+\"/training/masks\")\n os.makedirs(output_path+\"/training/masks\")\n\n #os.chdir(output_path)\n if os.path.exists(output_path+\"/test/images\"):\n shutil.rmtree(output_path+\"/test/images\")\n os.makedirs(output_path+\"/test/images\")\n\n if os.path.exists(output_path+\"/test/masks\"):\n shutil.rmtree(output_path+\"/test/masks\")\n os.makedirs(output_path+\"/test/masks\")\n\n #if np.sum(images_withFixations[-1][2].iloc[:,2]):\n\n for i, image in enumerate(images_withFixations):\n image_id = os.path.basename(image[1])\n image_id_number = int(re.search(r'\\d+',\n os.path.basename(image[1])).group())\n\n if image_id_number in train_list:\n directory_name = \"training\"\n print(\"Image Training \",os.path.basename(image[1]))\n elif image_id_number in validate_list:\n directory_name = \"test\"\n print(\"Image test \",os.path.basename(image[1]))\n\n print(image[2].iloc[:,2].min())\n if image[2].iloc[:,2].min() > 0:\n\n plt.figure(figsize=(w/my_dpi, 
h/my_dpi), dpi=my_dpi)\n fig = sns.kdeplot(image[2].iloc[:,2],\n image[2].iloc[:,3],\n kernel = \"gau\",\n bw= bw,\n cmap = plt.cm.gray,\n shade=True,\n n_levels = 100,\n legend= False,\n cumulative= False)\n fig.axes.get_yaxis().set_visible(False)\n fig.axes.get_xaxis().set_visible(False)\n fig.axes.invert_yaxis()\n\n plt.tight_layout()\n plt.setp([fig.get_xticklines() +\n fig.get_yticklines() +\n fig.get_xgridlines() +\n fig.get_ygridlines()],antialiased=False)\n\n figure = fig.get_figure()\n figure.tight_layout(pad=0)\n figure.canvas.draw()\n image1 = np.fromstring(figure.canvas.tostring_rgb(),\n dtype=np.uint8,sep='')\n image1 = image1.reshape(figure.canvas.get_width_height()[::-1] + (3,))\n imageio.imwrite(output_path+\"/\"+directory_name+\"/masks/\"+image_id,\n image1[:,:,0])\n\n copyfile(image[1],\n output_path+\"/\"+directory_name+\"/images/\"+image_id)", "title": "" }, { "docid": "1861cf485e9d2498fbfb23388e3d0e6b", "score": "0.54358095", "text": "def get_synth_images():\n \n # code to store synthetic image data in an h5 file\n# import os\n# files = os.listdir(\"C:/Users/richardcouperthwaite/Documents/GitHub/MSEN655/classification/data/Cropped_images\")\n# print(files)\n# hf = h5py.File('SynthData.hdf5', 'w')\n# for file in files:\n# x = load_micrograph(\"C:/Users/richardcouperthwaite/Documents/GitHub/MSEN655/classification/data/Cropped_images/\"+file)\n# hf.create_dataset(file, data=x)\n \n # code to import the synthetic images from an h5 file\n hf = h5py.File('SynthData.hdf5', 'r')\n for key in hf.keys():\n x = hf.get(key)\n try:\n labels.append([key])\n x_input = np.r_[x_input, x]\n except NameError:\n labels = [[key]]\n x_input = x\n \n return labels, x_input", "title": "" }, { "docid": "b33c8d3df5ff1865d72510fb18f1d0e2", "score": "0.5434624", "text": "def basic_input(images):\n for i, image_name in enumerate(images):\n img = cv2.imread(image_name)\n cv2.imwrite(\"output/ps0-1-a-{}.png\".format(i + 1), img)", "title": "" }, { "docid": "9dc9f27e6c734f04e66138b3a165b801", "score": "0.54334617", "text": "def graphextract(self):\n self.pdf_to_img()\n #self.count_graph=1\n for self.countPage in range(self.numPages):\n self.graphfolder = \"output_page_\"+str(self.countPage)\n if(not os.path.exists(self.graphfolder)):\n os.makedirs(self.graphfolder)\n self.pageImage = self.pdfImage[self.countPage]\n self.polydp(self.houghp())", "title": "" }, { "docid": "6567dd5e3617d8dc1238899671a532df", "score": "0.5432392", "text": "def save_images(image_list):\n filenames, images = list(image_list.keys()), np.array(list(image_list.values()))\n shape = images.shape[1:]\n np.save(_get_npy_filename(shape), images)\n with open(_get_filename_filename(shape), 'wb') as f:\n pickle.dump(filenames, f)\n print('Saved {:,} images to disk'.format(images.shape[0]))", "title": "" }, { "docid": "91d8feed3b58a1829d1bfe96e8d34e3d", "score": "0.54298824", "text": "def package_data(directory, datasets, image_shape, outfile_path):\n assert(len(datasets) == 3)\n\n xsize = numpy.prod(image_shape)\n\n x_datasets = [numpy.zeros((len(dataset), xsize), dtype=numpy.uint8) for dataset in datasets]\n y_datasets = [numpy.array(dataset.values(), dtype=numpy.uint8) for dataset in datasets]\n\n print \"| \" + (\"⚐ ⚑ \" * 19) + \"-|\"\n pb = NyanBar(tasks=sum([len(dataset) for dataset in datasets]))\n for j, dataset in enumerate(datasets):\n for i, image_name in enumerate(dataset.keys()):\n pb.task_done()\n im = Image.open(directory + image_name)\n x_datasets[j][i, :] = numpy.array(im.getdata(), dtype=numpy.uint8).reshape(xsize)\n 
pb.finish()\n\n print '... saving data'\n # cPickle too slow (takes many minutes for tens of thousands of images over 100x100x3)\n saveme = [x_datasets[0], y_datasets[0], x_datasets[1], y_datasets[1], x_datasets[2], y_datasets[2]]\n numpy.savez(open(outfile_path, 'wb'), *saveme)\n\n print 'done'", "title": "" }, { "docid": "05c9e7f30500079bba21fd70aae854f6", "score": "0.54241884", "text": "def visualize_shape(name, shape_list, result_dir):\n vis = o3d.visualization.Visualizer()\n vis.create_window(window_name=name, width=512, height=512, left=50, top=25)\n for shape in shape_list:\n vis.add_geometry(shape)\n ctr = vis.get_view_control()\n ctr.rotate(-300.0, 150.0)\n if name == 'camera':\n ctr.translate(20.0, -20.0) # (horizontal right +, vertical down +)\n if name == 'laptop':\n ctr.translate(25.0, -60.0)\n vis.run()\n vis.capture_screen_image(os.path.join(result_dir, name+'.png'), False)\n vis.destroy_window()", "title": "" }, { "docid": "76e137eed8f8288cdb01f5e41b623bc4", "score": "0.54218173", "text": "def save_complex_output_files(self):\n if self.setname == 'train' or self.setname == 'test':\n self.disease_out.to_csv(os.path.join(self.results_dir, 'imgtrain_note'+self.setname+'_DiseaseBinaryLabels.csv'))\n self.merged.to_csv(os.path.join(self.results_dir, 'imgtrain_note'+self.setname+'_Merged.csv'))\n self.missing.to_csv(os.path.join(self.results_dir, 'imgtrain_note'+self.setname+'_Missingness.csv'))\n elif self.setname == 'predict':\n def save_set(description, out_bin, disease_out, merged, missing):\n all_ids, available_accs = load.load_all_ids_and_accs()\n ids = all_ids[all_ids['Set_Assigned']==description]['Accession'].values.tolist()\n ids = [x for x in ids if x in available_accs]\n #Select out_bin filenames and save\n out_bin = {}\n for key in ids:\n out_bin[key] = self.out_bin[key]\n #Select disease_out filenames and save\n disease_out = disease_out.loc[ids,:]\n disease_out.to_csv(os.path.join(self.results_dir, description+'_DiseaseBinaryLabels.csv'))\n #Select merged filenames and save\n merged = merged[merged['Filename'].isin(ids)]\n merged.to_csv(os.path.join(self.results_dir, description+'_Merged.csv'))\n #Select missing filenames and save\n missing = missing.loc[ids,:]\n missing.to_csv(os.path.join(self.results_dir, description+'_Missingness.csv'))\n return len(list(out_bin.keys())), merged.shape[0]\n outshape = len(list(self.out_bin.keys())); mergedshape = self.merged.shape[0]\n o1, m1 = save_set('imgtrain_extra', self.out_bin, self.disease_out, self.merged, self.missing)\n o2, m2 = save_set('imgvalid', self.out_bin, self.disease_out, self.merged, self.missing)\n o3, m3 = save_set('imgtest', self.out_bin, self.disease_out, self.merged, self.missing)\n assert o1+o2+o3 == outshape\n assert m1+m2+m3 == mergedshape", "title": "" }, { "docid": "08fd150101a0919d4a060b899ef93ff8", "score": "0.5420023", "text": "def run( flag ):\n\n if not os.path.isdir( flag.output_dir ):\n os.mkdir( flag.output_dir )\n flag.output_predict_dir = os.path.join( flag.output_dir, flag.output_predict_subdir)\n if not os.path.isdir( flag.output_predict_dir ):\n os.mkdir( flag.output_predict_dir )\n\n images, pred, masks = predict( flag )\n\n outputName = 'metrics_table.csv'\n outputPath = os.path.join( flag.output_predict_dir, outputName )\n flag.region_list = [\"bg\"] + flag.region_list\n saveClassificationMetrics( pred, masks, flag.region_list, outputPath )\n\n nImages = images.shape[0]\n for i in range( nImages ):\n img = images[i,...]\n mask = pred[i,...]\n mask_ref = masks[i,...]\n\n outputName = 
'pred_%d.png' % ( i )\n outputPath = os.path.join( flag.output_predict_dir, outputName )\n makeOverlay( img, mask, outputPath )\n\n outputName = 'img_%d.png' % ( i )\n outputPath = os.path.join( flag.output_predict_dir, outputName )\n saveImage( img, outputPath )\n\n outputName = 'img-raw_%d.png' % ( i )\n outputPath = os.path.join( flag.output_predict_dir, outputName )\n saveImageRaw( img, outputPath )\n\n outputName = 'manual_mask_%d.png' % ( i )\n outputPath = os.path.join( flag.output_predict_dir, outputName )\n saveLabelImage( mask_ref, outputPath ) \n\n outputName = 'pred_mask_%d.png' % ( i )\n outputPath = os.path.join( flag.output_predict_dir, outputName )\n saveLabelImage( mask, outputPath ) \n\n outputName = 'manual_%d.png' % ( i )\n outputPath = os.path.join( flag.output_predict_dir, outputName )\n makeOverlay( img, mask_ref, outputPath )\n\n outputName = 'difference_%d.png' % ( i )\n outputPath = os.path.join( flag.output_predict_dir, outputName )\n makeOverlayMaskDifference( img, mask_ref, mask, outputPath )", "title": "" }, { "docid": "87669815ef5abeb2cb95cd4b3e19fa07", "score": "0.5416235", "text": "def convert(\n subset: str,\n clevr_path: Path,\n clevr_box_path: Optional[Path],\n refclevr_path: Optional[Path],\n output_path: Path,\n no_caption: bool,\n medium: bool,\n templates,\n next_img_id: int = 0,\n next_id: int = 0,\n):\n\n print(f\"Exporting {subset}...\")\n\n questions = None\n if not no_caption:\n print(\"Loading questions...\")\n with open(clevr_path / f\"questions/CLEVR_{subset}_questions.json\") as f:\n questions = json.load(f)[\"questions\"]\n\n scenes = []\n use_refclevr = refclevr_path is not None\n all_bboxes = []\n cat2obj = []\n if use_refclevr:\n print(\"Loading scenes (using refclevr annotations)...\")\n if subset != \"test\":\n with open(refclevr_path / f\"CLEVR_{subset}_scenes.json\") as f:\n scenes = json.load(f)[\"scenes\"]\n else:\n print(\"Loading scenes...\")\n if subset != \"test\":\n with open(clevr_path / f\"scenes/CLEVR_{subset}_scenes.json\") as f:\n scenes = json.load(f)[\"scenes\"]\n\n print(\"Loading boxes...\")\n if subset == \"val\" or subset == \"train\":\n # In the bounding box dataset, they renamed val to test for some reason\n sset = \"train\" if subset == \"train\" else \"test\"\n with open(clevr_box_path / f\"annotations_{sset}.json\") as f:\n all_bboxes = json.load(f)\n else:\n # We don't have bounding boxes for the actual test set.\n all_bboxes = []\n\n with open(clevr_box_path / \"cat2obj.json\") as f:\n cat2obj = json.load(f)\n\n categories = [{\"supercategory\": \"object\", \"id\": 1, \"name\": \"object\"}]\n annotations = []\n images = []\n\n if subset == \"test\":\n # We don't have scenes nor bounding boxes, simply create image information\n for id in tqdm(range(15000)):\n cur_img = {\n \"file_name\": \"CLEVR_test_{id:06}.png\",\n \"height\": 320,\n \"width\": 480,\n \"id\": next_img_id,\n }\n next_img_id += 1\n images.append(cur_img)\n elif no_caption:\n\n for scene in tqdm(scenes):\n all_objects = ItemCollection(scene).objects\n all_seg = None\n if use_refclevr:\n bboxes, all_seg, _ = retrieve_boxes_and_masks(scene, all_objects)\n else:\n bboxes, _ = retrieve_boxes(scene, all_objects, all_bboxes, cat2obj)\n cur_img = {\n \"file_name\": scene[\"image_filename\"],\n \"height\": 320,\n \"width\": 480,\n \"id\": next_img_id,\n }\n for i, b in enumerate(bboxes):\n xmin, ymin, w, h = b\n cur_obj = {\n \"area\": h * w,\n \"iscrowd\": 0,\n \"image_id\": next_img_id,\n \"category_id\": 1,\n \"id\": next_id,\n \"bbox\": [xmin, 
ymin, w, h],\n }\n if all_seg is not None:\n cur_obj[\"segmentation\"] = all_seg[i]\n next_id += 1\n annotations.append(cur_obj)\n\n next_img_id += 1\n images.append(cur_img)\n\n else:\n for qid, question in enumerate(tqdm(questions)):\n scid = question[\"image_index\"]\n scene = scenes[scid]\n\n try:\n all_objects = parse_prog(scene, question, templates, medium)\n except SkipException:\n # print(\"skipping\", qid)\n continue\n\n all_seg = None\n if use_refclevr:\n bboxes, all_seg, tokens = retrieve_boxes_and_masks(scene, all_objects)\n else:\n bboxes, tokens = retrieve_boxes(scene, all_objects, all_bboxes, cat2obj)\n cur_img = {\n \"file_name\": scene[\"image_filename\"],\n \"height\": 320,\n \"width\": 480,\n \"id\": next_img_id,\n \"caption\": question[\"question\"],\n \"answer\": question[\"answer\"],\n }\n for i, (b, t) in enumerate(zip(bboxes, tokens)):\n xmin, ymin, w, h = b\n cur_obj = {\n \"area\": h * w,\n \"iscrowd\": 0,\n \"image_id\": next_img_id,\n \"category_id\": 1,\n \"id\": next_id,\n \"bbox\": [xmin, ymin, w, h],\n \"tokens\": t,\n }\n if all_seg is not None:\n cur_obj[\"segmentation\"] = all_seg[i]\n next_id += 1\n annotations.append(cur_obj)\n\n next_img_id += 1\n images.append(cur_img)\n\n ds = {\"info\": [], \"licenses\": [], \"images\": images, \"annotations\": annotations, \"categories\": categories}\n with open(output_path / f\"{subset}.json\", \"w\") as j_file:\n json.dump(ds, j_file)\n return next_img_id, next_id", "title": "" }, { "docid": "9e8df4331186832ad548a8c20981f892", "score": "0.54159397", "text": "def mosaic(self):\n # Create /prediction/<product_name> directory\n big_image_product = os.path.join(self.big_image_folder, self.product_name)\n if not os.path.exists(big_image_product):\n os.mkdir(big_image_product)\n\n # Create list of prediction images\n image_list = []\n for subfolder in os.listdir(self.prediction_product_path):\n if os.path.isdir(os.path.join(self.prediction_product_path, subfolder)):\n image_list.append(pathlib.Path(os.path.join(self.prediction_product_path, subfolder, \"prediction.png\")))\n\n # Sort images by asc (e.g. 
0_0, 0_1, 0_2)\n image_list.sort(key=lambda var: get_img_entry_id(var))\n\n \"\"\"\n A function that creates raster mosaic.\n As parameters it takes: list of images, number of tiles per row and number of columns\n \n 1) Takes the sub-tile width and height from the first image in the list\n 2) Sets final image size from col*width, rows*height\n 3) Creates final image from all sub-tiles, bounding box parameters are also set \n \"\"\"\n overlap_pix = self.overlapping * self.tile_size\n crop_coef = int(overlap_pix / 2)\n n_rows = math.ceil(10980 / (self.tile_size - crop_coef))\n new_im = image_grid_overlap(image_list, rows=n_rows, cols=n_rows, crop=crop_coef)\n\n ImageFile.LOAD_TRUNCATED_IMAGES = True\n Image.MAX_IMAGE_PIXELS = None\n\n # For a correct georeference it is necessary to use 10m resolution band\n jp2 = ''\n if self.product == \"L2A\":\n for root, dirs, files in os.walk(self.product_safe):\n if root.endswith(\"R10m\"):\n for file in files:\n if file.endswith(\".jp2\"):\n jp2 = os.path.join(root, file)\n elif self.product == \"L1C\":\n for root, dirs, files in os.walk(self.product_safe):\n if root.endswith(\"IMG_DATA\"):\n for file in files:\n if file.endswith(\"B02.jp2\"):\n jp2 = os.path.join(root, file)\n\n # Define a directory where to save a new file, resolution, etc.\n # Get name and index from product name\n date_name = self.product_name.rsplit('_', 4)[0].rsplit('_', 1)[1]\n index_name = self.product_name.rsplit('_', 1)[0].rsplit('_', 1)[-1]\n\n # Define the output names\n png_name = os.path.join(big_image_product, self.product + \"_\" + index_name + \"_\" + date_name + '_KZ_10m.png')\n tif_name = os.path.join(big_image_product, self.product + \"_\" + index_name + \"_\" + date_name + '_KZ_10m.tif')\n\n # Crop the edges in the final image\n f_tile_size = (self.tile_size - crop_coef * 2) * n_rows\n crop = f_tile_size - 10980\n new_im_cropped = ImageOps.crop(new_im, (0, 0, crop, crop))\n\n # Fill metadata for PNG format\n metadata = PngInfo()\n metadata.add_text(\"Software\", \"CM_PREDICT {}; CM_VSM {}\".format(__version__, str(self.cm_vsm_version).strip()))\n\n # Save with a recommended quality and metadata for png, tif is done further down\n new_im_cropped.save(png_name, \"PNG\", quality=95, pnginfo=metadata)\n new_im_cropped.save(tif_name, \"TIFF\", quality=95)\n\n # Deal with tiff-related issues: projection, bands, tags\n proj_rasterio(jp2, tif_name)\n '''\n Assign 0-255 to 0-5 output\n Save final single band raster\n '''\n\n # Read band 1 (out of 3, they're identical)\n with rasterio.open(tif_name) as tif:\n profile = tif.profile.copy()\n band1 = tif.read(1)\n\n # Translate values\n band1[band1 == 0] = 0\n band1[band1 == 66] = 1\n band1[band1 == 129] = 2\n band1[band1 == 192] = 3\n band1[band1 == 255] = 4\n band1[band1 == 20] = 5\n\n profile.update({\"count\": 1})\n\n with rasterio.open(tif_name, 'w', **profile) as dst:\n dst.write(band1, 1)\n\n # Add a version tag for tiff image\n tif_img = Image.open(tif_name)\n tif_img.tag[305] = \"CM_PREDICT {}; CM_VSM {}\".format(__version__, str(self.cm_vsm_version).strip())\n tif_img.save(tif_name, tiffinfo=tif_img.tag)", "title": "" }, { "docid": "2a4bdf79009e4b367b4d7c0cd245b6f0", "score": "0.5412121", "text": "def get_images(self):\n self.mainlist=[]\n try:\n for self.dirpaths, self.dirnames, self.filenames in os.walk(self.dir):\n self.class_names=len(self.dirnames)\n for i in range (int(class_names)):\n try:\n for dirpaths, dirnames, filenames in os.walk(dirnames[i]):\n file_names=int(len(filenames))\n for m in range 
(file_names):\n image=self.convert_image_tensor(filenames[m])\n arrayed=np.array(image)\n list=arrayed.tolist()\n image_list=[]\n\n for i in range (70):\n for m in range (70):\n for k in range (3):\n image_list.append(list[i][m][k])\n self.mainlist.append(image_list.append(self.dirnames))\n list.clear()\n except Exception as e:\n print (\"The file doesnt have any images\") \n except Exception as e:\n print ('Another direcory in the main directory') \n\n all_results = pd.DataFrame(self.mainlist)\n\n writer = pd.ExcelWriter('demo.xlsx', engine='xlsxwriter')\n\n # Convert the dataframe to an XlsxWriter Excel object.\n all_results.to_excel(writer, sheet_name='Sheet1', index=False)\n # Close the Pandas Excel writer and output the Excel file.\n \n writer.save() \n print (\"A file already exists. Please Delete the file\")", "title": "" }, { "docid": "a37f369be40410c32899c43b6ef184ec", "score": "0.5406625", "text": "def visualize_imgs(self) -> None:\n for split in [\"train\", \"val\"]:\n split_img_fpaths = glob.glob(f\"{self.img_dir}/{split}/*.jpg\")\n for img_fpath in split_img_fpaths:\n fname_stem = Path(img_fpath).stem\n img_rgb = imageio.imread(img_fpath)\n label_img_fpath = f\"{self.label_dir}/{split}/{fname_stem}_train_id.png\"\n label_img = imageio.imread(label_img_fpath)\n\n mask_img = convert_instance_img_to_mask_img(label_img, img_rgb)\n plt.imshow(mask_img)\n plt.show()\n quit()", "title": "" }, { "docid": "8f40886f1452ea9971a5df94aca85ab3", "score": "0.54047763", "text": "def process(self, image_path, images_path):\n pass", "title": "" }, { "docid": "6cc83c7b16c4334f19666947f80ecbda", "score": "0.54040086", "text": "def save_labeled_images(images, filenames):\n\n if not os.path.exists('labeled images'):\n try:\n os.mkdir('labeled images')\n print('Made labeled images folder')\n except:\n print('Error making the labeled images folder')\n for i, img in enumerate(images):\n try:\n img.convert('RGB').save(os.path.join('labeled images',\n os.path.basename(filenames[i])))\n print('labeled image saved')\n except:\n print(filenames + ' unable to save labeled image')", "title": "" }, { "docid": "b697b5f7a4472de66bdcd5e980dd3ae2", "score": "0.5402333", "text": "def writeSVG(self, path):\n\n # compute inverse scale so the SVG is near the original image size\n invScale = 1 / self.scale\n imgSize = numpy.shape(self.target[:,:,0])\n svg = svgwrite.Drawing(path, (int(imgSize[1])*invScale, int(imgSize[0])*invScale), debug=True)\n\n # background\n svg.add(svg.rect(insert=(0, 0), size=('100%', '100%'), rx=None, ry=None, fill=self.svgColor(self.background_color)))\n\n shapes = svg.add(svg.g(id='shapes'))\n for shape in self.shapes:\n\n # apply the inverse scale factor to the shape points\n polyPoints = shape.points * invScale\n\n # reverse x/y because SVG expects them in the other order\n temp = numpy.copy(polyPoints[:, 0])\n polyPoints[:, 0] = polyPoints[:, 1]\n polyPoints[:, 1] = temp\n\n polyPoints = numpy.ndarray.tolist((polyPoints).astype(numpy.int))\n\n # add the SVG polygon object\n polygon = svg.polygon(points=polyPoints, fill=self.svgColor(shape.color), opacity=shape.color[3])\n shapes.add(polygon)\n svg.save()\n\n print 'SVG saved to: ', path", "title": "" }, { "docid": "7b0039bf46a1c73fdc2b71d014b43f42", "score": "0.53960824", "text": "def extract_data(imgnames, suffix=\".pklz\", config={}, to_extract=True, \n zero_background=False, den=32, new_shape=(232, 240)):\n \n hs, ws = [], []\n ratios = []\n mhs, mws = [], []\n \n for imgname in imgnames:\n print \" Zpracovavam obrazek \"+imgname\n 
data, gt_mask, voxel_size = tools.load_pickle_data(imgname)\n name = imgname[:-len(suffix)]\n # Aplikace CT okenka:\n data = se.apply_CT_window(data)#, target_range=1.0, target_dtype=float)\n \n hs.append(data.shape[1])\n ws.append(data.shape[2])\n ratios.append(float(data.shape[1]) / data.shape[2])\n \n # extrakce CT oken\n if to_extract:\n mh, mw = extract_slices(data, gt_mask, config, name, \n zero_background=zero_background,\n new_shape=new_shape)\n mhs.append(mh)\n mws.append(mw)\n\n avg_shape = [np.mean(hs), np.mean(ws)]\n \n round_avg_shape = [int((avg_shape[0] + den/2) // den) * den,\n int((avg_shape[1] + den/2) // den) * den]\n \n print \"[RESULT] Prumerny tvar rezu je: \", avg_shape\n print \" -> zaokrouhleno: \", round_avg_shape\n \n if to_extract:\n max_shape = [np.max(mhs), np.max(mws)]\n round_max_shape = [int((max_shape[0] + den/2) // den) * den,\n int((max_shape[1] + den/2) // den) * den]\n print \" -> ratio: \", avg_shape[0] / avg_shape[1]\n print \"[RESULT] Maximalni tvar rezu je: \", max_shape\n print \" -> zaokrouhleno: \", round_max_shape\n# plt.hist(ratios, bins=100)\n# plt.show()\n return tuple(round_avg_shape)", "title": "" }, { "docid": "68efd6407986ecf17652eec63b1ee0ca", "score": "0.5394224", "text": "def get_export_data(conn, script_params, image, tag, units=None):\n log(\"Image ID %s...\" % image.id)\n # Get pixel size in SAME units for all images\n pixel_size_x, pixel_size_y = get_image_pixel_size(image, units)\n roi_service = conn.getRoiService()\n all_planes = False\n size_c = image.getSizeC()\n # Channels index\n channels = script_params.get(\"Channels\", [1])\n ch_indexes = []\n for ch in channels:\n if ch < 1 or ch > size_c:\n log(\"Channel index: %s out of range 1 - %s\" % (ch, size_c))\n else:\n # User input is 1-based\n ch_indexes.append(ch - 1)\n\n ch_names = image.getChannelLabels()\n\n ch_names = [ch_name.replace(\",\", \".\") for ch_name in ch_names]\n image_name = image.getName().replace(\",\", \".\")\n\n result = roi_service.findByImage(image.getId(), None)\n\n rois = result.rois\n # Sort by ROI.id (same as in iviewer)\n rois.sort(key=lambda r: r.id.val)\n export_data = []\n\n for roi in rois:\n for shape in roi.copyShapes():\n label = unwrap(shape.getTextValue())\n # wrap label in double quotes in case it contains comma\n label = \"\" if label is None else '\"%s\"' % label.replace(\",\", \".\")\n shape_type = shape.__class__.__name__.rstrip('I').lower()\n # If shape has no Z or T, we may go through all planes...\n the_z = unwrap(shape.theZ)\n z_indexes = [the_z]\n if the_z is None and all_planes:\n z_indexes = range(image.getSizeZ())\n # Same for T...\n the_t = unwrap(shape.theT)\n t_indexes = [the_t]\n if the_t is None and all_planes:\n t_indexes = range(image.getSizeT())\n\n # get pixel intensities\n for z in z_indexes:\n for t in t_indexes:\n if z is None or t is None:\n stats = None\n else:\n stats = roi_service.getShapeStatsRestricted(\n [shape.id.val], z, t, ch_indexes)\n for c, ch_index in enumerate(ch_indexes):\n row_data = {\n \"image_id\": image.getId(),\n \"image_name\": '\"%s\"' % image_name,\n \"roi_id\": roi.id.val,\n \"shape_id\": shape.id.val,\n \"type\": shape_type,\n \"text\": label,\n \"z\": z + 1 if z is not None else \"\",\n \"t\": t + 1 if t is not None else \"\",\n \"channel\": ch_names[ch_index],\n \"points\": stats[0].pointsCount[c] if stats else \"\",\n \"min\": stats[0].min[c] if stats else \"\",\n \"max\": stats[0].max[c] if stats else \"\",\n \"sum\": stats[0].sum[c] if stats else \"\",\n \"mean\": stats[0].mean[c] if 
stats else \"\",\n \"std_dev\": stats[0].stdDev[c] if stats else \"\",\n \"tag\": tag,\n }\n add_shape_coords(shape, row_data,\n pixel_size_x, pixel_size_y)\n export_data.append(row_data)\n\n return export_data", "title": "" }, { "docid": "32b37056b298480982dbcc45902e5df0", "score": "0.53841645", "text": "def export(checkpoint_path, export_dir, export_version, export_for_serving, add_preprocess_step, class_names, cfg):\n\n if not os.path.exists(export_dir):\n print(\"Making export directory: %s\" % (export_dir,))\n os.makedirs(export_dir)\n\n graph = tf.Graph()\n\n array_input_node_name = \"images\"\n bytes_input_node_name = \"image_bytes\"\n\n output_node_name = \"Predictions\"\n class_names_node_name = \"names\"\n\n with graph.as_default():\n\n global_step = slim.get_or_create_global_step()\n\n input_height = cfg.IMAGE_PROCESSING.INPUT_SIZE\n input_width = cfg.IMAGE_PROCESSING.INPUT_SIZE\n input_depth = 3\n\n # We want to store the preprocessing operation in the graph\n if add_preprocess_step:\n\n # The TensorFlow map_fn() function passes one argument only,\n # so I have put this method here to take advantage of scope\n # (to access input_height, etc.)\n def preprocess_image(image_buffer):\n \"\"\"Preprocess image bytes to 3D float Tensor.\"\"\"\n\n # Decode image bytes\n image = tf.image.decode_image(image_buffer)\n image = tf.image.convert_image_dtype(image, dtype=tf.float32)\n\n # make sure the image is of rank 3\n image = tf.cond(\n tf.equal(tf.rank(image), 2),\n lambda: tf.expand_dims(image, 2),\n lambda: image\n )\n\n num_channels = tf.shape(image)[2]\n\n # if we decoded 1 channel (grayscale), then convert to a RGB image\n image = tf.cond(\n tf.equal(num_channels, 1),\n lambda: tf.image.grayscale_to_rgb(image),\n lambda: image\n )\n\n # if we decoded 2 channels (grayscale + alpha), then strip off the last dim and convert to rgb\n image = tf.cond(\n tf.equal(num_channels, 2),\n lambda: tf.image.grayscale_to_rgb(tf.expand_dims(image[:,:,0], 2)),\n lambda: image\n )\n\n # if we decoded 4 or more channels (rgb + alpha), then take the first three channels\n image = tf.cond(\n tf.greater(num_channels, 3),\n lambda : image[:,:,:3],\n lambda : image\n )\n\n # Resize the image to the input height and width for the network.\n image = tf.expand_dims(image, 0)\n image = tf.image.resize_bilinear(image,\n [input_height, input_width],\n align_corners=False)\n image = tf.squeeze(image, [0])\n # Finally, rescale to [-1,1] instead of [0, 1)\n image = tf.subtract(image, 0.5)\n image = tf.multiply(image, 2.0)\n return image\n\n image_bytes_placeholder = tf.placeholder(tf.string, name=bytes_input_node_name)\n preped_images = tf.map_fn(preprocess_image, image_bytes_placeholder, dtype=tf.float32)\n input_placeholder = tf.identity(preped_images, name=array_input_node_name) # Explicit name (we can't name the map_fn)\n\n # We assume the client has preprocessed the data for us\n else:\n input_placeholder = tf.placeholder(tf.float32, [None, input_height * input_width * input_depth], name=array_input_node_name)\n\n images = tf.reshape(input_placeholder, [-1, input_height, input_width, input_depth])\n\n arg_scope = nets_factory.arg_scopes_map[cfg.MODEL_NAME]()\n\n with slim.arg_scope(arg_scope):\n logits, end_points = nets_factory.networks_map[cfg.MODEL_NAME](\n inputs=images,\n num_classes=cfg.NUM_CLASSES,\n is_training=False\n )\n\n class_scores = end_points['Predictions']\n if class_names == None:\n class_names = tf.range(class_scores.get_shape().as_list()[1])\n predicted_classes = 
tf.tile(tf.expand_dims(class_names, 0), [tf.shape(class_scores)[0], 1], name=class_names_node_name)\n\n\n # GVH: I would like to use tf.identity here, but the function tensorflow.python.framework.graph_util.remove_training_nodes\n # called in (optimize_for_inference_lib.optimize_for_inference) removes the identity function.\n # Sticking with an add 0 operation for now.\n # We are doing this so that we can rename the output to `output_node_name` (i.e. something consistent)\n output_node = tf.add(end_points['Predictions'], 0., name=output_node_name)\n output_node_name = output_node.op.name\n\n if 'MOVING_AVERAGE_DECAY' in cfg and cfg.MOVING_AVERAGE_DECAY > 0:\n variable_averages = tf.train.ExponentialMovingAverage(\n cfg.MOVING_AVERAGE_DECAY, global_step)\n variables_to_restore = variable_averages.variables_to_restore(\n slim.get_model_variables())\n else:\n variables_to_restore = slim.get_variables_to_restore()\n\n saver = tf.train.Saver(variables_to_restore, reshape=True)\n\n if os.path.isdir(checkpoint_path):\n checkpoint_dir = checkpoint_path\n checkpoint_path = tf.train.latest_checkpoint(checkpoint_dir)\n\n if checkpoint_path is None:\n raise ValueError(\"Unable to find a model checkpoint in the \" \\\n \"directory %s\" % (checkpoint_dir,))\n\n tf.logging.info('Exporting model: %s' % checkpoint_path)\n\n sess_config = tf.ConfigProto(\n log_device_placement=cfg.SESSION_CONFIG.LOG_DEVICE_PLACEMENT,\n allow_soft_placement = True,\n gpu_options = tf.GPUOptions(\n per_process_gpu_memory_fraction=cfg.SESSION_CONFIG.PER_PROCESS_GPU_MEMORY_FRACTION\n )\n )\n sess = tf.Session(graph=graph, config=sess_config)\n\n if export_for_serving:\n\n with tf.Session(graph=graph) as sess:\n\n tf.global_variables_initializer().run()\n\n saver.restore(sess, checkpoint_path)\n\n save_path = os.path.join(export_dir, \"%d\" % (export_version,))\n\n builder = saved_model_builder.SavedModelBuilder(save_path)\n\n # Build the signature_def_map.\n signature_def_map={}\n classes_output_tensor_info = utils.build_tensor_info(predicted_classes)\n scores_output_tensor_info = utils.build_tensor_info(class_scores)\n\n # image bytes input\n if add_preprocess_step:\n image_bytes_tensor_info = utils.build_tensor_info(image_bytes_placeholder)\n image_bytes_prediction_signature = signature_def_utils.build_signature_def(\n inputs={'images': image_bytes_tensor_info},\n outputs={\n 'classes': classes_output_tensor_info,\n 'scores': scores_output_tensor_info\n },\n method_name=signature_constants.PREDICT_METHOD_NAME\n )\n signature_def_map['predict_image_bytes']=image_bytes_prediction_signature\n\n # image array input\n image_array_tensor_info = utils.build_tensor_info(input_placeholder)\n image_array_prediction_signature = signature_def_utils.build_signature_def(\n inputs={'images': image_array_tensor_info},\n outputs={\n 'classes': classes_output_tensor_info,\n 'scores': scores_output_tensor_info\n },\n method_name=signature_constants.PREDICT_METHOD_NAME\n )\n signature_def_map['predict_image_array']=image_array_prediction_signature\n signature_def_map[signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY]=image_array_prediction_signature\n\n legacy_init_op = tf.group(\n tf.tables_initializer(), name='legacy_init_op')\n\n builder.add_meta_graph_and_variables(\n sess, [tag_constants.SERVING],\n signature_def_map=signature_def_map,\n legacy_init_op=legacy_init_op\n )\n\n builder.save()\n\n print(\"Saved optimized model for TensorFlow Serving.\")\n\n\n else:\n with sess.as_default():\n\n tf.global_variables_initializer().run()\n\n 
saver.restore(sess, checkpoint_path)\n\n input_graph_def = graph.as_graph_def()\n input_node_names= [array_input_node_name]\n if add_preprocess_step:\n input_node_names.append(bytes_input_node_name)\n output_node_names = [output_node_name, class_names_node_name]\n\n constant_graph_def = graph_util.convert_variables_to_constants(\n sess=sess,\n input_graph_def=input_graph_def,\n output_node_names=output_node_names,\n variable_names_whitelist=None,\n variable_names_blacklist=None\n )\n\n if add_preprocess_step:\n optimized_graph_def = constant_graph_def\n else:\n optimized_graph_def = optimize_for_inference_lib.optimize_for_inference(\n input_graph_def=constant_graph_def,\n input_node_names=input_node_names,\n output_node_names=output_node_names,\n placeholder_type_enum=dtypes.float32.as_datatype_enum\n )\n\n save_dir = os.path.join(export_dir, str(export_version))\n if not os.path.exists(save_dir):\n print(\"Making version directory in export directory: %s\" % (save_dir,))\n os.makedirs(save_dir)\n save_path = os.path.join(save_dir, 'optimized_model.pb')\n with open(save_path, 'w') as f:\n f.write(optimized_graph_def.SerializeToString())\n\n print(\"Saved optimized model for mobile devices at: %s.\" % (save_path,))\n print(\"Input node names: %s\" % (input_node_names,))\n print(\"Output node name: %s\" % (output_node_names,))", "title": "" }, { "docid": "8cf6f8f2a4cd9623fa5a50bde7aa5838", "score": "0.5383648", "text": "def extract(img_path, out_path):\n img = open_img(img_path)\n base_path = build_base_path(out_path, img_path)\n for band, plane_bit, plane in iter_planes(img):\n data = np.packbits(plane.ravel())\n file_name = '%s%s_plane_%d' % (base_path, band, plane_bit)\n with open(file_name, 'wb') as fp:\n log.info('Saving output data to %s' % file_name)\n fp.write(data.tobytes())", "title": "" }, { "docid": "49fca7262d6157c26f59b2fc0357f4b8", "score": "0.5377908", "text": "def test_parse_and_export_png_file(self):\n summary_dir = tempfile.mkdtemp(dir=self.base_summary_dir)\n image_dir = os.path.join(summary_dir, 'image')\n os.makedirs(image_dir, mode=0o700)\n test_file_name = '%s/%s.%s.%s' % (summary_dir, 'image', 'summary', str(time.time()))\n expect_names = TestSummaryParser.prepare_image_summary_file(test_file_name)\n event_parse = EventParser(test_file_name, summary_dir)\n event_parse.parse()\n result = sorted(os.listdir(image_dir))\n shutil.rmtree(summary_dir)\n assert result == expect_names", "title": "" }, { "docid": "9b2dd832fa23e4f42e7a79fd413616fc", "score": "0.5377851", "text": "def _writeIMG(self, fname):\n #1-Definir malla nx,ny,nz)\n #2-Dar valor 0 a todo.\n #2-para cada círculo ver en que celdas cae.\n #3-dar valor 1 a esas celdas \n for circ in self._circles:\n\n center = (circ['x'] + self.xoffset, circ['y'], circ['z'])\n r = circ['r']\n mesh.add_circle(center, r, size)", "title": "" }, { "docid": "66e80363405c1b3f625ac6fee29954fd", "score": "0.5376716", "text": "def composite_imgs(bk_wd, ov_wd, out_wd, alpha=0.3):\n \n im_names = sorted([os.path.basename(x) for x in glob(\"%s/*.png\" %(bk_wd))])\n for i in im_names:\n im = \"%s/%s\"%(ov_wd,i)\n bim = \"%s/%s\"%(bk_wd,i)\n out = \"%s/%s\"%(out_wd, i)\n composite_im(im, bim, out, alpha=alpha)", "title": "" } ]
ac0f065a765912fa47ff960bcb4606e6
r""" Converts a list of connections into a Scipy sparse adjacency matrix
[ { "docid": "0cc0fe08a43fbbda265fbb121d33cbd4", "score": "0.67700714", "text": "def conns_to_am(conns, shape=None, force_triu=True, drop_diag=True,\n drop_dupes=True, drop_negs=True):\n if force_triu: # Sort connections to [low, high]\n conns = np.sort(conns, axis=1)\n if drop_negs: # Remove connections to -1\n keep = ~np.any(conns < 0, axis=1)\n conns = conns[keep]\n if drop_diag: # Remove connections of [self, self]\n keep = np.where(conns[:, 0] != conns[:, 1])[0]\n conns = conns[keep]\n # Now convert to actual sparse array in COO format\n data = np.ones_like(conns[:, 0], dtype=int)\n if shape is None:\n N = conns.max() + 1\n shape = (N, N)\n am = sprs.coo_matrix((data, (conns[:, 0], conns[:, 1])), shape=shape)\n if drop_dupes: # Convert to csr and back too coo\n am = am.tocsr()\n am = am.tocoo()\n # Perform one last check on adjacency matrix\n missing = np.where(np.bincount(conns.flatten()) == 0)[0]\n if np.size(missing) or np.any(am.col.max() < (shape[0] - 1)):\n print('Some nodes are not connected to any bonds')\n return am", "title": "" } ]
[ { "docid": "22712fc23bf9be8e59110fc71423b200", "score": "0.6723433", "text": "def retrieve_adjacency_matrix(cgpms, v_to_c):\n adjacency_list = retrieve_adjacency_list(cgpms, v_to_c)\n adjacency_matrix = np.zeros((len(adjacency_list), len(adjacency_list)))\n for i in adjacency_list:\n adjacency_matrix[i, adjacency_list[i]] = 1\n return adjacency_matrix.T", "title": "" }, { "docid": "e0e47ec7a429f14d5f40c50dc1114ea9", "score": "0.66708577", "text": "def make_adjacency(H, W):\n A = init_sparse_adjacency(H, W)\n src, dst, rng = make_conn(H, W)\n A[src, rng] = -1\n A[dst, rng] = +1\n return A.tocsr()", "title": "" }, { "docid": "1a22f4291aaae38a2c53d7e38eda14a9", "score": "0.65344644", "text": "def adjacency_dense(graph):\n sparse_adj_matrix = nx.adjacency_matrix(graph)\n return sparse_adj_matrix.toarray()", "title": "" }, { "docid": "e330bf8ef45d94a101d8c11872a91273", "score": "0.6530359", "text": "def init_sparse_adjacency(H, W):\n m, n = get_adjacency_size(H, W)\n A = scipy.sparse.lil_matrix((m, n))\n return A", "title": "" }, { "docid": "efd5c9f2b56152622eba68520f8e5e1e", "score": "0.6416737", "text": "def get_adjacency_matrices(data):\n num_rels = data.num_rels\n num_nodes = data.num_nodes\n\n A = []\n source_nodes = data.edge_index[0].numpy()\n target_nodes = data.edge_index[1].numpy()\n\n # Get edges for given (relation) edge type and construct adjacency matrix\n for i in range(num_rels):\n indices = np.argwhere(np.asarray(data.edge_type) == i).squeeze(axis=1)\n r_source_nodes = source_nodes[indices]\n r_target_nodes = target_nodes[indices]\n a = sp.csr_matrix((np.ones(len(indices)), (r_source_nodes, r_target_nodes)), shape=(num_nodes, num_nodes))\n A.append(a)\n\n return A", "title": "" }, { "docid": "24f13460ba0ba08aea3b943c981ae58c", "score": "0.6409477", "text": "def generate_adj_matrix(num_nodes, symmetric=True, sparsity=0.5, connected=True):\n A = torch.rand((num_nodes, num_nodes), dtype=torch.float)\n if symmetric:\n A = 0.5 * (A + A.t())\n A = (A < (1 - sparsity)).long() * (1 - torch.eye(num_nodes))\n if connected:\n start = torch.randperm(num_nodes)\n end = torch.cat([start[1:], start.new_full(size=[1], fill_value=start[0].item())])\n A[start, end] = 1\n A[end, start] = 1\n return A", "title": "" }, { "docid": "ffb4a668747c8ef33b1356f0a3ba43e6", "score": "0.64082646", "text": "def coo_adjacency_matrix(encoding, pnames_to_tracks, value=1.):\n entries = []\n p_index = []\n t_index = []\n for playlist_name in pnames_to_tracks:\n for tid in pnames_to_tracks[playlist_name]:\n t_index.append(tid)\n p_index.append(encoding[playlist_name])\n entries.append(value)\n\n matrix = sparse.coo_matrix(\n (entries, (t_index, p_index)),\n shape=[max(t_index) + 1, len(encoding)]\n )\n return matrix.tocsr()", "title": "" }, { "docid": "c7a34ef5b89e56ecc83a7e7b8093b79f", "score": "0.63385254", "text": "def adj_list_to_mat(adj_list):\n assert type(adj_list) == list, 'Adjacency list should be provided'\n\n adj_mat = np.zeros((len(adj_list), len(adj_list)), dtype=np.float64)\n for i in range(len(adj_list)):\n for j in adj_list[i]:\n adj_mat[i, j] = 1.0\n\n return adj_mat", "title": "" }, { "docid": "4fe09bb3006538bee6d60c0586c7cb51", "score": "0.63310343", "text": "def adjacency(dist, idx):\n M, k = dist.shape\n assert M, k == idx.shape\n assert dist.min() >= 0\n\n # Weights.\n sigma2 = np.mean(dist[:, -1])**2\n dist = np.exp(- dist**2 / sigma2)\n\n # Weight matrix.\n I = np.arange(0, M).repeat(k)\n J = idx.reshape(M*k)\n V = dist.reshape(M*k)\n W = scipy.sparse.coo_matrix((V, (I, J)), shape=(M, 
M))\n\n # No self-connections.\n #W.setdiag(0)\n # Self connections\n W.setdiag(1)\n\n # Non-directed graph.\n bigger = W.T > W\n W = W - W.multiply(bigger) + W.T.multiply(bigger)\n\n #assert W.nnz % 2 == 0\n assert np.abs(W - W.T).mean() < 1e-10\n assert type(W) is scipy.sparse.csr.csr_matrix\n return W", "title": "" }, { "docid": "1809b38291f5246ff8c0023f4f0e764b", "score": "0.6304205", "text": "def IsSymmetric(mat, g):\n # looping on each vertex to assign the edges == 1\n for vertex in g.graph_d: \n if isinstance(g.graph_d[vertex], int):\n mat[vertex, g.graph_d[vertex]] = 1\n else:\n for target in g.graph_d[vertex]:\n mat[vertex, target] = 1\n \n rows, cols = mat.nonzero() # get only the non zero elements from the sparse matrix \n return rows, cols", "title": "" }, { "docid": "f106074089cf817865b92c097206ad00", "score": "0.6263677", "text": "def fill_adjacency_matrix(self):\n pass", "title": "" }, { "docid": "4034ca096986f8a2726ef8ba18e746a1", "score": "0.6262828", "text": "def get_sparse_adjacency(G):\n G_str = repr(G)\n if is_networkx_str(G_str):\n import networkx as nx # pylint: disable=import-outside-toplevel\n return nx.to_scipy_sparse_array(G, dtype=np.float64)\n elif is_fastgraph_str(G_str):\n return G.to_coo()\n elif is_graphtool_str(G_str): # pragma: gt no cover\n from graph_tool.spectral import adjacency # pylint: disable=import-outside-toplevel # type: ignore\n return adjacency(G).T\n else:\n raise NotImplementedError()", "title": "" }, { "docid": "5aece1ec83f0e96cbd622c90dbd6e245", "score": "0.62486815", "text": "def affinity_matrix(graph):\n\n A = np.asarray(nx.adjacency_matrix(graph).todense())\n\n return A", "title": "" }, { "docid": "93427343ee0de7a8c2b205949fc4c2f9", "score": "0.6242046", "text": "def __build_adjacency_matrix(self):\n cdr_bipartite_matrix = convert_dataframe_to_matrix(self.cdr_dataframe, row='CellID', column='countrycode')\n adjacency_matrix = cdr_bipartite_matrix.as_matrix()\n non_zero_sms_countries = list(np.where(adjacency_matrix.sum(axis=0) != 0)[0])\n adjacency_matrix = adjacency_matrix[:, non_zero_sms_countries]\n cell_index = cdr_bipartite_matrix.index.values.tolist()\n country_index = [ind for i, ind in enumerate(cdr_bipartite_matrix.columns.values.tolist())\n if i in non_zero_sms_countries]\n return adjacency_matrix, cell_index, country_index", "title": "" }, { "docid": "226fdfe7dc181e1b236b3c27cf9aac82", "score": "0.62112325", "text": "def adjacency_ndarray(graph):\n # TODO: doesn't work when nodes are not labeled by 0-indexed integers.\n num_nodes = graph.number_of_nodes()\n adj_mat = np.zeros((num_nodes, num_nodes))\n for (u, v) in graph.edges:\n adj_mat[u][v] = 1\n adj_mat[v][u] = 1\n\n return adj_mat", "title": "" }, { "docid": "7815e48d646e379a2688fdcb25aef8f0", "score": "0.62069607", "text": "def conn_gen(n_atom, conn_list):\n mat = np.ones((n_atom, n_atom), dtype=float) * -1.0\n for item in conn_list:\n if len(item) == 0:\n continue\n mat[item[0], item[1]] = item[2]\n mat[item[1], item[0]] = item[2] # This matrix should be symmetric\n return mat", "title": "" }, { "docid": "f332d4eb875efb726b837fb54fbbea64", "score": "0.61776817", "text": "def get_adj_list(self, spas_nbrlist):\n spas_list = [x for x in spas_nbrlist.keys()]\n adjacency = pd.DataFrame(0, index=spas_list, columns=spas_list)\n\n for spa in spas_list:\n for nbr in spas_nbrlist[str(spa)]:\n adjacency.at[spa, nbr] = 1\n\n return adjacency", "title": "" }, { "docid": "a18a552457c8e002a880155124388672", "score": "0.6175989", "text": "def 
get_synth_graph_adjacency(small=False):\n\n # first define only the upper triangular matrix of adjacency\n A = np.array([\n [0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0],\n [0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n ], dtype=np.float32)\n if small:\n idxs = [0, 1, 4, 5, 8, 10]\n A = A[idxs][:, idxs]\n\n # finally, make the matrix symmetric\n A += A.T\n\n return A", "title": "" }, { "docid": "0cfc8fa732621d78eccc2752608850f0", "score": "0.61256427", "text": "def adjacency_mat(graph):\n E = graph_to_edges(graph)\n\n V = []\n for (v1, v2) in E:\n if v1 not in V:\n V.append(v1)\n if v2 not in V:\n V.append(v2)\n\n A = [[0]*len(V)]*len(V)\n\n # for i, v1 in enumerate(V):\n # for j, v2 in enumerate(V):\n # if (v1, v2) in E:\n # print(i, j, v1, v2)\n # A[i][j] = 1\n\n for v1 in V:\n l = V.index(v1)\n for v2 in V:\n c = V.index(v2)\n print((v1, v2) in E, l, c, v1, v2)\n if (v1, v2) in E:\n A[l][c] = 1\n A[c][l] = 1\n\n\n print(A[l][c])\n\n\n\n\n # for v in set()\n print(V)\n print(E)\n print(A)\n\n return A", "title": "" }, { "docid": "af364fa0d2749eeb164553b492e95951", "score": "0.6107943", "text": "def adj_matrix(nodes: list) -> dict:\n adj_matrix = {}\n try:\n for n in nodes:\n adj_matrix[n.name] = {}\n for c in n.neighbor:\n if c in adj_matrix[n.name]:\n adj_matrix[n.name][c] += 1\n else:\n adj_matrix[n.name][c] = 1\n except Exception as e:\n print(f\"[INFO] {e}\")\n finally:\n return adj_matrix", "title": "" }, { "docid": "cceaea42a6060a7d09df3012f3214a85", "score": "0.608379", "text": "def adjacency(edge_pairs, select_position, idx_dict, sigma2=1.0, directed=False):\n\n num_rows, num_cols = select_position.shape\n N, k = edge_pairs.shape\n row_list = []\n col_list = []\n dist_list = []\n for i in range(N):\n node_i = idx_dict[i]\n i_row, i_col = node_i // num_cols, node_i % num_cols\n for j in edge_pairs[i]:\n if j == -1:\n continue\n row_list.append(i)\n col_list.append(j)\n node_j = idx_dict[j]\n j_row, j_col = node_j // num_cols, node_j % num_cols\n dist_i_j = 1.0 - np.abs(select_position[i_row, i_col] - select_position[j_row, j_col]) / \\\n max(select_position[i_row, i_col], select_position[j_row, j_col])\n\n # dist_i_j = 1.0\n dist_list.append(dist_i_j)\n\n W = scipy.sparse.coo_matrix((dist_list, (row_list, col_list)), shape=(N, N))\n\n # No self-connections.\n W.setdiag(0)\n\n if not directed:\n # Non-directed graph.\n bigger = W.T > W\n W = W - W.multiply(bigger) + W.T.multiply(bigger)\n assert W.nnz % 2 == 0\n assert np.abs(W - W.T).mean() < 1e-10\n\n # assert type(W) is scipy.sparse.csr.csr_matrix\n return W", "title": "" }, { "docid": "14e9f2e32f67cdb90a414b24a38a1597", "score": "0.6062279", "text": "def makeSparseDM(X, thresh):\n N = X.shape[0]\n D = pairwise_distances(X, metric=\"euclidean\")\n [I, J] = np.meshgrid(np.arange(N), np.arange(N))\n I = I[D <= thresh]\n J = J[D <= thresh]\n V = D[D <= thresh]\n return sparse.coo_matrix((V, (I, J)), shape=(N, N)).tocsr()", "title": "" }, { "docid": "0f4c16c6b9a73fa619a856eca2825df5", "score": "0.59916294", "text": "def make_coo(make_pairs):\n coords = list(zip(*(pair\n for idx, (node1, node2) in enumerate(graph.edges())\n for pair in make_pairs(idx, 
node1, node2))))\n data = np.ones(2*graph.size())\n return sparse.coo_matrix((data, coords),\n shape=(numnodes, 2*numedges))", "title": "" }, { "docid": "c758b2e4bd214348e06d6e5387dda004", "score": "0.59793794", "text": "def fill_adjacency_matrix(self):\n self.adjacencyMatrix = np.zeros((2*(2**self.n - 1), 2*(2**self.n - 1)))\n for i in range(2**(self.n-1)-1):\n # First we must find i's layer, which is layerNum\n layer_num = np.floor(np.log2(i+1))\n # Then we find where the next layer's nodes start off in numbering\n next_layer = 2**(layer_num + 1)-1\n # We then set the children of i to be adjacent to i\n self.adjacencyMatrix[i, int(next_layer + 2*(i-(2**layer_num - 1)))] = 1\n self.adjacencyMatrix[int(next_layer + 2*(i-(2**layer_num - 1))), i] = 1\n self.adjacencyMatrix[i, int(next_layer + 2*(i-(2**layer_num - 1)) + 1)] = 1\n self.adjacencyMatrix[int(next_layer + 2*(i-(2**layer_num - 1)) + 1), i] = 1\n for i in range(2**(self.n-1)-1):\n # We are essentially doing the same thing as\n # the above loop, but inverting the nodes to get a diamond-shaped configuration\n # First we must find i's layer, which is layerNum\n layer_num = np.floor(np.log2(i+1))\n # Then we find where the next layer's nodes start off in numbering\n next_layer = 2**(layer_num + 1)-1\n # We then set the children of i to be adjacent to i\n self.adjacencyMatrix[2*2**self.n - 3 - i, 2*2**self.n - 3 - int(next_layer + 2*(i-(2**layer_num - 1)))] = 1\n self.adjacencyMatrix[2*2**self.n - 3 - int(next_layer + 2*(i-(2**layer_num - 1))), 2*2**self.n - 3 - i] = 1\n self.adjacencyMatrix[2*2**self.n - 3 - i, 2*2**self.n - 3 - int(next_layer + 2*(i-(2**layer_num - 1)) + 1)] = 1\n self.adjacencyMatrix[2*2**self.n - 3 - int(next_layer + 2*(i-(2**layer_num - 1)) + 1), 2*2**self.n - 3 - i] = 1\n # Next comes making the cycle that actually \"glues\" the trees together\n for i in range(2**(self.n-1)-1, 2**self.n-1):\n # fills out the ``glue'' in a specific way (think |x| repeated over and over again)\n if i % 2 == 1:\n print(i)\n self.adjacencyMatrix[i, 2**(self.n-1) + i] = 1\n self.adjacencyMatrix[i, 2**(self.n-1) + i + 1] = 1\n self.adjacencyMatrix[2**(self.n-1) + i, i] = 1\n self.adjacencyMatrix[2**(self.n-1) + i + 1, i] = 1\n else:\n self.adjacencyMatrix[i, 2**(self.n-1) + i] = 1\n self.adjacencyMatrix[i, 2**(self.n-1) + i - 1] = 1\n self.adjacencyMatrix[2**(self.n-1) + i - 1, i] = 1\n self.adjacencyMatrix[2**(self.n-1) + i, i] = 1\n\n j = 2**(self.n - 1) - 1", "title": "" }, { "docid": "69ba9aef6f500e018423d705c8f63a85", "score": "0.59755147", "text": "def dense_to_sparse(adj: Tensor) -> Tuple[Tensor, Tensor]:\n assert adj.dim() >= 2 and adj.dim() <= 3\n assert adj.size(-1) == adj.size(-2)\n\n edge_index = adj.nonzero().t()\n\n if edge_index.size(0) == 2:\n edge_attr = adj[edge_index[0], edge_index[1]]\n return edge_index, edge_attr\n else:\n edge_attr = adj[edge_index[0], edge_index[1], edge_index[2]]\n batch = edge_index[0] * adj.size(-1)\n row = batch + edge_index[1]\n col = batch + edge_index[2]\n return torch.stack([row, col], dim=0), edge_attr", "title": "" }, { "docid": "e5295eee0c2534d5c38468fb570b2b60", "score": "0.5966513", "text": "def preprocess_adj(adj):\r\n adj_normalized = normalize_adj(adj + sp.eye(adj.shape[0]))\r\n return sparse_to_tuple(adj_normalized)", "title": "" }, { "docid": "b09c2fd76992abfcdda7638c36f80a7a", "score": "0.5953484", "text": "def make_consensus_matrix(clusters):\n matrix = []\n for k in clusters:\n columns = []\n for x in clusters[k]:\n row = []\n for y in clusters[k]:\n if x == y:\n 
row.append(1)\n else:\n row.append(0)\n columns.append(row)\n matrix.append(columns)", "title": "" }, { "docid": "e43b4b3588585228329e0c09bddc86dc", "score": "0.5924514", "text": "def create_adj_mat(self):\n adj_mat = sp.dok_matrix(\n (self.n_users + self.n_items, self.n_users + self.n_items), dtype=np.float32\n )\n adj_mat = adj_mat.tolil()\n\n R = sp.dok_matrix((self.n_users, self.n_items), dtype=np.float32)\n user_np = np.array(self.ratings[DEFAULT_USER_COL])\n item_np = np.array(self.ratings[DEFAULT_ITEM_COL])\n for u in range(self.n_users):\n index = list(np.where(user_np == u)[0])\n i = item_np[index]\n for item in i:\n R[u, item] = 1\n R = R.tolil()\n adj_mat[: self.n_users, self.n_users :] = R\n adj_mat[self.n_users :, : self.n_users] = R.T\n adj_mat = adj_mat.todok()\n print(\"already create adjacency matrix\", adj_mat.shape)\n norm_adj_mat = normalized_adj_single(adj_mat + sp.eye(adj_mat.shape[0]))\n mean_adj_mat = normalized_adj_single(adj_mat)\n print(\"already normalize adjacency matrix\")\n return adj_mat.tocsr(), norm_adj_mat.tocsr(), mean_adj_mat.tocsr()", "title": "" }, { "docid": "0ae4cfb58de7bbf66c1661e5ae482fbb", "score": "0.5923926", "text": "def preprocess_adj(adj):\n adj_normalized = normalize_adj(adj + sp.eye(adj.shape[0]))\n return sparse_to_tuple(adj_normalized)", "title": "" }, { "docid": "0ae4cfb58de7bbf66c1661e5ae482fbb", "score": "0.5923926", "text": "def preprocess_adj(adj):\n adj_normalized = normalize_adj(adj + sp.eye(adj.shape[0]))\n return sparse_to_tuple(adj_normalized)", "title": "" }, { "docid": "0ae4cfb58de7bbf66c1661e5ae482fbb", "score": "0.5923926", "text": "def preprocess_adj(adj):\n adj_normalized = normalize_adj(adj + sp.eye(adj.shape[0]))\n return sparse_to_tuple(adj_normalized)", "title": "" }, { "docid": "0ae4cfb58de7bbf66c1661e5ae482fbb", "score": "0.5923926", "text": "def preprocess_adj(adj):\n adj_normalized = normalize_adj(adj + sp.eye(adj.shape[0]))\n return sparse_to_tuple(adj_normalized)", "title": "" }, { "docid": "76d68173c67d4eb4a0703e50d05c749b", "score": "0.5879074", "text": "def states2matrix(states, sparse=False):\n coords = states2edges(states)\n data = np.ones(len(coords))\n row, col = list(zip(*coords))\n row, col = np.array(row), np.array(col)\n N, M = max(row) + 1, max(col) + 1\n mat = coo_matrix((data, (row, col)), shape=(N, M))\n if sparse:\n return mat\n else:\n return mat.toarray()", "title": "" }, { "docid": "41e0de8ca943be8ff2558614f90655d5", "score": "0.58558464", "text": "def __repr__(self):\n edges_list = self.filter_edges_list[:]\n length = len(edges_list)\n final_edges_list = self.final_edges_list[:]\n nodes = {}\n for i,edge in enumerate(edges_list):\n nodes[edge[:4]] = i\n \n col = []\n row = []\n data = []\n for edge in final_edges_list:\n if edge[4] is not None:\n try:\n row.append(nodes[edge[4:8]])\n col.append(nodes[edge[:4]])\n data.append(edge[8])\n except KeyError:\n pass\n \n adj_matrix = csr_matrix((data,(row,col)), shape=(length,length))\n \n return adj_matrix, nodes", "title": "" }, { "docid": "41607fe3505d881a880fc3f42c76f820", "score": "0.5840698", "text": "def complete_graph_matrix(n):\n adj_mat = np.ones(shape=(n, n), dtype=int)\n np.fill_diagonal(adj_mat, 0) # in-place operation!\n return adj_mat", "title": "" }, { "docid": "e39e2dc011541450bc913134b463b142", "score": "0.58143264", "text": "def nxGraphToAdjmat(self):\n no_of_var = len(self.nx_graph.nodes)\n assert no_of_var > 0\n self.adjmat = np.zeros((no_of_var, no_of_var))\n np.fill_diagonal(self.adjmat, None)\n self.adjmat[self.adjmat == 0] = 
-1\n\n for (i, j) in self.nx_graph.edges:\n self.addDirectedEdge(i, j)", "title": "" }, { "docid": "61900e6cf72bbc774e3b620d617c1620", "score": "0.58098364", "text": "def sparsemat_from_flow(flow_df, return_ids=False):\n idx_i, ids = pd.factorize(flow_df.origin, sort=True)\n idx_j, _ = pd.factorize(flow_df.destination, sort=True)\n data = flow_df.flow\n n = len(ids)\n\n mat = sp.csr_matrix((data, (idx_i, idx_j)), shape=(n, n))\n\n return (mat, ids) if return_ids else mat", "title": "" }, { "docid": "10c568f3140d24271d72b05873979f43", "score": "0.5798798", "text": "def atom_connectivity_matrix(molecule):\n molecule = molecule.hydrogen_suppressed\n size = molecule.size\n matrix = [[0 for x in range(size)] for y in range(size)]\n d = {}\n for index, atom in enumerate(molecule.atoms):\n d[atom] = index\n for bond in molecule.bonds:\n atoms = list(bond)\n i, j = d[atoms[0]], d[atoms[1]]\n matrix[i][j], matrix[j][i] = bond.conventional_bond_order, bond.conventional_bond_order\n return Matrix(matrix)", "title": "" }, { "docid": "e18fadc3b03a6ef83a90e1a2c34304c3", "score": "0.5785609", "text": "def adjmat_2_graph(self, adjm):\n return {self.nodes[r]: [(self.nodes[c], w) for c, w in enumerate(row) if w != 0]\n for r, row in enumerate(adjm)}", "title": "" }, { "docid": "e2015e4483c2c9ba965ecd69bdf21eb5", "score": "0.57784104", "text": "def build_connect_mat(mol):\n\t\tinf = float('Infinity') # 'Not connected' is repr by infinity\n\t\tnewMat = [[inf for x in range(mol.size)] for y in range(mol.size)]\n\t\tfor i in range(mol.size):\n\t\t\tfor j in range(mol.size):\n\t\t\t\tval = mol.connectMat[i][j]\n\t\t\t\tval = inf if not val else 1\n\t\t\t\tnewMat[i][j] = val\n\t\treturn newMat", "title": "" }, { "docid": "df0d565c042cd337d5d43557b2ce1d3b", "score": "0.57744116", "text": "def _compute_adjacencies(self,\n nodes_gpr: List[core.Node],\n possible_direct_connect: List[bool],\n possible_cs_connect: List[List[List[bool]]],\n possible_cs_detour: List[List[bool]],\n possible_cs_link: List[List[List[bool]]]\n ) -> List[List[int]]:\n adjs = [[] for _ in nodes_gpr]\n # add direct connections and one-off detours\n for i in range(len(self._route)-1):\n if possible_direct_connect[i]:\n adjs[i].append(i+1)\n for j in range(len(self._route)+i*self.instance.n_cs,\n len(self._route)+(i+1)*self.instance.n_cs):\n i_cs = (j - len(self._route)) % self.instance.n_cs\n if possible_cs_detour[i][i_cs]:\n if possible_cs_connect[i][i_cs][0]:\n adjs[i].append(j)\n if possible_cs_connect[i][i_cs][1]:\n adjs[j].append(i+1)\n # add intra-cs links\n for i in range(len(self._route)-1):\n begin_idx = len(self._route)+i*self.instance.n_cs\n end_idx = len(self._route)+(i+1)*self.instance.n_cs\n for mid_idx1 in range(begin_idx, end_idx):\n i_cs1 = (mid_idx1 - len(self._route)) % self.instance.n_cs\n for mid_idx2 in range(begin_idx, end_idx):\n i_cs2 = (mid_idx2 - len(self._route)) % self.instance.n_cs\n if possible_cs_link[i][i_cs1][i_cs2]:\n adjs[mid_idx1].append(mid_idx2)\n\n return adjs", "title": "" }, { "docid": "f1866c92211af803b977880bbcdccd11", "score": "0.5756864", "text": "def _build_sparse_matrix(L):\n shape = L.shape\n i = torch.LongTensor(np.vstack((L.row, L.col)).astype(int))\n v = torch.FloatTensor(L.data)\n return torch.sparse.FloatTensor(i, v, torch.Size(shape))", "title": "" }, { "docid": "aad733afe9b6a2e6d8ce3d252b89af58", "score": "0.5751577", "text": "def get_adjacency_matrix(self):\n return list([list(row) for row in self.adjacency_matrix]) # return the adjacency matrix", "title": "" }, { "docid": 
"9e01b07cd72357f97d2923c62fcbcfbd", "score": "0.5751396", "text": "def get_adj_mat(graph):\n size = len(graph.nodes())\n matrix = np.zeros(shape=(size, size), dtype=np.float)\n\n for edge in graph.edges(data=True):\n matrix[edge[0] - 1][edge[1] - 1] = edge[2]['weight']\n matrix[edge[1] - 1][edge[0] - 1] = edge[2]['weight']\n\n return matrix", "title": "" }, { "docid": "b4c46711e1942707e74c45f902356c5b", "score": "0.5735724", "text": "def graph_from_adjacency_matrix(\n adjacency_matrix: Union[np.ndarray, List[List[int]]],\n atomicnums: Optional[Union[np.ndarray, List[int]]] = None,\n) -> nx.Graph:\n\n G = nx.Graph(adjacency_matrix)\n\n if atomicnums is not None:\n attributes = {idx: atomicnum for idx, atomicnum in enumerate(atomicnums)}\n nx.set_node_attributes(G, attributes, \"atomicnum\")\n\n return G", "title": "" }, { "docid": "39099017085677e6e5fcfe9bb11dc0e1", "score": "0.5724878", "text": "def represent_sparse(ascii_grid):\n ascii_grid = ascii_grid.strip()\n sparse_repr = []\n lines = ascii_grid.split(\"\\n\")\n for i in range(len(lines)):\n for j in range(len(lines[i])):\n if lines[i][j] == \"1\":\n sparse_repr.append((i, j))\n return sparse_repr", "title": "" }, { "docid": "1028ffd99694565cc2d144fd9b0d85c9", "score": "0.5724794", "text": "def __create_sparse_matrix(self, base):\r\n\r\n #Create sparse hadamard matrix\r\n base_matrix = lil_matrix(base.matrix)\r\n\r\n #Create full sparse identity matrix\r\n sparse_matrix = identity(self.size, format='lil')\r\n\r\n if self.n_I == 0:\r\n #\"Put\" dense hadamard matrix in sparse matrix\r\n target_states = 2**self.n_target\r\n sub_matrix_index = self.size-target_states\r\n sparse_matrix[sub_matrix_index: , sub_matrix_index: ] = base_matrix\r\n\r\n #Convert to csc format\r\n c_gate = csc_matrix(sparse_matrix)\r\n\r\n return c_gate\r\n else:\r\n # Extract bottom left corner of sparse matrix\r", "title": "" }, { "docid": "a77b2480d32e63e347de2bae49d4cdc1", "score": "0.5719115", "text": "def produce_adjacency_matrix(filename):\n file_obj = open(filename)\n adjacency_matrix = []\n\n for line in file_obj.readlines():\n num_list = []\n string_list = line.split()\n for number in string_list:\n num_list.append(int(number))\n\n adjacency_matrix.append(num_list)\n\n return parse_matrix(adjacency_matrix)", "title": "" }, { "docid": "306d807a43e0fdba9b108a6a189393e6", "score": "0.5715615", "text": "def _get_connectivity(atoms):\n cutoff = natural_cutoffs(atoms)\n neighborList = neighborlist.NeighborList(\n cutoff, self_interaction=False, bothways=True\n )\n neighborList.update(atoms)\n matrix = neighborlist.get_connectivity_matrix(neighborList.nl).toarray()\n return matrix", "title": "" }, { "docid": "19dea278257aa7bb55e5907e1739f4fd", "score": "0.5713727", "text": "def getSparseJacobian(jac):\n row, col = jac.shape\n nnz = []\n nnzrow = []\n nnzcol = []\n for i in range(row):\n for j in range(col):\n if jac[i, j] == 0:\n continue\n else:\n nnz.append(jac[i, j])\n nnzrow.append(i)\n nnzcol.append(j)\n print('we found %d nnz' % len(nnz))\n return sym.Matrix(nnz), sym.Matrix(nnzrow), sym.Matrix(nnzcol)", "title": "" }, { "docid": "978eccab0b146f842681df4658243b4f", "score": "0.57060313", "text": "def to_sparse_pd(m):\n return pd.SparseDataFrame([\n pd.SparseSeries(m[i].toarray().ravel()) for i in np.arange(m.shape[0])\n ])", "title": "" }, { "docid": "a10f03cb2862df0096c50d65d259a1f9", "score": "0.5705681", "text": "def adj_matrix(vertices):\n n = vertices.shape[0]\n adjMat = np.zeros((n, n))\n for i in range(n):\n adjMat[i, :] = np.linalg.norm(vertices 
- vertices[i], axis=1)\n adjMat = csr_matrix(adjMat)\n return adjMat", "title": "" }, { "docid": "67a27153e195ad7ca3a7fbb696604c01", "score": "0.57054406", "text": "def listdict_to_listlist_and_matrix(sparse):\n V = range(len(sparse))\n graph = [[] for _ in V]\n weight = [[None for v in V] for u in V]\n for u in V:\n for v in sparse[u]:\n graph[u].append(v)\n weight[u][v] = sparse[u][v]\n return graph, weight", "title": "" }, { "docid": "ee0426fc32e287c22063c9095ca5de49", "score": "0.57054204", "text": "def assemble_stiffness_matrix(N, Ak):\n\n # Some convenient definitions\n L = N - 1\n cols = [] ; rows = [] ; values = []\n\n # Loop through each interior node - be careful of the boundary!\n for i in range(L):\n for j in range(L):\n\n # Compute the index k\n k = i*L + j\n\n # Add the non zero entries to row k of A - checking that the nodes\n # don't lie on the boundary!\n rows.append(k); cols.append(k); values.append(2*(Ak[0,0] + Ak[1,1] + Ak[2,2]))\n\n if (j + 1) <= (L - 1):\n rows.append(k); cols.append(i*L+(j+1)); values.append(Ak[0, 1] + Ak[1, 0])\n\n if (j - 1) >= 0:\n rows.append(k); cols.append(i*L + (j-1)); values.append(Ak[1, 0] + Ak[0, 1])\n\n if i - 1 >= 0:\n rows.append(k); cols.append((i-1)*L + j); values.append(Ak[0, 2] + Ak[2, 0])\n\n if i + 1 <= (L - 1):\n rows.append(k); cols.append((i+1)*L + j); values.append(Ak[2, 0] + Ak[0, 2])\n\n if j + 1 < (L - 1) and i - 1 >= 0:\n rows.append(k); cols.append((i-1)*L + (j+1)); values.append(Ak[1, 2] + Ak[2, 1])\n\n if j - 1 >= 0 and i + 1 <= (L - 1):\n rows.append(k); cols.append((i+1)*L + (j-1)); values.append(Ak[2, 1] + Ak[1, 2])\n\n # Construct A in sparse format\n A = coo_matrix((values, (rows, cols)), shape=(L**2, L**2)).tocsr()\n\n return A", "title": "" }, { "docid": "3332bf0548694635fdc38caf49eb7d09", "score": "0.56987745", "text": "def graph_matrix(edges, directed=True, order=-1):\n edges = np.asarray(edges)\n if order < 0:\n order = edges.max() + 1\n\n shape = (order, order)\n graph = np.zeros(shape, dtype=int)\n indices = tuple(edges.T)\n graph[indices] = 1\n if not directed:\n graph[indices[::-1]] = 1\n\n return graph", "title": "" }, { "docid": "2e9060b23046b73d87db066bbd68c3bd", "score": "0.56963277", "text": "def normalize_adj_matrices_for_graphs(graphs):\n\n adjacency_matrices = get_padded_adjacency(graphs)\n\n # add self_loops\n for i in range(adjacency_matrices.shape[0]):\n adjacency_matrices[i] = np.add(adjacency_matrices[i],\n np.identity(adjacency_matrices.shape[2]))\n\n D_diagonal = adjacency_matrices.sum(axis=1)\n D_diagonal = np.array(list(map(f, D_diagonal)))\n D = np.zeros(adjacency_matrices.shape)\n\n for i in range(adjacency_matrices.shape[0]):\n np.fill_diagonal(D[i], D_diagonal[i])\n\n A = np.matmul(adjacency_matrices, D)\n A = np.matmul(D, A)\n\n return A", "title": "" }, { "docid": "565f7f8acf765b4a783ae4182254191f", "score": "0.5694252", "text": "def adjacency_matrix_(self):\n return self._adjacency_matrix", "title": "" }, { "docid": "4b0793fdaf1851d13d35df35c64809ca", "score": "0.56803334", "text": "def generateAdjacencyMatrix(self, edges, numVertices):\n matrix = [[0 for j in range(numVertices)] for i in range(numVertices)]\n for edge in edges:\n value = edge[WEIGHT] if self.weighted else 1\n self.setMatrixValue(matrix, edge[FROM_VERTEX], edge[TO_VERTEX], value)\n if not self.directed:\n self.setMatrixValue(matrix, edge[TO_VERTEX], edge[FROM_VERTEX], value)\n return matrix", "title": "" }, { "docid": "2d3ed5363f507131c96b76f83ff2ab52", "score": "0.56703377", "text": "def 
make_adj_directed_tri_indices(elements: np.ndarray, num_sites: int) -> sp.csc_array:\n t0 = elements[:, 0]\n t1 = elements[:, 1]\n t2 = elements[:, 2]\n i = np.column_stack([t0, t1, t2]).ravel()\n j = np.column_stack([t1, t2, t0]).ravel()\n # store triangle index + 1 (zero means no edge connecting i and j)\n data = np.repeat(np.arange(1, elements.shape[0] + 1), 3)\n return sp.csc_array((data, (i, j)), shape=(num_sites, num_sites))", "title": "" }, { "docid": "115e34339b392b2dde0b624111a62e26", "score": "0.56656384", "text": "def get_sparse_rate_matrix_info(cursor):\n\n # get a set of all states\n cursor.execute(\n 'select state from distn '\n 'union '\n 'select source from rates '\n 'union '\n 'select sink from rates '\n 'union '\n 'select state from states '\n )\n states = set(t[0] for t in cursor)\n nstates = len(states)\n\n # get the sparse equilibrium distribution\n cursor.execute('select state, prob from distn')\n distn = dict(cursor)\n\n # construct the rate matrix as a networkx weighted directed graph\n dg = nx.DiGraph()\n cursor.execute('select source, sink, rate from rates')\n for a, b, weight in cursor:\n dg.add_edge(a, b, weight=weight)\n\n # assert that the distribution has the right form\n if not all(0 <= p <= 1 for p in distn.values()):\n raise Exception(\n 'equilibrium probabilities '\n 'should be in the interval [0, 1]')\n if not np.allclose(sum(distn.values()), 1):\n raise Exception('equilibrium probabilities should sum to 1')\n\n # assert that rates are not negative\n if any(data['weight'] < 0 for a, b, data in dg.edges(data=True)):\n raise Exception('rates should be non-negative')\n\n # assert detailed balance\n for a, b in permutations(states, 2):\n if b in dg[a] and a in dg[b]:\n if not np.allclose(\n distn[a] * dg[a][b]['weight'],\n distn[b] * dg[b][a]['weight'],\n ):\n raise Exception('expected detailed balance')\n\n # return the eq distn and the rate graph\n return distn, dg", "title": "" }, { "docid": "753d36dca6044c791debb29099699125", "score": "0.56580085", "text": "def get_swc_matrix(neuron):\n loc = neuron.location\n A = np.zeros([loc.shape[1], 7])\n A[:, 0] = np.arange(loc.shape[1])\n A[:, 1] = neuron.nodes_type\n A[:, 2:5] = loc.T\n A[:, 5] = neuron.diameter\n A[:, 6] = neuron.parent_index + 1\n A[0, 6] = -1\n return A", "title": "" }, { "docid": "d41e0a09318ca544d3a56b7f0852d917", "score": "0.565537", "text": "def _dense_to_full_sparse(matrix):\n # TODO: Allow methods to hard-code Jacobian/Hessian sparsity structure\n # in the case it is known a priori.\n # TODO: Decompose matrices to infer maximum fill-in sparsity structure.\n nrow, ncol = matrix.shape\n row = []\n col = []\n data = []\n for i, j in itertools.product(range(nrow), range(ncol)):\n row.append(i)\n col.append(j)\n data.append(matrix[i, j])\n row = np.array(row)\n col = np.array(col)\n data = np.array(data)\n return sps.coo_matrix((data, (row, col)), shape=(nrow, ncol))", "title": "" }, { "docid": "248c354f931479f40e8999fb8cd3b78f", "score": "0.5648065", "text": "def _get_connections(position_features):\n adjacency = {}\n for fname in _get_feature_names(position_features):\n # Create an empty adjacenct list for feature f\n adjacency[fname] = []\n for pf in position_features:\n # If f is at a position\n if fname in pf:\n # add those position features to f's adjacency list\n adjacency[fname].extend(pf)\n # remove f itself from f's adjacency list\n adjacency[fname].remove(fname)\n # remove duplicates\n adjacency[fname] = set(adjacency[fname])\n _check_enough_groups(adjacency)\n return adjacency", 
"title": "" }, { "docid": "e9051861090be440c4f91c183f1bbbaf", "score": "0.56431985", "text": "def createAdjMatrix(self, uniquePoints, numSegments, numTrapezoids):\n for point in uniquePoints:\n if point.name[0] == 'P':\n self.allPNames.append(int(point.name[1:]))\n else:\n self.allQNames.append(int(point.name[1:]))\n self.totSegments = numSegments\n self.totTrapezoids = numTrapezoids\n n = len(uniquePoints) + numSegments + numTrapezoids + 2\n self.adjMatrix = [[0]*n for i in range(n)]\n\n self.headerForAdjMatrix(0, ' ')\n self.headerForAdjMatrix(len(self.adjMatrix) - 1, 'Sum')\n self.fillAdjMatrix()\n\n adjFileName = \"adjMatrixOutput.txt\"\n outFile = open(adjFileName, \"w\")\n for arr in self.adjMatrix:\n for col, elem in enumerate(arr):\n elem = str(elem)\n if len(elem) == 1:\n elem = ' ' + elem + ' '\n if len(elem) == 2:\n elem = ' ' + elem\n\n outFile.write(elem + ' ')\n\n outFile.write('\\n')\n outFile.close\n\n print 'Adjacency matrix saved in file:', adjFileName", "title": "" }, { "docid": "33849c784fcc9d3d3b4502c3a0651de9", "score": "0.5639506", "text": "def _doktocsr(dok):\n row, JA, A = [list(i) for i in zip(*dok.row_list())]\n IA = [0]*((row[0] if row else 0) + 1)\n for i, r in enumerate(row):\n IA.extend([i]*(r - row[i - 1])) # if i = 0 nothing is extended\n IA.extend([len(A)]*(dok.rows - len(IA) + 1))\n shape = [dok.rows, dok.cols]\n return [A, JA, IA, shape]", "title": "" }, { "docid": "6e75769f6758b0be60811d944f7780e0", "score": "0.5633699", "text": "def compute_sparse_dense(attrs, inputs, out_type):\n return [topi.nn.sparse_dense(inputs[0], inputs[1], inputs[2], inputs[3], attrs[\"sparse_lhs\"])]", "title": "" }, { "docid": "0a0dd29e2fb5a9e3f88bb8484fe25574", "score": "0.5626392", "text": "def get_igraph_from_adjacency(adjacency, directed=None):\n import igraph as ig\n sources, targets = adjacency.nonzero()\n weights = adjacency[sources, targets]\n if isinstance(weights, np.matrix):\n weights = weights.A1\n g = ig.Graph(directed=directed)\n g.add_vertices(adjacency.shape[0]) # this adds adjacency.shap[0] vertices\n g.add_edges(list(zip(sources, targets)))\n try:\n g.es['weight'] = weights\n except:\n pass\n if g.vcount() != adjacency.shape[0]:\n logg.warn('The constructed graph has only {} nodes. 
'\n 'Your adjacency matrix contained redundant nodes.'\n .format(g.vcount()))\n return g", "title": "" }, { "docid": "342a1ad4e93ecbfc739b0e7172b2953d", "score": "0.5625886", "text": "def graph_2_mat(self, graph):\n g = defaultdict(int, {n: defaultdict(int, {k: w for k, w in vert}) for n, vert in graph.items()})\n return [[g[r][c] for c in self.nodes] for r in self.nodes]", "title": "" }, { "docid": "46ebf04b2fe917f0138212ad6326baea", "score": "0.56243527", "text": "def preprocess_adj(adj):\n if adj.shape[0]==adj.shape[1]:\n adj_normalized = normalize_adj(adj + sp.eye(adj.shape[0]))\n return sparse_to_tuple(adj_normalized)\n else:\n return sparse_to_tuple(adj)", "title": "" }, { "docid": "71045e4ece7e371cdc81fdc69a1aed70", "score": "0.5620164", "text": "def undirected_graph_2_adjacency_matrix(graph, edge_weights=None, non_edge_value=0, sparse=True):\n if edge_weights is None:\n edge_weights = np.ones((graph.num_edges(),), np.float64)\n\n num_v = graph.num_vertices()\n sources, targets = graph.edge_list()\n\n if sparse:\n try:\n from scipy.sparse import csr_matrix\n except:\n raise RuntimeError(\"scipy required to create a sparse matrix.\")\n\n if non_edge_value != 0:\n raise ValueError(\"'non_edge_value' must be equal to 0 is 'sparse' is True: Scipy sparse matrix dor not \"\n \"support custom default value.\")\n\n A = csr_matrix((edge_weights, (sources, targets)), shape=(num_v, num_v), dtype=edge_weights.dtype)\n A += A.T\n\n else:\n A = np.empty((num_v, num_v), dtype=edge_weights.dtype)\n A.fill(non_edge_value)\n A[sources, targets] = edge_weights\n A[targets, sources] = edge_weights\n\n return A", "title": "" }, { "docid": "e4a04001fd0a8420ef6ccf24c967eb6f", "score": "0.559816", "text": "def make_sparse(A, m=None, n=None):\n if n is None: \n if m is None: # Calculate dimensions if none specified.\n m = len(A) \n n = len(A[0]) \n n = m # Assume square if only one dimension is specified.\n \n val = []\n col = []\n rowStart = []\n rs = True # Value of rs is True until we have a 'hit' in a row.\n colPos = -1 # The position of the last entry in col.\n\n # Convert to CSR format.\n for i in range(m):\n rs = True\n for j in range(n):\n if A[i][j] != 0:\n val.append(A[i][j])\n col.append(j)\n colPos += 1\n if rs:\n rowStart.append(colPos)\n rs = False\n # Note a 'hanging' column position is required for CSR fomat:\n rowStart.append(colPos + 1)\n \n return val, col, rowStart", "title": "" }, { "docid": "f60240b9519975cc9179f4fc7b9df72b", "score": "0.5585394", "text": "def sparse_matrix(self):\n # There are two types of equations in this system:\n # (1) Equilibrium of each joint in 2 directions\n # (2) Geometric constraints for beam force of each beam\n self.n_eqs = 2 * self.n_joints + self.n_beams\n self.delta_xy = {}\n values, rows, cols = [], [], []\n\n # Generate the sparse matrix in COO format\n for i in range(self.n_eqs):\n # Get entries for the submartix [0:2*n_joints, 0:2*n_beams]\n # For each beam, the beam force coefficient of the first joint is 1, and -1 for the second joint\n # For each joint, related beams can be divided into two types determined by the position of the joint\n if i < self.n_joints:\n joint = self.joints[i, 0]\n beam_pos = list(np.where(self.beams[:, 1] == joint)[0])\n beam_neg = list(np.where(self.beams[:, 2] == joint)[0])\n cols += beam_pos + beam_neg\n cols += [x + self.n_beams for x in beam_pos] + [x + self.n_beams for x in beam_neg]\n rows += list(i * np.ones(len(beam_pos) + len(beam_neg)))\n rows += list((i + self.n_joints) * np.ones(len(beam_pos) + 
len(beam_neg)))\n values += 2 * (list(np.ones(len(beam_pos))) + list(-np.ones(len(beam_neg))))\n\n # Get entries for the submartix [2*n_joints:2*n_joints+b_beams, 0:2*n_beams]\n # For each beam, the beam force coefficient of the first joint is -dy, and dx for the second joint\n elif i >= 2 * self.n_joints:\n beam = i - 2 * self.n_joints + 1\n j1 = float(self.beams[np.where(self.beams[:, 0] == beam)[0], 1])\n j2 = float(self.beams[np.where(self.beams[:, 0] == beam)[0], 2])\n self.delta_xy[beam] = self.xy[j2] - self.xy[j1]\n cols += [beam - 1 + self.n_beams, beam - 1]\n rows += [i, i]\n values += (np.array((1, -1)) * self.delta_xy[beam]).tolist()\n\n # Get entries for the submatrix [0:2*n_joints, 2*n_beams:2*n_beams+2*n_support]\n # For each fixed joint, the reaction force coeffcient is 1\n joints_support = self.joints[np.where(self.joints[:, -1] == 1)[0], :]\n n_support = joints_support.shape[0]\n self.n_variables = 2 * (self.n_beams + n_support)\n\n for i in range(n_support):\n joint = joints_support[i, 0]\n cols += [2 * self.n_beams + i, 2 * self.n_beams + i + n_support]\n rows += [joint - 1, joint - 1 + self.n_joints]\n values += [1, 1]\n\n # Convert sparse matrix to CSR format for computing\n self.matrix = sparse.csr_matrix((values, (rows, cols)), shape=(self.n_eqs, self.n_variables))", "title": "" }, { "docid": "7b7b674cde52efd3529146b7c3f76168", "score": "0.55775136", "text": "def compute_sparse_conv2d(attrs, inputs, out_type):\n return [\n topi.nn.sparse_conv2d(\n inputs[0], inputs[1], inputs[2], inputs[3], attrs[\"layout\"], attrs[\"kernel_size\"]\n )\n ]", "title": "" }, { "docid": "160e43ba75b1e0e175200fed886baa80", "score": "0.55636436", "text": "def _sparse_normalize_rows(mat):\n # print(mat) format of mat ===> (row_idx, col_idx), weight\n n_nodes = mat.shape[0]\n # Normalize Adjacency matrix to transition matrix\n # Diagonal of the degree matrix is the sum of nonzero elements\n degrees_div = np.array(np.sum(mat, axis=1)).flatten() # out-degrees\n # This is equivalent to inverting the diag mat\n # weights are 1 / degree\n degrees = np.divide(\n 1,\n degrees_div,\n out=np.zeros_like(degrees_div, dtype=float),\n where=(degrees_div != 0)\n )\n # construct sparse diag mat\n # to broadcast weights to adj mat by dot product\n D = sparse.dia_matrix((n_nodes, n_nodes), dtype=np.float64)\n D.setdiag(degrees)\n # premultiplying by diag mat is row-wise mul\n return sparse.csr_matrix(D.dot(mat))", "title": "" }, { "docid": "160e43ba75b1e0e175200fed886baa80", "score": "0.55636436", "text": "def _sparse_normalize_rows(mat):\n # print(mat) format of mat ===> (row_idx, col_idx), weight\n n_nodes = mat.shape[0]\n # Normalize Adjacency matrix to transition matrix\n # Diagonal of the degree matrix is the sum of nonzero elements\n degrees_div = np.array(np.sum(mat, axis=1)).flatten() # out-degrees\n # This is equivalent to inverting the diag mat\n # weights are 1 / degree\n degrees = np.divide(\n 1,\n degrees_div,\n out=np.zeros_like(degrees_div, dtype=float),\n where=(degrees_div != 0)\n )\n # construct sparse diag mat\n # to broadcast weights to adj mat by dot product\n D = sparse.dia_matrix((n_nodes, n_nodes), dtype=np.float64)\n D.setdiag(degrees)\n # premultiplying by diag mat is row-wise mul\n return sparse.csr_matrix(D.dot(mat))", "title": "" }, { "docid": "7a8bdbb9e40bff5d5436e657843b8265", "score": "0.5560937", "text": "def __init__(self, vertices, edges):\n self.adjacency = {} #nodes as keys, connections as lists\n\n for node in vertices:\n self.adjacency[node] = [] \n for edge in 
edges:\n if node in edge: \n if edge[1] == node:\n self.adjacency[node].append( (edge[0], edge[2]) )\n elif edge[0] == node: \n self.adjacency[node].append( edge[1:3] )\n #tuple of (node,weight) ", "title": "" }, { "docid": "baf56790469b2072d153f5fa15a9a491", "score": "0.5556926", "text": "def sym_adj(adj):\n adj = sp.coo_matrix(adj)\n rowsum = np.array(adj.sum(1))\n d_inv_sqrt = np.power(rowsum, -0.5).flatten()\n d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.\n d_mat_inv_sqrt = sp.diags(d_inv_sqrt)\n #d-1/2 * A * d-1/2\n return adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt).astype(np.float32).todense()", "title": "" }, { "docid": "6613ccaceb18e76b015a372054d2b6a6", "score": "0.55536485", "text": "def get_adj_matrix(dim_0, dim_1, start, end, directed=False, homogeneous=False, weights=None):\n\n # add reverse edge if undirected to same node type\n if homogeneous and not directed:\n # need lists for easy concatenation... (trying to be safe with arrays or Series)\n if not isinstance(start, list):\n start = start.tolist()\n if not isinstance(end, list):\n end = end.tolist()\n if weights is not None:\n if not isinstance(weights, list):\n weights = weights.tolist()\n weights = weights + weights\n\n tmp = start + end\n end = end + start\n start = tmp\n\n if weights is not None:\n # Weights needs to be a numpy array\n if isinstance(weights, list):\n weights = np.array(weights)\n elif isinstance(weights, pd.Series):\n weights = weights.values\n weights = weights.astype('float64')\n matrix = coo_matrix((weights, (start, end)))\n else:\n ones = np.ones(len(start), 'int16')\n matrix = coo_matrix((ones, (start, end)))\n\n # Fill any missing rows\n matirx = matrix.tocsr()\n diff_0 = (dim_0 - matrix.shape[0])\n if diff_0 > 0:\n add = np.zeros((diff_0, matrix.shape[1]))\n matrix = vstack((matrix, add))\n\n # Fill any missing Columns\n matrix = matrix.tocsc()\n diff_1 = (dim_1 - matrix.shape[1])\n if diff_1 > 0:\n add = np.zeros((matrix.shape[0], diff_1))\n matrix = hstack((matrix, add))\n\n return matrix.tocsc()", "title": "" }, { "docid": "bd5bf79434e428a9d86f696b4e296486", "score": "0.554916", "text": "def __init__(self, vertices, edges):\n self.vertices = vertices\n self.edges = edges\n \"\"\" Initializing the entire graph with 0s \"\"\"\n self.directedAdjacencyMatrix = [[0 for j in range(vertices)] for i in range(vertices)]\n self.undirectedAdjacencyMatrix = [[0 for j in range(vertices)] for i in range(vertices)]\n\n \"\"\" Ensuring that every node has an edge to itself \"\"\"\n for i in range(self.vertices):\n self.undirectedAdjacencyMatrix[i][i] = 1", "title": "" }, { "docid": "f5e1a4281b9da3ce2cc71501f28b9f9c", "score": "0.5547174", "text": "def adjacency(sMask):\n curr = 0\n adj = []\n for y in range(sMask.shape[0]):\n #print(y)\n for x in range(sMask.shape[1]):\n if x >= sMask.shape[1] - 1: #reach end of row\n if y >= sMask.shape[0] - 1: #reach end of graph\n return adj\n else: #switch to new line, update curr\n curr = sMask[0, y+1]\n continue\n else: #still iterating row\n if sMask[x,y] != curr:\n if (curr, sMask[x,y]) not in adj:\n #new edge not in adjacency list\n adj.append((curr, sMask[x,y]))\n adj.append((sMask[x,y], curr)) #bidirectional\n curr = sMask[x,y]\n else: #already in adj\n curr = sMask[x,y]\n continue\n else:\n continue\n return adj", "title": "" }, { "docid": "45c227cda6b91ab2905f5198d535ba26", "score": "0.55468506", "text": "def save_graph(graph, output_file):\n n = graph.number_of_nodes()\n adjacency_matrix = np.zeros((n, n))\n\n for x, y in graph.edges:\n adjacency_matrix[x, 
y] = 1\n adjacency_matrix[y, x] = 1\n\n np.savetxt(output_file,\n adjacency_matrix,\n fmt='%i',\n delimiter=',')", "title": "" }, { "docid": "2569a52695addcf64f4662d164eeb512", "score": "0.5536119", "text": "def mount_adjacency_list(self, adjacency_matrix):\n\t\tadjacency_list = {}\n\t\tfor v1 in range(len(adjacency_matrix)):\n\t\t\tadjacency_list.update({v1: [v2 for v2 in range(len(adjacency_matrix[v1])) if adjacency_matrix[v1][v2] == 1]})\n\t\treturn adjacency_list", "title": "" }, { "docid": "1a71428d28bdd7d03d343d002a6f082e", "score": "0.55268997", "text": "def __init__(self, data_attribute_list: List[Union[sparse.dok_matrix, np.ndarray]],\r\n incidence_matrix: sparse.dok_matrix, data_array_names_debug=None,\r\n resize=True):\r\n if sparse.issparse(data_attribute_list[0]):\r\n self.is_data_format_sparse = True\r\n else:\r\n self.is_data_format_sparse = False\r\n # data list should be all sparse or all dense (there's probably a nicer way to\r\n # write this (ex-NOR)\r\n if (all([not sparse.issparse(i) for i in data_attribute_list])\r\n or all([sparse.issparse(i) for i in data_attribute_list])) is False:\r\n warnings.warn(\"Recieved a mix of sparse and dense data types in \"\r\n \"data_attribute_list. Unexpected \"\r\n \"behaviour may occur. Please specify either sparse matrices or \"\r\n \"numpy arrays\")\r\n\r\n # check if the bottom row and right col are empty, if so, we can store the dest in them,\r\n # if not, we need to append\r\n if sparse.issparse(incidence_matrix):\r\n self.is_incidence_mat_sparse = True\r\n nnz = (incidence_matrix[-1, :].count_nonzero()\r\n + incidence_matrix[:, -1].count_nonzero())\r\n\r\n else:\r\n self.is_incidence_mat_sparse = False\r\n nnz = (np.count_nonzero(incidence_matrix[-1, :])\r\n + np.count_nonzero(incidence_matrix[:, -1]))\r\n if self.is_data_format_sparse != self.is_incidence_mat_sparse:\r\n raise ValueError(\"Recieved one sparse/ dense network attributes and the opposite for \"\r\n \"for incidence matrix. 
Please specify both as sparse or dense, \"\r\n \"not a mix\")\r\n\r\n if nnz > 0 and resize:\r\n print(\"Adding an additional row and column to house sink state.\")\r\n incidence_matrix = _zero_pad_mat(incidence_matrix, bottom=True, right=True)\r\n data_matrix_list_new = []\r\n for i in data_attribute_list:\r\n data_matrix_list_new.append(_zero_pad_mat(i, bottom=True, right=True))\r\n data_attribute_list = data_matrix_list_new\r\n self.padded = True\r\n else:\r\n self.padded = False\r\n\r\n self.incidence_matrix = incidence_matrix.astype(int)\r\n\r\n self.data_array = np.array(data_attribute_list)\r\n if data_array_names_debug is None:\r\n data_array_names_debug = (),\r\n self.data_fields = data_array_names_debug # convenience for debugging\r\n self.n_dims = len(self.data_array)", "title": "" }, { "docid": "9ded78aa48f7eb414dab3b2bcb2d97a0", "score": "0.55155164", "text": "def connectivity_matrix(streamlines, label_volume, voxel_size,\n symmetric=False, return_mapping=False,\n mapping_as_streamlines=False):\n assert label_volume.dtype.kind == 'i'\n assert label_volume.ndim == 3\n assert label_volume.min() >= 0\n voxel_size = np.asarray(voxel_size)\n # If streamlines is an iterators\n if return_mapping and mapping_as_streamlines:\n streamlines = list(streamlines)\n #take the first and last point of each streamline\n endpoints = [sl[0::len(sl)-1] for sl in streamlines]\n #devide by voxel_size to get get voxel indices\n endpoints = (endpoints // voxel_size).astype('int')\n if endpoints.min() < 0:\n raise IndexError('streamline has negative values, these values ' +\n 'are outside the image volume')\n i, j, k = endpoints.T\n #get labels for label_volume\n endlabels = label_volume[i, j, k]\n if symmetric:\n endlabels.sort(0)\n mx = endlabels.max() + 1\n matrix = ndbincount(endlabels, shape=(mx, mx))\n if symmetric:\n np.maximum(matrix, matrix.T, out=matrix)\n if return_mapping:\n mapping = {}\n for i, (a, b) in enumerate(endlabels.T):\n mapping.setdefault((a, b), []).append(i)\n if mapping_as_streamlines:\n mapping = {k: [streamlines[i] for i in indices]\n for k, indices in mapping.items()}\n return matrix, mapping\n else:\n return matrix", "title": "" }, { "docid": "3088627ee12617d7eea6e81ac9c14031", "score": "0.5513605", "text": "def from_pairs(pairs):\n vertices = sorted(set([p[0] for p in pairs] +\n [p[1] for p in pairs]))\n matrix = [[0 for _ in vertices] for _ in vertices]\n for a, b in pairs:\n i, j = vertices.index(a), vertices.index(b)\n matrix[i][j] = 1\n return matrix", "title": "" }, { "docid": "7e48a7c0735d6733d153ce457b7b9970", "score": "0.55096203", "text": "def _access_matrix(self):\n A = np.zeros((self.n_state, self.n_state))\n # communities\n comm0 = range(5)\n comm1 = range(5, 10)\n comm2 = range(10, 15)\n comms = [comm0, comm1, comm2]\n # bnecks\n bnecks0 = [0, 4]\n bnecks1 = [5, 9]\n bnecks2 = [10, 14]\n bnecks = [bnecks0, bnecks1, bnecks2]\n # edges\n for comm in comms:\n for i in comm:\n for j in comm:\n if i != j:\n A[i, j] = 1.0\n # break within-community bottlenecks\n for i in range(3):\n A[bnecks[i][0], bnecks[i][1]] = 0.0\n A[bnecks[i][1], bnecks[i][0]] = 0.0\n # cross-community bottlenecks\n A[bnecks[0][1], bnecks[1][0]] = 1.0\n A[bnecks[1][1], bnecks[2][0]] = 1.0\n A[bnecks[2][1], bnecks[0][0]] = 1.0\n self.comms = comms\n self.bnecks = bnecks\n self.A = ((A + A.T) != 0).astype(\"int\")", "title": "" }, { "docid": "e2591cda20b35e67d53b0a6b32c513b8", "score": "0.55068064", "text": "def loadSparse(infile,startrow,endrow,startOverlaps={}):\n \n \n a = graphsparse()\n 
reader = csv.reader(open(infile,\"r\"))\n overlapDict = startOverlaps\n\n rowcount = 0\n for row in reader:\n rowcount += 1\n\n if rowcount > endrow:\n break\n\n if rowcount < startrow:\n continue\n\n \n #print row\n\n (tile,start,stop,tilenumber) = (row[0],int(row[1]),int(row[2]),int(row[3]))\n \n # add this tile to the overlap list\n overlapDict[tile] = (start,stop,tilenumber)\n \n # for each element of the overlap\n for item in overlapDict.keys():\n\n if item == tile:\n continue\n \n # if the end of the existing element is before the start of the new element, remove it\n if overlapDict[item][1] < start:\n del overlapDict[item]\n continue\n \n # if the start of the new element is >= the existing element start, and <= existing stop , set up existing -> new\n # unless we have new->existing (we do not want circular paths)\n if start >= overlapDict[item][0] and start <= overlapDict[item][1]:\n #print \"making overlap %s --> %s\"%(item,bac)\n if (tilenumber, overlapDict[item][2]) not in a:\n a[(overlapDict[item][2],tilenumber)] = {'d' : 1}\n # if the start of the new element == the start of the existing element , set up new - > existing, unless\n # we have existing->new (we do not want circular paths)\n if start == overlapDict[item][0]:\n if (overlapDict[item][2], tilenumber) not in a:\n #print \"make overlap %s --> %s\"%(bac,item)\n a[(tilenumber,overlapDict[item][2])] = {'d' : 1}\n\n if rowcount%50 == 0:\n logger.info(\"loadSpare : rowcount, adjacency matrix size : %s , %s\"%(rowcount,len(a)))\n\n\n return (a,overlapDict)", "title": "" }, { "docid": "d9b446cd71681a005b82c2c3fc4cc47c", "score": "0.5504818", "text": "def sparse(i, j, v, m, n):\n return scipy.sparse.csr_matrix((v, (i, j)), shape=(m, n))", "title": "" }, { "docid": "013e5fdfd00915a4f7fe3a2abcc9a724", "score": "0.550441", "text": "def __init__(self):\n self.adjacency_matrix = []\n self.vertex_list = []", "title": "" }, { "docid": "50b49edaf443a28a94fe3c1e66af99de", "score": "0.54941463", "text": "def _get_omni_matrix(graphs):\n shape = graphs[0].shape\n n = shape[0] # number of vertices\n m = len(graphs) # number of graphs\n\n A = np.array(graphs, copy=False, ndmin=3)\n\n # Do some numpy broadcasting magic.\n # We do sum in 4d arrays and reduce to 2d array.\n # Super fast and efficient\n out = (A[:, :, None, :] + A.transpose(1, 0, 2)[None, :, :, :]).reshape(n * m, -1)\n\n # Averaging\n out /= 2\n\n return out", "title": "" }, { "docid": "e59ed916fda4be0ee5d185beb61b4425", "score": "0.5489763", "text": "def sym_adj(adj):\n adj = sp.coo_matrix(adj)\n rowsum = np.array(adj.sum(1))\n d_inv_sqrt = np.power(rowsum, -0.5).flatten()\n d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.\n d_mat_inv_sqrt = sp.diags(d_inv_sqrt)\n return adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt).astype(np.float32).todense()", "title": "" }, { "docid": "d000f4a2b9093b52d81e41d6a8d296a1", "score": "0.5477242", "text": "def array_to_sparse(mat, tol):\n indicator_mat = np.abs(mat) > tol\n indicator_mat = indicator_mat.astype(float)\n return sp.csc_matrix(mat*indicator_mat)", "title": "" }, { "docid": "b0ef79b719cd333ce297beb339fe26d1", "score": "0.5474648", "text": "def adjugate(matrix):\n cof_mat = cofactor(matrix)\n adj = []\n for i in range(len(cof_mat)):\n row = []\n for j in range(len(cof_mat)):\n row.append(cof_mat[j][i])\n adj.append(row)\n return adj", "title": "" }, { "docid": "d598088d09138bd83d734e8351214690", "score": "0.5456843", "text": "def to_adjacency_matrix(tree):\n all_clades = list(tree.find_clades(order='level'))\n lookup = {}\n for i, elem 
in enumerate(all_clades):\n lookup[elem] = i\n adjacency_matrix = numpy.zeros((len(all_clades), len(all_clades)))\n for parent in tree.find_clades(terminal=False, order='level'):\n for child in parent.clades:\n adjacency_matrix[lookup[parent], lookup[child]] = 1\n if not tree.rooted:\n # Branches can go from \"child\" to \"parent\" in unrooted trees\n adjacency_matrix += adjacency_matrix.transpose() # Fixed an error here\n return all_clades, adjacency_matrix", "title": "" }, { "docid": "ab7cb5e30968ad10e73a997dafe76f4e", "score": "0.5456597", "text": "def connectivities(self) -> Union[np.ndarray, csr_matrix, None]:\n return self._connectivities", "title": "" }, { "docid": "88a82371ef3d5a99f7e0bbaae89ab7aa", "score": "0.54428214", "text": "def tocoo(self): \n d = []\n row = []\n col = []\n for k,v in self.seq_items.items():\n row = row + [k]*len(v)\n col = col + v\n d = d + [1]*len(v)\n print(\"row:{}\\ncol:{}\\nd:{}\\nnum_seqs:{}\\nnum_items:{}\".format(max(row),max(col),len(d),self.num_seqs,self.num_items))\n coo = sp.coo_matrix((d,(row,col)),\n shape =(self.num_seqs,self.num_items)) #(row,col)處為1,其餘為0 \n\n return coo", "title": "" }, { "docid": "43b39f372a4a744754b3bb70870aaf5c", "score": "0.5436057", "text": "def get_adj_matrix(self):\n\n\t\tblock = self.block\n\t\tadj_matrix = np.zeros(shape=(self.num_atoms, self.num_atoms), dtype='int')\n\t\t\n\t\tbonds = self.get_bonds()\n\t\tfor bond in bonds:\n\t\t\tsrc = bond[1]\n\t\t\tdst = bond[2]\n\t\t\tb_type = bond[3]\n\t\t\tadj_matrix[src-1][dst-1] = b_type\n\t\t\n\t\treturn adj_matrix", "title": "" }, { "docid": "0f0239ecee244fe498533955be73f7ee", "score": "0.5428606", "text": "def to_igraph(self):\n return _utils.get_igraph_from_adjacency(self.connectivities)", "title": "" }, { "docid": "0d8ba3c11fb67c5dd48ece15720b7933", "score": "0.5426403", "text": "def get_dp_vertex_sparse(self, dtype=np.float64, sparseformat=coo_matrix):\n nnz = self.get_dp_vertex_nnz()\n irow,icol,data = zeros(nnz, dtype=np.int32), zeros(nnz, dtype=np.int32), zeros(nnz, dtype=dtype) # Start to construct coo matrix\n\n atom2so = self.sv.atom2s\n nfdp = self.dpc2s[-1]\n n = self.sv.atom2s[-1]\n inz = 0\n for atom,[sd,fd,pt,spp] in enumerate(zip(self.dpc2s,self.dpc2s[1:],self.dpc2t,self.dpc2sp)):\n if pt!=1: continue\n s,f = atom2so[atom:atom+2]\n for p in range(sd,fd):\n for a in range(s,f):\n for b in range(s,f):\n irow[inz],icol[inz],data[inz] = p,a+b*n,self.prod_log.sp2vertex[spp][p-sd,a-s,b-s]\n inz+=1\n\n for atom, [sd,fd,pt,spp] in enumerate(zip(self.dpc2s,self.dpc2s[1:],self.dpc2t,self.dpc2sp)):\n if pt!=2: continue\n inf= self.bp2info[spp]\n a,b = inf.atoms\n sa,fa,sb,fb = atom2so[a],atom2so[a+1],atom2so[b],atom2so[b+1]\n for p in range(sd,fd):\n for a in range(sa,fa):\n for b in range(sb,fb):\n irow[inz],icol[inz],data[inz] = p,a+b*n,inf.vrtx[p-sd,a-sa,b-sb]; inz+=1;\n irow[inz],icol[inz],data[inz] = p,b+a*n,inf.vrtx[p-sd,a-sa,b-sb]; inz+=1;\n return sparseformat((data, (irow, icol)), dtype=dtype, shape=(nfdp,n*n))", "title": "" }, { "docid": "be1e86a0f077f51e2572834768e28e9d", "score": "0.5419639", "text": "def node_connections(self)->np.ndarray:\n nnodes = self.nnodes()\n nnames = self.node_names()\n rval = np.zeros([nnodes, nnodes])\n src_node:ANPNode\n for src in range(nnodes):\n srcname = nnames[src]\n src_node = self.node_obj(srcname)\n for dest in range(nnodes):\n dest_name = nnames[dest]\n if src_node.is_node_node_connection(dest_name):\n rval[dest,src]=1\n return rval", "title": "" } ]
a569a968b10802e67698e98d11f0fc52
If needed to initialize within CPU sampler (e.g. vector epsilon-greedy, see EpsilonGreedyAgent for details).
[ { "docid": "7fe7fdffb15fc98bad9fff2213283bd3", "score": "0.6219568", "text": "def collector_initialize(self, global_B=1, env_ranks=None):\n pass", "title": "" } ]
[ { "docid": "469d5d7feab90369d1d6c0f4d9d4f23d", "score": "0.70388573", "text": "def initialize(self):\n self.actor_optimizer.sync()\n self.critic_optimizer.sync()\n self.sess.run(self.target_init_updates)", "title": "" }, { "docid": "658e8b56c9e2945fd00f727f4cad78f7", "score": "0.6639481", "text": "def agent_init(self):\r\n self.weights = np.random.uniform(-0.001,0,2048)\r\n self.tdError = None\r\n self.traces = []\r\n self.currentAction = None\r\n self.previousAction = None\r\n self.currentTiles = np.zeros(0)\r\n self.previousTiles = np.zeros(0)\r\n self.currentActionValue = None\r\n self.previousActionValue = None \r\n self.frogs = 69696969", "title": "" }, { "docid": "bca429589be237ec688c3cc8210cf26b", "score": "0.65200406", "text": "def _init_run(self):\n # initialize the Q-values and the eligibility trace\n #self.Q = 0.01 * numpy.random.rand(self.N, self.N, 8) + 0.1\n self.e = numpy.zeros((self.N, self.N, 8))\n self.w = numpy.zeros((self.N, self.N, 8))\n\n # list that contains the times it took the agent to reach the target for all trials\n # serves to track the progress of learning\n self.latency_list = []\n\n # initialize the state and action variables\n self.x_position = None\n self.y_position = None\n self.action = None\n self.path = [] #where pass the mouse", "title": "" }, { "docid": "38639db8f1672ea2135327765e65e8ed", "score": "0.64779973", "text": "def init_parallel(self):\n dist.init_parallel_env()", "title": "" }, { "docid": "538a291e72970877f0f2cd8d0920b11e", "score": "0.64131016", "text": "def localLocalInitialize(self, solutionExport = None):\n self._endJobRunnable = 1\n self.gradDict['pertNeeded'] = self.gradDict['numIterForAve'] * 2", "title": "" }, { "docid": "797049f0c4409f4c1982537c4bda8935", "score": "0.6398831", "text": "def pre_pool_init(self, core):\n pass", "title": "" }, { "docid": "dfc096ff48f99a0cf7d197bf6c62c297", "score": "0.6382503", "text": "def job_init(args):\n global i\n i = 0\n posdo.info('running with %d uvs' % (posdo.uvs_nof()))\n return 0", "title": "" }, { "docid": "4c9dbeb2f1e10636d08b667f327a7180", "score": "0.63585645", "text": "def construct():\n base.next_processing_init_value = RndGen.get_random(base.processing_init_value)\n base.next_admission_init_value = RndGen.get_random(base.admission_init_value)", "title": "" }, { "docid": "c43a27765bb62dc8b844ef07108a80b2", "score": "0.6329874", "text": "def brain_init():\n\traise NotImplementedError", "title": "" }, { "docid": "0c230e370806dc3df223306372ddd84b", "score": "0.63269717", "text": "def _initialize_neurons(self):", "title": "" }, { "docid": "81e09d754c56b5737bfc0ba337eb230e", "score": "0.6318943", "text": "def run_init(self):\n pass", "title": "" }, { "docid": "695c45453c044da0a896d51241645b90", "score": "0.6316914", "text": "def setUp(self):\n self.processes = multiprocessing.cpu_count()\n self.options = LinearQubitOperatorOptions(self.processes)", "title": "" }, { "docid": "ab445ce781720408311771d09575987c", "score": "0.6309545", "text": "def initialize(N0,L,Nt,pflag):", "title": "" }, { "docid": "1dd5962dcd014b1a62dfe9af09501d3c", "score": "0.63038605", "text": "def _initialize(self):\n for cell in self._registered_cells:\n cell.initialize()\n # Array initialisation is handled by PyNN", "title": "" }, { "docid": "c92637668a066945e6c1624fbc4d1976", "score": "0.629565", "text": "def initialize():", "title": "" }, { "docid": "2cc02228122e69b63d4e29255cba404e", "score": "0.6278364", "text": "def initialize(pts):\n home_robot()\n return", "title": "" }, { "docid": 
"54f19eaaaaae0e0958c99a0d7745d6d5", "score": "0.6252341", "text": "def __init__(self, random_seed: int = 0, steps_number: int = 100, gpu_number: Optional[int] = 0) -> None:\n super().__init__(random_seed, steps_number, gpu_number)\n session_config(gpu_number)", "title": "" }, { "docid": "4d78e0d3e028da7d7029e335ba77185e", "score": "0.6236528", "text": "def initialize():\n\tpass", "title": "" }, { "docid": "dd0b44c883a1e29f40376f64de026d79", "score": "0.62249464", "text": "def agent_init(self, agent_info={}):\n\n # set random seed for each run\n self.rand_generator = np.random.RandomState(agent_info.get(\"seed\")) \n\n iht_size = agent_info.get(\"iht_size\")\n num_tilings = agent_info.get(\"num_tilings\")\n num_tiles = agent_info.get(\"num_tiles\")\n\n # initialize self.tc to the tile coder we created\n self.tc = CrawlerTileCoder", "title": "" }, { "docid": "0b048a216072f96746095bef046a21ca", "score": "0.6219519", "text": "def initialize(self):\n self.count_num_points()\n self.generate_IR()\n self.initialize_samples()", "title": "" }, { "docid": "de07e5f060635fdabcecd3cd0dfdeffd", "score": "0.62124217", "text": "def __init__(self, for_interpolation, is_train=True):\n self.is_train = is_train\n # no batch normalization in the paper\n # self.batch_norm_params = {'decay': 0.9997,\n # 'epsilon': 0.001,\n # 'is_training': self.is_train}\n self.for_interpolation = for_interpolation\n self.mode = 'interpolater_' if for_interpolation else 'computer_'", "title": "" }, { "docid": "56259cf5ee14a5ad591747cb22bdde80", "score": "0.6209006", "text": "def init():\n global process_and_score\n from azure_utils.samples.deep_rts_samples import get_model_api\n process_and_score = get_model_api()", "title": "" }, { "docid": "782ed42c863e95b7119d11d256a29137", "score": "0.6199288", "text": "def __init__(self):\n #self.theta = np.zeros((3, 2))\n #self.state = RandomAgent.reset(self,[-20,20])\n \n self.count_episodes = -1\n self.max_position = -0.4\n self.epsilon = 0.5\n self.gamma = 0.99\n self.running_rewards = 0\n self.policy = ActorCritic()\n self.optimizer = optim.Adam(self.policy.parameters(), lr=0.0001, betas=(0.9, 0.999))\n self.check_new_episode = 1\n self.count_iter = 0\n self.successful = 0", "title": "" }, { "docid": "4adb0764522566f0501c6648143facdd", "score": "0.6196275", "text": "def initialize(self):\n self.z = 0.05 * np.ones(self.model.shape[1])\n self.g_star = 0.\n self.current_solution = np.zeros(self.model.shape[1])\n self.iter_ = 0\n self.gamma = np.ones(self.prior.shape[0])\n self.update_inv_gamma()", "title": "" }, { "docid": "232a87f307a67be276259c654c633733", "score": "0.6192141", "text": "def init_agent(self):", "title": "" }, { "docid": "913f4e940c1308f0b88361117d7cb208", "score": "0.61880904", "text": "def initialize ():", "title": "" }, { "docid": "84cbdcc7b8d598f7fc9392999d45ad43", "score": "0.617346", "text": "def __init__(__self__, *,\n cpu: Optional[int] = None):\n if cpu is not None:\n pulumi.set(__self__, \"cpu\", cpu)", "title": "" }, { "docid": "8253e608bdbf979652dbeb33c79978bc", "score": "0.61619", "text": "def __init__(self, args, setup=dict(device=torch.device('cpu'), dtype=torch.float)):\n self.args, self.setup = args, setup\n self.retain = True if self.args.ensemble > 1 and self.args.local_rank is None else False\n self.stat_optimal_loss = None", "title": "" }, { "docid": "8253e608bdbf979652dbeb33c79978bc", "score": "0.61619", "text": "def __init__(self, args, setup=dict(device=torch.device('cpu'), dtype=torch.float)):\n self.args, self.setup = args, setup\n self.retain = 
True if self.args.ensemble > 1 and self.args.local_rank is None else False\n self.stat_optimal_loss = None", "title": "" }, { "docid": "7f3a732da9911765ec77a90efbed202f", "score": "0.6151522", "text": "def init(\n # rank=int(os.getenv(\"CUDA_VISIBLE_DEVICES\", 0)) + 1,\n rank=0,\n all_to_one = False,\n expected_domain_tasks=int(os.getenv(\"DAMPED_N_DOMAIN\", 1)), port=29500\n) -> None:\n logger.info(\"Waiting for domain-task trainer connection\")\n if all_to_one and rank == 0:\n rank=int(os.getenv(\"CUDA_VISIBLE_DEVICES\", 0)) + 1\n utils.init_distributedenv(rank, world_size=expected_domain_tasks + 1, port=port)\n\n # init ManagedMemory\n ManagedMemory()", "title": "" }, { "docid": "f913796cf11b45f27e67ac81902f6a09", "score": "0.6149913", "text": "def _initialize_sampler(self, X, y):", "title": "" }, { "docid": "be57190574652185ccccdf62d4a6b40c", "score": "0.61442935", "text": "def init():\n\n # Placeholder init code. Replace the sleep with check for model files required etc...\n time.sleep(1)", "title": "" }, { "docid": "1bb71fa7bf0328c06f27264a4e475683", "score": "0.61411655", "text": "def test_default_cpu():\n command = [\"run.py\", \"++trainer.max_epochs=1\", \"++trainer.gpus=0\"]\n run_command(command)", "title": "" }, { "docid": "c906512caba03832f5b0eca7408d983d", "score": "0.61364836", "text": "def init_randomization(self):\n pass", "title": "" }, { "docid": "6da541e5ff1c1403c7d67d3134010473", "score": "0.6132954", "text": "def initialize_cores(self, solver):\n if \"-p\" in solver.stdFlags:\n if self.cores is None:\n self.cores = multiprocessing.cpu_count()\n self.threads = self.cores * 2\n else:\n self.cores = 1\n self.threads = 1", "title": "" }, { "docid": "ea0d4c6f57de23798b15b8fb4eae6410", "score": "0.61273277", "text": "def setUp(self):\n self.ens_n = test_funcs.build_ensemble(qp.stats.norm_gen.test_data['norm'])\n self.ens_n_shift = test_funcs.build_ensemble(qp.stats.norm_gen.test_data['norm_shifted'])", "title": "" }, { "docid": "130e8560da9238ad9d8b7f569978395b", "score": "0.6102606", "text": "def _init_env_variables(self):\n # For Info Purposes\n self.cumulated_reward = 0.0\n # Set to false Done, because its calculated asyncronously\n self._episode_done = False\n \n # We wait a small ammount of time to start everything because in very fast resets, laser scan values are sluggish\n # and sometimes still have values from the prior position that triguered the done.\n time.sleep(0.2)", "title": "" }, { "docid": "b4d8f5cf7b7597c056b1ba2c74eac024", "score": "0.6102326", "text": "def _ps_init(G, env, policy, bernoulli_reset: bool):\n G.env = pickle.loads(env)\n G.policy = pickle.loads(policy)\n G.bernoulli_reset = pickle.loads(bernoulli_reset)", "title": "" }, { "docid": "e0f5895a068737c77ebd0bd29ce50ab1", "score": "0.610205", "text": "def Prepare(self) -> None:\r\n self._Operations = [[1/self.RegisterSize]*self.RegisterSize for _ in self._Agents]\r\n self._Register = tuple(random.uniform(0,1) for _ in range(self.RegisterSize))", "title": "" }, { "docid": "2d2d3cd8157367cca3165403ea547d8b", "score": "0.6098453", "text": "def runtime_init(self):\r\n pass", "title": "" }, { "docid": "4d96921f2247fc3fe41bc48fc4e722ee", "score": "0.60974634", "text": "def _real_initialize(self):\n\t\tpass", "title": "" }, { "docid": "1812f4176dfd19fb21ee138deb80fb17", "score": "0.609716", "text": "def initialize_agent(self):\n pass", "title": "" }, { "docid": "417c26ef73f2b183e8f0c67bbf04e69f", "score": "0.60967517", "text": "def __init__(self,nu=0.0):\n self.nu = nu\n self.setup()", "title": "" }, { 
"docid": "a3eb461d7d2bdd4af0c79e0c3a42ca24", "score": "0.60958916", "text": "def __init__(self):\n # =========================================================\n # \n # \n # ** Your code here **\n #\n # \n # =========================================================\n self.gamma = 0.9\n self.alpha = 0.2\n self.epsion = 0.3\n self.train_num = 2*QLearner.GAME_NUM\n self.record_states = []\n self.states_value = collections.defaultdict(int) # state -> value\n np.random.seed(92727)", "title": "" }, { "docid": "6a20a70c9e3d20a22da3db44a5a6669b", "score": "0.60928774", "text": "def initialize_build(*args, **kwargs):\n # TODO: For now, no additional process level costs to initialize", "title": "" }, { "docid": "201919403fca5dd0a315992798d36af6", "score": "0.6084671", "text": "def _setup_init(self):\n with tf.variable_scope(\"output\", reuse=True):\n assert self.q_values is not None\n self.policy_proba = tf.nn.softmax(self.q_values)", "title": "" }, { "docid": "a446115b74e97dde82a82cb5b426a651", "score": "0.6077428", "text": "def initialize(self):\n self.beliefs = DiscreteDistribution()\n self.beliefs[self.initialAgentPosition] = 1.0", "title": "" }, { "docid": "77295072ee2ae198936fdef9853e8853", "score": "0.6073356", "text": "def test_solver_init(self, MockClient):\n\n # assertWarns not available in py2\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"always\")\n DWaveSampler(solver_features={'qpu': True})\n self.assertEqual(len(w), 1)\n self.assertTrue(issubclass(w[-1].category, DeprecationWarning))\n\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"always\")\n DWaveSampler(solver={'qpu': True})\n self.assertEqual(len(w), 0)\n\n MockClient.reset_mock()\n solver = {'qpu': True, 'num_qubits__gt': 1000}\n sampler = DWaveSampler(solver_features=solver)\n MockClient.from_config.assert_called_once_with(solver=solver)", "title": "" }, { "docid": "a53051bb1075c5b3e9a7d1cee86634b9", "score": "0.6062371", "text": "def __init__(self, model_name:str):\n self.model = pyflamegpu.ModelDescription(model_name)\n self.agent = self.model.newAgent(\"agent\")\n self.env = self.model.Environment()\n self.population = pyflamegpu.AgentVector(self.agent, TEST_LEN)\n #self.model.addStepFunction(DEFAULT_STEP) # Default step not required", "title": "" }, { "docid": "83191f01888d03c9cb21cd7725c43ba3", "score": "0.6061181", "text": "def _real_initialize(self):\n pass", "title": "" }, { "docid": "fa045a40b3bd143caccadf686bcba9c5", "score": "0.6060435", "text": "def __init__(self, num_gpus, controller=\"/cpu:0\"):\n self.__num_gpus = num_gpus\n self.__controller = controller", "title": "" }, { "docid": "294322aaa239022794acfc6107bea500", "score": "0.6056465", "text": "def initialise_sampler(self):\n raise NotImplementedError", "title": "" }, { "docid": "0eef4bd763ecba124d12e9c7d7d5ea35", "score": "0.60462916", "text": "def init_params(self):\n\t\ts = self.env.reset(self.np_random)\n\t\twhile(True):\n\t\t\ta = self.np_random.choice(range(self.env.anum))\n\t\t\tr, s_n, done = self.env.observe(s,a,self.np_random)\n\t\t\tif r > 0: # First nonzero reward\n\t\t\t\tif self.env.episodic:\n\t\t\t\t\tself.means = r*np.ones(self.dim,dtype=np.float)\n\t\t\t\telse:\n\t\t\t\t\tself.means = r/(1-self.discount)*np.ones(self.dim,dtype=np.float)\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tif done:\n\t\t\t\t\tself.means = np.zeros(self.dim,dtype=np.float)\n\t\t\t\t\tbreak\n\t\t\t\ts = s_n", "title": "" }, { "docid": "0693be5f301ccc01a7bca7721c08c20c", "score": "0.60438234", "text": "def init_once():\n 
global NVML_STATE, NVML_OWNER_PID\n\n if NVML_STATE in {\n NVMLState.DISABLED_PYNVML_NOT_AVAILABLE,\n NVMLState.DISABLED_CONFIG,\n NVMLState.DISABLED_LIBRARY_NOT_FOUND,\n NVMLState.DISABLED_WSL_INSUFFICIENT_DRIVER,\n }:\n return\n elif NVML_STATE == NVMLState.INITIALIZED and NVML_OWNER_PID == os.getpid():\n return\n elif NVML_STATE == NVMLState.UNINITIALIZED and not dask.config.get(\n \"distributed.diagnostics.nvml\"\n ):\n NVML_STATE = NVMLState.DISABLED_CONFIG\n return\n elif (\n NVML_STATE == NVMLState.INITIALIZED and NVML_OWNER_PID != os.getpid()\n ) or NVML_STATE == NVMLState.UNINITIALIZED:\n try:\n pynvml.nvmlInit()\n except (\n pynvml.NVMLError_LibraryNotFound,\n pynvml.NVMLError_DriverNotLoaded,\n pynvml.NVMLError_Unknown,\n ):\n NVML_STATE = NVMLState.DISABLED_LIBRARY_NOT_FOUND\n return\n\n if _in_wsl() and parse_version(\n pynvml.nvmlSystemGetDriverVersion().decode()\n ) < parse_version(MINIMUM_WSL_VERSION):\n NVML_STATE = NVMLState.DISABLED_WSL_INSUFFICIENT_DRIVER\n return\n else:\n from distributed.worker import add_gpu_metrics\n\n # initialization was successful\n NVML_STATE = NVMLState.INITIALIZED\n NVML_OWNER_PID = os.getpid()\n add_gpu_metrics()\n else:\n raise RuntimeError(\n f\"Unhandled initialisation state ({NVML_STATE=}, {NVML_OWNER_PID=})\"\n )", "title": "" }, { "docid": "64295a1be25695943e893165597d9912", "score": "0.60321236", "text": "def solver_setup():\r\n pass", "title": "" }, { "docid": "f05815d2435e3269fa6e5bf19440c2cc", "score": "0.6031041", "text": "def initialize(self):\n ########################################################################################\n # TODO: #\n # Initialize weights self.params['w'] using normal distribution with mean = 0 and #\n # std = self.layer_params['weight_scale']. #\n # #\n # Initialize biases self.params['b'] with 0. 
#\n ######################################################################################## \n W = np.zeros((self.layer_params[\"output_size\"], self.layer_params[\"input_size\"]))\n \n for i in range(self.layer_params[\"output_size\"]):\n for j in range(self.layer_params[\"input_size\"]):\n W[i,j] = np.random.normal(0, self.layer_params['weight_scale'])\n\n b = np.zeros(self.layer_params[\"output_size\"])\n self.params['w'] = W\n self.params['b'] = b\n\n ########################################################################################\n # END OF YOUR CODE #\n ########################################################################################\n \n self.cache = None", "title": "" }, { "docid": "2f7ec7c954dc77cefe9ab31e9bba93f9", "score": "0.6030724", "text": "def _init_env_variables(self):\n # For Info Purposes\n self.cumulated_reward = 0.0\n # Set to false Done, because its calculated asyncronously\n self._episode_done = False\n self._outofrange = False\n\n #self.desired_point.x = random.uniform(1,10)\n #self.desired_point.y = random.uniform(-5,5)\n\n try:\n self.deleteModel()\n except:\n pass\n self.respawnModel()\n #self.get_statemsg()\n #self.moveto()\n #time.sleep(2)\n #self.obstaclemoveto()\n odometry = self.get_odom()\n self.previous_distance_from_des_point = self.get_distance_from_desired_point(odometry.pose.pose.position)", "title": "" }, { "docid": "0db04a40403741ad73f6c62b209340b5", "score": "0.6021725", "text": "def init_engine(self, data):\n self.sample_generator = DataLoaderBase(ratings=data.train)\n adj_mat, norm_adj_mat, mean_adj_mat = self.sample_generator.get_adj_mat(\n self.config\n )\n norm_adj = sparse_mx_to_torch_sparse_tensor(norm_adj_mat)\n\n self.config[\"model\"][\"norm_adj\"] = norm_adj\n\n self.config[\"model\"][\"n_users\"] = data.n_users\n self.config[\"model\"][\"n_items\"] = data.n_items\n self.engine = LightGCNEngine(self.config)", "title": "" }, { "docid": "b8a57d54bc226134981ce70d9e3ce42e", "score": "0.6019904", "text": "def env_init(self, env_info={}):\n self.discrete_torque = env_info.get(\"discrete_torque\")\n \n# print(\"discrete_torque\",self.discrete_torque)\n \n self.env = gym.make(\"Pendulum-v0\")\n self.env.seed(0)", "title": "" }, { "docid": "0adb6ab5463cc5d5f57f7206ad440435", "score": "0.601441", "text": "def __init__(self, env, config):\n \n \n self.env = env\n self.config = config\n # self.seed = (config['seed'])\n\n # set parameter for ML\n self.set_parameters(config)\n # Replay memory\n self.memory = ReplayBuffer(config)\n # Q-Network\n self.create_agents(config)\n # load agent\n if self.load_model:\n self.load_agent('trained_tennis_2k86.pth')", "title": "" }, { "docid": "59c1165845876e8af73204019b9371b3", "score": "0.60109746", "text": "def init_step(self, X):\n pass", "title": "" }, { "docid": "f8d6c0dc16f4b95250997a71b9350285", "score": "0.6004663", "text": "def init(self):\n self._random = np.random.default_rng(self._seed)\n self._count = 0", "title": "" }, { "docid": "043c82ea9c55999180e82170a5d7a7e7", "score": "0.6003877", "text": "def init_learning():\n np.random.seed(0)\n torch.manual_seed(0)\n torch.cuda.manual_seed_all(0)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False", "title": "" }, { "docid": "9c231cf415aff32aeb0356b44a7a4ddd", "score": "0.5996959", "text": "def _initialize(self, n_workers: int, n_labels: int) -> None:\n\n self.spamming_ = sps.uniform(1, 1 + self.default_noise).rvs(\n size=(n_workers, 2),\n random_state=self.random_state,\n )\n self.thetas_ = sps.uniform(1, 1 + 
self.default_noise).rvs(\n size=(n_workers, n_labels),\n random_state=self.random_state\n )\n\n self.spamming_ = self.spamming_ / self.spamming_.sum(axis=1, keepdims=True)\n self.thetas_ = self.thetas_ / self.thetas_.sum(axis=1, keepdims=True)\n\n if self.method == 'vb':\n self.theta_priors_ = np.empty((n_workers, 2))\n self.theta_priors_[:, 0] = self.alpha\n self.theta_priors_[:, 1] = self.beta\n\n self.strategy_priors_ = np.ones((n_workers, n_labels)) * 10.0", "title": "" }, { "docid": "d4f193a362bc111345ce75abefd81d18", "score": "0.59952676", "text": "def __init__( self, core = None, pin_powers = None, pin_factors = None ):\n self._logger = logging.getLogger( 'data' )\n\n self._node_assy_weights = {}\n self._node_weights = {}\n self._weights = {}\n\n if core is not None:\n self.load( core, pin_powers, pin_factors )", "title": "" }, { "docid": "0f5bd01a103347f590041827ad4f7643", "score": "0.59894776", "text": "def __init__(self, nwalkers=100, nstep=1000, step_size=3,\n ntherm=-1, ndecor=1,\n nelec=1, ndim=1,\n init={'type': 'uniform', 'min': -5, 'max': 5},\n cuda=False):\n\n SamplerBase.__init__(self, nwalkers, nstep,\n step_size, ntherm, ndecor, nelec, ndim, init,\n cuda)", "title": "" }, { "docid": "9b30fc65b0b6dbc9a7bcaf7554fcad17", "score": "0.5987275", "text": "def init():", "title": "" }, { "docid": "9b30fc65b0b6dbc9a7bcaf7554fcad17", "score": "0.5987275", "text": "def init():", "title": "" }, { "docid": "df2fcaafaa7f12537c9ccc3c4fcf9835", "score": "0.59821516", "text": "def __init__(self):\n self.picture = 0\n self._gather_data()\n self._train_system()", "title": "" }, { "docid": "87f2f517abe139897f35ba77dc27aadc", "score": "0.5979425", "text": "def init_variables(self):\n pass", "title": "" }, { "docid": "b72e5dd8e15b227d8efcb4ae1dbb2613", "score": "0.59701353", "text": "def initialize(self):\n\n\t\t# initialize the current and previous time variables\n\t\t# this will be used as timing variables for the control system\n\t\tself.currTime = time.time()\n\t\tself.prevTime = self.currTime\n\n\t\t# system's previous error so that there is no OutOfBoundsException\n\t\tself.prevError = 0\n\n\t\t# initialize the PID term result\n\t\tself.cI = 0\n\t\tself.cD = 0\n\t\tself.cP = 0", "title": "" }, { "docid": "aed4ab90efb9a69335705f6baad6a70a", "score": "0.59602445", "text": "def init(self):\n # Initialize environment to get input/output dimensions\n self.train_env = utils.make_env(self.cfg.env)\n self.eval_env = utils.make_env(self.cfg.env)\n ob_dim, = self.train_env.observation_space.shape\n ac_dim, = self.train_env.action_space.shape\n # Setup policy, baseline, and critic\n self.policy = policies.TanhGMMMLPPolicy(\n ob_dim, ac_dim,\n num_components=self.cfg.policy_num_components,\n hidden_num=self.cfg.policy_hidden_num,\n hidden_size=self.cfg.policy_hidden_size,\n hidden_act=self.cfg.policy_hidden_act,\n )\n self.vf = critics.MLPBaseline(\n ob_dim,\n hidden_num=self.cfg.vf_hidden_num,\n hidden_size=self.cfg.vf_hidden_size,\n hidden_act=self.cfg.vf_hidden_act,\n )\n self.qf = critics.QAMLPCritic(\n ob_dim, ac_dim,\n hidden_num=self.cfg.critic_hidden_num,\n hidden_size=self.cfg.critic_hidden_size,\n hidden_act=self.cfg.critic_hidden_act,\n )\n # Temperature parameter used to weight the entropy bonus\n self.log_alpha = nn.Parameter(\n torch.as_tensor(self.cfg.alpha_initial, dtype=torch.float32).log()\n )\n\n # Make copy of baseline for Q-targets.\n self.vf_target = copy.deepcopy(self.vf)\n\n # And send everything to the right device\n self.to(self.device)\n\n # Setup optimizers for all 
networks (and log_alpha)\n self.policy_optimizer = utils.get_optimizer(\n name=self.cfg.policy_optimizer,\n params=self.policy.parameters(),\n lr=self.cfg.policy_lr,\n )\n self.vf_optimizer = utils.get_optimizer(\n name=self.cfg.vf_optimizer,\n params=self.vf.parameters(),\n lr=self.cfg.vf_lr,\n )\n self.qf_optimizer = utils.get_optimizer(\n name=self.cfg.critic_optimizer,\n params=self.qf.parameters(),\n lr=self.cfg.critic_lr,\n )\n self.alpha_optimizer = utils.get_optimizer(\n name=self.cfg.alpha_optimizer,\n params=[self.log_alpha],\n lr=self.cfg.alpha_lr,\n )\n\n # Setup replay buffer\n self.buffer = buffers.RingBuffer(\n capacity=int(self.cfg.buffer_capacity),\n keys=['ob', 'ac', 'rew', 'next_ob', 'done'],\n dims=[ob_dim, ac_dim, None, ob_dim, None]\n )\n\n # Setup samplers (used for data generating / evaluating rollouts)\n self.train_sampler = samplers.Sampler(\n env=self.train_env,\n policy=self.policy,\n max_steps=self.cfg.max_path_length_train\n )\n self.eval_sampler = samplers.Sampler(\n env=self.eval_env,\n policy=self.policy,\n max_steps=self.cfg.max_path_length_eval\n )\n\n # Set target entropy, derive from size of action space if non-obvious\n if self.cfg.target_entropy is None:\n self.target_entropy = -ac_dim\n self.logger.info(\n 'Using dynamic target entropy: %s', self.target_entropy\n )\n else:\n self.target_entropy = self.cfg.target_entropy\n self.logger.info(\n 'Using static target entropy: %s', self.target_entropy\n )", "title": "" }, { "docid": "90e68ce105c836c78ad5ec34835aebfd", "score": "0.5959569", "text": "def __init__(self,N,Nc):\n self.N = N\n self.Nc = Nc\n self.gamma = 1 # penalty tradeoff\n self.beta = 10 # inverse temperature for soft spins\n self.zeta = 1\n self.hasBeenSetup = False\n self.rng = np.random.RandomState()", "title": "" }, { "docid": "aa5af38bf932379fba3a76d722b05ea4", "score": "0.5958274", "text": "def __init__(self,\n p0 = -1.0*0.8499/0.6997,\n p1 = 1.0/0.6997\n ):\n self.p0 = p0\n self.p1 = p1\n self.initialize()", "title": "" }, { "docid": "3717cb50a8730e2e23e8a8f8f34a517a", "score": "0.5957107", "text": "def __init__(self):\n super(ParametersHyperparameterOptimization, self).__init__()\n self.direction = 'minimize'\n self.n_trials = 100\n self.hlist = []\n self.hyper_opt_method = \"optuna\"\n self.checkpoints_each_trial = 0\n self.checkpoint_name = \"checkpoint_mala_ho\"\n self.study_name = None\n self.rdb_storage = None\n self.rdb_storage_heartbeat = None", "title": "" }, { "docid": "ae7ee02a114852553fcddba92d2937e5", "score": "0.5951721", "text": "def __init__(self):\r\n self.params, self.grads = [], [] \r\n self.softmax = Softmax()\r\n self.cache = None", "title": "" }, { "docid": "d270c64e1df2ee2ad86325a5b22653b6", "score": "0.5949623", "text": "def initializeUniformly(self, gameState):\n self.particles = []\n \"*** YOUR CODE HERE ***\"\n #TODO:\n\n #raiseNotDefined()", "title": "" }, { "docid": "d270c64e1df2ee2ad86325a5b22653b6", "score": "0.5949623", "text": "def initializeUniformly(self, gameState):\n self.particles = []\n \"*** YOUR CODE HERE ***\"\n #TODO:\n\n #raiseNotDefined()", "title": "" }, { "docid": "559941c668b8d4b64225b335b0db8cc9", "score": "0.59392744", "text": "def __init__(self, state_size, action_size, device, seed, LR=5e-4, gamma=0.95, entropy_weight=0.02, actor_network_max_grad_norm = 5, critic_network_max_grad_norm = 5, nstepqlearning_size=5, gae_lambda = 1.0):\n self.state_size = state_size\n self.action_size = action_size\n self.entropy_weight = entropy_weight\n random.seed(seed)\n self.gamma=gamma\n 
self.actor_network_max_grad_norm = actor_network_max_grad_norm\n self.critic_network_max_grad_norm = critic_network_max_grad_norm\n self.nstepqlearning_size = nstepqlearning_size\n self.gae_lambda = gae_lambda\n self.device=device\n\n print(\"----Dumping agent hyperparameters---- \")\n print(\"LR: \", LR)\n print(\"gamma: \", gamma)\n print(\"actor_network_max_grad_norm: \", self.actor_network_max_grad_norm)\n print(\"critic_network_max_grad_norm: \", self.critic_network_max_grad_norm)\n print(\"nstepqlearning_size: \", self.nstepqlearning_size)\n print(\"gae_lambda: \", self.gae_lambda)\n print(\"entropy_weight: \", self.entropy_weight)\n print(\"------------------------------------- \")\n\n self.actor_net = ActorNet(state_size, action_size, device, seed).to(self.device) # Theta\n self.critic_net = CriticNet(state_size, action_size, seed).to(self.device) # Thetav\n self.actor_optimizer = optim.RMSprop(self.actor_net.parameters(), lr=LR)\n self.critic_optimizer = optim.RMSprop(self.critic_net.parameters(), lr=LR)", "title": "" }, { "docid": "f180a462438556a7776f589f3a88aad7", "score": "0.5938198", "text": "def re_init(self):\n self.__init__(actions=self.actions, gamma=self.gamma, r_max=self.r_max, v_max=self.v_max,\n deduce_v_max=self.deduce_v_max, n_known=self.n_known, deduce_n_known=self.deduce_n_known,\n epsilon_q=self.epsilon_q, epsilon_m=self.epsilon_m, delta=self.delta, n_states=self.n_states,\n max_memory_size=self.max_memory_size, prior=self.prior,\n estimate_distances_online=self.estimate_distances_online,\n min_sampling_probability=self.min_sampling_probability, name=self.name)", "title": "" }, { "docid": "a2e08f405c17bb04142627868d6489f8", "score": "0.59379417", "text": "def _init_env_variables(self):\n # For Info Purposes\n self.cumulated_reward = 0.0\n # Set done to false, because its calculated asyncronously\n self._episode_done = False", "title": "" }, { "docid": "19fcb8fdd61dd4996404fa5df4a3c9ca", "score": "0.59250414", "text": "def _initialise_net(self):", "title": "" }, { "docid": "5b273c4fd1e3b528e6e4e57228b49e15", "score": "0.59183115", "text": "def init():\n pass", "title": "" }, { "docid": "36734fb7d2a34ad66f503814fcce0d32", "score": "0.59174293", "text": "def test_init(self):\n for _ in range(parameters.number_of_loops):\n _ = FJC(\n parameters.number_of_links_minimum,\n parameters.link_length_reference,\n parameters.hinge_mass_reference\n )", "title": "" }, { "docid": "e9ca52ee5a16b62f2b73166b36b35555", "score": "0.5912559", "text": "def _init():\n init()\n initialized = True", "title": "" }, { "docid": "744be4a15e4ccd558735f1c0d91ccd4f", "score": "0.59106493", "text": "def init():\n pass", "title": "" }, { "docid": "a84929c4f59b5bbf4b32585463e4c5ff", "score": "0.5909908", "text": "def test_initialization(self):\n N1 = Normal([1, 0.1])\n N2 = Normal([1, 0.1])\n\n N3 = N1 ** N2\n\n N4 = N1 ** 2\n\n N5 = 2 ** N1", "title": "" }, { "docid": "280d7e67edadce5c5e2ce3f12e2ac126", "score": "0.59099", "text": "def check(self):\n\n\n # why does Etienne have it this way?\n if 'NGPU' not in PAR:\n setattr(PAR, 'NGPU', 4)\n\n super(tiger_sm_gpu, self).check()", "title": "" }, { "docid": "7fdfceb41d891b6dfce098245e494c28", "score": "0.5905441", "text": "def __init__(self, **kwargs):\n super(EnsembleLeagueInstance, self).__init__(**kwargs)\n\n self._ensemble = None\n self._t_env = 0 # Used for continuous total environment step count to ensure correct logging", "title": "" }, { "docid": "1ef8bb3614b9b6c72492a200a4e831a2", "score": "0.5903165", "text": "def initialize(args):\n if 
(args.a > 1 or args.a < 0):\n raise ValueError('Alpha value is not between 0 and 1')\n elif (args.p < 2):\n raise ValueError('Parent pool size must be at least 2')\n elif (args.n < 2):\n raise ValueError('Number of nodes must be at least 2')\n elif (args.e < 1):\n raise ValueError('Number of edges must be at least 1')\n\n return iterate(args) # calls iterate function in iterator.py", "title": "" }, { "docid": "ca42331947f2908de52e5aeb810380d4", "score": "0.5900861", "text": "def _guess_init_params(self):", "title": "" }, { "docid": "bfc8034df6fd0b9d33f49a6cde7390ea", "score": "0.5893471", "text": "def agent_init(self, agent_init_info):\n # Store the parameters provided in agent_init_info.\n self.num_actions = agent_init_info[\"num_actions\"]\n self.num_states = agent_init_info[\"num_states\"]\n self.epsilon = agent_init_info[\"epsilon\"]\n self.step_size = agent_init_info[\"step_size\"]\n\n self.discount = agent_init_info[\"discount\"]\n self.rand_generator = np.random.RandomState(agent_init_info[\"seed\"])\n torch.manual_seed(agent_init_info[\"seed\"])\n self.T = agent_init_info.get(\"T\",10)\n\n # 3) dutch trace\n self.exp_decay = 1 - 1 / self.T\n self.alpha = agent_init_info[\"alpha\"]\n\n self.rnn = SimpleRNN(self.num_states+1, self.num_states+1,self.num_actions).to(device)\n self.target_rnn = SimpleRNN(self.num_states+1, self.num_states+1,self.num_actions).to(device)\n self.update_target()\n self.optimizer = torch.optim.Adam(self.rnn.parameters(), lr=self.step_size)\n self.buffer = ReplayMemory(1000)\n self.tau = .5\n self.flag = False\n self.train_steps = 0", "title": "" }, { "docid": "5871bdaad270b11961edad0bae660fba", "score": "0.5889342", "text": "def _specific_init(self):\n\n ## initialize inducing points locations and \n #if not self._learn_inducing_locations:\n start = np.min(self._bin_times)\n end = np.max(self._bin_times)\n z_locations = np.linspace(start, end, self._numb_inducing)\n\n #for trial_i in range(0,self._numb_trials):\n # for lat_i in range(0,self._numb_latents):\n self._z_induc_loc[:] = z_locations\n\n # print(self._z_induc_loc)\n\n # self._z_induc_loc[:] = self._bin_times[0:self._numb_bins-1]\n\n if self._numb_inducing > self._numb_bins:\n self._z_induc_loc[0:self._numb_bins] = self._bin_times\n for i in range(0, self._numb_inducing - self._numb_bins):\n ind = self._numb_bins + i\n self._z_induc_loc[ind] = self._bin_times[-1] + i +2\n\n self._pre_compute_kernel_matrices()\n\n self._perturbate_S(120)\n\n #self._C_matrix = self._meta_data[\"C_gamma\"]", "title": "" }, { "docid": "92175f4d8bb607d0ec80d55d7c9f8ea7", "score": "0.5885681", "text": "def __init__(self):\n self.generation = 0\n self.best = 0\n self.brain = NeuralNetwork(shape)\n self.load()\n self.testing()\n # self.runPop()", "title": "" }, { "docid": "03c5e269c8979955ea07cacd711edd93", "score": "0.588358", "text": "def __init__(self, iterations=-1, random=False, seed=42):\r\n self.parameters = {}\r\n self.iterations = iterations\r\n self.random = random\r\n self.seed = seed", "title": "" }, { "docid": "538a8e5ece8ed6f44d1bde9b724fa423", "score": "0.58823955", "text": "def worker_init_fn(worker_id):\r\n base_seed = torch.IntTensor(1).random_().item()\r\n #print(worker_id, base_seed)\r\n np.random.seed(base_seed + worker_id)", "title": "" }, { "docid": "29c3e516645ceea2bd53fa47edd8daed", "score": "0.5881845", "text": "def __init__(self):\n self._w = None\n self._b = None\n self._optimum = None\n self._loss_tol = 1e-6\n self._batch_size = 64\n self._iter_bound = 2000\n # Important value\n self._learn_rate 
= 3e-3", "title": "" }, { "docid": "7e5a94ece41addf136294f237566ad6a", "score": "0.58792466", "text": "def pool_init(params_):\n\n from platform import system\n\n if system() == 'Windows':\n params.update(params_)", "title": "" }, { "docid": "34746de39ab626d8af618eff1f29c377", "score": "0.58789736", "text": "def init():\n\n try:\n vm.ensure_is_ready(prompt_init=False, prompt_start=True)\n except YurtException as e:\n logging.error(e.message)", "title": "" }, { "docid": "f8a0efe992c3d6f92c2ce8c0d41e0682", "score": "0.58789384", "text": "def __init__(self):\n self.comment = \"\"\n self.network = ParametersNetwork()\n self.descriptors = ParametersDescriptors()\n self.targets = ParametersTargets()\n self.data = ParametersData()\n self.running = ParametersRunning()\n self.hyperparameters = ParametersHyperparameterOptimization()\n self.debug = ParametersDebug()\n self.manual_seed = None\n\n # Properties\n self.use_horovod = False\n self.use_gpu = False", "title": "" } ]
bbcc36d5579f24bc1c2dc5198e458a9d
Adds the feasibility pumprelated configurations.
[ { "docid": "e2e70289d6e3debbad8212b243eff57a", "score": "0.6846095", "text": "def _add_fp_configs(CONFIG):\n CONFIG.declare(\n 'fp_cutoffdecr',\n ConfigValue(\n default=1e-1,\n domain=PositiveFloat,\n description='Additional relative decrement of cutoff value for the original objective function.',\n ),\n )\n CONFIG.declare(\n 'fp_iteration_limit',\n ConfigValue(\n default=20,\n domain=PositiveInt,\n description='Feasibility pump iteration limit',\n doc='Number of maximum iterations in the feasibility pump methods.',\n ),\n )\n # TODO: integrate this option\n CONFIG.declare(\n 'fp_projcuts',\n ConfigValue(\n default=True,\n description='Whether to add cut derived from regularization of MIP solution onto NLP feasible set.',\n domain=bool,\n ),\n )\n CONFIG.declare(\n 'fp_transfercuts',\n ConfigValue(\n default=True,\n description='Whether to transfer cuts from the Feasibility Pump MIP to main MIP in selected strategy (all except from the round in which the FP MIP became infeasible).',\n domain=bool,\n ),\n )\n CONFIG.declare(\n 'fp_projzerotol',\n ConfigValue(\n default=1e-4,\n domain=PositiveFloat,\n description='Tolerance on when to consider optimal value of regularization problem as zero, which may trigger the solution of a Sub-NLP.',\n ),\n )\n CONFIG.declare(\n 'fp_mipgap',\n ConfigValue(\n default=1e-2,\n domain=PositiveFloat,\n description='Optimality tolerance (relative gap) to use for solving MIP regularization problem.',\n ),\n )\n CONFIG.declare(\n 'fp_discrete_only',\n ConfigValue(\n default=True,\n description='Only calculate the distance among discrete variables in regularization problems.',\n domain=bool,\n ),\n )\n CONFIG.declare(\n 'fp_main_norm',\n ConfigValue(\n default='L1',\n domain=In(['L1', 'L2', 'L_infinity']),\n description='Different forms of objective function MIP regularization problem.',\n ),\n )\n CONFIG.declare(\n 'fp_norm_constraint',\n ConfigValue(\n default=True,\n description='Whether to add the norm constraint to FP-NLP',\n domain=bool,\n ),\n )\n CONFIG.declare(\n 'fp_norm_constraint_coef',\n ConfigValue(\n default=1,\n domain=PositiveFloat,\n description='The coefficient in the norm constraint, correspond to the Beta in the paper.',\n ),\n )", "title": "" } ]
[ { "docid": "342b722c53e5f59010effacf09af25a0", "score": "0.55445564", "text": "def pibooth_configure(cfg):", "title": "" }, { "docid": "0ddc05c18d735661f13caa0f82e6d32e", "score": "0.5481886", "text": "def _fp_setup2(self):\n # TODO: right now it's hard to implement this required stage", "title": "" }, { "docid": "c1504725d56b3b308e798131018e8c13", "score": "0.54720384", "text": "def setup_fermi(self):\n eventclass=5 # 2 (Source) or 5 (UltracleanVeto)\n eventtype=0 # 0 (all), 3 (bestpsf) or 5 (top3 quartiles)\n mask_type='top300'\n force_mask_at_bin_number=10\n\n self.f1 = fp.fermi_plugin(maps_dir,fermi_data_dir=fermi_data_dir,work_dir=work_dir,CTB_en_min=0,CTB_en_max=40,nside=self.nside,eventclass=eventclass,eventtype=eventtype,newstyle=1,data_July16=True)\n\n if mask_type != 'False':\n self.f1.make_ps_mask(mask_type = mask_type,force_energy = True,energy_bin = force_mask_at_bin_number)\n self.f1.add_diffuse_newstyle(comp = 'p7', eventclass = eventclass, eventtype = eventtype)\n self.f1.add_bubbles(comp='bubs') #bubbles\n self.f1.add_iso(comp='iso') #iso\n self.f1.add_ps_model(comp='ps_model')\n\n # Exposure correct J_map_arr\n self.J_map_arr *= self.f1.CTB_exposure_maps\n\n # Add J-factor map with mean 1 in each energy bin\n self.f1.add_template_by_hand('J_map',np.array([self.J_map_arr[i]/np.mean(self.J_map_arr[i]) for i in range(40)]))", "title": "" }, { "docid": "9bac370dbd62678eb30d0625fa7bfc3d", "score": "0.54585046", "text": "def setupconfig():\n from Manager import Studio\n studio = Studio.Instance\n cfgeff = studio.configEffect_st\n cfgeff.bloomToggle.isOn = False\n cfgeff.vignetteToggle.isOn = False\n cfgeff.sunShaftsToggle.isOn = False\n cfgeff.fogToggle.isOn = False\n cfgeff.depthOfFieldToggle.isOn = False\n #cfgeff.ssaoToggle.isOn = True\n #cfgeff.selfShadowToggle.isOn = True\n \n # Turn off backgrounds\n studio.uiBGChanger.onOffToggle.isOn = False", "title": "" }, { "docid": "60fa8f9ba13be4c8a831cd19aa02a9ce", "score": "0.5368766", "text": "def _configure(self):\n FaultCohesive._configure(self)\n self.eqsrcs = self.inventory.eqsrcs\n self.output = self.inventory.output\n return", "title": "" }, { "docid": "b8650fb45cf5ef11bc2c94c4d467562f", "score": "0.5242913", "text": "def _setup_applications(self):\n if 'host_nfs_path' in self.config['settings'] and 'guest_nfs_path' in self.config['settings']:\n self.settings['nfs'] = NFSSettings(host_vm_nfs_path=self.config['settings']['host_nfs_path'],\n guest_vm_nfs_path=self.config['settings']['guest_nfs_path'])\n\n self._setup_printer()", "title": "" }, { "docid": "23111234a9f4513f4ba4213500bccb0a", "score": "0.5158769", "text": "def add_settings_early(self):\n\n # config settings\n config = {\n # some generic settings for every site, to point to location of some stuff\n mconst.DEF_SETTINGNAME_pkgdirimps_sitempacks: [pkgdirimp_sitempacks],\n mconst.DEF_SETTINGNAME_controllerroot: pkgdirimp_controllers,\n mconst.DEF_SETTINGNAME_sitefilepath: misc.calc_modulefiledirpath(__file__),\n # should we also load mewlo site installed setuptools plugins\n mconst.DEF_SETTINGNAME_flag_importsetuptoolspacks: True,\n mconst.DEF_SETTINGNAME_replaceshadowpath: '${sitefilepath}/replaceshadow',\n }\n self.settings.merge_settings_key(mconst.DEF_SETTINGSEC_config, config)\n\n # config settings\n config = {\n # Name of site\n mconst.DEF_SETTINGNAME_sitename: 'Mewlo',\n # Specify where this site serves from\n # these siteurls should not end in / so if you are serving a site at root just use relative of '' and absolute of 'http://sitename.com'\n 
mconst.DEF_SETTINGNAME_siteurl_relative: '',\n mconst.DEF_SETTINGNAME_siteurl_absolute: 'http://127.0.0.1:8080',\n #mconst.DEF_SETTINGNAME_siteurl_relative: '/public/publicity',\n #mconst.DEF_SETTINGNAME_siteurl_absolute: 'http://127.0.0.1:8080/public/publicity',\n }\n self.settings.merge_settings_key(mconst.DEF_SETTINGSEC_config, config)\n\n # config settings\n config = {\n # online status information\n mconst.DEF_SETTINGNAME_isenabled: True,\n mconst.DEF_SETTINGNAME_isonline: True,\n mconst.DEF_SETTINGNAME_offline_mode: 'maintenance',\n mconst.DEF_SETTINGNAME_offline_message: 'We are down for leap-year maintenance; we will be back soon.',\n mconst.DEF_SETTINGNAME_offline_allowadmin: False,\n }\n self.settings.merge_settings_key(mconst.DEF_SETTINGSEC_config, config)\n\n\n\n # extension pack config -- we need to explicitly enable plugins\n packconfig = {\n 'mouser.mewlotestplug' : {\n 'isenabled': False,\n },\n 'mouser.testpack' : {\n 'isenabled': False,\n },\n 'mewlo.siteaddon.account' : {\n 'isenabled': True,\n },\n 'mewlo.siteaddon.group' : {\n 'isenabled': True,\n },\n }\n self.settings.merge_settings_key(mconst.DEF_SETTINGSEC_packs, packconfig)\n\n\n # database config\n databaseconfig = {\n 'settings' : {\n 'sqlalchemy_loglevel' : logging.NOTSET,\n #'sqlalchemy_loglevel' : logging.INFO,\n },\n 'default' : {\n 'url' : 'sqlite:///${dbfilepath}/mewlo_testsite1.sqlite',\n #'tablename_prefix': 'mewlo_',\n 'flag_echologging' : False,\n },\n 'mysql_unused' : {\n # Sample configuration for mysql\n 'url' : 'mysql://mewlo_user:mewlo_pass@localhost:3306/mewlo_testsite1',\n 'tablename_prefix': 'mewlo_'\n },\n }\n self.settings.merge_settings_key(mconst.DEF_SETTINGSEC_database, databaseconfig)\n self.settings.listappend_settings_key(mconst.DEF_SETTINGSEC_make_dirs, '${dbfilepath}')\n\n # email config settings\n mailconfig = {\n # online status information\n 'smtp_host': self.get_configval('mail_smtp_host'),\n 'smtp_login': self.get_configval('mail_smtp_login'),\n 'smtp_port': self.get_configval('mail_smtp_port'),\n 'smtp_mode': self.get_configval('mail_smtp_mode'),\n 'smtp_password': self.get_configval('mail_smtp_password'),\n 'mail_from' : self.get_configval('mail_from'),\n }\n self.settings.merge_settings_key(mconst.DEF_SETTINGSEC_mail, mailconfig)\n\n\n # account siteaddon settings\n siteaddonconfig = {\n # online status information\n 'registration_mode': 'immediate',\n 'flag_require_email_verified_before_login': False,\n }\n self.settings.merge_settings_key('siteaddon_account', siteaddonconfig)\n\n\n\n # ATTN: UNFINISHED\n # asset mounts config\n if (False):\n assetmountconfig = {\n 'default' : {\n # an internal assetmount just needs a url route\n 'type': 'internal',\n 'routeid': 'static_files',\n },\n 'external' : {\n 'type': 'external',\n 'filepath': '${mewlofilepath}/public_assets',\n 'urlpath': 'http://127.0.0.1/mewlo/public_assets',\n },\n }\n self.settings.merge_settings_key(mconst.DEF_SETTINGSEC_asset_mounts, assetmountconfig)\n\n\n\n\n\n #print \"TESTING CONFIG1:\"\n #self.run_configfunc('sayhello',1,2,3)\n #print \"TESTING CONFIG2:\"\n #self.run_allconfigfuncs('sayhello',1,2,3)", "title": "" }, { "docid": "6a8b82bcaa8911b2384081c9c951d7f5", "score": "0.5151565", "text": "def configFeAsic(self,gain,shape,base,slk=None,slkh=None,monitorBandgap=None,monitorTemp=None):\n pass", "title": "" }, { "docid": "1740ce6dd7eb1ceac7a65a80b0f12724", "score": "0.51115227", "text": "def add_config(self):\n\n config = {\n 'count_up': CountUp,\n 'count_down': CountDown,\n 'count_up_or_down': 
CountUpOrDown,\n 'high_speed_counter_definition': HighSpeedCounterDefinition,\n 'high_speed_counter': HighSpeedCounter,\n 'pulse_output': PulseOutput\n }\n\n return config", "title": "" }, { "docid": "eca23a7194da963d6fc5d9b1d7cda56e", "score": "0.5067314", "text": "def load_conf_permban_with_frozensand(self):\n self._permban_with_frozensand = False\n if self.config.has_option('server', 'permban_with_frozensand'):\n try:\n self._permban_with_frozensand = self.config.getboolean('server', 'permban_with_frozensand')\n except ValueError, err:\n self.warning(err)\n\n self.info(\"Send permbans to Frozen Sand : %s\" % ('yes' if self._permban_with_frozensand else 'no'))", "title": "" }, { "docid": "15e894c64a7ac428ef444cf3c584661b", "score": "0.50596815", "text": "def __manage_pump(self):\r\n with self.config_lock:\r\n if self.config['pump_auto_control'] == False:\r\n # Controller doesn't need to do anything about the pump as it is in manual control mode\r\n pass\r\n else:\r\n # Pump is in automatic mode\r\n if self.config['\"pump_auto_control_mode'] == 'normally_off':\r\n # For current functionality there is nothing that can force the pump to turn on (e.g.\r\n # fire extinguishing).\r\n pass\r\n else:\r\n # Pump is normally on.\r\n pump_parameters = self.well_tank_dev.parameters\r\n if self.config['pump_auto_control_turn_off_when_well_empty']:\r\n if pump_parameters ['well_water_presence'] == 'not_present':\r\n # No water in the well\r\n self.well_tank_dev.send_command('pump', 'turn_off')\r\n else:\r\n # Water in the well is present\r\n if self.config['pump_auto_control_turn_off_when_tank_full']:\r\n if pump_parameters['tank'] == 'full':\r\n self.well_tank_dev.send_command('pump', 'turn_off')\r\n else:\r\n self.well_tank_dev.send_command('pump', 'turn_on')\r\n else:\r\n # Do not turn off the pump if the well is empty\r\n if self.config['pump_auto_control_turn_off_when_tank_full']:\r\n if pump_parameters ['tank'] == 'full':\r\n self.well_tank_dev.send_command('pump', 'turn_off')\r\n else:\r\n self.well_tank_dev.send_command('pump', 'turn_on')\r\n else:\r\n # Do not trun off the pump when the tank is full\r\n self.well_tank_dev.send_command('pump', 'turn_on')", "title": "" }, { "docid": "3685bf2c34d7dec9492dab9aec8e4103", "score": "0.50424755", "text": "def add_config(self, conf_map):\n if self.active.isChecked():\n self.add_feat_conf(conf_map)", "title": "" }, { "docid": "d3cb69dd607021a256582310139cdd31", "score": "0.50403947", "text": "def setup_platform(hass, config, add_entities, discovery_info=None):\n import jsonpath\n jsonpath = jsonpath.jsonpath\n global HEAT_PUMPS\n hub.update_overview()\n if int(hub.config.get(CONF_CLIMATE, 1)):\n HEAT_PUMPS = hub.get('$.heatPumps')\n if HEAT_PUMPS:\n for heat_pump in HEAT_PUMPS[0]:\n device_label = jsonpath(heat_pump, '$.deviceLabel')[0]\n add_entities([\n VerisureHeatPump(device_label)\n ])", "title": "" }, { "docid": "85a66a0e5d5e6750c1d67bae3f90aa40", "score": "0.5038301", "text": "def read_configs(conf: Property) -> None:\n for fizz_conf in conf.find_all('Fizzlers', 'Fizzler'):\n with logger.context(fizz_conf['id', '??']):\n fizz = FizzlerType.parse(fizz_conf)\n\n if fizz.id in FIZZ_TYPES:\n raise user_errors.UserError(user_errors.TOK_DUPLICATE_ID.format(kind='Fizzler', id=fizz.id))\n\n FIZZ_TYPES[fizz.id] = fizz\n\n LOGGER.info('Loaded {} fizzlers.', len(FIZZ_TYPES))\n\n if options.get(str, 'game_id') != utils.STEAM_IDS['APTAG']:\n return\n # In Aperture Tag, we don't have portals. 
For fizzler types which block\n # portals (trigger_portal_cleanser), additionally fizzle paint.\n for fizz in FIZZ_TYPES.values():\n if not fizz.blocks_portals:\n continue\n for brush in fizz.brushes:\n if brush.keys['classname'].casefold() == 'trigger_portal_cleanser':\n brush_name = brush.name\n # Retrieve what key is used for start-disabled.\n brush_start_disabled = None\n for key_map in [brush.keys, brush.local_keys]:\n if brush_start_disabled is None:\n for key, value in key_map.items():\n if key.casefold() == 'startdisabled':\n brush_start_disabled = value\n break\n break # Jump past else.\n else:\n # No fizzlers in this item.\n continue\n\n # Add a paint fizzler brush to these fizzlers.\n fizz.brushes.append(FizzlerBrush(\n brush_name,\n textures={\n TexGroup.TRIGGER: consts.Tools.TRIGGER,\n },\n keys={\n 'classname': 'trigger_paint_cleanser',\n 'startdisabled': brush_start_disabled or '0',\n 'spawnflags': '9',\n },\n local_keys={},\n outputs=[],\n singular=True,\n ))", "title": "" }, { "docid": "780393bf9bb344a80b999051fa397c92", "score": "0.500142", "text": "def config():\n if app.args.ui_mode == \"jinja\":\n ui_config = {\n \"p1\": {\n \"options\": {\n \"lineNumbers\": True,\n \"theme\":\"material\",\n \"lineWrapping\" : True,\n \"mode\": \"yaml\",\n \"indentUnit\": 2,\n \"tabSize\": 2\n },\n \"title\": \"DATA\",\n \"inventory\": bool(app.args.inventory_source),\n \"b1\": {\n \"icon\": None,\n \"show\": False,\n \"text\": None,\n \"url\": None\n }\n },\n \"p2\": {\n \"options\": {\n \"lineNumbers\": True,\n \"theme\": \"material\",\n \"lineWrapping\" : True,\n \"mode\": \"jinja2\"\n },\n \"title\": \"RENDER\",\n \"b1\": {\n \"icon\": \"create\",\n \"show\": True,\n \"text\": \"Render\",\n \"url\": \"/render\"\n }\n },\n \"p3\": {\n \"options\": {\n \"lineNumbers\": True,\n \"theme\": \"material\",\n \"lineWrapping\" : True,\n \"mode\": 'text'\n },\n \"title\": \"RESULT\",\n \"b1\": {\n \"icon\": \"link\",\n \"show\": bool(app.args.url),\n \"text\": \"link\"\n }\n }\n }\n elif app.args.ui_mode == \"schema\":\n ui_config = {\n \"p1\": {\n \"options\": {\n \"lineNumbers\": True,\n \"theme\":\"material\",\n \"lineWrapping\" : True,\n \"mode\": \"yaml\",\n \"indentUnit\": 2,\n \"tabSize\": 2\n },\n \"title\": \"DATA\",\n \"inventory\": bool(app.args.inventory_source),\n \"b1\": {\n \"icon\": \"create\",\n \"show\": True,\n \"text\": \"schema\",\n \"url\": \"/schema\"\n }\n },\n \"p2\": {\n \"options\": {\n \"lineNumbers\": True,\n \"theme\": \"material\",\n \"lineWrapping\" : True,\n \"mode\": \"yaml\"\n },\n \"title\": \"SCHEMA\",\n \"b1\": {\n \"icon\": \"check\",\n \"show\": True,\n \"text\": \"Validate\",\n \"url\": \"/validate\"\n }\n },\n \"p3\": {\n \"options\": {\n \"lineNumbers\": True,\n \"theme\": \"material\",\n \"lineWrapping\" : True,\n \"mode\": \"yaml\"\n },\n \"title\": \"VALIDATION SUCCESS/ERRORS\",\n \"b1\": {\n \"icon\": \"link\",\n \"show\": bool(app.args.url),\n \"text\": \"link\"\n }\n }\n }\n return jsonify(ui_config)", "title": "" }, { "docid": "8df2b702ebcf53c82f0f2b518d9efa8c", "score": "0.49975795", "text": "def bootstrap_config(self):\n self.logger.info(\"applying bootstrap configuration\")\n self.wait_write(\"\\r\", None)\n # Wait for the prompt\n time.sleep(1)\n self.wait_write(\"system-view\", \"<HPE>\")\n self.wait_write(\"ssh server enable\", \"[HPE]\")\n self.wait_write(\"user-interface class vty\", \"[HPE]\")\n self.wait_write(\"authentication-mode scheme\", \"[HPE-line-class-vty]\")\n self.wait_write(\"protocol inbound ssh\", 
\"[HPE-line-class-vty]\")\n self.wait_write(\"quit\", \"[HPE-line-class-vty]\")\n self.wait_write(\"local-user %s\" % (self.username), \"[HPE]\")\n self.wait_write(\"password simple %s\" % (self.password), \"[HPE-luser-manage-%s]\" % (self.username))\n self.wait_write(\"service-type ssh\", \"[HPE-luser-manage-%s]\" % (self.username))\n self.wait_write(\"authorization-attribute user-role network-admin\", \"[HPE-luser-manage-%s]\" % (self.username))\n self.wait_write(\"quit\", \"[HPE-luser-manage-%s]\" % (self.username))\n self.wait_write(\"interface GigabitEthernet%s/0\" % (self.num_nics + 1), \"[HPE]\")\n self.wait_write(\"ip address 10.0.0.15 255.255.255.0\", \"[HPE-GigabitEthernet%s/0]\" % (self.num_nics + 1))\n self.wait_write(\"quit\", \"[HPE-GigabitEthernet%s/0]\" % (self.num_nics + 1))\n self.wait_write(\"quit\", \"[HPE]\")\n self.wait_write(\"quit\", \"<HPE>\")\n self.logger.info(\"completed bootstrap configuration\")", "title": "" }, { "docid": "394a2fa9a93faa85637c83052e68f4d3", "score": "0.49927118", "text": "def add_configuration(self, params):\n config_index = len(self.configurations)+1\n # '-g' is a mandatory argument of CaVEMan (Location of tsv ignore regions file)\n # Other programs do not require (or even support) this type of file\n # Therefore, this benchmark framework makes this file an optional input\n # (an empty file is given if not specified)\n if 'setup:-g' not in params.keys():\n logging.info(\"CaVEMan: config_{0} adding setup:-g=/dev/null in params\".format(config_index))\n params['setup:-g'] = '/dev/null'\n self.configurations.append(SinglePairedConfiguration(params, config_index))", "title": "" }, { "docid": "96f40eaec436a0790d1ef2241e9a3199", "score": "0.49780756", "text": "def provide_felix_config(self):\n # First read the config values, so as to avoid unnecessary\n # writes.\n prefix = None\n ready = None\n iface_pfx_key = key_for_config('InterfacePrefix')\n try:\n prefix = self.client.read(iface_pfx_key).value\n ready = self.client.read(READY_KEY).value\n except etcd.EtcdKeyNotFound:\n LOG.info('%s values are missing', CONFIG_DIR)\n\n # Now write the values that need writing.\n if prefix != 'tap':\n LOG.info('%s -> tap', iface_pfx_key)\n self.client.write(iface_pfx_key, 'tap')\n if ready != 'true':\n # TODO Set this flag only once we're really ready!\n LOG.info('%s -> true', READY_KEY)\n self.client.write(READY_KEY, 'true')", "title": "" }, { "docid": "2a24da6612d261e53366894fa18e046a", "score": "0.49709854", "text": "def _get_MindtPy_FP_config():\n CONFIG = ConfigBlock('MindtPy-GOA')\n CONFIG.declare(\n 'init_strategy',\n ConfigValue(\n default='FP',\n domain=In(['FP']),\n description='Initialization strategy',\n doc='Initialization strategy used by any method. 
Currently the '\n 'continuous relaxation of the MINLP (rNLP), solve a maximal '\n 'covering problem (max_binary), and fix the initial value for '\n 'the integer variables (initial_binary).',\n ),\n )\n\n _add_common_configs(CONFIG)\n _add_fp_configs(CONFIG)\n _add_oa_cuts_configs(CONFIG)\n _add_subsolver_configs(CONFIG)\n _add_tolerance_configs(CONFIG)\n _add_bound_configs(CONFIG)\n return CONFIG", "title": "" }, { "docid": "2dee24ed0fbf277ababdc1a0faac94cc", "score": "0.4966645", "text": "def add_options(self):\n self.add_option_save()\n self.add_option_enable()", "title": "" }, { "docid": "a9cab2ac734cc3d1a585c298ee3bd51d", "score": "0.49497613", "text": "def pibooth_startup(cfg, app):", "title": "" }, { "docid": "efda9310da6cd20760295084e8b2f580", "score": "0.49387687", "text": "def setup_confighelper(self):\n self.cfghelper = cfgmodule.MCfgModule()\n self.cfghelper.load_configfiles(self.configname, self.get_pkgdirimp_config())", "title": "" }, { "docid": "9c34f0e0d64b8770d803da25ea906c91", "score": "0.49345687", "text": "def setup(args):\n # chaparral,denseForest,lake,canyon,burning,burnt = neighbours\n config_path = args[0]\n config = utils.load(config_path)\n # -- THE CA MUST BE RELOADED IN THE GUI IF ANY OF THE BELOW ARE CHANGED --\n config.title = \"Forest Fire\"\n config.dimensions = 2\n config.states = \\\n (\n CHAPARRAL,\n DENSE_FORREST,\n LAKE,\n CANYON,\n BURNING,\n BURNT,\n START_BURN,\n END_BURN\n )\n\n # ------------ -------------------------------------------------------------\n\n config.state_colors = \\\n [\n (0.6,0.6,0), #chaparral\n (0,0.4,0), #dense forrest\n (0,0.5,1), #lake\n (0.5,0.5,0.5), #canyon\n (1,0,0), #burning\n (0.25,0.25,0.25), #burnt\n (1,0.7,0), #starting to burn\n (0.8,0,0.2) #ending burn\n ]\n\n config.grid_dims = (grid_size, grid_size)\n config.num_generations = 1000\n config.set_initial_grid(initial_grid)\n config.wrap = False\n\n # --------------------------------------------------------------------\n\n # the GUI calls this to pass the user defined config\n # into the main system with an extra argument\n # do not change\n if len(args) == 2:\n config.save()\n sys.exit()\n return config", "title": "" }, { "docid": "900757cd4e83c45dff8e381381954c86", "score": "0.49191934", "text": "def relay_buzzer_config(self):\r\n\t\tbus.write_byte_data(PCA9536_WDBZ_DEFAULT_ADDRESS, PCA9536_WDBZ_REG_CONFIG, PCA9536_WDBZ_CONFIG_PINX)\r\n\t\t\r\n\t\t\"\"\"Select the Output Port Register Configuration data from the given provided value\"\"\"\r\n\t\tif self.pin == 0 :\r\n\t\t\tbus.write_byte_data(PCA9536_WDBZ_DEFAULT_ADDRESS, PCA9536_WDBZ_REG_OUTPUT, PCA9536_WDBZ_OUTPUT_PIN0)\r\n\t\telif self.pin == 1 :\r\n\t\t\tbus.write_byte_data(PCA9536_WDBZ_DEFAULT_ADDRESS, PCA9536_WDBZ_REG_OUTPUT, PCA9536_WDBZ_OUTPUT_PIN1)\r\n\t\telif self.pin == 2 :\r\n\t\t\tbus.write_byte_data(PCA9536_WDBZ_DEFAULT_ADDRESS, PCA9536_WDBZ_REG_OUTPUT, PCA9536_WDBZ_OUTPUT_PIN2)", "title": "" }, { "docid": "49bc17036f8c11192d9c7173cd6c413a", "score": "0.49151355", "text": "def add_settings_early(self):\n pass", "title": "" }, { "docid": "5ed7282ca74d2b25352b3dc970b529b3", "score": "0.489255", "text": "def init(self) -> None:\n fpcr = self.ap.read32(self.address + FPB.FP_CTRL)\n self.fpb_rev = 1 + ((fpcr & FPB.FP_CTRL_REV_MASK) >> FPB.FP_CTRL_REV_SHIFT)\n if self.fpb_rev not in (1, 2):\n LOG.warning(\"Unknown FPB version %d\", self.fpb_rev)\n self.nb_code = ((fpcr >> 8) & 0x70) | ((fpcr >> 4) & 0xF)\n self.nb_lit = (fpcr >> 7) & 0xf\n LOG.info(\"%d hardware breakpoints, %d literal comparators\", 
self.nb_code, self.nb_lit)\n for i in range(self.nb_code):\n self.hw_breakpoints.append(HardwareBreakpoint(self.address + FPB.FP_COMP0 + 4*i, self))\n\n # disable FPB (will be enabled on first bp set)\n self.disable()\n for bp in self.hw_breakpoints:\n self.ap.write_memory(bp.comp_register_addr, 0)", "title": "" }, { "docid": "497d636e3decbea301698dfeb01255e0", "score": "0.48816466", "text": "def setups():\n setups = []\n\n # If you run this in detailed mode, you need to set --t8 to 1e8\n kotani2017_F2 = dict()\n kotani2017_F2['name'] = 'kotani2017_F2'\n kotani2017_F2['piltemplate'] = kotani2017_F2_pil\n kotani2017_F2['pilparams'] = [None]\n kotani2017_F2['pepperargs'] = {'condensed': True, 'conc': 'nM', 'release_cutoff': 10}\n kotani2017_F2['simulation'] = [\n ('pilsimulator', '--nxy', '--atol', '1e-13', '--rtol', '1e-13', '--mxstep', '10000', '--t8', '36000', '--p0', 'S1=10', 'S2=10', 'R=20', 'C1=1'),\n ('pilsimulator', '--nxy', '--atol', '1e-13', '--rtol', '1e-13', '--mxstep', '10000', '--t8', '36000', '--p0', 'S1=10', 'S2=10', 'R=20', 'C1=0.5'),\n ('pilsimulator', '--nxy', '--atol', '1e-13', '--rtol', '1e-13', '--mxstep', '10000', '--t8', '36000', '--p0', 'S1=10', 'S2=10', 'R=20', 'C1=0.05')]\n kotani2017_F2['reporter'] = 'D'\n kotani2017_F2['exp_results'] = [(7733, 7.42), (11333, 6.18), (25533, 1.40)]\n setups.append(kotani2017_F2)\n\n\n\n # If you run this in detailed mode, you need to set --t8 to 1e8\n kotani2017_F3 = dict()\n kotani2017_F3['name'] = 'kotani2017_F3'\n kotani2017_F3['piltemplate'] = kotani2017_F3_pil\n kotani2017_F3['pilparams'] = [None]\n kotani2017_F3['pepperargs'] = {'condensed': True, 'conc': 'nM', 'release_cutoff': 10}\n kotani2017_F3['simulation'] = [\n ('pilsimulator', '--nxy', '--atol', '1e-10', '--rtol', '1e-10', '--mxstep', '10000', '--t8', '360000', '--p0', 'S1=10', 'S2=10', 'S3=10', 'S4=10', 'R=20', 'C1=0.1'),\n ('pilsimulator', '--nxy', '--atol', '1e-10', '--rtol', '1e-10', '--mxstep', '10000', '--t8', '360000', '--p0', 'S1=10', 'S2=10', 'S3=10', 'S4=10', 'R=20', 'C1=0.01'),\n ('pilsimulator', '--nxy', '--atol', '1e-10', '--rtol', '1e-10', '--mxstep', '10000', '--t8', '360000', '--p0', 'S1=10', 'S2=10', 'S3=10', 'S4=10', 'R=20', 'C1=0.001')]\n kotani2017_F3['reporter'] = 'D'\n kotani2017_F3['exp_results'] = [(21220, 7.72), (64203, 3.12), (86996, 0.69)]\n setups.append(kotani2017_F3)\n\n # If you run this in detailed mode, you need to set --t8 to 1e8\n kotani2017_F4 = dict()\n kotani2017_F4['name'] = 'kotani2017_F4'\n kotani2017_F4['piltemplate'] = kotani2017_F4_pil\n kotani2017_F4['pilparams'] = [None]\n kotani2017_F4['pepperargs'] = {'condensed': True, 'conc': 'nM', 'release_cutoff': 10}\n kotani2017_F4['simulation'] = [\n ('pilsimulator', '--nxy', '--atol', '1e-10', '--rtol', '1e-10', '--mxstep', '10000', '--t8', '360000', '--p0', 'S5au=10', 'S6au=10', 'R=20', 'C1x=0.1'),\n ('pilsimulator', '--nxy', '--atol', '1e-10', '--rtol', '1e-10', '--mxstep', '10000', '--t8', '360000', '--p0', 'S5au=10', 'S6au=10', 'R=20', 'C1x=0.01'),\n ('pilsimulator', '--nxy', '--atol', '1e-10', '--rtol', '1e-10', '--mxstep', '10000', '--t8', '360000', '--p0', 'S5au=10', 'S6au=10', 'R=20', 'C1x=0.001'),\n ('pilsimulator', '--nxy', '--atol', '1e-10', '--rtol', '1e-10', '--mxstep', '10000', '--t8', '360000', '--p0', 'S5au=10', 'S6au=10', 'R=20', 'C1x=0')]\n kotani2017_F4['reporter'] = 'D'\n kotani2017_F4['exp_results'] = [(6815, 6.06), (9004, 4.78), (10278, 4.03), (10795, 3.73)]\n setups.append(kotani2017_F4)\n\n return setups", "title": "" }, { "docid": 
"58604dc4a8f7ec896af4afd4ccbb2e5f", "score": "0.48768887", "text": "def base_install():\n # scwrl\n scwrl = {}\n print('{BOLD}{HEADER}Generating configuration files for ISAMBARD.{END_C}\\n'\n 'All required input can use tab completion for paths.\\n'\n '{BOLD}Setting up SCWRL 4.0 (Recommended){END_C}'.format(**text_colours))\n scwrl_path = get_user_path('Please provide a path to your SCWRL executable', required=False)\n scwrl['path'] = str(scwrl_path)\n pack_mode = get_user_option(\n 'Please choose your packing mode (flexible is significantly slower but is more accurate).',\n ['flexible', 'rigid'])\n if pack_mode == 'rigid':\n scwrl['rigid_rotamer_model'] = True\n else:\n scwrl['rigid_rotamer_model'] = False\n settings['scwrl'] = scwrl\n\n # dssp\n print('{BOLD}Setting up DSSP (Recommended){END_C}'.format(**text_colours))\n dssp = {}\n dssp_path = get_user_path('Please provide a path to your DSSP executable.', required=False)\n dssp['path'] = str(dssp_path)\n settings['dssp'] = dssp\n\n # buff\n print('{BOLD}Setting up BUFF (Required){END_C}'.format(**text_colours))\n buff = {}\n ffs = []\n ff_dir = isambard_path / 'buff' / 'force_fields'\n for ff_file in os.listdir(str(ff_dir)):\n ff = pathlib.Path(ff_file)\n ffs.append(ff.stem)\n force_field_choice = get_user_option(\n 'Please choose the default BUFF force field, this can be modified during runtime.',\n ffs)\n buff['default_force_field'] = force_field_choice\n settings['buff'] = buff\n return", "title": "" }, { "docid": "eb0b1f03f523df6dc0986bc7d631af5e", "score": "0.48754638", "text": "def configure(self):", "title": "" }, { "docid": "eb0b1f03f523df6dc0986bc7d631af5e", "score": "0.48754638", "text": "def configure(self):", "title": "" }, { "docid": "eb0b1f03f523df6dc0986bc7d631af5e", "score": "0.48754638", "text": "def configure(self):", "title": "" }, { "docid": "eb0b1f03f523df6dc0986bc7d631af5e", "score": "0.48754638", "text": "def configure(self):", "title": "" }, { "docid": "dfc5cb22e0e77db0bdb2f06eebc620fb", "score": "0.48677015", "text": "def update_config(self):\n if self.integration is None:\n return\n self.enabled = self.integration.has_option(self.get_config_name())\n self.pedantic = self.integration.configuration.get_bool(\n 'filter.mrproper')", "title": "" }, { "docid": "2f6a1ef219e49a6c1dc323b59fdb9ca1", "score": "0.48639885", "text": "def setup_peeling(band):\n log = logging.getLogger(\"Peeler\")\n\n if len(band.cal_names) == 0:\n log.info('No calibrators found for {0}. 
No peeling done.'.format(band.file))\n band.do_peeling = False\n return\n\n # Split calibrator list into flux bins of nsrc_per_bin or fewer sources\n nbins = int(np.ceil(len(band.cal_names)/band.nsrc_per_bin))\n sorted_cal_ind = np.argsort(band.cal_apparent_fluxes)[::-1]\n list_of_flux_bins = np.array_split(np.array(band.cal_apparent_fluxes)[sorted_cal_ind], nbins)\n bin_ends = []\n for f, flux_bin in enumerate(list_of_flux_bins):\n if f == 0:\n bin_ends.append(len(flux_bin))\n elif f < nbins-1:\n bin_ends.append(bin_ends[f-1]+len(flux_bin))\n else:\n continue\n list_of_name_bins = np.array_split(np.array(band.cal_names)[sorted_cal_ind], bin_ends)\n list_of_size_bins = np.array_split(np.array(band.cal_sizes)[sorted_cal_ind], bin_ends)\n\n peel_bins = []\n mean_flux1 = None\n solint_min = band.solint_min\n for names, fluxes, sizes in zip(list_of_name_bins, list_of_flux_bins, list_of_size_bins):\n mean_flux = np.mean(fluxes)\n if band.use_timecorr or not band.scale_solint:\n sol_int = solint_min\n else:\n if mean_flux1 is not None:\n # Scale by flux_ratio^2\n sol_int = min([int(np.ceil(solint_min * (mean_flux1 / mean_flux)**2)), 5*solint_min])\n else:\n mean_flux1 = mean_flux\n sol_int = solint_min\n if sol_int < 1:\n sol_int = 1\n bin_dict = {'names': names.tolist(), 'sol_int': sol_int, 'fluxes': fluxes, 'sizes': sizes}\n peel_bins.append(bin_dict)\n\n if band.use_timecorr:\n logtxt = 'Calibrator sets for {0}:\\n Set 1 = {1}; app. flux = {2}; size = {3} arcmin'.format(\n band.file, peel_bins[0]['names'], peel_bins[0]['fluxes'], peel_bins[0]['sizes']/60.0)\n if len(peel_bins) > 1:\n for p, peel_bin in enumerate(peel_bins[1:]):\n logtxt += '\\n Set {0} = {1}; app. flux = {2}; size = {3} arcmin'.format(p+2,\n peel_bin['names'], peel_bin['fluxes'], peel_bin['sizes']/60.0)\n else:\n logtxt = 'Calibrator sets for {0}:\\n Set 1 = {1}; solint = {2} time slots; app. flux = {3}; size = {4} arcmin'.format(\n band.file, peel_bins[0]['names'], peel_bins[0]['sol_int'], peel_bins[0]['fluxes'], peel_bins[0]['sizes']/60.0)\n if len(peel_bins) > 1:\n for p, peel_bin in enumerate(peel_bins[1:]):\n logtxt += '\\n Set {0} = {1}; solint = {2} time slots; app. 
flux = {3}; size = {4} arcmin'.format(p+2,\n peel_bin['names'], peel_bin['sol_int'], peel_bin['fluxes'], peel_bin['sizes']/60.0)\n log.info(logtxt)\n\n msname = band.file.split('/')[-1]\n make_peeling_parset('{0}/parsets/{1}.peeling.parset'.format(band.outdir,\n msname), peel_bins, scalar_phase=band.use_scalar_phase,\n phase_only=band.phase_only, sol_int_amp=band.solint_amp)\n\n create_peeling_skymodel(band.file, band.master_skymodel, radius=band.fwhm_deg*1.5,\n flux_cutoff_Jy=0.1, outdir=band.outdir,\n master_skymodel=band.master_skymodel, use_patches=band.use_patches)\n band.do_peeling = True\n band.peel_bins = peel_bins", "title": "" }, { "docid": "35b2dc00345a36d132d3f0fbb53ea0d7", "score": "0.48581967", "text": "def fan_init():\n dev_fan.reset()\n try:\n dev_fan.percentage_on = float(config.option(\n 'percentage_on', 'Fan', None))\n except ValueError:\n pass\n try:\n dev_fan.percentage_off = float(config.option(\n 'percentage_off', 'Fan', None))\n except ValueError:\n pass\n # Rounding\n round_def = 1\n round_min = 0\n round_max = 6\n try:\n dev_fan.round_perc = int(config.option('round_perc', 'Fan', round_def))\n except ValueError:\n dev_fan.round_perc = round_def\n dev_fan.round_perc = max(min(dev_fan.round_perc, round_max), round_min)\n try:\n dev_fan.round_temp = int(config.option('round_temp', 'Fan', round_def))\n except ValueError:\n dev_fan.round_temp = round_def\n dev_fan.round_temp = max(min(dev_fan.round_temp, round_max), round_min)", "title": "" }, { "docid": "3a4a3d07f4c1c1e99e3a04b7dc673d02", "score": "0.48412126", "text": "def _configure(self):\n pass", "title": "" }, { "docid": "0f009acf642c086ac6a8f79b7bba9f87", "score": "0.48372933", "text": "def setup(app):\n app.connect('builder-inited', generate_area_file)\n\n # Add option to only build a couple areas since all take a while--ONLY FOR DEV\n app.add_config_value('metpy_generate_all_areas', default=True, rebuild='html', types=bool)", "title": "" }, { "docid": "7f8e2850b2b96a6ac9d6f01d10d733e1", "score": "0.48358548", "text": "def defaultconfig(self):\r\n\r\n config_data = {\r\n \"path_to_database\": \"FUDB/FOLLOWUP.DB\",\r\n \"path_to_frontend\": \"FUDB/\",\r\n \"path_to_dcs_info\": \"FUDB/\",\r\n \"path_to_bin\": \"bin/\",\r\n \"path_to_excels_exported_from_database\": \"excels exported/\",\r\n \"path_to_excels_to_be_imported_in_database\": \"excels to be imported/\",\r\n \"path_to_new_opfiles\": \"DC BATCHES IN WORK/0 NEW/\",\r\n \"path_to_batches_unassigned\": \"DC BATCHES IN WORK/1 UNASSIGNED/\",\r\n \"path_to_batches_prepfiles\": \"DC BATCHES IN WORK/2 PREPARED FILES/\",\r\n \"path_to_batches_assigned\": \"DC BATCHES IN WORK/3 ASSIGNED/\",\r\n \"path_to_batches_tobechecked\": \"DC BATCHES IN WORK/4 TO BE CHECKED/\",\r\n \"path_to_batches_tbimported\": \"DC BATCHES IN WORK/5 TO BE IMPORTED/\",\r\n \"path_to_batches_finished\": \"DC BATCHES IN WORK/6 FINISHED/\",\r\n \"path_to_batches_instandby\": \"DC BATCHES IN WORK/7 IN STANDBY/\",\r\n \"path_to_batches_unrecordable\": \"DC BATCHES IN WORK/8 UNRECORDABLE/\",\r\n \"batch_status_options_responsible\": \"PREP. 
OP FILE, IMPORTATION & SPLIT FILE, RELIABILITY & DATA UPGRADE, CHECK OP FILE, CHECK SPLIT FILE, CHECK FRONT END, **TO BE CHECKED\",\r\n \"batch_status_options_proofreader\": \"OP FILE OK, SPLIT FILE OK, FRONT END OK, **TO BE IMPORTED, **FINISHED, **REWORK, **STANDBY, **UNRECORDABLE\",\r\n \"batch_status_options_overall\": \"ONGOING, STANDBY, FINISHED, UNRECORDABLE\",\r\n \"aircrafts\": \"A300, A300-600, A310, A320, A330, A340, A350, A380\",\r\n \"split_batch_factor\": \"2, 3, 4, 5, 6, 7, 8, 9\",\r\n \"IDlentgh\": \"6\",\r\n \"port\": \"5000\"\r\n }\r\n \r\n if not os.path.isfile(os.path.join(self.cwd, \"config.json\")):\r\n self.func.write_json(config_data, self.cwd, fname=\"config.json\")", "title": "" }, { "docid": "5b241ea604585b8564d7ea65910b617e", "score": "0.4832459", "text": "def configure_fghr(dwelling):\n # TODO: Should check that fghr is allowed for this system\n\n if dwelling.get('fghrs') is not None:\n # TODO: Need to add electrical power G1.4\n # FIXME: Entire fghrs calc is unfinished really\n dwelling.fghrs.update(\n dict(get_fghr_system(dwelling.fghrs['pcdf_id'])))\n\n if dwelling.fghrs[\"heat_store\"] == \"3\":\n assert dwelling.water_sys.system_type == HeatingTypes.combi\n assert not dwelling.get('hw_cylinder_volume')\n assert not dwelling.has_hw_cylinder\n\n dwelling.has_hw_cylinder = True\n dwelling.has_cylinderstat = True\n dwelling.has_hw_time_control = True\n hw_cylinder_volume = dwelling.fghrs['heat_store_total_volume']\n dwelling.measured_cylinder_loss = dwelling.fghrs['heat_store_loss_rate']\n dwelling.water_sys.table2b_row = 5\n\n # !!! This ideally wouldn't be here! Basically combi loss\n # !!! has already been calculated, but now we are adding a\n # !!! thermal store, so need to recalculate it\n if dwelling.water_sys.get('pcdf_data'):\n configure_combi_loss(dwelling,\n dwelling.water_sys,\n dwelling.water_sys.pcdf_data)\n else:\n dwelling.water_sys.combi_loss = combi_loss_table_3a(\n hw_cylinder_volume, dwelling.water_sys)\n\n if dwelling.fghrs[\"has_pv_module\"]:\n assert \"PV_kWp\" in dwelling.fghrs\n appendix_m.configure_pv_system(dwelling.fghrs)\n dwelling.fghrs['monthly_solar_hw_factors'] = TABLE_H3[dwelling.fghrs['pitch']]\n\n\n dwelling.hw_cylinder_volume = hw_cylinder_volume\n\n else:\n assert not \"PV_kWp\" in dwelling.fghrs\n\n if (dwelling.water_sys.system_type in [HeatingTypes.combi,\n HeatingTypes.storage_combi] and\n dwelling.water_sys.get('has_no_keep_hot') and not dwelling.has_hw_cylinder):\n\n dwelling.fghrs['equations'] = dwelling.fghrs['equations_combi_without_keephot_without_ext_store']\n else:\n dwelling.fghrs['equations'] = dwelling.fghrs['equations_other']", "title": "" }, { "docid": "b5e62e73b6dd5d8ef7b0870a49423009", "score": "0.4830389", "text": "def relay_buzzer_config(self):\r\n\t\tbus.write_byte_data(PCA9537_DEFAULT_ADDRESS, PCA9537_REG_CONFIG, PCA9537_CONFIG_PINX)\r\n\t\t\r\n\t\t\"\"\"Select the Output Port Register Configuration data from the given provided value\"\"\"\r\n\t\tif self.pin == 0 :\r\n\t\t\tbus.write_byte_data(PCA9537_DEFAULT_ADDRESS, PCA9537_REG_OUTPUT, PCA9537_OUTPUT_PIN0)\r\n\t\telif self.pin == 1 :\r\n\t\t\tbus.write_byte_data(PCA9537_DEFAULT_ADDRESS, PCA9537_REG_OUTPUT, PCA9537_OUTPUT_PIN1)", "title": "" }, { "docid": "8a8e5608dfaaf04a538026afb90d6bd1", "score": "0.48292902", "text": "def infocalypse_setupfms(ui_, **opts):\n # REQUIRES config file.\n execute_setupfms(ui_, opts)", "title": "" }, { "docid": "232be0bd8c6ad31f5464bb5a31b25d1d", "score": "0.4819942", "text": "def add_flag_band(self, fb: FlagBand) -> 
None:\n self.flag_bands[fb.pq_band] = fb\n self.bands.add(fb.pq_band)\n if fb.pq_manual_merge:\n fb.pq_manual_merge = True\n if fb.pq_fuse_func and self.fuse_func and fb.pq_fuse_func != self.fuse_func:\n raise ConfigException(f\"Fuse functions for flag bands in product set {self.product_names} do not match\")\n if fb.pq_ignore_time != self.ignore_time:\n raise ConfigException(f\"ignore_time option for flag bands in product set {self.product_names} do not match\")\n elif fb.pq_fuse_func and not self.fuse_func:\n self.fuse_func = fb.pq_fuse_func\n self.declare_unready(\"products\")\n self.declare_unready(\"low_res_products\")", "title": "" }, { "docid": "dbe132d4a0acac69ff972c6eac3dec42", "score": "0.4815888", "text": "def _configure(self):\n dconfig = DConfiguration(self._le2mserv.gestionnaire_graphique.screen)\n if dconfig.exec_():\n pms.TEMPS_PARTIE, pms.TREATMENT, pms.GRILLES = dconfig.get_config()\n self._le2mserv.gestionnaire_graphique.infoserv(\n [trans_TC(u\"Part time: {}\").format(pms.TEMPS_PARTIE),\n trans_TC(u\"Treatment: {}\").format(pms.get_treatment(pms.TREATMENT)),\n trans_TC(u\"Grids: {}\").format(len(pms.GRILLES))])", "title": "" }, { "docid": "8a4a7c8921a6125dcf26ac31044174c0", "score": "0.4814854", "text": "def start_interface(self):\n # VCV Tab\n self.VCV_frequency_spb.setValue(self.conf[\"VCV\"].getfloat(\"frequency\"))\n self.VCV_volume_spb.setValue(self.conf[\"VCV\"].getfloat(\"volume\"))\n self.VCV_pressure_max_spb.setValue(self.conf[\"VCV\"].getfloat(\"pressure_max\"))\n # PCV Tab\n self.PCV_frequency_spb.setValue(self.conf[\"PCV\"].getfloat(\"frequency\"))\n self.PCV_pressure_spb.setValue(self.conf[\"PCV\"].getfloat(\"pressure\"))\n self.PCV_volume_max_spb.setValue(self.conf[\"PCV\"].getfloat(\"volume_max\"))\n # PSV Tab\n self.PSV_pressure_spb.setValue(self.conf[\"PSV\"].getfloat(\"pressure\"))\n self.PSV_sensitivity_spb.setValue(self.conf[\"PSV\"].getfloat(\"sensitivity\"))\n # Alarms Tab\n self.al_tidal_volume_min_spb.setValue(self.conf[\"Alarms\"].getfloat(\"tidal_volume_min\"))\n self.al_tidal_volume_max_spb.setValue(self.conf[\"Alarms\"].getfloat(\"tidal_volume_max\"))\n self.al_tidal_volume_chkBox.setChecked(self.conf[\"Alarms\"].getboolean(\"tidal_volume_on\"))\n self.al_volume_minute_min_spb.setValue(self.conf[\"Alarms\"].getfloat(\"volume_minute_min\"))\n self.al_volume_minute_max_spb.setValue(self.conf[\"Alarms\"].getfloat(\"volume_minute_max\"))\n self.al_volume_minute_chkBox.setChecked(self.conf[\"Alarms\"].getboolean(\"volume_minute_on\"))\n self.al_flow_min_spb.setValue(self.conf[\"Alarms\"].getfloat(\"flow_min\"))\n self.al_flow_max_spb.setValue(self.conf[\"Alarms\"].getfloat(\"flow_max\"))\n self.al_flow_chkBox.setChecked(self.conf[\"Alarms\"].getboolean(\"flow_on\"))\n self.al_paw_min_spb.setValue(self.conf[\"Alarms\"].getfloat(\"paw_min\"))\n self.al_paw_max_spb.setValue(self.conf[\"Alarms\"].getfloat(\"paw_max\"))\n self.al_paw_chkBox.setChecked(self.conf[\"Alarms\"].getboolean(\"paw_on\"))\n self.al_plateau_pressure_min_spb.setValue(\n self.conf[\"Alarms\"].getfloat(\"plateau_pressure_min\"))\n self.al_plateau_pressure_max_spb.setValue(\n self.conf[\"Alarms\"].getfloat(\"plateau_pressure_max\"))\n self.al_plateau_pressure_chkBox.setChecked(\n self.conf[\"Alarms\"].getboolean(\"plateau_pressure_on\"))\n self.al_PEEP_min_spb.setValue(self.conf[\"Alarms\"].getfloat(\"PEEP_min\"))\n self.al_PEEP_max_spb.setValue(self.conf[\"Alarms\"].getfloat(\"PEEP_max\"))\n self.al_PEEP_chkBox.setChecked(self.conf[\"Alarms\"].getboolean(\"PEEP_on\"))\n 
self.al_frequency_min_spb.setValue(self.conf[\"Alarms\"].getfloat(\"frequency_min\"))\n self.al_frequency_max_spb.setValue(self.conf[\"Alarms\"].getfloat(\"frequency_max\"))\n self.al_frequency_chkBox.setChecked(self.conf[\"Alarms\"].getboolean(\"frequency_on\"))\n self.al_apnea_min_spb.setValue(self.conf[\"Alarms\"].getfloat(\"apnea_min\"))\n self.al_apnea_max_spb.setValue(self.conf[\"Alarms\"].getfloat(\"apnea_max\"))\n self.al_apnea_chkBox.setChecked(self.conf[\"Alarms\"].getboolean(\"apnea_on\"))\n # Config Tab\n self.cfg_tare_spb.setValue(self.conf['Config'].getfloat(\"tare\"))\n\n # Always shown elements\n self.inhale_time_val.setText(\"0,0 s\")\n self.exhale_time_val.setText(\"0,0 s\")\n self.IE_ratio_val.setText(\"1:1\")\n self.peak_pressure_val.setText(\"0,0 cm H2O\")\n self.tidal_volume_val.setText(\"0 ml\")\n self.inhale_pause_spb.setValue(self.conf[\"Panel\"].getfloat(\"inhale_pause\"))\n self.stop_btn.setEnabled(False)\n self.emerg_btn.setEnabled(True)", "title": "" }, { "docid": "dc8a29a64ddfaa262279b1631e7b438c", "score": "0.48136824", "text": "def _augment_pipeline_cfg(self):", "title": "" }, { "docid": "99d3d058b2b442b3e6658c1cc0b4e79d", "score": "0.48129982", "text": "def _installed_apps_add(self):\n config.add_plugin(self.module_path)", "title": "" }, { "docid": "5d41f3f280a0869d07edc69a889a32e9", "score": "0.48049232", "text": "def add_modules(self):\n # This is the threshold detect inverter on the output of the RBL\n self.rbl_inv_inst=self.add_inst(name=\"rbl_inv\",\n mod=self.inv,\n offset=self.rbl_inv_offset+vector(0,self.inv.width),\n rotate=270,\n mirror=\"MX\")\n self.connect_inst([\"bl[0]\", \"out\", \"vdd\", \"gnd\"])\n\n self.tx_inst=self.add_inst(name=\"rbl_access_tx\",\n mod=self.access_tx,\n offset=self.access_tx_offset,\n rotate=90)\n # D, G, S, B\n self.connect_inst([\"vdd\", \"delayed_en\", \"bl[0]\", \"vdd\"])\n # add the well and poly contact\n\n self.dc_inst=self.add_inst(name=\"delay_chain\",\n mod=self.delay_chain,\n offset=self.delay_chain_offset,\n rotate=90)\n self.connect_inst([\"en\", \"delayed_en\", \"vdd\", \"gnd\"])\n\n self.rbc_inst=self.add_inst(name=\"bitcell\",\n mod=self.replica_bitcell,\n offset=self.bitcell_offset,\n mirror=\"MX\")\n self.connect_inst([\"bl[0]\", \"br[0]\", \"delayed_en\", \"vdd\", \"gnd\"])\n\n self.rbl_inst=self.add_inst(name=\"load\",\n mod=self.rbl,\n offset=self.rbl_offset)\n self.connect_inst([\"bl[0]\", \"br[0]\"] + [\"gnd\"]*self.bitcell_loads + [\"vdd\", \"gnd\"])", "title": "" }, { "docid": "2b50c88cdb149f8802f7f899b057184d", "score": "0.48039088", "text": "def apply_user_configuration(self, config):\n self.logDisplay.set_logging_level(config['log'].get('logging_level', fallback='Verbose'))\n\n # MIDI\n self.winchMidiInputCombo.select_item(config['midi'].get('winch_midi_input', fallback='<no selection>'))\n self.midiOutputCombo.select_item(config['midi'].get('midi_output', fallback='<no selection>'))\n\n # OSC\n oscdef = config['osc']\n self.oscListenerConfig.set_OSC_port(oscdef.get('listener_addr', fallback='localhost'),\n oscdef.getint('listener_port', fallback=3751))\n\n self.oscSenderConfig.set_OSC_port(oscdef.get('sender_addr', fallback='localhost'),\n oscdef.getint('sender_port', fallback=3752))\n\n # DMX\n self.dmxSelect.select_item(config['dmx'].get('dmx_output_serial_port', fallback='<no selection>'))\n\n # winches\n for i, winchSelect in enumerate(self.winchSelects):\n key = \"winch_%d_output_serial_port\" % (i+1)\n winchSelect.select_item(config['winches'].get(key, fallback = '<no selection>'))\n 
return", "title": "" }, { "docid": "8d60749e4a9d5b236eb9aed41c8115ab", "score": "0.48023015", "text": "def configuration():", "title": "" }, { "docid": "87306fb100f6faa2896cf7c156c31496", "score": "0.4801877", "text": "def configure(self):\n pass", "title": "" }, { "docid": "87306fb100f6faa2896cf7c156c31496", "score": "0.4801877", "text": "def configure(self):\n pass", "title": "" }, { "docid": "2ef96dec6a78c0088afd1bf63f5d7768", "score": "0.4801574", "text": "def __writeConfig(self):\n page = None\n\n #TODO: get values of configurations here\n particles = \"#f\" if not base.particleMgrEnabled else \"#t\"\n volume = str(round(base.musicManager.getVolume(), 2))\n mute = \"#f\" if base.AppHasAudioFocus else \"#t\"\n #TODO: add any configuration variable name that you have added\n customConfigVariables = [\n \"\", \"particles-enabled\", \"audio-mute\", \"audio-volume\"]\n if os.path.exists(prcFile):\n # open the config file and change values according to current\n # application settings\n page = loadPrcFile(Filename.fromOsSpecific(prcFile))\n removeDecls = []\n for dec in range(page.getNumDeclarations()):\n # Check if our variables are given.\n # NOTE: This check has to be done to not loose our base or other\n # manual config changes by the user\n if page.getVariableName(dec) in customConfigVariables:\n decl = page.modifyDeclaration(dec)\n removeDecls.append(decl)\n for dec in removeDecls:\n page.deleteDeclaration(dec)\n # NOTE: particles-enabled and audio-mute are custom variables and\n # have to be loaded by hand at startup\n # Particles\n page.makeDeclaration(\"particles-enabled\", particles)\n # audio\n page.makeDeclaration(\"audio-volume\", volume)\n page.makeDeclaration(\"audio-mute\", mute)\n else:\n # Create a config file and set default values\n cpMgr = ConfigPageManager.getGlobalPtr()\n page = cpMgr.makeExplicitPage(\"%s Pandaconfig\"%appName)\n # set OpenGL to be the default\n page.makeDeclaration(\"load-display\", \"pandagl\")\n # get the displays width and height\n w = self.pipe.getDisplayWidth()\n h = self.pipe.getDisplayHeight()\n # set the window size in the config file\n page.makeDeclaration(\"win-size\", \"%d %d\"%(w, h))\n # set the default to fullscreen in the config file\n page.makeDeclaration(\"fullscreen\", \"1\")\n # particles\n page.makeDeclaration(\"particles-enabled\", \"#t\")\n # audio\n page.makeDeclaration(\"audio-volume\", volume)\n page.makeDeclaration(\"audio-mute\", \"#f\")\n # create a stream to the specified config file\n configfile = OFileStream(prcFile)\n # and now write it out\n page.write(configfile)\n # close the stream\n configfile.close()", "title": "" }, { "docid": "d7f35b2c85bc233ca1aad1551ec48ba3", "score": "0.4801492", "text": "def add_feat_conf(self, conf_map):\n pass", "title": "" }, { "docid": "84c705b6bdc8db6f23dcf91809dd0684", "score": "0.47929943", "text": "def configure(self):\r\n pass", "title": "" }, { "docid": "f81b0989f4511ff02e00c925e6c870eb", "score": "0.4789513", "text": "def _use_existing_configuration(self):\n HW_Init(self.ftdi, None)", "title": "" }, { "docid": "5072abe2c1b700888005029ce8bcfbc1", "score": "0.4767145", "text": "def setup_fpa():\n # it is a silicon detector. 
Based on the graph, the quantum efficiency\n # at 1.06 um is ~50%.\n fpa = {}\n fpa[\"quantum_efficiency\"] = 0.5\n return fpa", "title": "" }, { "docid": "bc7d4b40ac896eae4aeb7de92faf94b1", "score": "0.4762265", "text": "def configure(self):\n\n pass", "title": "" }, { "docid": "f17debb52c8b9f2207bed007a2a04c1e", "score": "0.47597814", "text": "def configure_step(self):\n\n pass", "title": "" }, { "docid": "646527891aa0fec499d0ea98acec0a4f", "score": "0.4756072", "text": "def configure_step(self):\n pass", "title": "" }, { "docid": "d86c5a22e21db8495cb6c8ea131cc692", "score": "0.47544494", "text": "def configs(self):\n raise NotImplementedError()", "title": "" }, { "docid": "21449661a824dcd1f6d07d30666a6fb7", "score": "0.47508505", "text": "def addAllFactories(self) -> None:\n ...", "title": "" }, { "docid": "984773abcd49f79f7fd0d6ed531c3903", "score": "0.47487208", "text": "def modify_setupcfg(struct, opts):\n opts[\"namespace\"] = [PYSCAFFOLDEXT_NS]\n setupcfg_path = [opts[\"project\"], \"setup.cfg\"]\n struct = helpers.modify(struct, setupcfg_path, add_install_requires)\n struct = helpers.modify(struct, setupcfg_path, add_pytest_requirements)\n struct = helpers.modify(struct, setupcfg_path,\n lambda x: add_entry_point(x, opts))\n return struct, opts", "title": "" }, { "docid": "3a89f62d07a9b6faec8990d0a5d819d0", "score": "0.47239232", "text": "def handle_add_setting(event):\n forex_type, currency_type, price_type, price = None, None, None, None\n tokens = event.message.text.split(\" \")\n if len(tokens) >= 5:\n forex_type = ForexType.get_type(tokens[1])\n currency_type = CurrencyType.get_type(tokens[2])\n price_type = PriceType.get_type(tokens[3])\n price = float(tokens[4])\n\n if forex_type is None or currency_type is None or price_type is None or price is None:\n line_bot.replyMessage(event.reply_token, \"設定格式錯誤\\n範例: '設定 買入 美元 低於 30.4'\")\n elif forex_notifier.addNotify(event.source.user_id, currency_type, price, forex_type, price_type):\n line_bot.replyMessage(event.reply_token, \"成功設定-通知\")\n else:\n line_bot.replyMessage(event.reply_token, \"設定失敗\")", "title": "" }, { "docid": "9cc41aa2db4de415553352d5a800a20b", "score": "0.47238025", "text": "def _populate(self):\n self.addDemographics()\n self.addLabs()\n self.addProblems()\n self.addMeds()\n self.addAllergies()\n self.addImmunizations()\n self.addVitals()\n self.populated_p = True", "title": "" }, { "docid": "a3a8d4a7953db078c47206a98dfe28b8", "score": "0.47200802", "text": "def infocalypse_setup(ui_, **opts):\n\n execute_setup(ui_,\n opts['fcphost'],\n opts['fcpport'],\n opts['tmpdir'])\n\n if not opts['nofms']:\n execute_setupfms(ui_, opts)\n else:\n ui_.status(\"Skipped FMS configuration because --nofms was set.\\n\")", "title": "" }, { "docid": "2067ed2df75da444bb18e1a9a8b49ad3", "score": "0.47159076", "text": "def setup(self):\n self.setup_button_handlers()\n\n # Enable various plugin pollers if enabled in the config.\n # Note: plugins defined as instance variables to prevent\n # their pollers from being garbage collected.\n if self.config[\"plugins\"][\"openweathermap.org\"][\"enabled\"]:\n from src.plugins import weather\n self.weather_plugin = weather.WeatherPlugin(self)\n self.weather_plugin.create_widgets()\n self.weather_plugin.setup_polling()\n\n if self.config[\"plugins\"][\"HSL\"][\"enabled\"]:\n from src.plugins import trains\n self.train_plugin = trains.TrainPlugin(self)\n self.train_plugin.create_widgets()\n self.train_plugin.setup_polling()\n\n if self.config[\"plugins\"][\"DHT22\"][\"enabled\"]:\n from 
src.plugins import dht22\n self.dht22_plugin = dht22.DHT22Plugin(self)\n self.dht22_plugin.create_widgets()\n self.dht22_plugin.setup_polling()\n\n # Set a higher row streches to the last used row to push elements\n # closer together\n nrows = self.main_window.right_plugin_grid.rowCount()\n self.main_window.right_plugin_grid.setRowStretch(nrows-1, 1)\n\n # Setup settings window's checkbox initial values:\n tts_enabled = self.config[\"main\"][\"TTS\"]\n self.settings_window.readaloud_checkbox.setChecked(tts_enabled)\n\n nightmode = self.config[\"main\"][\"nighttime\"].get(\"enabled\", False)\n self.settings_window.nightmode_checkbox.setChecked(nightmode)\n\n # Store nighttime range as datetimes to config.\n start_dt = utils.time_str_to_dt(self.config[\"main\"][\"nighttime\"][\"start\"])\n end_dt = utils.time_str_to_dt(self.config[\"main\"][\"nighttime\"][\"end\"])\n\n # Ensure start is before end \n if end_dt <= start_dt:\n end_dt = end_dt + timedelta(1)\n\n self.config[\"main\"][\"nighttime\"].update({\n \"start_dt\": start_dt,\n \"end_dt\": end_dt\n })\n\n # Set a timer to update the range on next nighttime end\n self.nighttime_update_timer = QTimer(self.main_window)\n self.nighttime_update_timer.setSingleShot(True)\n self.nighttime_update_timer.timeout.connect(self._update_nighttime_range)\n\n DELAY_UNTIL_DAYTIME = int((self.config[\"main\"][\"nighttime\"][\"end_dt\"] - datetime.now()).total_seconds())\n self.nighttime_update_timer.start(DELAY_UNTIL_DAYTIME*1000)\n\n alarm_brightness_enabled = self.config[\"main\"][\"full_brightness_on_alarm\"]\n self.settings_window.alarm_brightness_checkbox.setChecked(alarm_brightness_enabled)\n\n # Set main window's alarm time display to currently active alarm time\n alarm_time = self.get_current_active_alarm()\n if alarm_time:\n self.main_window.alarm_time_lcd.display(alarm_time)\n\n self.screen_blank_timer = QTimer(self.main_window)\n self.screen_blank_timer.setSingleShot(True)\n self.screen_blank_timer.timeout.connect(self.blank_screen_and_hide_control_buttons)\n\n self.main_window.mouseReleaseEvent = self.on_release_event_handler\n\n # Set radio stations from config to the settings window options\n self.radio_streams = self.config[\"radio\"][\"urls\"]\n self.settings_window.radio_station_combo_box.addItems(self.radio_streams.keys())\n\n # Ensure station set as default is set as current item\n default_station = self.config[\"radio\"][\"default\"]\n self.settings_window.radio_station_combo_box.setCurrentText(default_station)", "title": "" }, { "docid": "7dea25cb9d4b64ee00980249939f5283", "score": "0.47128785", "text": "def setup_platform(hass, config, add_entities, discovery_info=None):\n bond = hass.data[DOMAIN]['bond_hub']\n\n for deviceId in bond.getDeviceIds():\n device = bond.getDevice(deviceId)\n if device['type'] != DeviceTypes.CEILING_FAN:\n continue\n\n deviceProperties = bond.getProperties(deviceId)\n fan = BondFan(bond, deviceId, device, deviceProperties)\n add_entities([fan])", "title": "" }, { "docid": "5312d88457e7e5a5b83b29fec2d2e222", "score": "0.47103322", "text": "def __init__(self, initScript=None):\n super(Pump, self).__init__(initScript)\n \n # the isentropic compressor\n self.ideal = IdealPump()\n self.AddUnitOperation(self.ideal, 'Ideal')\n \n # a heater to add the waste heat to the outlet\n self.waste = Heater.Heater()\n self.AddUnitOperation(self.waste, 'Waste')\n self.waste.GetPort(DELTAP_PORT).SetValue(0.0, FIXED_V)\n \n # connect them\n self.ConnectPorts('Ideal', OUT_PORT, 'Waste', IN_PORT)\n\n # energy sensors (needed 
for signals)\n self.idealQ = Sensor.EnergySensor()\n self.AddUnitOperation(self.idealQ, 'IdealQ')\n self.ConnectPorts('Ideal', IN_PORT + 'Q', 'IdealQ', OUT_PORT)\n \n self.wasteQ = Sensor.EnergySensor()\n self.AddUnitOperation(self.wasteQ, 'WasteQ')\n self.ConnectPorts('Waste', IN_PORT + 'Q', 'WasteQ', OUT_PORT)\n\n self.totalQ = Sensor.EnergySensor()\n self.AddUnitOperation(self.totalQ, 'TotalQ')\n \n # create a signal stream for the efficiency\n self.effStream = Stream.Stream_Signal()\n self.effStream.SetParameterValue(SIGTYPE_PAR, GENERIC_VAR)\n self.AddUnitOperation(self.effStream, 'EfficiencySig')\n \n #set relation between ideal and total Q\n self.set = Set.Set()\n self.AddUnitOperation(self.set, 'Set')\n self.set.SetParameterValue(SIGTYPE_PAR, ENERGY_VAR)\n self.set.GetPort(Set.ADD_PORT).SetValue(0.0, FIXED_V)\n self.ConnectPorts('TotalQ',SIG_PORT, 'Set', SIG_PORT + '0')\n self.ConnectPorts('IdealQ',SIG_PORT, 'Set', SIG_PORT + '1')\n self.ConnectPorts('EfficiencySig', OUT_PORT, 'Set', Set.MULT_PORT)\n \n # energy stream balance\n self.mix = Balance.BalanceOp()\n self.AddUnitOperation(self.mix, 'Mix')\n self.mix.SetParameterValue(NUSTIN_PAR + Balance.S_ENE, 1)\n self.mix.SetParameterValue(NUSTOUT_PAR + Balance.S_ENE, 2)\n self.mix.SetParameterValue(Balance.BALANCETYPE_PAR, Balance.ENERGY_BALANCE)\n \n # connect the mixer ports\n self.ConnectPorts('IdealQ',IN_PORT,'Mix',OUT_PORT + 'Q0')\n self.ConnectPorts('WasteQ',IN_PORT,'Mix',OUT_PORT + 'Q1')\n self.ConnectPorts('TotalQ',OUT_PORT,'Mix', IN_PORT + 'Q0')\n \n # export the flow ports\n self.BorrowChildPort(self.ideal.GetPort(IN_PORT), IN_PORT)\n self.BorrowChildPort(self.waste.GetPort(OUT_PORT), OUT_PORT)\n self.BorrowChildPort(self.totalQ.GetPort(IN_PORT), IN_PORT + 'Q')\n self.BorrowChildPort(self.effStream.GetPort(IN_PORT), EFFICIENCY_PORT)\n self.BorrowChildPort(self.ideal.GetPort(DELTAP_PORT), DELTAP_PORT)\n \n #Change the type of the energy port such that it is in Work units and scaling\n self.totalQ.GetPort(IN_PORT).GetProperty().SetTypeByName(WORK_VAR)", "title": "" }, { "docid": "587ba2ebe1a59380f6b51d0457f12ca1", "score": "0.47084418", "text": "def _setup_pipeline_cfg(self):", "title": "" }, { "docid": "0edbddca100b6c5943655ecd9ef3be52", "score": "0.47067645", "text": "def copy_config_to_properties(self, config):\n ## EPICS\n self.epics_root = config.get('epics_root')\n\n ## Directories\n self.smurf_cmd_dir = config.get('smurf_cmd_dir')\n self.tune_dir = config.get('tune_dir')\n self.status_dir = config.get('status_dir')\n self.default_data_dir = config.get('default_data_dir')\n\n ## Useful constants\n constant_cfg = config.get('constant')\n self.pA_per_phi0 = constant_cfg.get('pA_per_phi0')\n\n ## Timing\n timing_cfg = config.get('timing')\n self.timing_reference = timing_cfg['timing_reference']\n\n ## Cold amplifier biases\n amp_cfg = config.get('amplifier')\n\n # 4K HEMT\n self.hemt_Vg = amp_cfg['hemt_Vg']\n self.hemt_bit_to_V = amp_cfg['bit_to_V_hemt']\n self.hemt_Vd_series_resistor = amp_cfg['hemt_Vd_series_resistor']\n self.hemt_Id_offset = amp_cfg['hemt_Id_offset']\n self.hemt_gate_min_voltage = amp_cfg['hemt_gate_min_voltage']\n self.hemt_gate_max_voltage = amp_cfg['hemt_gate_max_voltage']\n\n # 50K HEMT\n self.fiftyk_Vg = amp_cfg['LNA_Vg']\n self.fiftyk_dac_num = amp_cfg['dac_num_50k']\n self.fiftyk_bit_to_V = amp_cfg['bit_to_V_50k']\n self.fiftyk_amp_Vd_series_resistor = amp_cfg['50K_amp_Vd_series_resistor']\n self.fiftyk_Id_offset = amp_cfg['50k_Id_offset']\n ## Tune parameters\n tune_band_cfg = 
config.get('tune_band')\n self.default_tune = tune_band_cfg['default_tune']\n self.gradient_descent_gain = {\n int(band):v for (band,v) in\n tune_band_cfg['gradient_descent_gain'].items()}\n self.gradient_descent_averages = {\n int(band):v for (band,v) in\n tune_band_cfg['gradient_descent_averages'].items()}\n self.gradient_descent_converge_hz = {\n int(band):v for (band,v) in\n tune_band_cfg['gradient_descent_converge_hz'].items()}\n self.gradient_descent_step_hz = {\n int(band):v for (band,v) in\n tune_band_cfg['gradient_descent_step_hz'].items()}\n self.gradient_descent_momentum = {\n int(band):v for (band,v) in\n tune_band_cfg['gradient_descent_momentum'].items()}\n self.gradient_descent_beta = {\n int(band):v for (band,v) in\n tune_band_cfg['gradient_descent_beta'].items()}\n self.feedback_start_frac = {\n int(band):v for (band,v) in\n tune_band_cfg['feedback_start_frac'].items()}\n self.feedback_end_frac = {\n int(band):v for (band,v) in\n tune_band_cfg['feedback_end_frac'].items()}\n self.eta_scan_del_f = {\n int(band):v for (band,v) in\n tune_band_cfg['eta_scan_del_f'].items()}\n self.eta_scan_averages = {\n int(band):v for (band,v) in\n tune_band_cfg['eta_scan_averages'].items()}\n self.delta_freq = {\n int(band):v for (band,v) in\n tune_band_cfg['delta_freq'].items()}\n # Tracking algo\n self.lms_freq_hz = {\n int(band):v for (band,v) in\n tune_band_cfg['lms_freq'].items()}\n\n ## Reading/writing data\n self.fs = config.get('fs')\n\n ## In fridge\n self.R_sh = config.get('R_sh')\n\n ## Which bands are have their configurations specified in the\n ## pysmurf configuration file?\n smurf_init_config = config.get('init')\n bands = smurf_init_config['bands']\n\n ## Carrier\n self.dsp_enable = smurf_init_config['dspEnable']\n self.ultrascale_temperature_limit_degC = config.get('ultrascale_temperature_limit_degC')\n self.data_out_mux = {\n band:smurf_init_config[f'band_{band}']['data_out_mux']\n for band in bands}\n\n ## AMC\n # Which bands are present in the pysmurf configuration file?\n self.bands = bands\n self.amplitude_scale = {\n band:smurf_init_config[f'band_{band}']['amplitude_scale']\n for band in bands}\n self.iq_swap_in = {\n band:smurf_init_config[f'band_{band}']['iq_swap_in']\n for band in bands}\n self.iq_swap_out = {\n band:smurf_init_config[f'band_{band}']['iq_swap_out']\n for band in bands}\n self.ref_phase_delay = {\n band:smurf_init_config[f'band_{band}']['refPhaseDelay']\n for band in bands}\n self.ref_phase_delay_fine = {\n band:smurf_init_config[f'band_{band}']['refPhaseDelayFine']\n for band in bands}\n self.band_delay_us = {\n band:smurf_init_config[f'band_{band}']['bandDelayUs']\n for band in bands}\n self.att_uc = {\n band:smurf_init_config[f'band_{band}']['att_uc']\n for band in bands}\n self.att_dc = {\n band:smurf_init_config[f'band_{band}']['att_dc']\n for band in bands}\n self.trigger_reset_delay= {\n band:smurf_init_config[f'band_{band}']['trigRstDly']\n for band in bands}\n\n # Mapping from attenuator numbers to bands\n att_cfg = config.get('attenuator')\n att_cfg_keys = att_cfg.keys()\n attenuator = {}\n attenuator['band'] = np.zeros(len(att_cfg_keys),dtype=int)\n attenuator['att'] = np.zeros(len(att_cfg_keys),dtype=int)\n for i, k in enumerate(att_cfg_keys):\n attenuator['band'][i] = att_cfg[k]\n attenuator['att'][i] = k[-1]\n self.attenuator = attenuator\n\n ## RTM\n flux_ramp_cfg = config.get('flux_ramp')\n self.num_flux_ramp_counter_bits = flux_ramp_cfg['num_flux_ramp_counter_bits']\n self.reset_rate_khz = tune_band_cfg.get('reset_rate_khz')\n 
self.fraction_full_scale = tune_band_cfg.get('fraction_full_scale')\n\n ## Cryocard\n self.bias_line_resistance = config.get('bias_line_resistance')\n self.high_low_current_ratio = config.get('high_low_current_ratio')\n self.high_current_mode_bool = config.get('high_current_mode_bool')\n # Mapping from peripheral interface controller (PIC) to bias group\n pic_cfg = config.get('pic_to_bias_group')\n pic_cfg_keys = pic_cfg.keys()\n pic_to_bias_group = np.zeros((len(pic_cfg_keys), 2), dtype=int)\n for i, k in enumerate(pic_cfg_keys):\n val = pic_cfg[k]\n pic_to_bias_group[i] = [k, val]\n self.pic_to_bias_group = pic_to_bias_group\n\n ## Tracking algo\n # lmsGain ; this one's a little odd ; it's defined in each of\n # the band_# configuration file blocks, while the other main\n # tracking algorithm parameter, lms_freq_hz, is defined in the\n # tune_band configuration file block...\n self.lms_gain = {\n band:smurf_init_config[f'band_{band}']['lmsGain']\n for band in bands}\n self.lms_delay = {\n band:smurf_init_config[f'band_{band}']['lmsDelay']\n for band in bands}\n self.feedback_enable = {\n band:smurf_init_config[f'band_{band}']['feedbackEnable']\n for band in bands}\n self.feedback_gain = {\n band:smurf_init_config[f'band_{band}']['feedbackGain']\n for band in bands}\n self.feedback_limit_khz = {\n band:smurf_init_config[f'band_{band}']['feedbackLimitkHz']\n for band in bands}\n self.feedback_polarity = {\n band:smurf_init_config[f'band_{band}']['feedbackPolarity']\n for band in bands}\n\n ## Mappings\n # Bias groups available\n self.all_groups = config.get('all_bias_groups')\n\n # Number of bias groups and bias group to RTM DAC pair\n # mapping\n bias_group_cfg = config.get('bias_group_to_pair')\n bias_group_keys = bias_group_cfg.keys()\n\n # Number of bias groups\n self.n_bias_groups = len(bias_group_cfg)\n\n # Bias group to RTM DAC pair mapping\n bias_group_to_pair = np.zeros((len(bias_group_keys), 3), dtype=int)\n for i, k in enumerate(bias_group_keys):\n val = bias_group_cfg[k]\n bias_group_to_pair[i] = np.append([k], val)\n self.bias_group_to_pair = bias_group_to_pair\n\n # Bad resonator mask\n bad_mask_config = config.get('bad_mask')\n bad_mask_keys = bad_mask_config.keys()\n bad_mask = np.zeros((len(bad_mask_keys), 2))\n for i, k in enumerate(bad_mask_keys):\n bad_mask[i] = bad_mask_config[k]\n self.bad_mask = bad_mask", "title": "" }, { "docid": "45a1ec9b7a4cb35e22e468cbf738d921", "score": "0.47058004", "text": "def apply_beam_settings(self):\n raise NotImplementedError", "title": "" }, { "docid": "859f382671cd514c8893ce846bb70ccb", "score": "0.47048777", "text": "def main(config):\n current_config = DKRConfig()\n\n for key, value in config.items():\n\n if key in current_config.config:\n for version in value['versions']:\n current_config.add_entrypoint_version(key, version)\n continue\n\n current_config.add_entrypoint(key, value['versions'])\n\n current_config.write(create=True)", "title": "" }, { "docid": "17b36fac7ee47eda282fc200b1315325", "score": "0.4703463", "text": "def build_config(self, config):\n config.setdefaults('Makesmith Settings', {'COMport': 'COM5', 'xPitch': 20, 'openFile': \" \"})", "title": "" }, { "docid": "62f35cd074cf8fc6723560d4f67e75d9", "score": "0.46961617", "text": "def setup_fan():\n global dev_fan\n dev_fan = iot_fan.Fan(config.option('pin_name', 'Fan'))\n fan_init()", "title": "" }, { "docid": "15981065d45d9766969efdc4b9ce3548", "score": "0.46959153", "text": "def beehive_configure(self):\n run_data = {\n u'tags':[u'configure']\n } \n 
self.ansible_playbook(u'beehive', run_data, \n playbook=self.beehive_playbook)", "title": "" }, { "docid": "7893f2a96559fe12264e88b8046f7fdd", "score": "0.46956462", "text": "def configure(self) -> None:", "title": "" }, { "docid": "c216841e15d18d2b526c232e9e76b3c7", "score": "0.4690884", "text": "def reffile_setup(self):\n # Prepare to find files listed as 'config'\n # and set up PSF path\n\n # set up as dictionary of dictionaries\n self.configfiles = {}\n self.psfpath = {}\n self.psfbasename = {}\n self.psfpixfrac = {}\n self.reference_file_dir = {}\n\n for instrument in 'nircam niriss fgs'.split():\n self.configfiles[instrument] = {}\n self.psfpath[instrument] = os.path.join(self.datadir, instrument, 'gridded_psf_library')\n self.psfbasename[instrument] = instrument\n self.reference_file_dir[instrument] = os.path.join(self.datadir, instrument, 'reference_files')\n\n # Set instrument-specific file paths\n if instrument == 'nircam':\n self.psfpixfrac[instrument] = 0.25\n elif instrument == 'niriss':\n self.psfpixfrac[instrument] = 0.1\n elif instrument == 'fgs':\n self.psfpixfrac[instrument] = 0.1\n\n # Set global file paths\n self.configfiles[instrument]['filter_throughput'] = os.path.join(self.modpath, 'config', 'placeholder.txt')\n\n for instrument in 'miri nirspec'.split():\n self.configfiles[instrument] = {}\n self.psfpixfrac[instrument] = 0\n self.psfbasename[instrument] = 'N/A'\n\n # create empty dictionaries\n list_names = 'superbias linearity gain saturation ipc astrometric photom pam dark lindark'.split()\n for list_name in list_names:\n setattr(self, '{}_list'.format(list_name), {})\n\n self.det_list = {}\n self.det_list['nircam'] = ['A1', 'A2', 'A3', 'A4', 'A5', 'B1', 'B2', 'B3', 'B4', 'B5']\n self.det_list['niriss'] = ['NIS']\n self.det_list['fgs'] = ['G1', 'G2']\n self.det_list['nirspec'] = ['NRS']\n self.det_list['miri'] = ['MIR']\n\n for instrument in 'nircam niriss fgs miri nirspec'.split():\n for list_name in list_names:\n getattr(self, '{}_list'.format(list_name))[instrument] = {}\n\n if self.offline:\n # no access to central store. 
Set all files to none.\n for list_name in list_names:\n if list_name in 'dark lindark'.split():\n default_value = ['None']\n else:\n default_value = 'None'\n for det in self.det_list[instrument]:\n getattr(self, '{}_list'.format(list_name))[instrument][det] = default_value\n\n elif instrument == 'nircam':\n rawdark_dir = os.path.join(self.datadir, 'nircam/darks/raw')\n lindark_dir = os.path.join(self.datadir, 'nircam/darks/linearized')\n for det in self.det_list[instrument]:\n self.dark_list[instrument][det] = glob(os.path.join(rawdark_dir, det, '*.fits'))\n self.lindark_list[instrument][det] = glob(os.path.join(lindark_dir, det, '*.fits'))\n\n elif instrument in ['nirspec', 'miri']:\n for key in 'subarray_def_file fluxcal filtpupil_pairs readpatt_def_file crosstalk ' \\\n 'dq_init_config saturation_config superbias_config refpix_config ' \\\n 'linearity_config filter_throughput'.split():\n self.configfiles[instrument][key] = 'N/A'\n default_value = 'none'\n for list_name in list_names:\n for det in self.det_list[instrument]:\n getattr(self, '{}_list'.format(list_name))[instrument][det] = default_value\n\n else: # niriss and fgs\n for det in self.det_list[instrument]:\n if det == 'G1':\n self.dark_list[instrument][det] = glob(os.path.join(self.datadir, 'fgs/darks/raw', FGS1_DARK_SEARCH_STRING))\n self.lindark_list[instrument][det] = glob(os.path.join(self.datadir, 'fgs/darks/linearized', FGS1_DARK_SEARCH_STRING))\n\n elif det == 'G2':\n self.dark_list[instrument][det] = glob(os.path.join(self.datadir, 'fgs/darks/raw', FGS2_DARK_SEARCH_STRING))\n self.lindark_list[instrument][det] = glob(os.path.join(self.datadir, 'fgs/darks/linearized', FGS2_DARK_SEARCH_STRING))\n\n elif det == 'NIS':\n self.dark_list[instrument][det] = glob(os.path.join(self.datadir, 'niriss/darks/raw',\n '*uncal.fits'))\n self.lindark_list[instrument][det] = glob(os.path.join(self.datadir, 'niriss/darks/linearized',\n '*linear_dark_prep_object.fits'))", "title": "" }, { "docid": "2a697401b0712450ef9d714559f0e190", "score": "0.4687691", "text": "def hsdpa_physical_downlink_settings(self):\r\r\n\r\r\n config_list = []\r\r\n\r\r\n config_list.append (\"\")\r\r\n\r\r\n config_list.append ( \"%-24s %-18s\" % (\"Channel( Carrier 1)\", \"Level\"))\r\r\n config_list.append ( \"%-24s %-18s\" % (\"==================\", \"=====\"))\r\r\n\r\r\n pcpich_level = -10.2\r\r\n self.set_pcpich_code_level(carrier=1, leveldB=pcpich_level)\r\r\n config_list.append ( \"%-24s %-18s\" % (\"P-CPICH\", pcpich_level))\r\r\n\r\r\n psch_level = -15.2\r\r\n ssch_level = psch_level\r\r\n pccpch_level = -12.2\r\r\n self.write('CONFigure:WCDMa:SIGN:DL:LEVel:PSCH %s' %psch_level)\r\r\n self.write('CONFigure:WCDMa:SIGN:DL:LEVel:SSCH %s' %ssch_level)\r\r\n self.write('CONFigure:WCDMa:SIGN:DL:LEVel:PCCPch %s' %pccpch_level)\r\r\n config_list.append ( \"%-24s %-18s\" % (\"P-SCH\", psch_level))\r\r\n config_list.append ( \"%-24s %-18s\" % (\"S-SCH\", ssch_level))\r\r\n config_list.append ( \"%-24s %-18s\" % (\"P-CCPCH\", pccpch_level))\r\r\n\r\r\n\r\r\n # SCCPH power level and channelisation code\r\r\n sccpch_level = -12.2\r\r\n self.set_dl_chan_code_level(dl_chan='SCCPch', code=2, level_dB=sccpch_level)\r\r\n config_list.append ( \"%-24s %-18s\" % (\"S-CCPCH\", sccpch_level))\r\r\n\r\r\n # PICH power level and channelisation code\r\r\n pich_level = -15.2\r\r\n self.set_dl_chan_code_level(dl_chan='PICH', code=2, level_dB=pich_level)\r\r\n config_list.append ( \"%-24s %-18s\" % (\"PICH\", pich_level))\r\r\n\r\r\n # AICH power level and channelisation code\r\r\n 
aich_level = -15.2\r\r\n self.set_dl_chan_code_level(dl_chan='AICH', code=3, level_dB=aich_level)\r\r\n config_list.append ( \"%-24s %-18s\" % (\"AICH\", aich_level))\r\r\n\r\r\n # DPCH power and channelisation code\r\r\n dpch_level = -18.2\r\r\n self.set_dl_chan_code_level(dl_chan='DPCH', code=3, level_dB=dpch_level)\r\r\n config_list.append ( \"%-24s %-18s\" % (\"DPCH\", dpch_level))\r\r\n\r\r\n # F-DPCH power and channelisation ocde\r\r\n fdpch_level = -18.2\r\r\n self.set_dl_chan_code_level(dl_chan='FDPCh', code=6, level_dB=fdpch_level)\r\r\n config_list.append ( \"%-24s %-18s\" % (\"F-DPCH\", fdpch_level))\r\r\n\r\r\n # DPCH enhanced settings\r\r\n self.configure_enhanced_dl_dpch()\r\r\n\r\r\n\r\r\n # *****************************************************************************\r\r\n # Configure 2 HS-SCCH: level, channelization code, UE ID and dummy UE ID\r\r\n # *****************************************************************************\r\r\n hssch_level_1 = -20.2\r\r\n hssch_level_2 = -20.2\r\r\n self.set_hssch_level(hssch_num=1, carrier=1, leveldB=hssch_level_1)\r\r\n self.set_hssch_level(hssch_num=2, carrier=1, leveldB=hssch_level_2)\r\r\n self.set_hssch_code(hssch_num=1, carrier=1, codeNum=2)\r\r\n self.set_hssch_code(hssch_num=2, carrier=1, codeNum=7)\r\r\n config_list.append ( \"%-24s %-18s\" % (\"HS-SCCH #1\", hssch_level_1))\r\r\n config_list.append ( \"%-24s %-18s\" % (\"HS-SCCH #2\", hssch_level_2))\r\r\n\r\r\n self.set_default_ue_id_hssch(carrier=1)\r\r\n\r\r\n # HS-PDSCH Enhanced Settings\r\r\n self.set_hsdsch_mpo(carrier=1, control=\"AUTO\", pwrOffsetManual=\"\")\r\r\n # unscheduled frame type for HSDPA\r\r\n # possible types are 'DUMMy', 'DTX'\r\r\n self.hsdsch_unsched_frames(carrier=1, usFrameType='DUMMY')\r\r\n\r\r\n # *****************************************************************************\r\r\n # Configure HS-PDSCH: level and first channelization code number\r\r\n # *****************************************************************************\r\r\n\r\r\n hsdsch_level = -1.2\r\r\n self.set_hsdsch_level(carrier=1, leveldB = hsdsch_level)\r\r\n self.set_hsdsch_chanelisation_code(code=1, carrier=1)\r\r\n config_list.append ( \"%-24s %-18s\" % (\"HS-PDSCH\", hsdsch_level))\r\r\n\r\r\n\r\r\n # // *****************************************************************************\r\r\n # Set level and channelization code of E-AGCH, E-HICH and E-RGCH.\r\r\n # *****************************************************************************\r\r\n eagch_level = -20.2\r\r\n ehich_level = -20.2\r\r\n ergch_level = -20.2\r\r\n self.set_dl_chan_code_level(dl_chan='EAGCh', code=3, level_dB=eagch_level)\r\r\n self.set_dl_chan_code_level(dl_chan='EHICh', code=6, level_dB=ehich_level)\r\r\n self.set_dl_chan_code_level(dl_chan='ERGCh', code=6, level_dB=ergch_level)\r\r\n config_list.append ( \"%-24s %-18s\" % (\"E-AGCH\", eagch_level))\r\r\n config_list.append ( \"%-24s %-18s\" % (\"E-HICH\", ehich_level))\r\r\n config_list.append ( \"%-24s %-18s\" % (\"E-RGCH\", ergch_level))\r\r\n\r\r\n config_list.append (\"\")\r\r\n\r\r\n for line in config_list:\r\r\n print line\r\r\n\r\r\n if self.dc_hsdpa:\r\r\n\r\r\n self.hsdpa_physical_downlink_settings_carrier2()", "title": "" }, { "docid": "150afdda99714e2f84c9f0d21553f852", "score": "0.4680646", "text": "def configure(self):\n add_config(self.config,\n 'compass.ocean.tests.global_ocean.make_diagnostics_files',\n 'make_diagnostics_files.cfg', exception=True)", "title": "" }, { "docid": "4ed046c63e14147f4aefb7a9d7876cef", "score": "0.467725", 
"text": "def setup_dev():\n setup_general()", "title": "" }, { "docid": "1129bdd69386cff2ceb1c8cb6f8afae2", "score": "0.4675958", "text": "def setup_prod():\n setup_general()", "title": "" }, { "docid": "4869b173da8606596d27a6978f53662f", "score": "0.46753436", "text": "def data_setup_appliances():\n appliance_list = []\n appliance_list.append(helper_setup_appliances(APPLIANCE_TYPE_NAME_BASIC, \"appliance1\", gpio_pin_id=None))\n appliance_list.append(helper_setup_appliances(APPLIANCE_TYPE_NAME_BASIC, \"appliance2\", gpio_pin_id=None))\n appliance_list.append(helper_setup_appliances(APPLIANCE_TYPE_NAME_BASIC, \"appliance3\", gpio_pin_id=None))\n appliance_list.append(helper_setup_appliances(APPLIANCE_TYPE_NAME_BASIC, \"appliance4\", gpio_pin_id=None))\n appliance_list.append(helper_setup_appliances(APPLIANCE_TYPE_NAME_BASIC, \"appliance5\", gpio_pin_id=None))\n appliance_list.append(helper_setup_appliances(APPLIANCE_TYPE_NAME_RELAY, \"gpio_appliance1\", gpio_pin_id=13))\n appliance_list.append(helper_setup_appliances(APPLIANCE_TYPE_NAME_RELAY, \"gpio_appliance2\", gpio_pin_id=15))\n return appliance_list", "title": "" }, { "docid": "6b0386a570e303b69795cb99a0e82c6a", "score": "0.46736163", "text": "def RPC_DigitizationBasicCfg(flags, **kwargs):\n acc = MuonGeoModelCfg(flags)\n if \"PileUpTools\" not in kwargs:\n PileUpTools = acc.popToolsAndMerge(RPC_DigitizationToolCfg(flags))\n kwargs[\"PileUpTools\"] = PileUpTools\n acc.merge(PileUpToolsCfg(flags, **kwargs))\n return acc", "title": "" }, { "docid": "6e69c0ca4141736da812b92477d41190", "score": "0.467141", "text": "def populate_config(self, config):\n self.use_wine_mappings.set_active(config['use_wine_mappings'])\n self.force_recheck.set_active(config['force_recheck'])\n self._previous_force_recheck = config['force_recheck']\n self.resume.set_active(config['resume'])\n try:\n self.glade.get_widget('time_added_checkbox').set_active(\n 'time_added' in config['transfer_meta'])\n except KeyError:\n pass\n self.resume_dat_entry.set_text(config['previous_resume_dat_path'])", "title": "" }, { "docid": "3f82823bbabb247768d995e75d0311c9", "score": "0.46704623", "text": "def setup(self, config, base, xsize, ysize, ignore, logger):\n # .. 
Do any custom setup you need to do.\n # Probably want to call the base class setup function to do the normal determination\n # of the size and position values.\n\n # Extra processing of 'bandpass' argument\n # Most needed type-checking is done in galsim.bandpass\n self._req_bp_fields = ['throughput', 'wave_type']\n self._opt_bp_fields = ['red_limit', 'blue_limit', 'zeropoint']\n try:\n bp = config['bandpass']\n for req in self._req_bp_fields:\n if req not in bp.keys():\n raise ValueError('Must pass field {} for a bandpass object!'.format(req))\n # for opt in self._opt_bp_fields:\n # if opt not in bp.keys():\n # config['bandpass'][opt] = None\n for key in bp.keys():\n if key not in (self._req_bp_fields+self._opt_bp_fields):\n raise ValueError('Field {} is not a valid entry for a bandpass!'.format(key))\n except KeyError:\n raise KeyError('`bandpass` is a required field for a COSMOSChromatic stamp!')\n\n extra_ignore = ignore + ['bandpass']\n return super(self.__class__, self).setup(config, base, xsize, ysize, extra_ignore, logger)", "title": "" }, { "docid": "c48a2c6b8f2372f457343d859ceee0a9", "score": "0.4670437", "text": "def get_fp_config_files(self):\n self.get_config_files()\n for file in self.txt_files: \n if \"fp\" in file:\n self.fp_config_files.append(file)\n return self.fp_config_files", "title": "" }, { "docid": "cfc9bb773b1f070cd5fdd27ddf7c260d", "score": "0.46689287", "text": "def configure(self):\n if self.three_layer:\n config = self.config\n # remove the continental shelf\n config.set('soma', 'phi', '1e-16')\n config.set('soma', 'shelf_depth', '0.0')", "title": "" }, { "docid": "d6eec29aeea18ecfc4a3f68f88e3f1f1", "score": "0.46675545", "text": "def test_forwarder_updates_pv_when_config_change_add_two_pvs(docker_compose_no_command):\n data_topic = \"TEST_forwarderData_change_config\"\n pvs = [PVSTR, PVLONG]\n prod = ProducerWrapper(\"localhost:9092\", CONFIG_TOPIC, data_topic)\n prod.add_config(pvs)\n\n sleep(2)\n cons = create_consumer()\n sleep(2)\n cons.subscribe([data_topic])\n sleep(2)\n\n poll_for_valid_message(cons)\n poll_for_valid_message(cons)\n\n expected_values = {PVSTR: (Value.String, b\"\"), PVLONG: (Value.Int, 0)}\n\n messages = [poll_for_valid_message(cons)[0], poll_for_valid_message(cons)[0]]\n check_multiple_expected_values(messages, expected_values)\n cons.close()", "title": "" }, { "docid": "e420a5237f6c7764eb9497d027610bad", "score": "0.46664828", "text": "async def _register_components(\n hass, speed_list=None, preset_modes=None, speed_count=None\n):\n await _register_fan_sources(hass)\n\n with assert_setup_component(1, \"fan\"):\n value_template = \"\"\"\n {% if is_state('input_boolean.state', 'on') %}\n {{ 'on' }}\n {% else %}\n {{ 'off' }}\n {% endif %}\n \"\"\"\n\n test_fan_config = {\n \"value_template\": value_template,\n \"preset_mode_template\": \"{{ states('input_select.preset_mode') }}\",\n \"percentage_template\": \"{{ states('input_number.percentage') }}\",\n \"oscillating_template\": \"{{ states('input_select.osc') }}\",\n \"direction_template\": \"{{ states('input_select.direction') }}\",\n \"turn_on\": [\n {\n \"service\": \"input_boolean.turn_on\",\n \"entity_id\": _STATE_INPUT_BOOLEAN,\n },\n {\n \"service\": \"test.automation\",\n \"data_template\": {\n \"action\": \"turn_on\",\n \"caller\": \"{{ this.entity_id }}\",\n },\n },\n ],\n \"turn_off\": [\n {\n \"service\": \"input_boolean.turn_off\",\n \"entity_id\": _STATE_INPUT_BOOLEAN,\n },\n {\n \"service\": \"input_number.set_value\",\n \"data_template\": {\n \"entity_id\": 
_PERCENTAGE_INPUT_NUMBER,\n \"value\": 0,\n },\n },\n {\n \"service\": \"test.automation\",\n \"data_template\": {\n \"action\": \"turn_off\",\n \"caller\": \"{{ this.entity_id }}\",\n },\n },\n ],\n \"set_preset_mode\": [\n {\n \"service\": \"input_select.select_option\",\n \"data_template\": {\n \"entity_id\": _PRESET_MODE_INPUT_SELECT,\n \"option\": \"{{ preset_mode }}\",\n },\n },\n {\n \"service\": \"test.automation\",\n \"data_template\": {\n \"action\": \"set_preset_mode\",\n \"caller\": \"{{ this.entity_id }}\",\n \"option\": \"{{ preset_mode }}\",\n },\n },\n ],\n \"set_percentage\": [\n {\n \"service\": \"input_number.set_value\",\n \"data_template\": {\n \"entity_id\": _PERCENTAGE_INPUT_NUMBER,\n \"value\": \"{{ percentage }}\",\n },\n },\n {\n \"service\": \"test.automation\",\n \"data_template\": {\n \"action\": \"set_value\",\n \"caller\": \"{{ this.entity_id }}\",\n \"value\": \"{{ percentage }}\",\n },\n },\n ],\n \"set_oscillating\": [\n {\n \"service\": \"input_select.select_option\",\n \"data_template\": {\n \"entity_id\": _OSC_INPUT,\n \"option\": \"{{ oscillating }}\",\n },\n },\n {\n \"service\": \"test.automation\",\n \"data_template\": {\n \"action\": \"set_oscillating\",\n \"caller\": \"{{ this.entity_id }}\",\n \"option\": \"{{ oscillating }}\",\n },\n },\n ],\n \"set_direction\": [\n {\n \"service\": \"input_select.select_option\",\n \"data_template\": {\n \"entity_id\": _DIRECTION_INPUT_SELECT,\n \"option\": \"{{ direction }}\",\n },\n },\n {\n \"service\": \"test.automation\",\n \"data_template\": {\n \"action\": \"set_direction\",\n \"caller\": \"{{ this.entity_id }}\",\n \"option\": \"{{ direction }}\",\n },\n },\n ],\n }\n\n if preset_modes:\n test_fan_config[\"preset_modes\"] = preset_modes\n\n if speed_count:\n test_fan_config[\"speed_count\"] = speed_count\n\n assert await setup.async_setup_component(\n hass,\n \"fan\",\n {\"fan\": {\"platform\": \"template\", \"fans\": {\"test_fan\": test_fan_config}}},\n )\n\n await hass.async_block_till_done()\n await hass.async_start()\n await hass.async_block_till_done()", "title": "" }, { "docid": "2c50930abe8e2aa9c6e4ab3dadd0ca22", "score": "0.46627563", "text": "async def spa_configured(self):\n await self.send_config_req()\n await self.send_panel_req(0, 1)\n # get the versions and model data\n await self.send_panel_req(2, 0)\n while True:\n if (self.connected\n and self.config_loaded\n and self.macaddr != 'Unknown'\n and self.curtemp != 0.0):\n return\n await asyncio.sleep(1)", "title": "" }, { "docid": "2e6450cbb3c032f25f0fc817397110ed", "score": "0.46561337", "text": "def configure(process, options):\n\n # create the main module path\n process.add_path('path')\n\n # enable the JSON filter (if given)\n if options.jsonFilterFile:\n process.enable_json_lumi_filter(options.jsonFilterFile)\n\n # == configure CMSSW modules ==========================================\n\n # -- Jets (default from miniAOD) --------------------------------------\n\n if options.withPATCollections:\n # just write out miniAOD jets\n process.add_output_commands(\n 'keep patJets_slimmedJets_*_*',\n 'keep patJets_slimmedJetsAK8_*_*',\n )\n\n # -- Jets (from miniAOD, but with possibly new JECs from GT) ----------\n\n from PhysicsTools.PatAlgos.tools.jetTools import updateJetCollection\n\n updateJetCollection(\n process,\n jetSource = cms.InputTag('slimmedJets'),\n labelName = 'UpdatedJEC',\n jetCorrections = ('AK4PFchs', cms.vstring(['L1FastJet', 'L2Relative', 'L3Absolute', 'L2L3Residual']), 'None') # Update: Safe to always add 'L2L3Residual' as 
MC contains dummy L2L3Residual corrections (always set to 1)\n )\n\n updateJetCollection(\n process,\n jetSource = cms.InputTag('slimmedJetsAK8'),\n labelName = 'UpdatedJECAK8',\n jetCorrections = ('AK8PFchs', cms.vstring(['L1FastJet', 'L2Relative', 'L3Absolute', 'L2L3Residual']), 'None') # Update: Safe to always add 'L2L3Residual' as MC contains dummy L2L3Residual corrections (always set to 1)\n )\n\n process.jecSequence = cms.Sequence(process.patJetCorrFactorsUpdatedJEC * process.updatedPatJetsUpdatedJEC)\n process.jecSequenceAK8 = cms.Sequence(process.patJetCorrFactorsUpdatedJECAK8 * process.updatedPatJetsUpdatedJECAK8)\n\n process.path *= process.jecSequence\n process.path *= process.jecSequenceAK8\n\n if options.withPATCollections:\n process.add_output_commands(\n 'keep patJets_updatedPatJetsUpdatedJEC_*_*',\n 'keep patJets_updatedPatJetsUpdatedJECAK8_*_*',\n )\n\n # -- Jets (reclustered with jet toolbox) ------------------------------\n\n from Karma.Common.Sequences.jetToolbox_cff import addJetToolboxSequences\n\n # create reclustering sequences\n\n jet_collection_names = []\n\n # AK4CHS jets (include pileupJetID)\n jet_collection_names += addJetToolboxSequences(\n process, isData=options.isData,\n min_jet_pt=15,\n jet_algorithm_specs=('ak4',),\n pu_subtraction_methods=('CHS',),\n do_pu_jet_id=True\n )\n\n # AK8CHS jets (no pileupJetID available)\n jet_collection_names += addJetToolboxSequences(\n process, isData=options.isData,\n min_jet_pt=15,\n jet_algorithm_specs=('ak8',),\n pu_subtraction_methods=('CHS',),\n do_pu_jet_id=False\n )\n\n # AK4Puppi and AK8Puppi jets\n jet_collection_names += addJetToolboxSequences(\n process, isData=options.isData,\n min_jet_pt=15,\n jet_algorithm_specs=('ak4', 'ak8',),\n pu_subtraction_methods=('Puppi',),\n do_pu_jet_id=False\n )\n\n # put reclustering sequences on path\n for _jet_collection_name in jet_collection_names:\n process.path *= getattr(process, _jet_collection_name)\n ## write out reclustered jets\n #process.add_output_commands('keep patJets_{}_*_*'.format(_jet_collection_name))\n\n # -- Jet ID (precomputed and embedded as userInts) -------------------\n\n for _jet_collection_name in jet_collection_names:\n _id_producer_name = \"{}IDValueMap\".format(_jet_collection_name)\n _enriched_jet_collection_name = \"{}WithJetIDUserData\".format(_jet_collection_name)\n\n # produce the jet id value map\n setattr(\n process,\n _id_producer_name,\n cms.EDProducer(\"PatJetIDValueMapProducer\",\n filterParams = cms.PSet(\n version = cms.string('WINTER16'),\n quality = cms.string('TIGHTLEPVETO'),\n ),\n src = cms.InputTag(_jet_collection_name)\n )\n )\n\n # embed jet id information in pat::Jet itprocess\n setattr(\n process,\n _enriched_jet_collection_name,\n cms.EDProducer(\"PATJetUserDataEmbedder\",\n src = cms.InputTag(_jet_collection_name),\n userInts = cms.PSet(\n jetIdWinter16TightLepVeto = cms.InputTag(_id_producer_name),\n ),\n )\n )\n\n # add modules to path\n process.path *= getattr(process, _id_producer_name)\n process.path *= getattr(process, _enriched_jet_collection_name)\n\n # write out ID-enriched jet collection\n if options.withPATCollections:\n process.add_output_commands(\n 'keep patJets_{}_*_*'.format(_enriched_jet_collection_name)\n )\n\n # -- MET --------------------------------------------------------------\n\n from PhysicsTools.PatUtils.tools.runMETCorrectionsAndUncertainties import runMetCorAndUncFromMiniAOD\n\n # run this to keep MET Type-I correction up-to-date with currently applied JECs\n runMetCorAndUncFromMiniAOD(\n 
process,\n isData=True,\n )\n\n process.path *= process.fullPatMetSequence\n\n if options.withPATCollections:\n process.add_output_commands(\n 'keep patMETs_slimmedMETs_*_*',\n )\n\n # -- Electrons --------------------------------------------------------\n\n # just write out miniAOD electrons\n if options.withPATCollections:\n process.add_output_commands(\n \"keep patElectrons_slimmedElectrons_*_*\",\n )\n\n # Note: electron scale/smearing correction information is contained in\n # the following userFloats: 'ecalEnergyPreCorr' and 'ecalEnergyPostCorr'\n\n # Note: electron ID information is stored as \"pseudo-userData\" in\n # PAT::Electrons (not in the inherited PAT::Object userData variables)\n # and can be accessed using PAT::Electrons::electronID() with the\n # corresponding tag (e.g. 'cutBasedElectronID-Summer16-80X-V1-loose')\n\n\n # -- Muons ------------------------------------------------------------\n\n # just write out miniAOD muons\n if options.withPATCollections:\n process.add_output_commands(\n \"keep patMuons_slimmedMuons_*_*\",\n )\n\n # -- Primary Vertices -------------------------------------------------\n\n from PhysicsTools.SelectorUtils.pvSelector_cfi import pvSelector\n\n # \"good\" primary vertices\n\n process.add_module(\n 'goodOfflinePrimaryVertices',\n cms.EDFilter(\n 'PrimaryVertexObjectFilter',\n src = cms.InputTag(\"offlineSlimmedPrimaryVertices\"),\n filterParams = pvSelector.clone(\n maxZ = 24.0\n ), # ndof >= 4, rho <= 2\n ),\n on_path='path',\n write_out=False,\n )\n\n # == END configure CMSSW modules ======================================\n\n\n\n # == configure Karma modules ==========================================\n\n # -- preliminary checks\n\n if options.useHLTFilter and not options.hltRegexes:\n raise ValueError(\n \"Option 'useHLTFilter' is true, but 'hltRegexes' \"\n \"is empty: no events would be written out. 
Aborting!\")\n elif not options.hltRegexes:\n print(\"[karmaSkim] WARNING: Option 'hltRegexes' is empty:\"\n \"no trigger information will be written out!\")\n\n # -- General Event Information ----------------------------------------\n\n from Karma.Skimming.EventProducer_cfi import karmaEventProducer\n\n process.add_module(\n 'karmaEvents',\n karmaEventProducer(isData=options.isData).clone(\n goodPrimaryVerticesSrc = cms.InputTag(\"goodOfflinePrimaryVertices\"),\n hltProcessName = cms.string(\"HLT\"),\n # interesting trigger paths must match one of these regexes:\n hltRegexes = cms.vstring(*options.hltRegexes),\n #hltRegexes = cms.vstring(\"HLT_(AK8)?PFJet[0-9]+_v[0-9]+\", \"HLT_DiPFJetAve[0-9]+_v[0-9]+\"),\n metFiltersSrc = cms.InputTag(\"TriggerResults\", \"\", options.metFiltersProcess),\n writeOutTriggerPrescales = cms.bool(True),\n ),\n on_path='path',\n write_out=True,\n )\n\n # filter out event if no interesting HLT path fired (if requested)\n if options.useHLTFilter:\n process.add_module(\n 'karmaEventHLTFilter',\n cms.EDFilter(\"EventHLTFilter\",\n cms.PSet(\n karmaEventSrc = cms.InputTag(\"karmaEvents\")\n )\n ),\n on_path='path',\n write_out=False, # don't write out the TriggerResults object\n )\n\n # -- MC-specific event information ------------------------------------\n\n if not options.isData:\n\n from Karma.Skimming.GeneratorQCDInfoProducer_cfi import karmaGeneratorQCDInfoProducer\n\n process.add_module(\n 'karmaGeneratorQCDInfos',\n karmaGeneratorQCDInfoProducer.clone(\n genEventInfoProductSrc = cms.InputTag(\"generator\"),\n ),\n on_path='path',\n write_out=True,\n )\n\n # -- Trigger Objects --------------------------------------------------\n\n from Karma.Skimming.TriggerObjectCollectionProducer_cfi import karmaTriggerObjectCollectionProducer\n\n process.add_module(\n 'karmaTriggerObjects',\n karmaTriggerObjectCollectionProducer.clone(\n karmaRunSrc = cms.InputTag(\"karmaEvents\"),\n ),\n on_path='path',\n write_out=True,\n )\n\n # -- Gen-Particles (MC only) ------------------------------------------\n\n if not options.isData:\n\n from Karma.Skimming.GenParticleCollectionProducer_cfi import karmaGenParticleCollectionProducer\n\n process.add_module(\n 'karmaGenParticles',\n karmaGenParticleCollectionProducer.clone(\n inputCollection = cms.InputTag(\"prunedGenParticles\"),\n ),\n on_path='path',\n write_out=True,\n )\n\n # -- MET --------------------------------------------------------------\n\n from Karma.Skimming.METCollectionProducer_cfi import karmaMETCollectionProducer\n\n process.add_module(\n 'karmaMETs',\n karmaMETCollectionProducer.clone(\n inputCollection = cms.InputTag(\"slimmedMETs\"),\n ),\n on_path='path',\n write_out=True,\n )\n\n # -- MET correction levels (as edm::ValueMaps) ------------------------\n\n if options.withMETCorrectionLevels:\n\n from Karma.Skimming.METCorrectedLVValueMapProducer_cfi import karmaMETCorrectedLVValueMapProducer\n\n process.add_module(\n 'karmaMETCorrectedLVs',\n karmaMETCorrectedLVValueMapProducer.clone(\n inputCollection = cms.InputTag(\"karmaMETs\"),\n associationSpec = cms.VPSet(\n # uncorrected MET\n cms.PSet(\n name = cms.string(\"Raw\"),\n transientMapKey = cms.string(\"corP4Raw\"),\n ),\n\n # uncorrected MET (from CHS candidates)\n cms.PSet(\n name = cms.string(\"RawCHS\"),\n transientMapKey = cms.string(\"corP4RawCHS\"),\n ),\n\n # uncorrected MET\n cms.PSet(\n name = cms.string(\"Type1\"),\n transientMapKey = cms.string(\"corP4Type1\"),\n ),\n )\n ),\n on_path='path',\n write_out=True,\n )\n\n from 
Karma.Skimming.METCorrectedSumEtValueMapProducer_cfi import karmaMETCorrectedSumEtValueMapProducer\n\n process.add_module(\n 'karmaMETCorrectedSumEts',\n karmaMETCorrectedSumEtValueMapProducer.clone(\n inputCollection = cms.InputTag(\"karmaMETs\"),\n associationSpec = cms.VPSet(\n # uncorrected MET\n cms.PSet(\n name = cms.string(\"Raw\"),\n transientMapKey = cms.string(\"corSumEtRaw\"),\n ),\n\n # uncorrected MET (from CHS candidates)\n cms.PSet(\n name = cms.string(\"RawCHS\"),\n transientMapKey = cms.string(\"corSumEtRawCHS\"),\n ),\n\n # uncorrected MET\n cms.PSet(\n name = cms.string(\"Type1\"),\n transientMapKey = cms.string(\"corSumEtType1\"),\n ),\n )\n ),\n on_path='path',\n write_out=True,\n )\n\n # -- Jets -------------------------------------------------------------\n\n from Karma.Skimming.JetCollectionProducer_cfi import karmaJets\n from Karma.Skimming.JetCorrectedLVValueMapProducer_cfi import karmaJetCorrectedLVValueMapProducer, karmaJetCorrectedLVValueMapProducerForPuppi\n from Karma.Skimming.JetIdValueMapProducers_cfi import karmaJetIdValueMapProducer, karmaJetPileupIdValueMapProducer, karmaJetPileupIdDiscriminantValueMapProducer\n\n # create \"karma::Jet\" collections from pat::Jets\n for _jet_collection_name in jet_collection_names:\n\n # add karma modules for producing the skimmed jet collections\n _module_name = \"karma{}{}\".format(_jet_collection_name[0].upper(), _jet_collection_name[1:])\n process.add_module(\n _module_name,\n karmaJets.clone(\n inputCollection = cms.InputTag(\"{}WithJetIDUserData\".format(_jet_collection_name)),\n ),\n on_path='path',\n write_out=True,\n )\n\n # write out jet ID information to transients (used to fill value maps)\n _t = getattr(process, _module_name).transientInformationSpec\n _t.fromUserIntAsBool = cms.PSet(\n jetIdWinter16TightLepVeto = cms.string(\"jetIdWinter16TightLepVeto\"),\n )\n if 'AK4PFCHS' in _jet_collection_name:\n _t.fromUserFloat = cms.PSet(\n pileupJetId = cms.string(\"AK4PFCHSpileupJetIdEvaluator:fullDiscriminant\"),\n )\n _t.fromUserInt = cms.PSet(\n pileupJetId = cms.string(\"AK4PFCHSpileupJetIdEvaluator:fullId\"),\n )\n\n # add karma module for producing the Jet ID value map\n _valuemap_module_name = \"karma{}{}JetIds\".format(_jet_collection_name[0].upper(), _jet_collection_name[1:])\n process.add_module(\n _valuemap_module_name,\n karmaJetIdValueMapProducer.clone(\n inputCollection = cms.InputTag(_module_name),\n ),\n on_path='path',\n write_out=True,\n )\n\n # add karma modules for producing the pileup jet ID value maps (AK4CHS-only)\n if 'AK4PFCHS' in _jet_collection_name:\n _valuemap_module_name = \"karma{}{}JetPileupIds\".format(_jet_collection_name[0].upper(), _jet_collection_name[1:])\n process.add_module(\n _valuemap_module_name,\n karmaJetPileupIdValueMapProducer.clone(\n inputCollection = cms.InputTag(_module_name),\n ),\n on_path='path',\n write_out=True,\n )\n _valuemap_module_name = \"karma{}{}JetPileupIdDiscriminants\".format(_jet_collection_name[0].upper(), _jet_collection_name[1:])\n process.add_module(\n _valuemap_module_name,\n karmaJetPileupIdDiscriminantValueMapProducer.clone(\n inputCollection = cms.InputTag(_module_name),\n ),\n on_path='path',\n write_out=True,\n )\n\n if 'Puppi' in _jet_collection_name:\n _valuemap_producer = karmaJetCorrectedLVValueMapProducerForPuppi\n else:\n _valuemap_producer = karmaJetCorrectedLVValueMapProducer\n\n # add karma modules for producing the correction level value maps\n _valuemap_module_name = \"karma{}{}JECs\".format(_jet_collection_name[0].upper(), 
_jet_collection_name[1:])\n process.add_module(\n _valuemap_module_name,\n _valuemap_producer.clone(\n inputCollection = cms.InputTag(_module_name),\n ),\n on_path='path',\n write_out=True,\n )\n\n # -- Gen-Jets ---------------------------------------------------------\n\n if not options.isData:\n\n from Karma.Skimming.GenJetCollectionProducer_cfi import karmaGenJetsAK4, karmaGenJetsAK8\n\n process.add_module(\n 'karmaGenJetsAK4',\n karmaGenJetsAK4.clone(\n inputCollection = cms.InputTag(\"ak4GenJetsNoNu\"),\n ),\n on_path='path',\n write_out=True,\n )\n\n process.add_module(\n 'karmaGenJetsAK8',\n karmaGenJetsAK8.clone(\n inputCollection = cms.InputTag(\"ak8GenJetsNoNu\"),\n ),\n on_path='path',\n write_out=True,\n )\n\n # -- Photons ----------------------------------------------------------\n\n from Karma.Skimming.PhotonCollectionProducer_cfi import karmaPhotonCollectionProducer\n\n process.add_module(\n 'karmaPhotons',\n karmaPhotonCollectionProducer.clone(),\n on_path='path',\n write_out=True,\n )\n\n # -- Electrons --------------------------------------------------------\n\n from Karma.Skimming.ElectronCollectionProducer_cfi import karmaElectronCollectionProducer\n\n process.add_module(\n 'karmaElectrons',\n karmaElectronCollectionProducer.clone(),\n on_path='path',\n write_out=True,\n )\n\n # -- Electron IDs -----------------------------------------------------\n\n from Karma.Skimming.ElectronIdValueMapProducer_cfi import karmaElectronIdValueMapProducer\n\n process.add_module(\n 'karmaElectronIds',\n karmaElectronIdValueMapProducer.clone(\n inputCollection = cms.InputTag(\"karmaElectrons\")\n ),\n on_path='path',\n write_out=True\n )\n\n # -- Muons ------------------------------------------------------------\n\n from Karma.Skimming.MuonCollectionProducer_cfi import karmaMuonCollectionProducer\n\n process.add_module(\n 'karmaMuons',\n karmaMuonCollectionProducer.clone(),\n on_path='path',\n write_out=True,\n )\n\n # -- Primary Vertices -------------------------------------------------\n\n from Karma.Skimming.VertexCollectionProducer_cfi import karmaVertexCollectionProducer\n\n process.add_module(\n 'karmaVertices',\n karmaVertexCollectionProducer.clone(),\n on_path='path',\n write_out=True,\n\n )\n\n # == END configure Karma modules ======================================\n\n\n # selective writeout based on path decisions\n process.enable_selective_writeout('path')\n\n # just in case we need it\n return process", "title": "" }, { "docid": "243a89dcf44f74d9f0bd0233440c2880", "score": "0.46531618", "text": "def option_setup(self):\n self.get_master_contracts(exchange=Exchanges.NFO.name)\n self._options_master_contracts = self._master_contracts[\"NSE-OPT\"]\n self._future_master_contracts = self._master_contracts[\"NSE-FUT\"]\n self.create_bnf_instruments()", "title": "" }, { "docid": "0d7fdc22685d5d82229f7c93032103dc", "score": "0.46470782", "text": "async def test_implemented_preset_mode(hass: HomeAssistant, start_ha) -> None:\n assert len(hass.states.async_all()) == 1\n\n state = hass.states.get(\"fan.mechanical_ventilation\")\n attributes = state.attributes\n assert attributes.get(\"percentage\") is None\n assert attributes.get(\"supported_features\") & FanEntityFeature.PRESET_MODE", "title": "" }, { "docid": "48f85e04edbb60be6b75adf715cef9b7", "score": "0.46463642", "text": "def set_udfs(self):\n\n flowcell_type = self.process.all_inputs()[0].udf.get('Flowcell Type')\n\n for key, val in self.process_settings[flowcell_type].items():\n self.process.udf[key] = val\n 
self.process.put()\n\n for art in self.artifacts:\n for key, val in self.artifact_settings[flowcell_type].items():\n art.udf[key] = val\n art.put()", "title": "" }, { "docid": "ba1ac218072551f066e6a29ba833a166", "score": "0.46397036", "text": "def add_options(_config):\n settings = [\n [\"cache_worker\", bool, lambda x: x in [True, False], False, False],\n [\n \"kube_deployment\",\n str,\n lambda x: x in [\"pod\", \"container\", \"file\", \"call\"],\n False,\n \"pod\",\n ],\n [\n \"kube_version\",\n str,\n lambda _: [\"v1.27.0\", \"v1.26.0\", \"v1.25.0\", \"v1.24.0\", \"v1.23.0\"],\n False,\n \"v1.27.0\",\n ],\n ]\n return settings", "title": "" }, { "docid": "705ac82ea9d0c8c444c7de75edab7180", "score": "0.4639025", "text": "def config_init(self):\n\n game_opts = [\n\n # Execution Options\n ('debug',False), # Toggle Debug Messaging\n ('log_path',False), # Turn on logging (w/path)\n ('log_lvl',logging.DEBUG), # Set log level\n\n # World Generation Options\n ('flex_limit',3) # Sets the maximum variance\n\n ]\n\n # Attempts to pull each value from the configuration\n # if not in config, the default value defined above\n # is set instead\n for opt in game_opts:\n try:\n setattr(self,opt[0],self.conf.conf_dict[opt[0]])\n except:\n setattr(self,opt[0],opt[1])\n continue", "title": "" }, { "docid": "aa504b0929d4c27ec2518d31c53017e3", "score": "0.46382338", "text": "async def fishingsettings(self, ctx:commands.Context):", "title": "" }, { "docid": "c71b5caeec87d3335c06d67274258112", "score": "0.46338764", "text": "def configure(self):\n warnings.warn(\"No options to configure for \" + self.__class__.__name__)", "title": "" }, { "docid": "ec05549315170e4be99eac41c43b7e00", "score": "0.4632596", "text": "def setup(self):\n pass # pragma: no cover", "title": "" }, { "docid": "c015dce3bdad0e68613290c085cc14f0", "score": "0.46324575", "text": "def setup(args):\n cfg = get_cfg()\n add_imaterialist_config(cfg)\n cfg.merge_from_file(model_zoo.get_config_file(\"COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml\"))\n cfg.merge_from_file(args.config_file)\n \n cfg.merge_from_list(args.opts)\n cfg.freeze()\n default_setup(cfg, args)\n # Setup logger for \"imaterialist\" module\n setup_logger(output=cfg.OUTPUT_DIR, distributed_rank=comm.get_rank(), name=\"imaterialist\")\n return cfg", "title": "" } ]
4e2db4273aef647e4d55e183e06eefd6
Return all company ids listed in organisations.
[ { "docid": "81954b5d12648e52d6fa2fc00a550007", "score": "0.79137444", "text": "def company_ids(self) -> Generator[Optional[CompanyIDType], None, None]:\n for organisation in self:\n yield organisation.company_id", "title": "" } ]
[ { "docid": "0b7a3b6dbcd6a308d05b331ee622257a", "score": "0.75206494", "text": "def listCompanyIds( self ):\n return self._data[ COMPANIES ].keys()", "title": "" }, { "docid": "04e10cbead0a53dd80e56af38a329a04", "score": "0.71382254", "text": "def active_company_ids(self) -> Generator[CompanyIDType, None, None]:\n for organisation in self:\n if organisation.company_id and not organisation._skip_company:\n yield organisation.company_id", "title": "" }, { "docid": "0161dc78f6d5e26f1cc8de9a97f27b3d", "score": "0.6409637", "text": "def get_incident_list_by_organization_code(org_code):\n # get orgnization id\n org = Organization.objects.get(code=org_code)\n\n # get user list of the organization\n profiles = Profile.objects.filter(organization=org)\n\n all_incidents = []\n # all external organizations will all ways be listed on linked-individuals\n for profile in profiles:\n incidents = Incident.objects.filter(linked_individuals__id=profile.user_id)\n for incident in incidents:\n if incident not in all_incidents:\n all_incidents.append(incident)\n\n return all_incidents", "title": "" }, { "docid": "de14e2000ddf8d5f8072009278eb8bc8", "score": "0.63340145", "text": "def active_ids(self) -> Generator[OrganisationIDsType, None, None]:\n if self.charity_id and not self._skip_charity:\n yield self.charity_id\n if self.company_id and not self._skip_company:\n yield self.company_id", "title": "" }, { "docid": "7f2e632a64cea837d3ebfe6526878b61", "score": "0.6330583", "text": "def organizations_adminof_ids(self):\n return [org['userid'] for org in self.organizations_adminof()]", "title": "" }, { "docid": "fcbac75235de7a84f0d8da1ff5c3e790", "score": "0.6265619", "text": "def enumerateCompanies( self ):\n return self._data[ COMPANIES ].items()", "title": "" }, { "docid": "220b8e0dca8cc076b4c554aa66b4be49", "score": "0.62086064", "text": "def organisations(self):\n orgs = []\n \n # Run the search, fetch all orgs, 9999 max. 
TODO: Scroll???\n res = self.DB.search(\n index=self.config['elasticsearch']['database'],\n doc_type=\"organisation\",\n size = 9999,\n body = {\n 'query': {\n 'match_all': {}\n }\n }\n )\n \n for hit in res['hits']['hits']:\n org = hit['_source']['id']\n orgClass = KibbleOrganisation(self, org)\n yield orgClass", "title": "" }, { "docid": "0c5b27f0d630bb1fc467ec67af4db419", "score": "0.6198823", "text": "def organizations_owned_ids(self):\n return [org['userid'] for org in self.organizations_owned()]", "title": "" }, { "docid": "530cb0645c82146c84e23d9690f030de", "score": "0.61887956", "text": "def get_companies_list(self):\n return self.companies_list", "title": "" }, { "docid": "71131852033d19dec2a453137a2394df", "score": "0.5958758", "text": "def list_organizations(self):\n\n try:\n res = self._send_request('GET', self._org_url, '', 'organizations')\n if res and res.status_code in self._resp_ok:\n return res.json()\n except dexc.DfaClientRequestFailed:\n LOG.error(_LE(\"Failed to send request to DCNM.\"))", "title": "" }, { "docid": "e082e6b4afd984201cde9c967054b2ff", "score": "0.59492296", "text": "def organisations(\n self, # *args: QueryParameters, **kwargs: QueryParameters\n ) -> OrganisationSequenceList:\n if not hasattr(self, \"_organisations\"):\n # reader_dict_args: QueryParameters = {\n # **self.data_reader_params,\n # # **kwargs,\n # }\n # org_entry_args: QueryParameters = {\n # **self.organisation_entry_params,\n # # **kwargs,\n # }\n # self._organisations: OrganisationSequenceList = [\n # OrganisationEntry(org, **org_entry_args)\n # # for _, org in self.data_reader(*args, **reader_dict_args)\n # for _, org in self.data_reader(**reader_dict_args)\n self._organisations: OrganisationSequenceList = [\n OrganisationEntry(\n {**org, \"line_number\": line_number},\n # https://github.com/python/mypy/issues/5382[arg-type]\n **self.organisations_entry_params, # type: ignore\n )\n # for _, org in self.data_reader(*args, **reader_dict_args)\n for line_number, org in self._data_reader(\n self.organisations_data_source, **self._data_reader_params\n )\n ]\n return self._organisations", "title": "" }, { "docid": "e88b9e3d99504518a9625f6d05bee347", "score": "0.58825964", "text": "def charity_ids(self) -> Generator[Optional[CharityIDType], None, None]:\n for organisation in self:\n yield organisation.charity_id", "title": "" }, { "docid": "6b38425ccce324a320196fd437f9d6d3", "score": "0.58699507", "text": "def organizations(self):\n return self._organizations", "title": "" }, { "docid": "15d7aa414a4b201a0818bc9eed5e8131", "score": "0.58250606", "text": "def listDepartmentIds( self ):\n return self._data[ DEPARTMENTS ].keys()", "title": "" }, { "docid": "9cc4e94383d68d668428967689a6acbd", "score": "0.58078194", "text": "def get_companies(self) -> list:\n data = self._call(\"Company/Get\", \"get\", params={\"$filter\": \"Active eq true\", \"$select\": \"ID,Name\", \"$orderby\": \"Name\"})\n return data[\"Results\"]", "title": "" }, { "docid": "a54925e7e1ff0612470b74fa8ed2f091", "score": "0.5787439", "text": "def user_organizations_adminof_ids(self):\n return [self.userid] + self.organizations_adminof_ids()", "title": "" }, { "docid": "fd6e1d933d44501024cceb34854c9b5e", "score": "0.5712246", "text": "async def xml_requests_list(\n organisations: List[requesters.OrganisationRequestDetail]\n) -> List[requesters.XMLRequest]:\n requests_list = []\n async with ClientSession(connector=TCPConnector(ssl=False)) as session:\n for detail_request in organisations:\n xml_requests = await 
detail_request.iati_xml_requests(session=session)\n for xml_request in xml_requests:\n assert xml_request.organisation_handle\n requests_list.append(xml_request)\n\n return requests_list", "title": "" }, { "docid": "ff5473c7f95e3eac9ae6f34a89f568d6", "score": "0.56987786", "text": "def getOrganizations(self, includeData=False):\n # TODO: call as it is seems to crash with 502 error. breaking it up into\n # a paged call\n orgConfig = {}\n organizations = []\n pageSize = 100\n currentPosition = 0\n pageCnt = 1\n if includeData:\n orgConfig = {\n 'order_by': 'name',\n 'all_fields': True,\n 'include_extras': True,\n 'include_tags': True,\n 'include_groups': True,\n 'include_users': True,\n 'limit': pageSize,\n 'offset': currentPosition\n }\n while True:\n LOGGER.debug(f\"OrgConfig is {orgConfig}\")\n LOGGER.debug(f\"pagecount is {pageCnt}\")\n\n retVal = self.remoteapi.action.organization_list(**orgConfig)\n LOGGER.debug(f\"records returned: {len(retVal)}\")\n organizations.extend(retVal)\n\n if not retVal or len(retVal) < pageSize:\n break\n currentPosition = currentPosition + pageSize\n orgConfig['offset'] = currentPosition\n pageCnt += 1\n return organizations", "title": "" }, { "docid": "a512cd187c25b44a7fd82526d3c7ceff", "score": "0.5659566", "text": "def test_get_organisations(self):\n\n with self.env.create():\n # Add Organisation objects\n names = [\"Curtin University\", \"Massachusetts Institute of Technology\", \"Harvard University\"]\n dt = pendulum.now(self.timezone)\n dt_utc = dt.in_tz(tz=\"UTC\")\n for name in names:\n self.env.session.add(orm.Organisation(name=name, created=dt, modified=dt))\n self.env.session.commit()\n\n # Assert that Organisation objects returned\n objects = self.api.get_organisations(limit=10)\n self.assertEqual(len(names), len(objects))\n for i, (obj, name) in enumerate(zip(objects, names)):\n expected_id = i + 1\n self.assertIsInstance(obj, Organisation)\n self.assertEqual(expected_id, obj.id)\n self.assertEqual(name, obj.name)\n self.assertEqual(dt_utc, obj.created)\n self.assertEqual(dt_utc, obj.modified)", "title": "" }, { "docid": "8d92eaa4ce85127d80b3b4cf03b38e8b", "score": "0.56574696", "text": "async def xml_requests_get(\n organisations: List[str] = None\n) -> List[requesters.IatiXMLRequest]:\n logger.info(\"Fetching Organisation List\")\n if not organisations:\n orl = requesters.OrganisationRequestList()\n organisations = await orl.to_list(session=None)\n logger.info(\"Convert list into request objects\")\n organisation_requests = await organisation_requests_list(organisations)\n logger.info(\"Grab XML file references for organisations: as URLs\")\n await organisation_requests_fetch(organisation_requests)\n logger.info(\"Grab XML file references for organisations: as XmlRequest objects\")\n xml_requests = await xml_requests_list(organisation_requests)\n logger.info(\"Fetch XML references for organisations\")\n # await xml_requests_fetch(xml_requests)\n logger.info(\"XML requests returning\")\n\n xml_requests.reverse()\n return xml_requests", "title": "" }, { "docid": "80de9679d1feb553c3515c1afe659d4b", "score": "0.56542075", "text": "def getCityIDs(cities):\n return list(cities.keys())", "title": "" }, { "docid": "194fda7bda0f62cea292d5d04795886c", "score": "0.56529456", "text": "def listCompanies( self ):\n res = []\n for id, value in self.enumerateCompanies():\n res.append( { 'id' : id, \n 'postfix' : value[ COMPANY_POSTFIX ], \n 'name' : value[ COMPANY_NAME ], \n 'title' : value[ COMPANY_TITLES ][0] ,\n } )\n return res", "title": "" }, { 
"docid": "12c77a41d17cf8e0cdaf37c12d007bc3", "score": "0.56398743", "text": "def get_organizations(self):\n all_ids = self.owned_organizations + self.assc_organizations\n return DatastoreModel.get_by_id(all_ids)", "title": "" }, { "docid": "0377a07b2b57b55a973661e387a2624e", "score": "0.56373036", "text": "def get_id_list(self, collection: str):\n if self.db is None:\n raise ValueError('Database is not available. Setup database first.')\n logging.info('Search all id from {}'.format(collection))\n id_list = [x['_id'] for x in self.db[collection].find(filter={}, projection={'_id': 1}, sort=[('_id', -1)])]\n return id_list", "title": "" }, { "docid": "da4a754b7ba3e934ed97ef8fc19a1311", "score": "0.56271255", "text": "def get_all_organizations(self):\n portal_catalog = api.portal.get_tool('portal_catalog')\n queryDict = {}\n queryDict['portal_type'] = 'organization'\n queryDict['sort_on'] = 'getObjPositionInParent'\n results = portal_catalog.searchResults(queryDict)\n return [result.getObject() for result in results]", "title": "" }, { "docid": "fcab691538d0e2df9aef3b4a7e3eba82", "score": "0.5609007", "text": "def getOrganizationNames(self):\n orgList = self.remoteapi.action.organization_list()\n return orgList", "title": "" }, { "docid": "ac6d3e0907a6f0bb7aada45eb3bc5061", "score": "0.5539813", "text": "def organization_list(client=client.Client(), order_by='', sort='', organizations='', all_fields=''):\n args = client.sanitize_params(locals())\n\n resp = client.request(action='organization_list', data=args)\n if not resp['success']:\n raise exceptions.CKANError(resp.error)\n return resp", "title": "" }, { "docid": "4fdead31823319b1400b2bbf5097b4ce", "score": "0.55237633", "text": "def caa_identities(self) -> List[str]:\n value = force(self.meta.get('caaIdentities', []), list)\n for val in value:\n force(val, str)\n return value", "title": "" }, { "docid": "57765c6ed25b8bcf36ef51c4093d3a5b", "score": "0.55013216", "text": "def user_organizations_owned_ids(self):\n return [self.userid] + self.organizations_owned_ids()", "title": "" }, { "docid": "a440208120b0e9ca0c453f6f3e592eaa", "score": "0.54495275", "text": "def find_affiliations_by_org_id(org_id):\n # Accomplished in service instead of model (easier to avoid circular reference issues).\n entities = db.session.query(Entity) \\\n .join(AffiliationModel) \\\n .options(\n contains_eager(Entity.affiliations),\n subqueryload(Entity.contacts).subqueryload(ContactLink.contact),\n subqueryload(Entity.created_by),\n subqueryload(Entity.modified_by)) \\\n .filter(AffiliationModel.org_id == org_id, Entity.affiliations.any(AffiliationModel.org_id == org_id)) \\\n .order_by(AffiliationModel.created.desc()) \\\n .all()\n\n return [EntityService(entity).as_dict() for entity in entities]", "title": "" }, { "docid": "b8e65996f29e9122ace91dccae6e6107", "score": "0.54476905", "text": "def get_org_ids_string(sf_instance):\n # this where condition is specific to my use case.\n where_condition = \"WHERE sfLma__Package_Version__r.Name LIKE '%v%1.%'\"\n data = sf_instance.query(\"SELECT sfLma__Subscriber_Org_ID__c FROM sfLma__License__c \" + where_condition)\n\n org_ids = set()\n for k, v in data.items():\n if k == \"records\":\n for item in v:\n org_ids.add(item.get('sfLma__Subscriber_Org_ID__c'))\n\n org_ids_string = ','.join(str(s) for s in org_ids)\n return org_ids_string", "title": "" }, { "docid": "bed8bd16322daeb09a48751fb13a3c21", "score": "0.5441119", "text": "def getAllIdsFromPlayersInAList():\n db, cursor = connect()\n\n query = \"SELECT players.id 
FROM players;\"\n cursor.execute(query)\n results = cursor.fetchall()\n\n db.close()\n\n id_list = []\n for val in results:\n id_list.append(val[0])\n return id_list", "title": "" }, { "docid": "9a896e2c3074596ef060ef42c3e9727f", "score": "0.5441077", "text": "def active_charity_ids(self) -> Generator[CharityIDType, None, None]:\n for organisation in self:\n if organisation.charity_id and not organisation._skip_charity:\n yield organisation.charity_id", "title": "" }, { "docid": "c7a8d8a2f38680a2e72b5edab6884337", "score": "0.54305667", "text": "def _getDepartments( self, company=None ):\n if company is None:\n return self.enumerateDepartments()\n departments = []\n for id, value in self.enumerateDepartments():\n if value[ COMPANY ] == company:\n departments.append( ( id, value ) )\n return departments", "title": "" }, { "docid": "30a5ddbafcd76d889fc7e92af0a85269", "score": "0.5430037", "text": "def get_companies(self, **params):\n endpoint = \"/companies/paged\"\n return self._client._get(self.BASE_URL + endpoint, params=params)", "title": "" }, { "docid": "b4ec2c6ec7aa6e88a3ea4f6c954fef63", "score": "0.54101634", "text": "def get_organizations_to_delete():\n\n all_organizations = seed.models.Organization.objects.all()\n bad_organizations = [org for org in all_organizations if org.id not in get_core_organizations()]\n return bad_organizations", "title": "" }, { "docid": "c1747f04be831b6272896064247d9be5", "score": "0.5378089", "text": "def getListOfCompanyTitles( self ):\n res = []\n for value in self._data[ COMPANIES ].values():\n for title in value[ COMPANY_TITLES ]:\n if title not in res:\n res.append( title )\n return res", "title": "" }, { "docid": "0eb5ebeacf30011ceaa91e2a6007086b", "score": "0.5369961", "text": "def list_orgs(self):\n orgs = list(self.orgs.keys())\n orgs.sort()\n return orgs", "title": "" }, { "docid": "0cca4e7dfa179ea51966a072609a0dc2", "score": "0.5315879", "text": "def orgList(self):\n\n print \"Generating orgList\"\n base_url = self.context_state.current_base_url()\n org_list = self.database.currentOrgs()\n ol_len_third = len(org_list) / 3 + 1\n rez = [[], [], []]\n count = 0\n for o in org_list:\n if o.alt_cal_url:\n url = o.alt_cal_url\n else:\n url = \"%s?org=%d\" % (base_url, o.oid)\n rez[count / ol_len_third].append({\n 'url': url,\n 'orgname': o.orgname,\n 'acronym': o.acronym,\n })\n count += 1\n return rez", "title": "" }, { "docid": "b8df4ffc9b5ef273bd88661b7f02b8a1", "score": "0.530556", "text": "def industry_orgs(self, entity_id, cycle=DEFAULT_CYCLE, limit=DEFAULT_LIMIT):\n return self._get_url_json('aggregates/industry/%s/orgs.json' % entity_id, cycle, limit)", "title": "" }, { "docid": "93c7d3feade872b5c42887c19994d423", "score": "0.5275293", "text": "def get_team_ids(self):\n return teams.list_teams(self.year)", "title": "" }, { "docid": "a8d1e49a5efa9616c63300e7e1ec909f", "score": "0.52739465", "text": "def industries(self) -> list:\n if self._industries is not None:\n return self._industries\n in_use = {}\n for ind in self.use_table.columns:\n in_use[ind] = True\n industries = []\n for ind in self.make_table.index:\n if ind in in_use:\n industries.append(ind)\n industries.sort()\n self._industries = industries\n return industries", "title": "" }, { "docid": "5ddd39b095176dd160e52ce8aa72bc86", "score": "0.5271172", "text": "def organizations(self, number=-1, etag=None):\n url = self._build_url(\"user\", \"orgs\")\n return self._iter(int(number), url, orgs.ShortOrganization, etag=etag)", "title": "" }, { "docid": 
"164768cf5e91e0b20a8c877cdea96847", "score": "0.52461344", "text": "def all(self, org_id, force=False):\n if (self.__acme_accounts) and (not force):\n return self.__acme_accounts\n\n self.__acme_accounts = []\n result = self.find(org_id)\n for acct in result:\n self.__acme_accounts.append(acct)\n\n return self.__acme_accounts", "title": "" }, { "docid": "71e67f7add862c10e0140fbd7943edba", "score": "0.5235517", "text": "def organisation_id(self):\n\n return self.commission.organisation_id if self.commission else None", "title": "" }, { "docid": "cbb7f41c569a6241777f3191652abc61", "score": "0.52294874", "text": "def _get_list(self, collection):\n collection_list = []\n for data in collection:\n collection_list.append(data.id)\n return collection_list", "title": "" }, { "docid": "03cd2db4f857e579b20961928afa5ced", "score": "0.52040565", "text": "def get_list_of_ids_from_mongo(self) -> list:", "title": "" }, { "docid": "d2786ceaada341c5ff7ca012ac9d0aa5", "score": "0.5201891", "text": "def organizations_owned(self):\n if (\n self.userinfo\n and self.userinfo.get('organizations')\n and 'owner' in self.userinfo['organizations']\n ):\n return list(self.userinfo['organizations']['owner'])\n else:\n return []", "title": "" }, { "docid": "45d49e52e6549079b115133f4fd41a17", "score": "0.5198822", "text": "def get_all_school_ids():\n collector = {}\n url = '%s?api_key=%s&fields=id,school.name' % (schools_root, api_key)\n for page in range(1, 391):\n next_url = \"%s&page=%s\" % (url, page)\n nextdata = json.loads(requests.get(next_url).text)\n for entry in nextdata['results']:\n collector[entry['id']] = entry['school.name']\n with open('school_ids.json', 'w') as f:\n f.write(json.dumps(collector))", "title": "" }, { "docid": "a569193942c82f67e4a897440cc79db3", "score": "0.5189651", "text": "def organizations():\n\n # Get Security Key\n sec_key = Config.query.filter_by(key=\"security_key\").first()\n if sec_key == None:\n return \"Security Key not set.\"\n\n # Verify the security key\n if request.args.get('k') != sec_key.value:\n return \"Invlalid Security Key. 
You do not have access to this system.\"\n\n # See if there is a query\n query = request.args.get('query')\n if query:\n # Search for the query\n orgs = [i.name for i in Organization.query.filter(Organization.name.like('%' + query + '%'))]\n else:\n # Respond with JSON of all organizations\n orgs = [i.name for i in Organization.query.all()]\n\n # Return the JSON response\n return jsonify(orgs)", "title": "" }, { "docid": "af2614ee16f38f5c6d3f933bb15c179f", "score": "0.5178718", "text": "def organizations(self, number=-1, etag=None):\n # Import here, because a toplevel import causes an import loop\n from .orgs import ShortOrganization\n\n url = self._build_url(\"orgs\", base_url=self._api)\n return self._iter(int(number), url, ShortOrganization, etag=etag)", "title": "" }, { "docid": "35bb756d1778349d5cd17ee30efedca7", "score": "0.51727134", "text": "def getAcceptedOrganizations(\n program_key, limit=None, models=types.MELANGE_MODELS):\n limit = limit or _DEFAULT_ORG_NUMBER\n\n cache_key = _ORG_CACHE_KEY_PATTERN % (limit, program_key.name())\n cached_data = memcache.get(cache_key)\n if cached_data:\n if datetime.datetime.now() < cached_data.time + _ORG_CACHE_DURATION:\n return cached_data.orgs\n else:\n start_cursor = cached_data.cursor\n else:\n start_cursor = None\n\n # organizations are not returned from the cache so datastore is be queried\n query = models.ndb_org_model.query(\n models.ndb_org_model.program == ndb.Key.from_old_key(program_key),\n models.ndb_org_model.status == org_model.Status.ACCEPTED)\n orgs, next_cursor, _ = query.fetch_page(limit, start_cursor=start_cursor)\n\n if len(orgs) < limit:\n extra_orgs, next_cursor, _ = query.fetch_page(limit - len(orgs))\n\n org_keys = [org.key for org in orgs]\n for extra_org in extra_orgs:\n if extra_org.key not in org_keys:\n orgs.append(extra_org)\n\n # if the requested number of organizations have been found, they are cached\n if len(orgs) == limit:\n memcache.set(\n cache_key, CachedData(orgs, datetime.datetime.now(), next_cursor))\n\n return orgs", "title": "" }, { "docid": "71058a0eb8a82ffaa3e5f13af12eb8de", "score": "0.51711035", "text": "def listCompanyTitles( self ):\n res = []\n for value in self._data[ COMPANIES ].values():\n res.append( value[ COMPANY_TITLES ][0] )\n return res", "title": "" }, { "docid": "e7e16dd2ab0a7b094a438148a78be9da", "score": "0.51704633", "text": "def getGroupIds():", "title": "" }, { "docid": "1a37afe1ad7b1419e3a23622ca3d33ca", "score": "0.51499987", "text": "def listOrganizations(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "title": "" }, { "docid": "b8e68a58e6d741101bc25fdfbbe9d701", "score": "0.5128477", "text": "def getDomains(self, company):\n return self.db.getDomains(company)", "title": "" }, { "docid": "9dbeb29fbb324ee12161f5e76cadbdbd", "score": "0.51252276", "text": "async def get_ids(self) -> List[int]:", "title": "" }, { "docid": "c10a3c00b6d945f390d345656bfa1ec9", "score": "0.5118473", "text": "def get_queryset(self):\n return Organisation.objects.filter()", "title": "" }, { "docid": "dc915f023b80e3d10668b8e4b8723e05", "score": "0.50944287", "text": "def get_result_ids(self) -> list[int]:\n # FIXME this should be removed in version 4.0 and above\n # note that the import below is required since this file (Organization)\n # is already imported in model.Result\n from vantage6.server.model.result import Result\n session = 
DatabaseSessionManager.get_session()\n result_ids = session.query(Result.id)\\\n .filter(Result.organization_id == self.id).all()\n session.commit()\n return result_ids", "title": "" }, { "docid": "e0df3da1292e911e9ae391db51c08259", "score": "0.5085051", "text": "def organizations(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"organizations\")", "title": "" }, { "docid": "e2274a09bab99ef3df084b7726d6fa3d", "score": "0.5073022", "text": "def get_all_adrs_ids():\n query = \"\"\"SELECT DISTINCT adr.id\n FROM adr\"\"\"\n \n cnx = get_connection()\n cursor = cnx.cursor()\n cursor.execute(query, id)\n\n ids = cursor.fetchall()\n\n cursor.close()\n close_connection(cnx)\n\n return ids", "title": "" }, { "docid": "94e2f2738dd88e68d161b396321f18e6", "score": "0.5063342", "text": "def get_ids(self):\n\n ids = []\n for row in self.get_data():\n ids.append(row['id'])\n\n return ids", "title": "" }, { "docid": "67ec9478a9181ef40ef2222e9a756d7d", "score": "0.50574774", "text": "def get_companies(cls, request):\n res = ClientAPI._serialize(Company.objects.all())\n return JsonResponse(res, safe=False)", "title": "" }, { "docid": "ec890726cc38b7e5c96e65bd2ef1eef9", "score": "0.5051754", "text": "def get_datasets_ids(self):\n return [dts.id for dts in self.datasets]", "title": "" }, { "docid": "7391ab6abdc591258573aa7f7b6a415c", "score": "0.50496143", "text": "def industries(self, entity_id, cycle=DEFAULT_CYCLE, limit=DEFAULT_LIMIT):\n return self._get_url_json('aggregates/pol/%s/contributors/industries.json' % entity_id, cycle, limit)", "title": "" }, { "docid": "b2e2d84e06121a0bfe0a4ec2957260f3", "score": "0.5040732", "text": "def get_IDs(self):\n return self.list_IDs", "title": "" }, { "docid": "5047ada8c90f3da8f7fd134993a1ba19", "score": "0.5023665", "text": "def identity_group_ids(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"identity_group_ids\")", "title": "" }, { "docid": "012d7a40314ab3d1f5ecd52074c721e5", "score": "0.5019963", "text": "def all_organizations(\n self, number=-1, since=None, etag=None, per_page=None\n ):\n url = self._build_url(\"organizations\")\n return self._iter(\n int(number),\n url,\n orgs.ShortOrganization,\n params={\"since\": since, \"per_page\": per_page},\n etag=etag,\n )", "title": "" }, { "docid": "e66920db8c413bb75b24e5e8289c2dca", "score": "0.50191665", "text": "def get_all_ids(self):\n id_list = [ ]\n\n for key in self.__json:\n id_list.append(self.get_id(key))\n logger.debug('get_all_ids function --> %s'%id_list)\n return id_list", "title": "" }, { "docid": "05440a9ad8b3bcc9a28633e8caf14245", "score": "0.50096416", "text": "def test_get_all_organizations(self):\n pass", "title": "" }, { "docid": "bd3415c626e84f12532cb860a72499d3", "score": "0.50075054", "text": "def get_project_ids_from_org_id(self, org_id):\n\n self.print_progress(\"waiting for delay period\")\n time.sleep (self.setting('delay'))\n msg = \"collecting GTR grant IDs for %s\" % (org_id)\n self.print_progress(msg)\n\n url = \"%sorganisations/%s/projects\" % (self.setting('base-url'),\n org_id)\n\n params={}\n result = requests.get(\n url,\n headers = self.setting('base-headers')\n )\n totalpages = result.json().get('totalPages')\n projects = []\n for proj in result.json().get('project'):\n projects.append(proj.get('id'))\n\n page = 2\n while page <= totalpages:\n params['p'] = page\n result = requests.get(\n url,\n headers = self.setting('base-headers')\n )\n for proj in result.json().get('project'):\n projects.append(proj.get('id'))\n\n if 
self.setting('testing') == True and page > 3:\n page = totalpages\n page+=1\n\n return projects", "title": "" }, { "docid": "6ab42c351e16cafd6ba4ccd1b0b611bf", "score": "0.5001997", "text": "def get_org_invitation_emails(self):\n org_invitations = self.github_org.invitations()\n org_invitation_emails = set()\n\n print(\n f\"\\nOrg invitations {org_invitations.totalCount} \"\n \"(login - name - company - email - valid):\"\n )\n for org_invitation in org_invitations:\n print_users(org_invitation)\n if is_user_ignored(org_invitation):\n continue\n if is_intel_email(org_invitation.email):\n org_invitation_emails.add(org_invitation.email.lower())\n else:\n print(\"Strange org invitation:\", org_invitation)\n\n print(\n f\"\\nOrg invitation emails {len(org_invitation_emails)}:\",\n \"; \".join(org_invitation_emails),\n )\n return org_invitation_emails", "title": "" }, { "docid": "6d0a8cfa4fcc1615655ac121bd504be4", "score": "0.4999709", "text": "def get_companies() -> List[sqlite3.Row]:\n with sqlite3.connect(DATABASE_FILE) as con:\n con.row_factory = sqlite3.Row\n cur: Cursor = con.execute(\"SELECT * FROM company\")\n return cur.fetchall()", "title": "" }, { "docid": "9cef92ba3fc09630bba0c9a55bc47c79", "score": "0.49891567", "text": "def orgs(self):\n return self._clients.authority.orgs", "title": "" }, { "docid": "68bc06c681203c6ed36d4e1608a9ce65", "score": "0.4984209", "text": "def companies(self) -> Companies:\n\n # Grab the `Archives` object.\n object = Companies(session=self.edgar_session)\n\n return object", "title": "" }, { "docid": "39a7eeb35d497aeae91a4b5209b5f234", "score": "0.49829355", "text": "def get_team_ids(self):\n df = self.team_stats()\n if not df.empty:\n return df.index.tolist()\n else:\n print 'ERROR: no teams found'\n return []", "title": "" }, { "docid": "4842f35b7e915878081999c0bcf13d99", "score": "0.4969997", "text": "def get_all_organisms(self) -> List[Organism]:\n return self._get_query(table=Organism).order_by(Organism.internal_id)", "title": "" }, { "docid": "d2601c220ce6ca89777bf35b0259014c", "score": "0.49677885", "text": "def design_ids(self):\r\n\r\n return self.designs.objectIds()", "title": "" }, { "docid": "051182877006adbab53f95cc5c803ae0", "score": "0.4963237", "text": "def get_ids(amcat_server: Union[str, AmcatAPI], project: int, articleset: int) -> Iterable[int]:\n return (x['id'] for x in\n _amcat(amcat_server).get_articles(project, articleset, columns=['id']))", "title": "" }, { "docid": "e3ea0519951c44de06952e3279be043c", "score": "0.49551263", "text": "def get_agent_ids(cur, archetype):\n agents = cur.execute(\"SELECT agentid FROM agententry WHERE spec \"\n \"LIKE '%\" + archetype + \"%' COLLATE NOCASE\"\n ).fetchall()\n\n return list(str(agent['agentid']) for agent in agents)", "title": "" }, { "docid": "0c739be168ca00fb38468fdf211a6efe", "score": "0.49511206", "text": "def identities(self, generator=False, **kwargs):\n identities = super().identities(generator=True, **kwargs)\n\n i = getattr(self, \"id\", None)\n if i is None:\n g = identities\n else:\n g = chain((f\"id%{i}\",), identities)\n\n if generator:\n return g\n\n return list(g)", "title": "" }, { "docid": "d9b90ceebc98c081d4e4982e3f88ac5f", "score": "0.49463865", "text": "def get_list_of_companies(self):\n\t\tsearch_params = {\"keyword\":\"\",\"skills\":[],\"locations\":[{\"latitude\":40.67,\"longitude\":-73.94}],\"specializations\":[],\"remote\":int(self.remote)}\n\t\tsearch_params.update(self.search_params)\n\n\t\theaders = {'Accept-Encoding': ' 
gzip,deflate,sdch',\n\t\t'Accept-Language': ' en-US,en;q=0.8',\n\t\t'User-Agent': ' Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.125 Safari/537.36',\n\t\t'Content-Type': ' application/json;charset=UTF-8',\n\t\t'Accept': ' application/json, text/plain, */*',\n\t\t'Referer': ' https://www.whitetruffle.com/c/explore',}\n\n\t\temployer_list = self.session.post(\"https://www.whitetruffle.com/c/search_explore\", data=json.dumps(search_params), headers=headers).json()\n\n\t\tif not isinstance(employer_list, dict):\n\t\t\timport ipdb; ipdb.set_trace()\n\n\t\treturn employer_list", "title": "" }, { "docid": "4cf78ea5976bb052398ef3763c25083e", "score": "0.49361444", "text": "def get_organizations_indexed_by_name():\n organizations = fts_queries.fetch_organizations_json_as_dataframe()\n return organizations.set_index('name')", "title": "" }, { "docid": "3862bd3e35eb41331838ac907ec5ddcc", "score": "0.49337226", "text": "def getCategoryIds(self):\n return list(self.categories.keys())", "title": "" }, { "docid": "3862bd3e35eb41331838ac907ec5ddcc", "score": "0.49337226", "text": "def getCategoryIds(self):\n return list(self.categories.keys())", "title": "" }, { "docid": "2598cae6b52d54a7f58a0fc624d32085", "score": "0.49159706", "text": "def test_organization_list(self):\n jur = Jurisdiction.objects.get(name=\"Missouri State Senate\")\n Organization.objects.create(jurisdiction=jur, name=\"Democratic\",\n classification='executive')\n Organization.objects.create(jurisdiction=jur, name=\"Republican\",\n classification='executive')\n orgs_list = Organization.objects.filter(\n jurisdiction=jur).values('classification').distinct()\n response = self.client.get(reverse('jurisdiction_intro',\n args=(jur.id,)))\n self.assertEqual(response.status_code, 200)\n self.assertQuerysetEqual(response.context['orgs'],\n [\"{'classification': 'executive'}\"])\n self.assertEqual(len(orgs_list), 1)", "title": "" }, { "docid": "43320c53d45dfe60b925786e97f28785", "score": "0.49121088", "text": "def find_organization():\n # _token = request.headers.get('token')\n # try:\n # verify_token(_token)\n # except Exception:\n # return not_loggin()\n\n with db.session() as session:\n organization_data = find_organization_from_db(session)\n data_all = [product.serialize() for product in organization_data]\n return jsonify(data_all)", "title": "" }, { "docid": "f593704a900c30260a00495b0d8109e6", "score": "0.49116838", "text": "def contact_ids(self):\n return self._contact_ids", "title": "" }, { "docid": "dcf57897bb6a55f8e4d95bf6bc895959", "score": "0.4907066", "text": "def get_penyakit_list(symptoms, cursor):\n\n rows = []\n id_penyakit_list = []\n\n for symp in symptoms:\n cursor.execute(\n \"SELECT id_penyakit FROM gejala_penyakit WHERE id_gejala = \" + str(symp)\n )\n rows.append(cursor.fetchall())\n\n for row in rows:\n for item in row:\n id_penyakit_list.append(item[0])\n\n # get only unique id\n id_penyakit_list = list(set(id_penyakit_list))\n \n return id_penyakit_list", "title": "" }, { "docid": "4b4ed5b700f3600a0ac1f746a6152d40", "score": "0.49024615", "text": "def list_client_ids(self):\n return self.clients.keys()", "title": "" }, { "docid": "8883fd5022435704cd85339f02a75c0f", "score": "0.49012944", "text": "def get_org_repos(self):\n print(\"\\nScraping repositories\")\n json_repos = []\n for org in self.orgs:\n print(f\"\\nScraping repositories of {org}\")\n json_repo = self.load_json(\n f\"https://api.github.com/orgs/{org}/repos\"\n )\n for repo in json_repo:\n # Add 
field for org to make CSV file more useful\n repo['organization'] = org\n json_repos.append(repo)\n # Create list of items that should appear as columns in the CSV\n scraping_items = [\n 'organization',\n 'name',\n 'full_name',\n 'stargazers_count',\n 'language',\n 'created_at',\n 'updated_at',\n 'homepage',\n 'fork',\n 'description'\n ]\n file_name = f\"org_repositories_{self.timestamp}.csv\"\n self.generate_csv(file_name, json_repos, scraping_items)", "title": "" }, { "docid": "0ef6afbdd553d6dd7ff0cc543b74f024", "score": "0.4896718", "text": "def get_list_of_ids(self) -> List[int]:\n id_list = []\n logging.info(f\"Get id's list {id_list}\")\n data = self.driver.find_elements(*AddressBookLocators.BTN_EDIT_LIST)\n for ui_id in data:\n id_list.append(int(ui_id.get_attribute(\"href\")[74:]))\n return sorted(id_list)", "title": "" }, { "docid": "1f638b3ea8fc6975b4e93fe95ef0048c", "score": "0.48945013", "text": "def get_list_of_ids(self):\n l = []\n for key in self.dict:\n l.append(self.dict[key][0])\n return l", "title": "" }, { "docid": "3aff2cf2f34147e0ded8d86f97e3a22e", "score": "0.4891057", "text": "def organization_id(self):\n return self._organization_id", "title": "" }, { "docid": "3aff2cf2f34147e0ded8d86f97e3a22e", "score": "0.4891057", "text": "def organization_id(self):\n return self._organization_id", "title": "" }, { "docid": "3aff2cf2f34147e0ded8d86f97e3a22e", "score": "0.4891057", "text": "def organization_id(self):\n return self._organization_id", "title": "" }, { "docid": "d36b073fb06f71f3e7b36d13f057c03e", "score": "0.48894873", "text": "def get_organisations(\n username: str,\n) -> Union[List[str], str]:\n try:\n user = _get_github_api_client().get_user(login=username)\n orgs = user.get_orgs()\n result = [x.login for x in orgs]\n return result\n except GithubException:\n return f\"{username} is no a valid user in github\"", "title": "" }, { "docid": "62edf597e3b2ca5af1dec7dd1260eb0b", "score": "0.48843354", "text": "def list_insee_codes_for_departments_and_coms() -> list:\n codes = sorted(\n Perimeter.objects.filter(\n scale=Perimeter.SCALES.department, is_obsolete=False\n ).values_list(\"code\", flat=True)\n )\n\n # Add the COMs\n codes += list(OVERSEAS_COLLECTIVITIES)\n\n return codes", "title": "" }, { "docid": "c2e8a82258b73c9197d1dbbbcec134f9", "score": "0.4880693", "text": "def organizations_adminof(self):\n if (\n self.userinfo\n and self.userinfo.get('organizations')\n and 'admin' in self.userinfo['organizations']\n ):\n return list(self.userinfo['organizations']['admin'])\n else:\n return []", "title": "" }, { "docid": "c278a53798667ba621ef419e800bc778", "score": "0.48749214", "text": "def get_all_Contigs_by_organism_id(id_organism):\n listOfOrganisms = []\n sqlObj = _Contig_sql_new()\n results = sqlObj.get_contig_by_organism_id(id_organism)\n for element in results:\n listOfOrganisms.append(Contig(element[0], element[1], element[2], element[3], element[4]))\n return listOfOrganisms", "title": "" } ]
26eaf61bb7f4a99fb0e89f50c19e4bf2
__init__(self) -> TIntFltKd __init__(self, TIntFltKd KeyDat) -> TIntFltKd
[ { "docid": "c593c371c475387a5595c875a3d9bcda", "score": "0.78430516", "text": "def __init__(self, *args):\n _snap.TIntFltKd_swiginit(self,_snap.new_TIntFltKd(*args))", "title": "" } ]
[ { "docid": "3debcee7b0b5e1e909aa548561aac8e6", "score": "0.7764347", "text": "def __init__(self, *args):\n _snap.TIntFltKdV_swiginit(self,_snap.new_TIntFltKdV(*args))", "title": "" }, { "docid": "c3a74e11c3091ae23a0f522af746fbb0", "score": "0.76881486", "text": "def __init__(self, *args):\n _snap.TIntPrFltKd_swiginit(self,_snap.new_TIntPrFltKd(*args))", "title": "" }, { "docid": "54d5e1835f12b8efcbfc5c65869e5bca", "score": "0.7661183", "text": "def __init__(self, *args):\n _snap.TIntFltPrKd_swiginit(self,_snap.new_TIntFltPrKd(*args))", "title": "" }, { "docid": "04c11c7ea52d52b0607e5cd9e181de2a", "score": "0.75228375", "text": "def __init__(self, *args):\n _snap.TIntFltPrKdV_swiginit(self,_snap.new_TIntFltPrKdV(*args))", "title": "" }, { "docid": "4b885bc32eff380261b8ee9f271778d7", "score": "0.75075114", "text": "def __init__(self, *args):\n _snap.TIntPrFltKdV_swiginit(self,_snap.new_TIntPrFltKdV(*args))", "title": "" }, { "docid": "33e29c97f95ead68fb86e3720b3a0cda", "score": "0.74761474", "text": "def __init__(self, *args):\n _snap.TIntKd_swiginit(self,_snap.new_TIntKd(*args))", "title": "" }, { "docid": "af5a5e601466485216911c3781710610", "score": "0.74596924", "text": "def __init__(self, *args):\n _snap.TIntSFltKd_swiginit(self,_snap.new_TIntSFltKd(*args))", "title": "" }, { "docid": "b0c4b521064443a73f841e87805d35e2", "score": "0.74413246", "text": "def __init__(self, *args):\n _snap.TIntKdV_swiginit(self,_snap.new_TIntKdV(*args))", "title": "" }, { "docid": "a72ca387e6b0c09ca8097d5b14662c37", "score": "0.71044654", "text": "def __init__(self, *args):\n _snap.TFltIntKd_swiginit(self,_snap.new_TFltIntKd(*args))", "title": "" }, { "docid": "60985ac4ee764598bf9eacdb86cb8627", "score": "0.70681363", "text": "def __init__(self, *args):\n _snap.TUInt64FltKd_swiginit(self,_snap.new_TUInt64FltKd(*args))", "title": "" }, { "docid": "a8faafd7e7f8578ae87e9479fcf54d78", "score": "0.70403886", "text": "def __init__(self, *args):\n _snap.TFltIntKdV_swiginit(self,_snap.new_TFltIntKdV(*args))", "title": "" }, { "docid": "c0bec58dbfc2fc1e7d5d7692afcbecc7", "score": "0.7038517", "text": "def __init__(self, *args):\n _snap.TUInt64FltKdV_swiginit(self,_snap.new_TUInt64FltKdV(*args))", "title": "" }, { "docid": "77a11bf67a2dc29cce74e1c99fb8efe2", "score": "0.7013783", "text": "def __init__(self, *args):\n _snap.TAscFltIntKd_swiginit(self,_snap.new_TAscFltIntKd(*args))", "title": "" }, { "docid": "f6568a6e578ff4334fd5d095be2f6b18", "score": "0.7002946", "text": "def __init__(self, *args):\n _snap.TAscFltIntKdV_swiginit(self,_snap.new_TAscFltIntKdV(*args))", "title": "" }, { "docid": "c7e266f4e45a77b9f88c5599d098c934", "score": "0.69205034", "text": "def __init__(self, *args):\n _snap.TFltUIntKd_swiginit(self,_snap.new_TFltUIntKd(*args))", "title": "" }, { "docid": "2b0e8a52540a88f03d2d8c62e6f3ad33", "score": "0.6867759", "text": "def __init__(self, *args):\n _snap.TFltIntPrKd_swiginit(self,_snap.new_TFltIntPrKd(*args))", "title": "" }, { "docid": "6d5204819cf4584b8c954a555534d622", "score": "0.6847169", "text": "def __init__(self):\n self.dic_k_c = {}\n self.dic_c_k = {}\n self.max_key = -float('inf')\n self.min_key = float('inf')", "title": "" }, { "docid": "2b33de2658c8de91bc37e9e4f8af58e0", "score": "0.6839703", "text": "def __init__(self, *args):\n _snap.TFltKd_swiginit(self,_snap.new_TFltKd(*args))", "title": "" }, { "docid": "3653857613eddc657ec20b97df4b08f1", "score": "0.68203974", "text": "def __init__(self, *args):\n _snap.TFltIntPrKdV_swiginit(self,_snap.new_TFltIntPrKdV(*args))", "title": "" }, 
{ "docid": "336db5380a72bfb493b7a4a5010063ae", "score": "0.677727", "text": "def __init__(self, *args):\n _snap.TIntFltFltTr_swiginit(self,_snap.new_TIntFltFltTr(*args))", "title": "" }, { "docid": "f920c5e3d2ce1648abb62676bb589cfd", "score": "0.67769164", "text": "def __init__(self, *args):\n _snap.TIntFltTrH_swiginit(self,_snap.new_TIntFltTrH(*args))", "title": "" }, { "docid": "d3e84a661b545476a64b8b65a7d596d1", "score": "0.6772105", "text": "def __init__(self, *args):\n _snap.TFltKdV_swiginit(self,_snap.new_TFltKdV(*args))", "title": "" }, { "docid": "5e941e6abeff6c6e4aff7025f6aaf7d9", "score": "0.67339134", "text": "def __init__(self, *args):\n _snap.TIntTrFltH_swiginit(self,_snap.new_TIntTrFltH(*args))", "title": "" }, { "docid": "8d1e340d1b7d0629149338e5cb48e540", "score": "0.6733308", "text": "def __init__(self, *args):\n _snap.TIntFltH_swiginit(self,_snap.new_TIntFltH(*args))", "title": "" }, { "docid": "55a1c9c3b8acbf2949b8ce478d149b1e", "score": "0.668098", "text": "def __init__(self, k):\n self.k = k", "title": "" }, { "docid": "4283dd8f9dfe72128f1de2577b81a34e", "score": "0.6678871", "text": "def __init__(self, *args):\n _snap.TUIntKd_swiginit(self,_snap.new_TUIntKd(*args))", "title": "" }, { "docid": "5f7747e1d3963bd1dcf835c1dcb6d7cc", "score": "0.6656309", "text": "def __init__(self, *args):\n _snap.TIntFltPr_swiginit(self,_snap.new_TIntFltPr(*args))", "title": "" }, { "docid": "ee9936058258770142f4db6b4fb6b528", "score": "0.6651", "text": "def __init__(self, *args):\n _snap.TIntFltTrHI_swiginit(self,_snap.new_TIntFltTrHI(*args))", "title": "" }, { "docid": "bb15dbbf8f3f8077dece66c1f3a220d7", "score": "0.66146135", "text": "def __init__(self, *args):\n _snap.TIntUInt64KdV_swiginit(self,_snap.new_TIntUInt64KdV(*args))", "title": "" }, { "docid": "ebfaaf3b496e64bad088e43061c1eead", "score": "0.6592908", "text": "def __init__(self, *args):\n _snap.TIntIntFltTrV_swiginit(self,_snap.new_TIntIntFltTrV(*args))", "title": "" }, { "docid": "4ccd02aa154c487492a5c59b48b6951f", "score": "0.65893394", "text": "def __init__(self, *args):\n _snap.TIntIntFltTr_swiginit(self,_snap.new_TIntIntFltTr(*args))", "title": "" }, { "docid": "d90ad3cf6573cef0a526ec4b5f973139", "score": "0.6585227", "text": "def __init__(self, *args):\n _snap.TFltUInt64KdV_swiginit(self,_snap.new_TFltUInt64KdV(*args))", "title": "" }, { "docid": "8ca1790f1435f747bf70094a027c6a4c", "score": "0.6570959", "text": "def __init__(self, *args):\n _snap.TIntPrFltH_swiginit(self,_snap.new_TIntPrFltH(*args))", "title": "" }, { "docid": "8d1d7a79915424631a8867718593e2a0", "score": "0.6560584", "text": "def __init__(self, *args):\n _snap.TIntFltPrH_swiginit(self,_snap.new_TIntFltPrH(*args))", "title": "" }, { "docid": "e1aa894a0231cda372d485287b7df221", "score": "0.65529203", "text": "def __init__(self, *args):\n _snap.TIntTrFltHI_swiginit(self,_snap.new_TIntTrFltHI(*args))", "title": "" }, { "docid": "9253b948608445943c8cabd187ed8d9d", "score": "0.6531545", "text": "def __init__(self, *args):\n _snap.TIntFltPrV_swiginit(self,_snap.new_TIntFltPrV(*args))", "title": "" }, { "docid": "7b7ef7dc3771f81e167a47307a836f88", "score": "0.65212595", "text": "def __init__(self, *args):\n _snap.TFltUInt64Kd_swiginit(self,_snap.new_TFltUInt64Kd(*args))", "title": "" }, { "docid": "993c8db83f895ff8aee64749b557d31f", "score": "0.65184927", "text": "def __init__(self, *args):\n _snap.TIntFltHI_swiginit(self,_snap.new_TIntFltHI(*args))", "title": "" }, { "docid": "cf381c695f32e3457e3c034fcc9ccc84", "score": "0.64811236", "text": "def 
__init__(self, *args):\n _snap.TUIntIntKd_swiginit(self,_snap.new_TUIntIntKd(*args))", "title": "" }, { "docid": "e4f745d9e3a3fc8bcb2fad4bb0d4bc3e", "score": "0.6480433", "text": "def __init__(self, *args):\n _snap.TUInt64IntKdV_swiginit(self,_snap.new_TUInt64IntKdV(*args))", "title": "" }, { "docid": "510d4e0297faa69183c28b9f6b8ee02d", "score": "0.64607495", "text": "def __init__(self, *args):\n _snap.TIntFltVHI_swiginit(self,_snap.new_TIntFltVHI(*args))", "title": "" }, { "docid": "e57fa1abb64c43bd6622ab14cf431d93", "score": "0.645586", "text": "def __init__(self, *args):\n _snap.TStrFltKdV_swiginit(self,_snap.new_TStrFltKdV(*args))", "title": "" }, { "docid": "06c7301ef940c0885b62bda130d0a247", "score": "0.6435781", "text": "def __init__(self, *args):\n _snap.TStrFltKd_swiginit(self,_snap.new_TStrFltKd(*args))", "title": "" }, { "docid": "859f59ffd19d9df1cff2ba9c153a4bcd", "score": "0.64220124", "text": "def __init__(self, *args):\n _snap.TUIntIntKdV_swiginit(self,_snap.new_TUIntIntKdV(*args))", "title": "" }, { "docid": "619338bceb4515fa7c288fcd51803142", "score": "0.6421007", "text": "def __init__(self, *args):\n _snap.TIntTr_swiginit(self,_snap.new_TIntTr(*args))", "title": "" }, { "docid": "1c9b872f4b9fee0925a54fb9c634b6cd", "score": "0.64196736", "text": "def __init__(self, *args):\n _snap.TIntFltIntTr_swiginit(self,_snap.new_TIntFltIntTr(*args))", "title": "" }, { "docid": "0067cf3bdfb3c9a448c580e65eb67b12", "score": "0.64126426", "text": "def __init__(self, *args):\n _snap.TIntUInt64Kd_swiginit(self,_snap.new_TIntUInt64Kd(*args))", "title": "" }, { "docid": "c99ed613e713b01da4cb60d29e84ee8a", "score": "0.63946176", "text": "def __init__(self, *args):\n _snap.TStrAscFltKd_swiginit(self,_snap.new_TStrAscFltKd(*args))", "title": "" }, { "docid": "1469e80d6b151a661cbe4c0917a1b773", "score": "0.63931465", "text": "def __init__(self, *args):\n _snap.TIntFltIntTrV_swiginit(self,_snap.new_TIntFltIntTrV(*args))", "title": "" }, { "docid": "606555e7d8b7ca9cb91473605ad65ebb", "score": "0.6390941", "text": "def __init__(self, *args):\n _snap.TIntFltPrHI_swiginit(self,_snap.new_TIntFltPrHI(*args))", "title": "" }, { "docid": "b46720cc824b324baf6f0629b58acc36", "score": "0.6382494", "text": "def __init__(self, key: float) -> None:\n self.key = key\n self.dictionary = {}", "title": "" }, { "docid": "1e6341c01f62e3c9c7adb6e440f06245", "score": "0.6363171", "text": "def __init__(self, *args):\n _snap.TIntPrFltHI_swiginit(self,_snap.new_TIntPrFltHI(*args))", "title": "" }, { "docid": "5d0621a5df9eef3d2b413536c4827f0b", "score": "0.63579714", "text": "def __init__(self, k,st,last, step):\n self.key = k\n self.st = st\n self.ls = last\n self.step = step", "title": "" }, { "docid": "821c4f8221cb561aca17d0a723da29d7", "score": "0.63528234", "text": "def __init__(self, *args):\n _snap.TIntFltVH_swiginit(self,_snap.new_TIntFltVH(*args))", "title": "" }, { "docid": "c214f7cfb949a06cc2ec6f508fee4fe5", "score": "0.6351212", "text": "def __init__(self, *args):\n _snap.TStrAscFltKdV_swiginit(self,_snap.new_TStrAscFltKdV(*args))", "title": "" }, { "docid": "daaab1a35c002a8ac04655f7ba9d4b29", "score": "0.6347761", "text": "def __init__(self, *args):\n _snap.TIntTrV_swiginit(self,_snap.new_TIntTrV(*args))", "title": "" }, { "docid": "6d9b31fe6f096fd014be3860a02669ba", "score": "0.6290782", "text": "def __init__(self, *args):\n _snap.TIntIntFltFltQu_swiginit(self,_snap.new_TIntIntFltFltQu(*args))", "title": "" }, { "docid": "6f16fbe48f0be9dfb01f906ef1e8ff5e", "score": "0.6266339", "text": "def __init__(self, 
key):\n self.key = key", "title": "" }, { "docid": "d570dab0516f509ff4867be505aba460", "score": "0.6266208", "text": "def __init__(self, *args):\n _snap.TUInt64IntKd_swiginit(self,_snap.new_TUInt64IntKd(*args))", "title": "" }, { "docid": "7e09fc376538e050cb2db4b17ee4b4da", "score": "0.6260486", "text": "def __init__(self, *args):\n _snap.TFltBoolKdV_swiginit(self,_snap.new_TFltBoolKdV(*args))", "title": "" }, { "docid": "7a698e6eb3635b53f568c4d4c2c59733", "score": "0.61992", "text": "def __init__(self, k):\n self.k = k\n self.coeff = None", "title": "" }, { "docid": "a2532cbf56c8a9e676af418fb9ac21af", "score": "0.6192411", "text": "def __init__(self, kx=3, ky=3, kxy=3):\n super().__init__()\n self.kx = kx\n self.ky = ky\n self.kxy = kxy", "title": "" }, { "docid": "32759e2d2811ad1204e4143146903526", "score": "0.618363", "text": "def __init__(self, mat, dictionary_key, nc):\n\t\tself.mat = mat\n\t\tself.dictionary_key = dictionary_key\n\t\tself.nc = nc", "title": "" }, { "docid": "07d62bda4d650b69fed57444f63f190b", "score": "0.6172981", "text": "def __init__(self, Kp, Ki, Kd, setpoint=0):\n self.setpoint = setpoint\n self.Kp = Kp\n self.Ki = Ki\n self.Kd = Kd\n self._integral = 0\n self._previous_error = 0\n self._change_limit = 0", "title": "" }, { "docid": "142702d6394caa2d36830f5511618e7b", "score": "0.6158949", "text": "def __init__(self, *args):\n _snap.TFltBoolKd_swiginit(self,_snap.new_TFltBoolKd(*args))", "title": "" }, { "docid": "1dfcf0630c6813197b42732c28ec8c7c", "score": "0.6133054", "text": "def __init__(self, *args):\n _snap.TInt_swiginit(self,_snap.new_TInt(*args))", "title": "" }, { "docid": "93c155477d823eac204bd7b69624e1c0", "score": "0.61228466", "text": "def __init__(self, *args):\n _snap.TIntTrIntH_swiginit(self,_snap.new_TIntTrIntH(*args))", "title": "" }, { "docid": "7f81296cb288ca42960ff856ab4bf646", "score": "0.61182976", "text": "def __init__(self, *args):\n _snap.TIntH_swiginit(self,_snap.new_TIntH(*args))", "title": "" }, { "docid": "cc50453815e7d3ab39bd4f1e1a25a6d6", "score": "0.61104244", "text": "def __init__(self, *args):\n _snap.TIntV_swiginit(self,_snap.new_TIntV(*args))", "title": "" }, { "docid": "0680644a126185f4a4b6c98408a229e6", "score": "0.61028624", "text": "def __init__(self):\n\n self.keylist = []\n self.keysort = None\n\n # Python 2 dict default size\n self.mask = Keys.MINSIZE - 1", "title": "" }, { "docid": "079ac2352704d103e442ffed5436d5c2", "score": "0.6093063", "text": "def __init__(self, *args):\n _snap.TFltFltIntTr_swiginit(self,_snap.new_TFltFltIntTr(*args))", "title": "" }, { "docid": "5a20f9cc5208255434e1437cd9b9313f", "score": "0.60925287", "text": "def __init__(self, *args):\n _snap.TStrIntKdV_swiginit(self,_snap.new_TStrIntKdV(*args))", "title": "" }, { "docid": "d9c8787a4f2161a060b34f4b1aa7a27f", "score": "0.608114", "text": "def __init__(self, example_weight_key):\n self._example_weight_key = example_weight_key", "title": "" }, { "docid": "067659810459c9b7f7b07913e0f8f17d", "score": "0.60778713", "text": "def __init__(self, *args):\n _snap.TIntStrKdV_swiginit(self,_snap.new_TIntStrKdV(*args))", "title": "" }, { "docid": "9da5a08370b824ec76eef5b029793b4d", "score": "0.6071499", "text": "def __init__(self, clerk, fclass, fID):\n self.clerk = clerk\n self.fID = fID\n self.fclass = fclass", "title": "" }, { "docid": "df53f2cff30aba97968c90aebcb0ee95", "score": "0.6068672", "text": "def __init__(self,k):\n\n\t\tself.k = k \n\n\t\tself.ta_u = k['ta_u']\n\t\tself.ta_l = k['ta_l']\n\t\tself.tb_u = k['tb_u']\n\t\tself.tb_l = 
k['tb_l']\n\t\tself.alpha_b = k['alpha_b']\n\t\tself.alpha_c = k['alpha_c']\n\n\t\t'''try:\n\t\t\tself.coeff = []\n\t\t\tself.coeff = self._spline()\n\t\t\tprint self.coeff \n\t\t\tself._coef()\n\t\texcept TypeError:\n\t\t\traise Warning(\"Pass a dict with named coefficients\")'''", "title": "" }, { "docid": "10391c94c872831a0469cbca5a7f4845", "score": "0.6064531", "text": "def __init__(self, k, origin):\n self.k = k\n self.origin = origin\n return", "title": "" }, { "docid": "1e175cecafe05045584dd0e4ce0f39d6", "score": "0.60623395", "text": "def __init__(self, x: dict, k: int, lamb: float, seed: int):\n\t\tsuper().__init__(x, k)\n\t\tself.lamb = lamb\n\t\tself.slamb = None\n\t\tself.seed = seed", "title": "" }, { "docid": "97eb820d784f49b9ec4e095419cff87e", "score": "0.60564816", "text": "def __init__(self, data, k, n):\n self.data = data\n self.k = k\n self.n = n", "title": "" }, { "docid": "8b655b623191fcbf4e68f93d7dd783ca", "score": "0.6025896", "text": "def __init__(self, *args):\n _snap.TStrIntKd_swiginit(self,_snap.new_TStrIntKd(*args))", "title": "" }, { "docid": "3c3750be6593cf6bc547309e330e58ec", "score": "0.6013634", "text": "def __init__(self, *args):\n _snap.TIntTrIntHI_swiginit(self,_snap.new_TIntTrIntHI(*args))", "title": "" }, { "docid": "636925c77417b76f5a1a64ef50c64b1f", "score": "0.600813", "text": "def __init__(self, *args):\n _snap.TIntPrV_swiginit(self,_snap.new_TIntPrV(*args))", "title": "" }, { "docid": "6da4dc397960f88d3f7a171e2a0c5e87", "score": "0.60039544", "text": "def __init__(self, key, field):\n self.key = key\n self.field = field", "title": "" }, { "docid": "1fb64d57a85e55a19ab50f6587264932", "score": "0.60025585", "text": "def __init__(self, *args):\n _snap.TFltStrKdV_swiginit(self,_snap.new_TFltStrKdV(*args))", "title": "" }, { "docid": "5287c4608e1cc580fdec5b262826c2bd", "score": "0.5984827", "text": "def __init__(self, k, v):\n self.key = k\n self.value = v", "title": "" }, { "docid": "de0d3ab0016b8b122b7211f7c886b824", "score": "0.59846926", "text": "def __init__(self, *args):\n _snap.TIntPr_swiginit(self,_snap.new_TIntPr(*args))", "title": "" }, { "docid": "369e763f4e3c3be1a9e8eac7617c8c47", "score": "0.5982285", "text": "def __init__(self, *args):\n _snap.TIntHI_swiginit(self,_snap.new_TIntHI(*args))", "title": "" }, { "docid": "f7d41c09b7d4e3997540f078b73f5c64", "score": "0.5971457", "text": "def __init__(self, key, *a, **kw):\n self.key = key\n super().__init__(*a, **kw)", "title": "" }, { "docid": "0b38544e2cf04ea1a13404cc6e9a3115", "score": "0.5969983", "text": "def __init__(self, *args):\n _snap.TIntIntHH_swiginit(self,_snap.new_TIntIntHH(*args))", "title": "" }, { "docid": "51fd18f80972c5efb79e8e9cef1e129c", "score": "0.59666455", "text": "def __init__(self, *args):\n _snap.TIntS_swiginit(self,_snap.new_TIntS(*args))", "title": "" }, { "docid": "9e4d66a36d9129216225cc2590980617", "score": "0.59660226", "text": "def __init__(self, type, key, timestamp):\n\t\tself.type = type\n\t\tself.key = key\n\t\tself.timestamp = timestamp", "title": "" }, { "docid": "0f8b8f1cec10ca8123dadf56aa63b47a", "score": "0.5965139", "text": "def __init__(self, *args):\n _snap.TIntStrKd_swiginit(self,_snap.new_TIntStrKd(*args))", "title": "" }, { "docid": "343310fa56c8cd2fe50c978569d3022a", "score": "0.5958656", "text": "def __add__(self, *args):\n return _snap.TIntFltKdV___add__(self, *args)", "title": "" }, { "docid": "c641d5baa809aac3a1a985db56d310ff", "score": "0.5958146", "text": "def __init__(self, *args):\n _snap.TStrKdV_swiginit(self,_snap.new_TStrKdV(*args))", 
"title": "" }, { "docid": "79b71c4502350577086c16bdc95b2a1f", "score": "0.5957451", "text": "def __init__(self, *args):\n _snap.TFltStrKd_swiginit(self,_snap.new_TFltStrKd(*args))", "title": "" }, { "docid": "c7662734b5a14532ff8e37f7c9b19ec0", "score": "0.5938238", "text": "def __init__(self):\n self.key_to_val = dict()\n self.val_to_key = dict()\n self.mini = -1\n self.maxi = -1", "title": "" }, { "docid": "5e80f1a3ec7c05edd24e45edb0253317", "score": "0.59335923", "text": "def __init__(self, *args):\n _snap.TIntQuV_swiginit(self,_snap.new_TIntQuV(*args))", "title": "" }, { "docid": "c59a441932e5891bdb00637470187f4c", "score": "0.5924125", "text": "def __init__(self):\n self.pos = 0\n # The size below is the power of 2 of the hashtable, not the actual size of the table.\n self.size = 2\n self.htable = [None] * (2**self.size)\n self.entries = 0.0\n # Set the load factor to 0 initially.\n self.lf = 0.0\n return None", "title": "" }, { "docid": "cbb47d125dd27b071f871689ab3b2feb", "score": "0.59238964", "text": "def __init__(self, key, identifier, obj_type):\n self.key = key\n self.identifier = identifier\n self.obj_type = obj_type", "title": "" }, { "docid": "7af5c3da34c0b47c6083a0e9bb37593d", "score": "0.5920986", "text": "def __init__(self, *args):\n _snap.TIntQu_swiginit(self,_snap.new_TIntQu(*args))", "title": "" } ]
30cf92c27cfc5ed339a3444c06c710b5
To set up customized header
[ { "docid": "cbf1c5d4831d97c3deaa3c656596ed74", "score": "0.0", "text": "def init_setup(self, header_list):\n assert(isinstance(header_list, list))\n assert(len(header_list) == self._myNumCols)\n\n # Set up header\n for i_col in range(self._myNumCols):\n header = header_list[i_col]\n self.model().setHeaderData(0, Qt.Horizontal, header)\n # END-IF\n self._myHeaderList = header_list[:]\n\n # Enable scroll bar\n header = self.header()\n assert isinstance(header, QHeaderView), 'Header must be a QHeaderView instance.'\n # header.setHorizontalScrollBar(QScrollBar())\n\n header.setHorizontalScrollMode(QAbstractItemView.ScrollPerItem)\n header.setSectionResizeMode(QHeaderView.ResizeToContents)\n header.setStretchLastSection(False)\n\n return", "title": "" } ]
[ { "docid": "2a3fba76992b3059f9c767bed3bb2505", "score": "0.7375767", "text": "def create_header(self, controller, header):\n label = tkinter.Label(self, text=header, font=controller.header_font)\n label.pack(side='top', fill='x', pady=20)", "title": "" }, { "docid": "6b411a021cead2601db65d8a51750c21", "score": "0.72039455", "text": "def _add_header(self):\n \n (variables, types) = self._read_raw_header()\n (variables, types) = self._extract_variable_header(variables, types)\n \n sim_types = generate_simulation_data_types(types[1:])\n self._header.update(zip(variables[1:],sim_types))\n self._name_list.extend(variables[1:])", "title": "" }, { "docid": "ffd3200e7b1e7ad16ce5872196d63e2a", "score": "0.7136764", "text": "def initial_header_widget(self):\n return urwid.AttrWrap(\n urwid.Columns([\n urwid.Text(self.title, align='left'),\n urwid.AttrWrap(\n urwid.Text(self.current_page.page_title, align='center'),\n 'important_header'\n ),\n urwid.Text(self.current_page.page_usage, align='right')\n ]),\n 'header'\n )", "title": "" }, { "docid": "f071a7ab7719e21c1637ec470c7301c0", "score": "0.7095726", "text": "def set_header(self, header):\n self.header = header", "title": "" }, { "docid": "6f5823e61d0e252aba8fae37a222b629", "score": "0.7089395", "text": "def header(self):\n return self.renderer('/formish/form/header.html', {'form':self})", "title": "" }, { "docid": "026f5761ca36a3dacdf914b668d17fc5", "score": "0.70222545", "text": "def set_header_content(self, content):\n self.header = content", "title": "" }, { "docid": "0e1e8b3ed8f8c0463747497448457d59", "score": "0.7013275", "text": "def generate_headers(self):\r\n raise NotImplementedError()", "title": "" }, { "docid": "cef5d4bd72978ab298951d32cb607ad6", "score": "0.69448954", "text": "def add_header(self, config):\n # HEADER RECTANGLES\n # http://stackoverflow.com/questions/29703579/reportlab-rounded-rect\n self.doc.roundRect(*self.dim_header_rect1, radius=4, stroke=1, fill=0)\n self.doc.roundRect(*self.dim_header_rect2, radius=4, stroke=1, fill=0)\n\n # HEADER TEXT\n self.headerStyle = getSampleStyleSheet()['Normal']\n self.headerStyle.name = 'header'\n self.headerStyle.fontName = config['font_face']\n # self.headerStyle.fontSize = config['font_size']\n self.headerStyle.fontSize = 12\n self.headerStyle.alignment = TA_CENTER\n\n self.header_text_01 = []\n self.header_text_01.append(Paragraph(\n config['header_text_1'], self.headerStyle))\n self.headerFrame_01 = Frame(\n .5 * inch, 10.075 * inch, 7.6 * inch, 0.5 * inch)\n self.headerFrame_01.addFromList(self.header_text_01, self.doc)\n\n self.header_text_02 = []\n self.header_text_02.append(Paragraph(\n config['header_text_2'], self.headerStyle))\n self.headerFrame_02 = Frame(\n .5 * inch, 9.825 * inch, 7.6 * inch, 0.5 * inch)\n self.headerFrame_02.addFromList(self.header_text_02, self.doc)\n # self.doc.drawCentredString(*self.dim_header_string1,\n # text=config['header_text_1'])\n # self.doc.drawCentredString(*self.dim_header_string2,\n # text=config['header_text_2'])", "title": "" }, { "docid": "5593e91632aecdbf6e4c3f8a0c80b1be", "score": "0.6934681", "text": "def generate_header(self):\n return ''", "title": "" }, { "docid": "ed63451023783178225951396f63cc16", "score": "0.6898102", "text": "def enableheader(self):\n return True", "title": "" }, { "docid": "5632c5d7d7b36373b6d1e58bae094b92", "score": "0.6860151", "text": "def set_header(self, header_items):\n self.append_child(HeaderTR(header_items))", "title": "" }, { "docid": "ccf12ffb1c9ab94f763e16a3f3acc885", "score": "0.6845562", 
"text": "def header(self, text, level, raw=None):\n self.options['header_handler'](level, text)\n return '<h%d id=\"%s\">%s</h%d>\\n' % (level,text ,text, level)", "title": "" }, { "docid": "b5c4516ca591d13c05ab71d41e2179cd", "score": "0.6843031", "text": "def _setup_headers(self):\n self.encoding_header = self.config.get_value('header')\n\n copyright_file = self.config.get_value('copyright_file')\n copyright_path = os.path.join(self.cmd_root, copyright_file)\n if os.path.isfile(copyright_path):\n with open(copyright_path, 'r') as file_obj:\n self.copyright_header = file_obj.read()\n else:\n self.copyright_header = DEFAULT_COPYRIGHT_HEADER", "title": "" }, { "docid": "2f6e57a22dafa0d4f83209e23f4014b1", "score": "0.6828401", "text": "def _create_header(self, data):\n\n self.id = int(data[0])\n self.width = int(data[1])\n return", "title": "" }, { "docid": "51a2090c66b2b0494986c0f148f7e470", "score": "0.6808261", "text": "def _base_header(self, hdr=None):\n _hdr = super()._base_header(hdr=hdr)\n _hdr['CALIBTYP'] = (self.calib_type, 'PypeIt: Calibration frame type')\n if self.calib_dir is not None:\n _hdr['CALIBDIR'] = (self.calib_dir, 'PypeIt: Calibration file directory')\n if self.calib_key is not None:\n _hdr['CALIBKEY'] = (self.calib_key, 'PypeIt: Calibration key')\n if self.calib_id is not None:\n _hdr['CALIBID'] = (','.join(self.calib_id), 'PypeIt: Calibration groups')\n return _hdr", "title": "" }, { "docid": "e5dcadd53d0c9e8b6925909e6b1b4dfe", "score": "0.67691714", "text": "def wrapper_header(self):\n\n return \"\"\"\\n**********************************************************************\n********************** %s ********************\n**********************************************************************\n\n\"\"\" % self.wrapper_name.upper()", "title": "" }, { "docid": "648a7d3e41226c5515f6d3993f43552a", "score": "0.6749727", "text": "def _write_header(self, header):\n raise NotImplementedError", "title": "" }, { "docid": "3bd5e65a7186457594ebeadf60c35877", "score": "0.67386746", "text": "def set_header(self, header):\n self.__header = header", "title": "" }, { "docid": "1e970e5915b5ea3829639691b781ae9f", "score": "0.6734855", "text": "def header(self, **kwds):\n # nothing to do\n return ()", "title": "" }, { "docid": "b7da5f61013533fbc37e812bcc348b5c", "score": "0.6726724", "text": "def add_header():\r\n return make_response('Not yet implemented')", "title": "" }, { "docid": "63cbc20e6c8a125f646f04ddd0a155f3", "score": "0.66845614", "text": "def write_header_from(self):\n return None", "title": "" }, { "docid": "4f50227c233b7dffbe3256cbc840397c", "score": "0.66771525", "text": "def render_header(self):\n header_template = \"\\n{usage}: {app} {commands} {options}\\n\\n{desc}\\n\"\n\n puts(header_template.format(\n usage=colored.white(\"Usage\", bold=True),\n app=self.app,\n desc=self.desc,\n commands=colored.yellow(\"command\"),\n options=colored.green(\"options\")),\n )", "title": "" }, { "docid": "4d6b3b9c44d23daaff1593cf69310591", "score": "0.6661732", "text": "def create_header(self, output):\n output.writerow(['# artnr', 'tertial', 'datum',\n 'menge_avg', 'menge_median', 'menge_min', 'menge_max',\n 'menge_fakturiert'])\n output.writerow(['# stand:', datetime.date.today().strftime('%Y-%m-%d')])", "title": "" }, { "docid": "ee85b9cc89cf3dc2ab79e2fe2909238e", "score": "0.66505176", "text": "def header(self, line):\n self.header_lines([line])", "title": "" }, { "docid": "eff327db7e8c72f5f98a5852ed1a7e27", "score": "0.66409737", "text": "def _make_header(self, name, title=False):\n 
line = \"\\n\" + \"-\" * len(name) + \"\\n\"\n if title:\n return line + name + line\n else:\n return name + line", "title": "" }, { "docid": "3b9f68305733b0ec1336d3300571198f", "score": "0.6636152", "text": "def add_header(self, header, value):\n self[\"headers\"].update({header: value})", "title": "" }, { "docid": "839bf3df759ba28eab5d377faa5eb0be", "score": "0.66324514", "text": "def html_header(self):\n block = \"\"\"<!DOCTYPE html>\n<html>\n<head>\n <title>Fred Hutchinson Cancer Research Center</title>\n <title>EasyBuild Annotate extension list for R, Biocondotor and Python easyconfig files</title>\n <style>\n body {font-family: Helvetica,Arial,\"Calibri\",\"Lucida Grande\",sans-serif;}\n .ext_list a {color: black; text-decoration: none; font-weight: bold;}\n .ext_list li:hover a:hover {color: #89c348;}\n span.fh_green {color: #89c348;} <!-- Hutch Green -->\n </style>\n</head>\n<body>\n\"\"\"\n self.out.write(block)\n self.out.write('<h2><span class=\"fh_green\">%s</span></h2>' % self.pkg_name)\n self.out.write('<h3>Package List</h3>\\n<div class=\"ext_list\">\\n')", "title": "" }, { "docid": "7a43b8883f7946e75e7ceabe1ed24641", "score": "0.66272646", "text": "def getheader(self, name,default):\n\t\tpass", "title": "" }, { "docid": "638f44723fafbeca786f191c79db9410", "score": "0.6616269", "text": "def _widget_header(self):\n\n file = open(\"img/logo.png\", \"rb\")\n image = file.read()\n logo = widgets.Image(\n value=image,\n format='png',\n width=30,\n height=30,\n )\n\n title = widgets.HTML(\n '<h2>Data Manager and Metadata Collector for CAGS - DEV version <h2/>')\n text = widgets.HTML('''\n This gui is designed to help with the initial preparation of one\n geophysical dataset metadata. In order to make the dataset FAIR, simple metadata descriptors must be filled. \n if you require additionnal metadata, please let us know by opening an issue <a href=\"https://github.com/agrogeophy/geometadp\" target=\"_blank\">on github </a>\n Note that this is a lightened version of the metadata manager as the full version must be run locally to interact with files. 
See github for <a href=\"https://github.com/agrogeophy/geometadp\" target=\"_blank\">more informations.</a> \n Metdata templates for generic survey to upload are available on the github page.\n ''')\n\n vbox = widgets.VBox([logo, title, text])\n return vbox", "title": "" }, { "docid": "332127811cda509c7ed34239e6c331c1", "score": "0.66120374", "text": "def new_header(self, text, first_page=False):\n text = text.replace(\"_P#_\", r\"{\\field{\\*\\fldinst{PAGE}}{\\fldrslt}}\")\n out = HEADER.replace(\"__TEXT__\", text)\n out = out.replace(\"__FONT__\", self.font)\n if first_page:\n return out.replace(\"__HEADERTYPE__\", \"headerf\")\n return out.replace(\"__HEADERTYPE__\", \"header\")", "title": "" }, { "docid": "760c143b6b5d81926486ba640fffb794", "score": "0.66110605", "text": "def _header(self, line_iter):\n # The first line is the site name.\n self.meta[self._sid] = {\"name\": line_iter.next()}\n return", "title": "" }, { "docid": "a544f259df1a2ae0b9655d2753842de5", "score": "0.6603038", "text": "def init_header(self):\n self.worksheet.write(0, PRJ_COL, \"项目\")\n self.worksheet.set_column(PRJ_COL, PRJ_COL, 40)\n\n self.worksheet.write(0, DATE_COL, \"日期\")\n self.worksheet.set_column(DATE_COL, DATE_COL, 20)\n\n self.worksheet.write(0, BUYER_COL, \"采购方\")\n self.worksheet.set_column(BUYER_COL, BUYER_COL, 20)\n\n self.worksheet.write(0, LOC_COL, \"地点\")\n\n self.worksheet.write(0, MONEY_COL, \"金额\")\n self.worksheet.set_column(MONEY_COL, MONEY_COL, 20)\n\n self.worksheet.write(0, URL_COL, \"网址\")\n self.worksheet.set_column(URL_COL, URL_COL, 60)", "title": "" }, { "docid": "f6622ca1893e8acd0c964a3c977e5daf", "score": "0.65307486", "text": "def _header(self):\n header = [\n \"Portfolio Number\",\n \"Portfolio Name\",\n \"prt_BDA AccountNum\",\n \"CostCenter\",\n \"FundingIndicator\"\n ]\n return header", "title": "" }, { "docid": "80588d4886f3f5026299222b049f72ca", "score": "0.65130544", "text": "def __init__(self, info=''):\n\n super(XFHeader, self).__init__(1)\n self.info = \"Grid generated with Python \" + platform.python_version() if info == '' else info", "title": "" }, { "docid": "1a93da9874dd2e93099cafd27df42fa4", "score": "0.6505056", "text": "def _init_header(self):\n logo_label = tki.Label(self.header_frame,\n image=self.logo_image,\n background=MAIN_COLOR)\n logo_label.pack()\n\n start_button = tki.Button(self.header_frame,\n text=START_BUTTON_TEXT,\n command=self._start_game,\n width=START_BUTTON_WIDTH,\n height=START_BUTTON_HEIGHT,\n name=START_BUTTON_NAME,\n background=MAIN_COLOR)\n start_button.place(relx=START_BUTTON_REL_X, rely=START_BUTTON_REL_Y,\n anchor=ANCHOR_CENTER)\n\n timer = tki.Label(self.header_frame, name=TIMER_LABEL_NAME,\n background=MAIN_COLOR)\n timer.place(relx=TIMER_REL_X, rely=TIMER_REL_Y, anchor=ANCHOR_CENTER)\n\n score = tki.Label(self.header_frame, name=SCORE_LABEL_NAME,\n background=MAIN_COLOR)\n score.config(text=INIT_SCORE)\n score.place(relx=SCORE_REL_X, rely=SCORE_REL_Y, anchor=ANCHOR_CENTER)", "title": "" }, { "docid": "e8541db64040d949dbef9c113e46a341", "score": "0.6502787", "text": "def FileHeader(self):\n pass", "title": "" }, { "docid": "0e6bf75e0c66d6ea6ff1b1ce4e2ee653", "score": "0.6502626", "text": "def header(self, content):\n self.out(content)", "title": "" }, { "docid": "18851b5309ecc0b9aea94383c2da4e32", "score": "0.6489481", "text": "def HeaderText(self) -> str:", "title": "" }, { "docid": "39086686c0a8859f3db00143da233244", "score": "0.6482527", "text": "def header(self, header):\n\n self._header = header", "title": "" }, { "docid": 
"39086686c0a8859f3db00143da233244", "score": "0.6482527", "text": "def header(self, header):\n\n self._header = header", "title": "" }, { "docid": "672641d79e0bb958b7d68c3ba7e66ca9", "score": "0.6467155", "text": "def header(self, text, level):\n return '<h{0}>{1}</h{0}>'.format(level, text)", "title": "" }, { "docid": "c8278d6e2b7149360e6ddf8484d6875d", "score": "0.6467098", "text": "def create_header():\n\n header = \"{:<26}|{:^15}|{:^13}|{:>14}\".format(\n \"Donor Name\", \"Total Given\", \"Num Gifts\", \"Average Gift\")\n print(\"\\n\" + header)\n print(\"-\" * len(header))", "title": "" }, { "docid": "eaefa8c2bc7cf60c27d0cbd6b5f77835", "score": "0.646288", "text": "def get_header(self) -> dict:\n pass", "title": "" }, { "docid": "1208ec6c48f236eeed5cf9ed90366dd7", "score": "0.64452815", "text": "def custome_headers(self, **kargs): \r\n user_agent = 'Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.4 (KHTML, like Gecko)' \r\n user_agent = user_agent + ' Chrome/22.0.1229.79 Safari/537.4' \r\n self._headers = {\r\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\r\n 'Accept-Charset': 'gb18030,utf-8;q=0.7,*;q=0.3',\r\n 'Accept-Encoding': 'gzip,deflate,sdch',\r\n 'Accept-Language': 'en-US,en;q=0.8',\r\n 'Connection': 'keep-alive',\r\n 'User-Agent': \"'\" + user_agent + \"'\",\r\n 'Referer': \"'\" + self._url + \"'\",\r\n }\r\n self._headers.update(kargs)", "title": "" }, { "docid": "4dd6a31244e04d1e535ca78693de4c70", "score": "0.6444357", "text": "def make_top_header():\n\n # machine info\n mach_name, mach_arch = common.machine_info()\n\n # out file names\n script_file_name = sys.modules[__name__].__file__\n\n # general and file info lines\n header = [\"#\",\n \"# Machine: \" + mach_name + \" \" + mach_arch,\n \"# Date: \" + time.asctime(time.localtime()),\n \"#\"]\n header.extend(common.format_file_info(name=script_file_name, \n description=\"Input script\", extra=(\" \"+__version__)))\n header.append(\"# Working directory: \" + os.getcwd())\n header.extend(common.format_file_info(name=image_file_name, \n description=\"Image (in)\"))\n header.extend(common.format_file_info(name=labels_file_name, \n description=\"Boundaries (in)\"))\n\n return header", "title": "" }, { "docid": "df5aff8cafe815c5d8c4431ae1e3018c", "score": "0.6439793", "text": "def print_header():\n print(HEADER)", "title": "" }, { "docid": "b22c63bae5b6aa75579a628d60dfb352", "score": "0.6438183", "text": "def build_header(self):\n header_tags = self.tags.present()\n\n # add realname tag if needed\n if 'name' in header_tags:\n first_name = header_tags('name').first_value()\n if first_name not in self.path:\n header_tags.add_tag('realname', first_name)\n header_tags['name'] = header_tags('name').remaining()\n\n # add compound tags as needed\n header_tags = self.add_compound_tags(header_tags).present()\n\n lines = [tag.to_header() for tag in header_tags.values()]\n return \"\\n\".join(lines)", "title": "" }, { "docid": "b789b04c8f94ff95f469f4be1cb7ad43", "score": "0.64323497", "text": "def write_header(self):\n self.writer.writeheader()", "title": "" }, { "docid": "ab20e6dd6195acdd3ad941b2e5a69c9a", "score": "0.6423781", "text": "def __init__(__self__, *,\n header_name: Optional[str] = None,\n header_value: Optional[str] = None):\n if header_name is not None:\n pulumi.set(__self__, \"header_name\", header_name)\n if header_value is not None:\n pulumi.set(__self__, \"header_value\", header_value)", "title": "" }, { "docid": "0616ce387515d8cafe9f061d79871292", "score": "0.64074147", "text": "def 
update_header(self, header={}):\n self.header.update(header)\n self._write_header(self.path)", "title": "" }, { "docid": "2486e595f9fd2559704211cac7b7c994", "score": "0.6407111", "text": "def writeHeader(self, header):\n pass", "title": "" }, { "docid": "ad66cf666bcfd79b25d5bd27c6e7597a", "score": "0.64038116", "text": "def headers(self, *extra):\n\n headers = []\n if self.control.ids:\n headers = [\"CDR ID\"]\n headers.append(\"Title\")\n return headers + list(extra)", "title": "" }, { "docid": "34a6c987199698240404988665007ae6", "score": "0.64025635", "text": "def create_header(self, root):\n self.add_button(root, 'Back to main', 20, 1, 0, 0, lambda: self.back_to_main())\n self.add_label(root, \"input book name\", 20, 1, 1, 0)\n self.add_label(root, \"input book author\", 20, 1, 1, 1)\n self.add_label(root, \"input book year\", 20, 1, 1, 2)\n self.add_label(root, \"input book description\", 24, 1, 1, 3)\n self.__book_name_text = self.add_textbox(root, '', 20, 1, 2, 0)\n self.__book_author_text = self.add_textbox(root, '', 20, 1, 2, 1)\n self.__book_year_text = self.add_textbox(root, '', 20, 1, 2, 2)\n self.__book_desc_text = self.add_textbox(root, '', 24, 1, 2, 3)", "title": "" }, { "docid": "a18880f51866b00536ed132a2a099f72", "score": "0.63822234", "text": "def _update_headers(self):\n headers = {\n \"Authorization\": f'{self.headers[\"Authorization\"]}{self.auth_token}',\n # \"X-Auth-Key\": self.auth_token,\n \"X-Auth-Email\": self.auth_email,\n \"Content-Type\": \"application/json\",\n }\n self.headers = headers", "title": "" }, { "docid": "d6fddf2c9dfa7595bd598796b1b6b619", "score": "0.63796365", "text": "def _create_headers(self):\n headers = {}\n return headers", "title": "" }, { "docid": "fdfccff5cacc4ca7c22cd2a7b28fb546", "score": "0.63768566", "text": "def add_fields_to_file_header(self, header):\n header[self._ENCRYPTOR_FIELD_NAME] = self.NAME\n header[self._GPG_USER_FIELD_NAME] = self._gpg_key_user\n header[self._ENCRYPTED_KEY_FIELD_NAME] = self._encrypted_symmetric_key", "title": "" }, { "docid": "8fdc473537fc9acddb27785f10b14ebb", "score": "0.6374365", "text": "def headers(self):\n pass", "title": "" }, { "docid": "72a82451f0716edc84104dd7288bbaec", "score": "0.63574713", "text": "def name(self):\n return 'Global Header'", "title": "" }, { "docid": "37bb017de500fe74f1692179d1c5d070", "score": "0.63557255", "text": "def msg_header(cls, metadata, name, text):\n if 'header_messages' not in metadata:\n metadata['header_messages'] = dict()\n metadata['header_messages'][name] = text", "title": "" }, { "docid": "39fa97324eb0970da09e42c5b9fbd0f9", "score": "0.6341805", "text": "def add_header(self, _key, _value, **_kw):\n self.add(_key, _value, **_kw)", "title": "" }, { "docid": "842d87629e20655e2fc8b0cb43bfb32f", "score": "0.63417083", "text": "def headers( self ,value:dict ):\n self._headers = value", "title": "" }, { "docid": "96798fc0585a677c2a5d46d3bf37619f", "score": "0.6332635", "text": "def write_header(self):\n if self.path:\n self.file = open(self.path, \"w+\")\n self.file.write(self.version)\n for list_item in self.list_of_header_objects:\n self.file.write(list_item.line)\n self.file.write(self.body_header_line.line)\n self.file.close()\n else:\n print(self.version, end='')\n for list_item in self.list_of_header_objects:\n print(list_item.line, end='')\n print(self.body_header_line.line, end='')", "title": "" }, { "docid": "4326a5860dd0c83248048d13657a3254", "score": "0.6328994", "text": "def add_header(self, key, value):\n self.headers += [(key, value)]", "title": "" }, 
{ "docid": "364b1ecbe2448c35012c089f3b712bc0", "score": "0.6326577", "text": "def _set_extra_headers(self):\n\n headers = {}\n headers.update({'Accept': 'application/vnd.github.squirrel-girl-preview'})\n return headers", "title": "" }, { "docid": "08bf72ebcfdefa1abdc0d1da08fee019", "score": "0.63191295", "text": "def add_header(self, text, im):\n with self.doc:\n h3(text)\n with a(href=im):\n img(style=\"width:1024px\", src=im)", "title": "" }, { "docid": "f1f49081eb031511191ebbe98b9ceb1f", "score": "0.62964237", "text": "def create_header_from_parent(self, *args, **kwargs):\n return self.get_vm().create_header_from_parent(*args, **kwargs)", "title": "" }, { "docid": "857e1739a51736c58e4623bbc83d26df", "score": "0.6294486", "text": "def add_header(doc, text, level):\n header = H(outlinelevel=level, text=text, stylename=\"header.%s\" % level)\n doc.add(header)", "title": "" }, { "docid": "de649a85d9d9da822fb58080130997de", "score": "0.6293629", "text": "def get_request_headers(self, *args, **kwargs):\n # Call super to get default headers\n headers = super(LSProxyView, self).get_request_headers()\n # Add new headers\n headers['LABELIT_USER'] = self.request.user.username\n headers['LABELIT_PROJECT'] = self.project_name\n return headers", "title": "" }, { "docid": "50efbdb704eb86f296c53d7c05165977", "score": "0.6291377", "text": "def create_default_headers(self):\n self.context.add_header(\n \"Authorization\",\n \"Bearer {}\".format(self.create_bearer_token().decode(\"utf-8\")),\n )\n self.context.add_header(\"Content-Type\", \"application/json\")\n self.context.add_header(\"Host\", self.context.address)", "title": "" }, { "docid": "ae064bed09b26d60f72acb114cf48c56", "score": "0.62873536", "text": "def send_header(self, header: str, value: str) -> None:\n assert self.response is not None # nosec\n self.response.headers.append((header, value))", "title": "" }, { "docid": "30b67ecc15b086b7ad5576f7cb19950b", "score": "0.6281423", "text": "def build_header(names_list):\n header = 'Derived output data from the TAMOC' + \\\n ' Stratified Plume Model\\n'\n header += 'Created on: ' + date.today().strftime(\n \"%Y-%m-%d %H:%M:%S\") + '\\n\\n'\n header += 'Data are stored in the following order:\\n'\n col = 0\n for name in names_list:\n header += ' Col %3.3d: ' % (col) + name + '\\n'\n col += 1\n \n return header", "title": "" }, { "docid": "a4409ee5502bc16e5cb428995b3da2ec", "score": "0.62810457", "text": "def set_headers(self, **kwargs):\n self.headers.clear()\n self.add_headers(**kwargs)", "title": "" }, { "docid": "b75aa44f7fd0c8ffcc77d1743d765999", "score": "0.62770665", "text": "def render_header(self):\r\n tag = 'div'\r\n attrs = {'class': 'navbar-header'}\r\n content = self.render_toggle()\r\n content = text_concat(content, '<a href=\"%s\" class=\"navbar-brand navbar-brand-custome\">%s'\r\n '</a>' % (self.get_brand_url(), self.brandname))\r\n return render_tag(tag, attrs=attrs, content=mark_safe(content), )", "title": "" }, { "docid": "fde48d3fe89ac50f4bc56f9ef1665ce2", "score": "0.62743056", "text": "def _popup_header(view, details):\n metadata = details.get(\"metadata\", {})\n version = metadata.get(\"version\", \"\")\n python_version = details.get(\"python_version\", \"\")\n url = metadata.get(\"url\", \"\")\n\n if version == \"\" and not details.get(\"is_shipped\", False):\n version = \"unknown version\"\n\n name = details.get(\"name\", \"Unknown\")\n is_disabled = details.get(\"is_disabled\", False)\n is_dependency = details.get(\"is_dependency\", False)\n is_complete = 
details.get(\"is_complete_override\", False)\n is_expired = details.get(\"is_complete_override_expired\", False)\n\n return \"\"\"\n <h1>\n {name}\n <span class=\"{is_complete}\">Overrides Shipped Package\n <span class=\"{is_complete_expired}\">[Expired]</span>\n <span class=\"help\">[<a href=\"help:complete_override:{name}\">?</a>]</span>\n </span>\n </h1>\n <div class=\"{is_disabled}\">This package is currently disabled</div>\n <div class=\"{is_dependency}\">This package is a dependency library</div>\n <div class=\"{has_version}\">Version: {version}</div>\n <div class=\"{has_url}\"><a href=\"{url}\">{url}</a></div>\n <div class=\"{has_python}\">Python: {python_version}</div>\n \"\"\".format(\n name=name,\n is_complete=_class(is_complete, \"complete\"),\n is_complete_expired=_class(is_expired, \"expired\"),\n is_disabled=_class(is_disabled, \"disabled\"),\n is_dependency=_class(is_dependency, \"dependency\"),\n has_version=_class(version != '', \"version\"),\n has_python=_class(python_version != \"\", \"python\"),\n version=version,\n python_version=python_version,\n has_url=_class(url != '', \"url\"),\n url=url\n )", "title": "" }, { "docid": "286d924fa0c3d47096e80942afe1c132", "score": "0.62689614", "text": "def _set_hdr(self, header: bytes) -> None:\n control_byte, flags, self.type_data, self.token = unpack(HEADER_FMT, header)\n self.msg_type = MsgType(control_byte >> 3)\n self.flags = MsgFlag(flags)", "title": "" }, { "docid": "96a6068ffef9e498c99faa9c4c06bc25", "score": "0.6267982", "text": "def add_header(self, header, value, enabled=None):\n h = {\"name\": header,\n \"value\": value,\n \"enabled\": enabled}\n if h['value'] and (enabled is None):\n h['enabled'] = True\n elif not h['value'] and (enabled is None):\n h['enabled'] = False\n self.headers.append(h)", "title": "" }, { "docid": "31f85a91c960aeb7973ca3733f947436", "score": "0.62513465", "text": "def set_auth_headers(self, access_token, client_id):\n self.headers['X-Udemy-Bearer-Token'] = access_token\n self.headers['X-Udemy-Client-Id'] = client_id\n self.headers['Authorization'] = \"Bearer {}\".format(access_token)\n self.headers['X-Udemy-Authorization'] = \"Bearer {}\".format(access_token)", "title": "" }, { "docid": "45f15521d4f83d062b646fd7eff0e7b8", "score": "0.62492853", "text": "def header(self, content):\n self.has_header = True\n self._write_row(content, self.header_format)", "title": "" }, { "docid": "7c0601518f7fabda0a9615b7ab6210bf", "score": "0.62491935", "text": "def header(style=u'default'):\n return (docType() + \n u'<html xmlns=\"http://www.w3.org/1999/xhtml\">\\n'\n u'<head>\\n'\n u'<style type=\"text/css\">\\n'\n u' @import url(/css/exe.css);\\n'\n u' @import url(/style/base.css);\\n'\n u' @import url(/style/%s/content.css);</style>\\n'\n u'<script type=\"text/javascript\" src=\"/scripts/common.js\">'\n u'</script>\\n'\n u'<script type=\"text/javascript\" src=\"/scripts/libot_drag.js\">'\n u'</script>\\n'\n u'<title>%s</title>\\n'\n u'<meta http-equiv=\"content-type\" '\n u' content=\"text/html; charset=UTF-8\"></meta>\\n'\n u'</head>\\n'\n % (style, _('eXe : elearning XHTML editor')))", "title": "" }, { "docid": "1938adaaed55e8dcf6f1fa4e9d4be048", "score": "0.624877", "text": "def _test_header(self, name, testlog):\n print(40 * '-', file=testlog)\n print(\"{0}... 
\".format(name), file=testlog)", "title": "" }, { "docid": "cd68e7f1f306d7b59a5094714f1bac3c", "score": "0.6248182", "text": "def write_main_header(html_document: HtmlDocument) -> None:\n html_document.open_html_document()\n write_head(html_document)\n html_document.open_body()\n logo_kununu = convert_path_image_64(get_project_path() + cons.path_image_kununu)\n html_document.insert_image(logo_kununu, class_txt=\"\\\"spacer--xs\\\"\", height=51,\n width=224)", "title": "" }, { "docid": "2bc363052e00639bbe046351c454ce8b", "score": "0.62479377", "text": "def header(self) -> str:\n return pulumi.get(self, \"header\")", "title": "" }, { "docid": "2bc363052e00639bbe046351c454ce8b", "score": "0.62479377", "text": "def header(self) -> str:\n return pulumi.get(self, \"header\")", "title": "" }, { "docid": "01e6dedc40ee7f8e0c8aeaa65dcf1b19", "score": "0.62418985", "text": "def defineGeneralHeader(self, header_items={}):\r\n\t# Check if DATE field previously known in NASA Ames file\r\n\ttime_now=time.strftime(\"%Y %m %d\", time.localtime(time.time())).split()\r\n\tif not self.na_dict.has_key(\"RDATE\"):\r\n\t self.na_dict[\"RDATE\"]=time_now\r\n\t\r\n if self.ax0.isTime():\r\n # Get first date in list\r\n\t try:\r\n (unit, start_date)=re.match(\"(\\w+)\\s+?since\\s+?(\\d+-\\d+-\\d+)\", self.ax0.units).groups() \r\n comptime=cdtime.s2c(start_date)\r\n first_day=comptime.add(self.na_dict[\"X\"][0][0], getattr(cdtime, unit.capitalize()))\r\n self.na_dict[\"DATE\"]=string.replace(str(first_day).split(\" \")[0], \"-\", \" \").split()\r\n\t except:\r\n\t msg=\"Nappy Warning: Could not get the first date in the file. You will need to manually edit the output file.\"\r\n\t\tprint msg\r\n\t\tself.outputMessage.append(msg)\r\n\t\tself.na_dict[\"DATE\"]=(\"DATE\", \"NOT\", \"KNOWN\")\r\n else: \r\n if not self.na_dict.has_key(\"DATE\"):\r\n\t msg=\"Nappy Warning: Could not get the first date in the file. 
You will need to manually edit the output file.\"\r\n\t\tprint msg\r\n\t\tself.outputMessage.append(msg)\r\n\t self.na_dict[\"DATE\"]=(\"DATE\", \"NOT\", \"KNOWN\")\r\n self.na_dict[\"IVOL\"]=1\r\n self.na_dict[\"NVOL\"]=1\r\n for key in header_items.keys():\r\n self.na_dict[key]=header_items[key]\r\n return", "title": "" }, { "docid": "f227d50064074078d53ba6209312f8e7", "score": "0.6231539", "text": "def _build_http_header(self) -> Dict[str, str]:\n return {}", "title": "" }, { "docid": "c6f2b0290338f93bf6182cf0af3df8d1", "score": "0.6228393", "text": "def setHeader(self, key, value):\r\n headers = self._browser.extra_headers\r\n for i, (k, v) in enumerate(headers):\r\n if k == key:\r\n if value is not None:\r\n headers[i] = (key, value)\r\n else:\r\n del headers[i]\r\n break\r\n else:\r\n if value is not None:\r\n headers.append((key, value))\r\n if key.lower() == 'accept-encoding':\r\n if value and value.lower() == 'gzip':\r\n self._accept_gzip = True\r\n else:\r\n self._accept_gzip = False", "title": "" }, { "docid": "8549b338372922660b1775461be82311", "score": "0.6225534", "text": "def create_headers():\n from bitcodin import api_key\n if api_key is None:\n if os.getenv('PYTHON_API_KEY', None) is None:\n raise BitcodinApiKeyNotSetError(\"bitcodin.api_key is not set!\", None)\n else:\n api_key = os.getenv('PYTHON_API_KEY')\n\n headers = {\n 'Content-Type': 'application/json',\n 'bitcodin-api-key': api_key\n }\n return headers", "title": "" }, { "docid": "4830d2fea7347d342a8212874aa8289e", "score": "0.6221215", "text": "def outputheader(self):\n return True", "title": "" }, { "docid": "c85d27ba13f64da597bad3061a5a350b", "score": "0.6205596", "text": "def header(self, QNetworkRequest_KnownHeaders): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "b92432c1cc60b6d5b8580f2927cd8eeb", "score": "0.6192407", "text": "def header_body(self):\n\n from pygments.formatters import HtmlFormatter\n\n header = []\n static = os.path.join(path.get_ipython_package_dir(),\n 'frontend', 'html', 'notebook', 'static',\n )\n here = os.path.split(os.path.realpath(__file__))[0]\n css = os.path.join(static, 'css')\n for sheet in [\n # do we need jquery and prettify?\n # os.path.join(static, 'jquery', 'css', 'themes', 'base',\n # 'jquery-ui.min.css'),\n # os.path.join(static, 'prettify', 'prettify.css'),\n os.path.join(css, 'boilerplate.css'),\n os.path.join(css, 'fbm.css'),\n os.path.join(css, 'notebook.css'),\n os.path.join(css, 'renderedhtml.css'),\n # our overrides:\n os.path.join(here, '..', 'css', 'static_html.css'),\n ]:\n header.extend(self._stylesheet(sheet))\n\n # pygments css\n pygments_css = HtmlFormatter().get_style_defs('.highlight')\n header.extend(['<meta charset=\"UTF-8\">'])\n header.extend(self.in_tag('style', pygments_css,\n dict(type='\"text/css\"')))\n\n # TODO: this should be allowed to use local mathjax:\n header.extend(self.in_tag('script', '', {'type': '\"text/javascript\"',\n 'src': '\"https://c328740.ssl.cf1.rackcdn.com/mathjax/'\n 'latest/MathJax.js?config=TeX-AMS_HTML\"',\n }))\n with io.open(os.path.join(here, '..', 'js', 'initmathjax.js'),\n encoding='utf-8') as f:\n header.extend(self.in_tag('script', f.read(),\n {'type': '\"text/javascript\"'}))\n return header", "title": "" }, { "docid": "e7ede02a2f421bdefa4eb49cf3a18c1c", "score": "0.618727", "text": "def set_header(self, header_name, header_value):\n if header_value is not None:\n self._headers[header_name] = header_value\n elif header_name in self._headers:\n del self._headers[header_name]", 
"title": "" }, { "docid": "ab2d81afe1adfecc80c97c18b59f26b9", "score": "0.6174547", "text": "def add_header(self, header_name, header_value):\n\n self.headers[header_name] = header_value", "title": "" }, { "docid": "4e71b9f9b5b824146dae4d38f3ae9cd1", "score": "0.61672443", "text": "def initHead():\r\n return dbc.Row(\r\n id=\"div-head-desc\",\r\n children=[\r\n dbc.Col(\r\n html.H1([fa(\"fab fa-freebsd\"), \" Mephisto \"], id=\"icon-about-mephisto\"),\r\n width=2\r\n ),\r\n dbc.Col([\r\n html.H6([\"v. \", __version__], id=\"version-check\"),\r\n ],\r\n align=\"end\"\r\n ),\r\n dbc.Col([\r\n dbc.Row(html.A(html.H6([fa(\"fas fa-external-link-alt\"), \" Visit Website\", \" \"]),\r\n href=PROJ_URL, target=\"_blank\")),\r\n dbc.Row(html.A(html.H6([fa(\"fab fa-github\"), \" Report issues!\"]),\r\n href=PROJ_ISSUE_URL, target=\"_blank\"))\r\n ],\r\n width=2,\r\n align=\"end\"\r\n ),\r\n html.Hr()\r\n ],\r\n className=\"sticky-top\",\r\n style={\"background\": \"white\", \"zIndex\": 999}\r\n )", "title": "" }, { "docid": "25133882e096fa472bd97a1cd82ede41", "score": "0.61632365", "text": "def _make_file_header(self):\n header = \"\"\n if self._title is not None:\n num_pad = max(2, self.LINE_LENGTH - len(self._title)) - 2\n banner = \"-\" * int(num_pad / 2)\n header += \"# {} {} {}\\n\".format(banner, self._title, banner)\n\n if self._comment is not None:\n comment_lines = self.create_comment_lines(self._comment)\n header += \"\".join(comment_lines)\n\n return header", "title": "" }, { "docid": "c7f18d2ffb0ea08ac9a7d447e20d3b42", "score": "0.61626154", "text": "def format_header(self):\n # backn is the up-directory of the dialog's parent.\n backn = self.horz_parent.neighbors[\"back\"]\n text = backn.parent.get_path()\n\n # Concatenate the header. join() for speed.\n header = \"\".join([\n self.root.box_drawing[0b0011],\n self.root.box_drawing[0b0101]*2,\n self.root.box_drawing[0b1110],\n \" \",\n misc.text_extend(text, 70, self.root.pad_char),\n \" \",\n self.root.box_drawing[0b1011],\n self.root.box_drawing[0b0101]*2,\n self.root.box_drawing[0b0110]\n ])\n return header", "title": "" }, { "docid": "47fe31b5eb30bb7d2d174c445efc79d2", "score": "0.61418676", "text": "def generate_header(self):\n _status = self.aggregated_results.get_status(executed_gate=self.executed_quality_gate, passing_gate=self.passing_quality_gate)\n _header = f\"# {GitHubIssueGenerator.header_symbols[_status]}\"\n if self.snapshot is not None:\n _header = _header + self.snapshot\n _header = _header + f\" {_status.capitalize()}\"\n if self.stage is not None:\n _header = _header + f\" on branch {self.stage.capitalize()}\"\n return _header", "title": "" }, { "docid": "1509c483c27b8b8b93ba2f658e9280b9", "score": "0.6139214", "text": "def set_extra_headers(self, path):\r\n pass", "title": "" }, { "docid": "1a75a61737ad87ddc533fec9d54718d0", "score": "0.61311436", "text": "def header(self, element, body):\n element.markdown.body = \"## %s\" % _clean_text(body)", "title": "" }, { "docid": "0588cac04d257e64eabc31a948668cc9", "score": "0.6123071", "text": "def _set_default_headers(self) -> None:\n self.set_header('Server', '%s/%s' % (\n PROGRAM.capitalize(),\n mockintosh.__version__\n ))\n self.set_header('x-%s-prompt' % PROGRAM.lower(), \"Hello, I'm Mockintosh.\") # clear signature that it's mock\n self.set_cors_headers()", "title": "" }, { "docid": "b5838c65fa400e6ef13f005cd29241c6", "score": "0.61170906", "text": "def __init__(self, header):\n self._hdr = header\n self._sections = []", "title": "" }, { "docid": 
"6d31b2e93774ddffe0789b3ec313bbf9", "score": "0.6111701", "text": "def set_capsule_headers(self, headers):\n fmt = self.newlibrary.fmtdict\n headers.add_shroud_file(fmt.C_header_utility)\n headers.add_shroud_dict(self.capsule_include)\n if self.language == \"c\":\n # Add header for NULL. C++ uses nullptr.\n headers.add_shroud_file(\"<stdlib.h>\")\n for ntypedefs in self.capsule_typedef_nodes.values():\n headers.add_typemap_list(ntypedefs.impl_header)", "title": "" } ]
209c4283cd4724a4d2adf5555c56efe6
A simple fully connected network with regression output
[ { "docid": "1545ffa21720990180abeb0b06f3acde", "score": "0.0", "text": "def SimpleFn(x, x_additional, architecture = [100, 1, 32, 32, 1], context = 'buy_'):\n with tf.name_scope(context + 'fc1'):\n W_fc1 = weight_variable([architecture[0], architecture[2]])\n b_fc1 = bias_variable([architecture[2]])\n h_fc1 = tf.nn.relu(tf.matmul(x, W_fc1) + b_fc1)\n\n with tf.name_scope(context + 'fc2'):\n W_fc2 = weight_variable([architecture[2] + architecture[1], architecture[3]])\n b_fc2 = bias_variable([architecture[3]])\n h_fc1_concat = tf.concat([h_fc1, x_additional], 1)\n h_fc2 = tf.nn.relu(tf.matmul(h_fc1_concat, W_fc2) + b_fc2)\n\n with tf.name_scope(context + 'fc3'):\n W_fc3 = weight_variable([architecture[3], architecture[4]])\n b_fc3 = bias_variable([architecture[4]])\n h_fc3 = tf.nn.relu(tf.matmul(h_fc2, W_fc3) + b_fc3)\n\n return tf.squeeze(h_fc3)", "title": "" } ]
[ { "docid": "06e365561eea5dfa4c7ad3164413baed", "score": "0.6496318", "text": "def _simple_network():\n input = paddle.static.data(\n name=\"input\", shape=[None, 2, 2], dtype=\"float32\"\n )\n weight = paddle.create_parameter(\n shape=[2, 3],\n dtype=\"float32\",\n attr=paddle.ParamAttr(initializer=paddle.nn.initializer.Constant(0.1)),\n )\n bias = paddle.create_parameter(shape=[3], dtype=\"float32\")\n linear_out = paddle.nn.functional.linear(x=input, weight=weight, bias=bias)\n out = paddle.tensor.sum(linear_out)\n return input, out, weight", "title": "" }, { "docid": "b52b9abee992cea49fb25dc483295e77", "score": "0.64602447", "text": "def regression_relu():\n # np.random.seed(1337)\n n_data_total = 400\n x1 = np.random.uniform(0, 1, n_data_total)\n x2 = np.random.uniform(0, 1, n_data_total)\n X = np.zeros(shape=(n_data_total, 2))\n for i in range(n_data_total): X[i] = x1[i], x2[i]\n y = common.franke_function(x1, x2)\n\n q1 = nn.FFNNRegressor(\n input_data = X,\n true_output = y,\n hidden_layer_sizes=(50, 25, 25),\n n_categories = 1,\n n_epochs = 300,\n batch_size = 40,\n hidden_layer_activation_function = af.relu,\n hidden_layer_activation_function_derivative = af.relu_derivative,\n output_activation_function = af.linear,\n cost_function_derivative = af.mse_derivative,\n verbose = True,\n debug = False,\n scaling = True)\n \n N = 10\n n_repetitions = 1 # Average to smooth the data.\n learning_rates = np.linspace(1e-3, 2e-2, N)\n mse_train = np.zeros(shape=(N))\n mse_test = np.zeros(shape=(N))\n r_train = np.zeros(shape=(N))\n r_test = np.zeros(shape=(N))\n\n for rep in range(n_repetitions):\n print(f\"\\nrepetition {rep+1} of {n_repetitions}\")\n\n for j in range(N):\n print(f\"{j+1} of {N}, {learning_rates[j]=}\")\n q1.train_neural_network(learning_rate=learning_rates[j])\n q1.score()\n mse_train[j] += q1.mse_train\n mse_test[j] += q1.mse_test\n r_train[j] += q1.r_train\n r_test[j] += q1.r_test\n\n mse_train /= n_repetitions\n mse_test /= n_repetitions\n r_train /= n_repetitions\n r_test /= n_repetitions\n\n plt.title(\"relu\")\n plt.plot(learning_rates, mse_train, label=f\"train\")\n plt.plot(learning_rates, mse_test, label=f\"test\")\n plt.xlabel(\"learning rates\")\n plt.ylabel(\"mse\")\n plt.legend()\n plt.show()", "title": "" }, { "docid": "ae8da2890618a5fcb77406bb22daf927", "score": "0.62567747", "text": "def NN_regression(X_train, y_train, X_test, y_test):\n cost = MSE()\n layers = [2, 100, 60, 1]\n act_fns = [\"tanh\", \"tanh\", \"linear\"]\n NN = NeuralNetwork(layers=layers, cost=cost, act_fns=act_fns)\n epochs = 750\n batch_size = 100\n learning_rates = np.logspace(-2, -4, 3)\n regular_params = np.logspace(-4, -1, 4)\n r2_scores = np.zeros((3, 4))\n epoch_arr = np.linspace(1, epochs, epochs)\n\n # MSE vs. 
epochs plot:\n fig = plt.figure()\n ax = plt.subplot(111)\n line = [[\"b--\", \"b-.\", \"b:\"],\n [\"g--\", \"g-.\", \"g:\"],\n [\"r--\", \"r-.\", \"r:\"],\n [\"y--\", \"y-.\", \"y:\"]\n ]\n\n for i, eta in enumerate(learning_rates):\n for j, reg in enumerate(regular_params):\n NN.SGD(\n X_train, y_train, validation_data=(X_test, y_test),\n epochs=epochs, batch_size=batch_size, eta=eta, reg=reg\n )\n r2_scores[i, j] = r2_score(y_test, NN.predict(X_test))\n ind = np.argmin(NN.cost_arr)\n e = epoch_arr[ind]\n c = NN.cost_arr[ind]\n plt.plot(epoch_arr, NN.cost_arr, line[j][i], label=rf\"$\\eta, \\lambda=$ ({eta:1.0e},{reg:1.0e})\")\n plt.plot(e, c, 'ro')\n\n plt.plot(epoch_arr, np.ones(epoch_arr.shape)*0.09, \"k-\", label=r\"Irreducible error $\\sigma^2$\")\n box = ax.get_position()\n ax.set_position([box.x0, box.y0, box.width * 0.75, box.height])\n ax.legend(loc='center left', bbox_to_anchor=(1, 0.5), frameon=True, shadow=True)\n plt.ylim(0.089, 0.115)\n plt.xlabel(\"Epochs\")\n plt.ylabel(\"MSE\")\n plt.grid()\n plt.show()\n\n fig, ax = plt.subplots(1, 1)\n sns.heatmap(\n r2_scores,\n square=True,\n annot=True,\n cmap=\"YlGnBu\",\n xticklabels=[f\"{i:1g}\" for i in regular_params],\n yticklabels=[f\"{i:1g}\" for i in learning_rates],\n )\n bottom, top = ax.get_ylim()\n ax.set_ylim(bottom + 0.5, top - 0.5)\n plt.title(r\"$R^2$ scores\")\n plt.xlabel(\"L2 Regularization parameter\")\n plt.ylabel(\"Learning rate\")\n plt.show()\n\n # Plot of franke function and prediction on mesh\n fig = plt.figure()\n ax = fig.add_subplot(111, projection=\"3d\")\n ax.set_xlabel(r\"$x_1$\")\n ax.set_ylabel(r\"$x_2$\")\n ax.set_zlabel(\"y\")\n ax.set_zlim(-0.5, 1.5)\n # predict a mesh of data\n l = np.linspace(0, 1, 101)\n x1_mesh, x2_mesh = np.meshgrid(l, l)\n x1_flat, x2_flat = x1_mesh.flatten(), x2_mesh.flatten()\n y_pred = NN.predict(np.column_stack((x1_flat, x2_flat)))\n y_pred_mesh = np.reshape(y_pred, x1_mesh.shape)\n func_mesh = franke_function(x1_flat, x2_flat).reshape(x1_mesh.shape)\n surface_pred = ax.plot_surface(x1_mesh, x2_mesh, y_pred_mesh, cmap=mpl.cm.coolwarm, alpha=.7)\n surface_true = ax.plot_surface(x1_mesh, x2_mesh, func_mesh, alpha=.3)\n fig.colorbar(surface_pred, shrink=0.5)\n plt.show()", "title": "" }, { "docid": "36a060b98aad3b3adaf009a4db1fb05d", "score": "0.61959326", "text": "def test_multiple_layers():\n rng = numpy.random.RandomState(0)\n x = tensor.vector(\"x\", dtype=NNTYPE)\n n_in = 3\n n_out = 4\n regressnetwork = RegressionNetwork(rng, x, n_in, n_hidden_list=[20,20,n_out],\n activations=[nnet.relu, nnet.relu, None],\n prefix=\"hnetwork\")\n f = theano.function(inputs=[x], outputs=regressnetwork.output)\n f_numpy_list = []\n relu = lambda x: numpy.maximum(0, x)\n f_numpy0 = lambda x: relu(numpy.dot(x, regressnetwork.parameters[0].get_value()) + regressnetwork.parameters[1].get_value())\n f_numpy1 = lambda x: relu(numpy.dot(f_numpy0(x), regressnetwork.parameters[2].get_value()) + regressnetwork.parameters[3].get_value())\n f_numpy2 = lambda x: numpy.dot(f_numpy1(x), regressnetwork.parameters[4].get_value()) + regressnetwork.parameters[5].get_value()\n x0 = rng.randn(n_in).astype(NNTYPE)\n numpy.testing.assert_array_almost_equal(f(x0), f_numpy2(x0))\n\n xs = tensor.matrix(\"xs\", dtype=NNTYPE)\n regressnetwork2 = RegressionNetwork(rng, xs, n_in, n_hidden_list=[20,20,n_out],\n activations=[nnet.relu, nnet.relu, None],\n parameters=regressnetwork.parameters,\n prefix=\"hnetwork\")\n fs = theano.function(inputs=[xs], outputs=regressnetwork2.layer_list[0].output)\n f = 
theano.function(inputs=[x], outputs=regressnetwork.layer_list[0].output)\n xs0 = rng.randn(2, n_in).astype(NNTYPE)\n\n numpy.testing.assert_array_almost_equal(f(xs0[0]), fs(xs0)[0])\n numpy.testing.assert_array_almost_equal(f(xs0[1]), fs(xs0)[1])", "title": "" }, { "docid": "0d873fdc94cc5175b7c780a26a0e45b8", "score": "0.61856556", "text": "def neural_network(X, Y, X_test, Y_test, num_neurons, activation):\n ## YOUR CODE HERE\n #################\n return 0", "title": "" }, { "docid": "32393cd95355e6f084f9775bfb92c2ba", "score": "0.61775", "text": "def neural_network_model(data):\n\t\"\"\" (input_data * Weights) + bias (bias helps avoid having a zero output) \"\"\"\n\t\n\thidden_1_layer = {'weights': tf.Variable(tf.random_normal([784,n_nodes_hl1])),\n\t\t\t\t\t 'biases': tf.Variable(tf.random_normal([n_nodes_hl1]))}\n\t\n\thidden_2_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl1,n_nodes_hl2])),\n\t\t\t\t\t 'biases': tf.Variable(tf.random_normal([n_nodes_hl2]))}\n\n\thidden_3_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl2,n_nodes_hl3])),\n\t\t\t\t\t 'biases': tf.Variable(tf.random_normal([n_nodes_hl3]))}\n\n\toutput_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl3,n_classes])),\n\t\t\t\t\t 'biases': tf.Variable(tf.random_normal([n_classes]))}\n\n\tl1 = tf.add(tf.matmul(data, hidden_1_layer['weights']), hidden_1_layer['biases'])\n\tl1 = tf.nn.relu(l1)\n\n\tl2 = tf.add(tf.matmul(l1, hidden_2_layer['weights']) , hidden_2_layer['biases'])\n\tl2 = tf.nn.relu(l2)\n\n\tl3 = tf.add(tf.matmul(l2, hidden_3_layer['weights']), hidden_3_layer['biases'])\n\tl3 = tf.nn.relu(l3)\n\n\toutput = tf.matmul(l3, output_layer['weights']) + output_layer['biases']\n\n\treturn output", "title": "" }, { "docid": "8c3bd584008caed89a6de5feb474da66", "score": "0.6114907", "text": "def get_functional_regression_model1(X):\n input_layer = keras.layers.Input(shape=X.shape[1])\n layer1 = keras.layers.Dense(30, activation=\"relu\")(input_layer)\n layer2 = keras.layers.Dense(30, activation=\"relu\")(layer1)\n concat = keras.layers.Concatenate()([input_layer, layer2])\n output = keras.layers.Dense(1, activation=\"softplus\")(concat)\n model = keras.models.Model(inputs=[input_layer], outputs=[output])\n return model", "title": "" }, { "docid": "01177e1c99725e4a1046555dd230d39d", "score": "0.6108673", "text": "def two_layer_net(X, model, y=None, reg=0.0):\n\n # unpack variables from the model dictionary\n W1,b1,W2,b2 = model['W1'], model['b1'], model['W2'], model['b2']\n N, D = X.shape \n # input - fully connected layer - ReLU - fully connected layer - softmax\n #Activation function\n reluF = lambda x: np.maximum(0, x)\n #Compared to lecture notes we switch X and W1 because to multiple the inputs with the correct weights\n h1 = reluF(np.dot(X, W1) + b1)\n scores = np.dot(h1, W2) + b2\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n \n # If the targets are not given then jump out, we're done\n if y is None:\n return scores\n\n #############################################################################\n # TODO: Finish the forward pass, and compute the loss. This should include #\n # both the data loss and L2 regularization for W1 and W2. Store the result #\n # in the variable loss, which should be a scalar. Use the Softmax #\n # classifier loss. 
So that your results match ours, multiply the #\n # regularization loss by 0.5 #\n #############################################################################\n # compute the loss\n #http://stackoverflow.com/questions/8904694/how-to-normalize-a-2-dimensional-numpy-array-in-python-less-verbose\n expScores = np.exp(scores)\n rowSum = expScores.sum(axis=1, keepdims=True)\n # Normalized scores\n propScores = expScores / rowSum\n logprob_correctLabel = -np.log(propScores[range(N),y])\n softmax_loss = 1/float(N) * np.sum(logprob_correctLabel)\n #regulization loss\n reg_loss = 0.5 * reg * np.sum(W1*W1) + 0.5 * reg * np.sum(W2 * W2)\n #Final loss \n loss = softmax_loss + reg_loss\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n # compute the gradients\n grads = {}\n \n #############################################################################\n # TODO: Compute the backward pass, computing the derivatives of the weights #\n # and biases. Store the results in the grads dictionary. For example, #\n # grads['W1'] should store the gradient on W1, and be a matrix of same size #\n #############################################################################\n # Firstly we calculate the gradient on the scores.\n # The gradient from the loss function is simply -1.\n # This is subtracted from the correct scores for each\n # dscores are the probabilities for all classes as a row for each sample\n dscores = propScores\n #For each row(sample) in dscores 1 is subtracted from the correct element specified by y\n dscores[range(N),y] -= 1\n # We then divide all elements with N(number of samples)\n dscores /= N\n \n # The gradient for W2 is simply the output from the RELU activation function (h1)\n # multiplyed with the dscores that contains the gradient on the scores.\n # d/dw(w*x) = x which is our h1 then we get the input times dscores\n grads['W2'] = np.dot(h1.T, dscores)\n #bias is just the sum of the dscores\n grads['b2'] = np.sum(dscores, axis=0)\n\n # next backprop into hidden layer. This is the scores multiplied with the weights\n # for second layer\n dhidden = np.dot(dscores, W2.T)\n # backprop the ReLU non-linearity. 
\n #For elements < or equals 0 we set them equals to 0\n # remember how Relu is just max, so it routes the gradients\n dhidden[h1 <= 0] = 0\n # same thing as second layer - d/dw(w*x) = x, so x times our gradient for dhidden\n grads['W1'] = np.dot(X.T, dhidden)\n grads['b1'] = np.sum(dhidden, axis=0)\n \n # adding gradient for regulization\n # d/dw(1/2*reg*W1*w1) = reg * W1\n grads['W1'] += reg * W1 \n grads['W2'] += reg * W2\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, grads", "title": "" }, { "docid": "0efd705894bda92f7172dd9051becc3d", "score": "0.60923594", "text": "def network():", "title": "" }, { "docid": "8a5ac2d52ae6febb36b5561d2490f4a8", "score": "0.6078144", "text": "def regression_model(inputs, is_training=True, scope=\"deep_regression\"):\n with tf.variable_scope(scope, 'deep_regression', [inputs]):\n end_points = {}\n # Set the default weight _regularizer and acvitation for each fully_connected layer.\n with slim.arg_scope([slim.fully_connected],\n activation_fn=tf.nn.relu,\n weights_regularizer=slim.l2_regularizer(0.01)):\n\n # Creates a fully connected layer from the inputs with 32 hidden units.\n net = slim.fully_connected(inputs, 32, scope='fc1')\n end_points['fc1'] = net\n\n # Adds a dropout layer to prevent over-fitting.\n net = slim.dropout(net, 0.8, is_training=is_training)\n\n # Adds another fully connected layer with 16 hidden units.\n net = slim.fully_connected(net, 16, scope='fc2')\n end_points['fc2'] = net\n\n # Creates a fully-connected layer with a single hidden unit. Note that the\n # layer is made linear by setting activation_fn=None.\n predictions = slim.fully_connected(net, 1, activation_fn=None, scope='prediction')\n end_points['out'] = predictions\n\n return predictions, end_points", "title": "" }, { "docid": "721c1e02fbc9c36c4472bb87523edcd4", "score": "0.6073557", "text": "def ModularModel(train_data, optimizer=None, layers=7, nodes=4,\r\n activation='relu'):\r\n model = keras.Sequential()\r\n model.add(keras.layers.Dense(train_data.shape[1],name='Input',\r\n activation='linear',\r\n input_shape=(train_data.shape[1],)))\r\n for n in range(layers):\r\n model.add(keras.layers.Dense(nodes,name='layer'+str(n+1),\r\n activation=activation))\r\n model.add(keras.layers.Dense(4, name='Output'))\r\n\r\n if optimizer is None:\r\n optimizer = keras.optimizers.Adam(lr=0.001, beta_1=0.95, beta_2=0.999,\r\n epsilon=1e-4)\r\n \r\n model.compile(loss=my_loss, optimizer=optimizer, metrics=['mae','logcosh','mape'])\r\n model.summary()\r\n return model", "title": "" }, { "docid": "409a6ddecd3eb83a5f9d93b3ca604f02", "score": "0.6044459", "text": "def trainBasicRegression(hidden, inputDims, outputDims, width, cycles, epochs, patience, trainIn, trainOut, valIn, valOut):\n \n #Set seed\n tf.random.set_seed(1000)\n \n #Create model\n model = tf.keras.Sequential()\n \n model.add(tf.keras.layers.Dense(width, kernel_initializer='glorot_uniform', input_shape=(inputDims, )))\n model.add(tf.keras.layers.PReLU())\n \n for n in range(hidden-1):\n model.add(tf.keras.layers.Dense(width, kernel_initializer='glorot_uniform'))\n model.add(tf.keras.layers.PReLU())\n \n model.add(tf.keras.layers.Dense(outputDims, kernel_initializer='glorot_uniform'))\n \n callback = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=patience, restore_best_weights=True)\n \n #Train with decreasing learning rate\n for x in range(cycles):\n 
model.compile(optimizer=tf.keras.optimizers.Adam(0.01*(10**(-x)),amsgrad=True),\n loss='mean_squared_error',\n metrics=['mean_absolute_error', 'mean_squared_error'])\n model.summary()\n model.fit(trainIn, trainOut, validation_data=(valIn, valOut), epochs=epochs, batch_size=32, callbacks=[callback])\n \n #Save the backup \n model.save(\"backup\")\n \n #Extract weights and biases\n weights=[]\n biases=[]\n activation=[]\n for layer in model.layers:\n weightBias=layer.get_weights()\n if(len(weightBias)==2):\n weights.append(weightBias[0].T)\n bias=weightBias[1]\n bias=np.reshape(bias, (len(bias),1))\n biases.append(bias)\n if(len(weightBias)==1):\n activation.append(weightBias[0])\n \n return(weights, biases, activation)", "title": "" }, { "docid": "b3923fa46072a8e323def2662bf2434e", "score": "0.60339916", "text": "def build_network(self):\n model = Sequential()\n model.add(InputLayer(input_shape=(1, 81)))\n model.add(Dense(64, activation='relu'))\n model.add(Dense(32, activation='relu'))\n model.add(Dense(32, activation='relu'))\n model.add(Dense(1, activation='linear'))\n model.compile(loss='mse', optimizer=Adam(lr=self.learning_rate), metrics=['mae'])\n return model", "title": "" }, { "docid": "c0bc57d63bf7432af8670a25c14f0177", "score": "0.59885246", "text": "def get_functional_regression_model3(X):\n X1, X2 = X\n input_layer1 = keras.layers.Input(shape=X1.shape[1])\n input_layer2 = keras.layers.Input(shape=X2.shape[1])\n layer1 = keras.layers.Dense(30, activation=\"relu\")(input_layer1)\n layer2 = keras.layers.Dense(30, activation=\"relu\")(layer1)\n concat = keras.layers.Concatenate()([layer2, input_layer2])\n\n output = keras.layers.Dense(1, activation=\"softplus\")(concat)\n aux_output = keras.layers.Dense(1, activation=\"softplus\")(layer2)\n model = keras.models.Model(\n inputs=[input_layer1, input_layer2], outputs=[output, aux_output]\n )\n return model", "title": "" }, { "docid": "3ab6f5d92fb05988504ca115382bc8fc", "score": "0.59876966", "text": "def build_network(self):\n model = Sequential()\n model.add(InputLayer(input_shape=(1,61)))\n model.add(Dense(64, activation='relu'))\n model.add(Dense(32, activation='relu'))\n model.add(Dense(32, activation='relu'))\n model.add(Dense(1, activation='linear'))\n model.compile(loss='mse', optimizer=Adam(lr=self.learning_rate), metrics=[\"mse\"])\n return model", "title": "" }, { "docid": "dbbd587f42a623fca1a1904ddfd84c7d", "score": "0.5986711", "text": "def fit_model(train_x, y, test_x):\n print(\"\\n\\nRunning Convetional Net. 
Optimization progress below\\n\\n\")\n net1 = NeuralNet(\n layers=[ #list the layers here\n ('input', layers.InputLayer),\n ('hidden1', layers.DenseLayer),\n ('output', layers.DenseLayer),\n ],\n\n # layer parameters:\n input_shape=(None, train_x.shape[1]),\n hidden1_num_units=200, hidden1_nonlinearity=rectify, #params of first layer\n output_nonlinearity=softmax, # softmax for classification problems\n output_num_units=10, # 10 target values\n\n # optimization method:\n update=nesterov_momentum,\n update_learning_rate=0.05,\n update_momentum=0.7,\n\n regression=False,\n max_epochs=10, # Intentionally limited for execution speed\n verbose=1,\n )\n\n net1.fit(train_x, y)\n predictions = net1.predict(test_x)\n return(predictions)", "title": "" }, { "docid": "64ecfa5377827ee081b7e857a2711ae5", "score": "0.59682417", "text": "def regression_leaky_relu():\n # np.random.seed(1337)\n n_data_total = 400\n x1 = np.random.uniform(0, 1, n_data_total)\n x2 = np.random.uniform(0, 1, n_data_total)\n X = np.zeros(shape=(n_data_total, 2))\n for i in range(n_data_total): X[i] = x1[i], x2[i]\n y = common.franke_function(x1, x2)\n noise = np.random.normal(size=n_data_total)*0.1\n y += noise\n\n q1 = nn.FFNNRegressor(\n input_data = X,\n true_output = y,\n hidden_layer_sizes=(50, 25, 25),\n n_categories = 1,\n n_epochs = 10,\n batch_size = 50,\n hidden_layer_activation_function = af.leaky_relu,\n hidden_layer_activation_function_derivative = af.leaky_relu_derivative,\n output_activation_function = af.linear,\n cost_function_derivative = af.mse_derivative,\n verbose = True,\n debug = False,\n scaling = True)\n \n n_learning_rates = 6\n n_regularization_parameters = 10\n n_repetitions = 1 # Average to smooth the data.\n learning_rates = np.linspace(1e-5, 3e-4, n_learning_rates)\n regularization_parameters = np.linspace(0, 1e-3, n_regularization_parameters)\n \n mse_train = np.zeros(shape=(n_learning_rates, n_regularization_parameters))\n mse_test = np.zeros(shape=(n_learning_rates, n_regularization_parameters))\n r_train = np.zeros(shape=(n_learning_rates, n_regularization_parameters))\n r_test = np.zeros(shape=(n_learning_rates, n_regularization_parameters))\n\n for rep in range(n_repetitions):\n print(f\"\\nrepetition {rep+1} of {n_repetitions}\")\n for i in range(n_learning_rates):\n for j in range(n_regularization_parameters):\n \n print(f\"{j+1} of {n_regularization_parameters}, {learning_rates[i]=}\")\n q1.train_neural_network(learning_rate=learning_rates[i], lambd=regularization_parameters[j])\n q1.score()\n mse_train[i, j] += q1.mse_train\n mse_test[i, j] += q1.mse_test\n r_train[i, j] += q1.r_train\n r_test[i, j] += q1.r_test\n\n mse_train /= n_repetitions\n mse_test /= n_repetitions\n r_train /= n_repetitions\n r_test /= n_repetitions\n\n fig, ax = plt.subplots(figsize=(9, 7))\n ax = sns.heatmap(\n data = mse_train,\n xticklabels = [f\"{x*1e4:.1f}\" for x in regularization_parameters],\n yticklabels = [f\"{x*1e5:.1f}\" for x in learning_rates],\n linewidth = 0.5,\n annot = True,\n cmap = 'viridis',\n ax = ax,\n annot_kws = {\"size\": 14})\n \n ax.tick_params(axis='y', rotation=0)\n ax.tick_params(axis='x', rotation=0)\n ax.tick_params(labelsize=15)\n ax.set_ylabel(r\"$\\eta [10^{-5}]$\", fontsize=15, rotation=90)\n ax.set_xlabel(r\"$\\lambda [10^{-4}]$\", fontsize=15, rotation=0)\n cbar = ax.collections[0].colorbar\n cbar.ax.tick_params(labelsize=15)\n cbar.ax.set_ylabel('MSE', fontsize=15, rotation=90)\n # plt.savefig(fname=\"../fig/task_c_leaky_relu_lambda_eta.png\", dpi=300)\n plt.show()", "title": 
"" }, { "docid": "dc3280c888b5354a05d3efea0f8eaf8e", "score": "0.5933742", "text": "def get_functional_regression_model2(X):\n X1, X2 = X\n input_layer1 = keras.layers.Input(shape=X1.shape[1])\n input_layer2 = keras.layers.Input(shape=X2.shape[1])\n layer1 = keras.layers.Dense(30, activation=\"relu\")(input_layer1)\n layer2 = keras.layers.Dense(30, activation=\"relu\")(layer1)\n concat = keras.layers.Concatenate()([layer2, input_layer2])\n output = keras.layers.Dense(1, activation=\"softplus\")(concat)\n\n model = keras.models.Model(inputs=[input_layer1, input_layer2], outputs=[output])\n return model", "title": "" }, { "docid": "2b9f502fa1810fe65acfa1b227d1c39b", "score": "0.5916235", "text": "def two_layer_net(X, model, y=None, reg=0.0):\n\n # Unlike the original assignment, this function uses sigmoid activation instead of ReLU.\n\n W1, b1, W2, b2 = model['W1'], model['b1'], model['W2'], model['b2']\n N, D = X.shape\n H, C = W2.shape\n\n scores = None\n\n H_in = np.dot(X, W1) + b1\n # H_in[H_in < 0] = 0\n # H_out = H_in\n H_out = sigmoid(H_in)\n scores = np.dot(H_out, W2) + b2\n\n if y is None:\n return scores\n\n loss = None\n scores_hat = scores - np.max(scores, axis=1).reshape(-1, 1)\n softmax_res = np.exp(scores_hat) / np.sum(np.exp(scores_hat), axis=1).reshape(-1, 1)\n loss = -np.mean(np.log(softmax_res[range(N), list(y)]))\n\n loss += 0.5 * reg * (np.sum(W1 * W1) + np.sum(W2 * W2))\n\n grads = {}\n\n delta_scores = softmax_res.copy()\n delta_scores[range(N), list(y)] -= 1\n\n grads['W2'] = H_out.T.dot(delta_scores.reshape(N, -1)) + reg * W2\n grads['W2'] /= N\n grads['b2'] = np.mean(delta_scores, axis=0)\n\n delta_H = delta_scores.dot(W2.T) * H_out * (1 - H_out)\n\n grads['W1'] = X.T.dot(delta_H) + reg * W1\n grads['W1'] /= N\n grads['b1'] = np.mean(delta_H, axis=0)\n\n # grads['b2'] = np.mean(grads['b2'], axis=0)\n # grads['W2'] = np.mean(grads['W2'], axis=0) + W2 * reg\n # grads['b1'] = np.mean(grads['b1'], axis=0)\n # grads['W1'] = np.mean(grads['W1'], axis=0) + W1 * reg\n\n return loss, grads", "title": "" }, { "docid": "ecae1097ea5a00e7e86a369ff7793c04", "score": "0.5906051", "text": "def add_head(self, network):\n x = Dense(256, activation='relu', name=\"critic_head_slot\")(network.output)\n #x = Dropout(0.5)(x)\n out = Dense(1, activation='linear', name=\"value_est\")(x)\n return Model(network.input, out)", "title": "" }, { "docid": "3f5abad7ff1c24389c1339261b1b5d30", "score": "0.58847475", "text": "def NN(self,\n X=None,\n Y=None):\n from sklearn.neural_network import MLPRegressor\n import matplotlib.pyplot as plt\n rgr=MLPRegressor(hidden_layer_sizes=(8),\n activation=\"tanh\",\n solver=\"lbfgs\")\n \n if (X is not None and \n Y is not None):\n (self.sampled_X,self.sampled_Y)=(X,Y)\n \n # train \n rgr.fit(self.sampled_X,self.sampled_Y)\n # test\n Y_pred=rgr.predict(self.X)\n # compute metric\n m_nmse=self.metric.normalized_mean_square_error(Y_pred,self.Y)\n m_mape=self.metric.mean_absolute_percentage_error(Y_pred,self.Y) \n return (m_nmse,m_mape)", "title": "" }, { "docid": "9a602ceef1d085ffdcbaf7f834313b2c", "score": "0.58767325", "text": "def __init__(self, num_features, num_targets):\n\n model = Sequential()\n # First layer\n model.add(Dense(num_features + 2, input_dim=num_features))\n model.add(Activation('linear'))\n model.add(Dropout(0.15))\n\n # Second layer\n model.add(Dense(num_features - 6))\n model.add(Activation('linear'))\n\n # Final layer\n model.add(Dense(num_targets))\n\n model.compile(loss=mean_squared_error, optimizer='adam', metrics=['mse', 
'accuracy'])\n\n self.model = model", "title": "" }, { "docid": "c8cfbc788dcb76c007627b67a606376c", "score": "0.587239", "text": "def train_X(self):\n X, y = self.preprocess()\n\n if self.library == 'mxnet':\n data = mx.sym.Variable('data')\n\n fc1 = mx.sym.FullyConnected(data=data, name='fc1', num_hidden=self.num_labels * 10)\n act1 = mx.sym.Activation(data=fc1, name='relu1', act_type=\"relu\")\n\n # The second fully-connected layer and the according activation function\n fc2 = mx.sym.FullyConnected(data=act1, name='fc2', num_hidden=self.num_labels * 5)\n act2 = mx.sym.Activation(data=fc2, name='relu2', act_type=\"relu\")\n\n # The thrid fully-connected layer, note that the hidden size should be 10, which is the number of unique digits\n fc4 = mx.sym.FullyConnected(data=act2, name='fc4', num_hidden=self.num_labels)\n # The softmax and loss layer\n mlp = mx.sym.SoftmaxOutput(data=fc4, name='softmax')\n # create a model\n # mx.viz.plot_network(symbol=mlp, shape={\"data\": (28, 22)}).render(\"NaiveNet\", view=True)\n examples = mx.io.NDArrayIter(X, y)\n\n import logging\n logging.basicConfig(level=logging.INFO)\n self.model = mx.model.FeedForward(symbol=mlp,\n num_epoch=350,\n learning_rate=0.001,\n wd=0.00001,\n momentum=0.9)\n\n self.model.fit(X=examples)\n if self.library == 'lasagne':\n if self.data_model == 'linear':\n input_var = T.matrix('inputs')\n elif self.data_model == 'matrix':\n input_var = T.tensor3('inputs')\n target_var = T.ivector('targets')\n\n shape = (None, self.sequence_length)\n if self.data_model == 'matrix':\n shape = (None, self.sequence_length, self.sequence_length)\n\n l_in = lasagne.layers.InputLayer(shape=shape,\n input_var=input_var)\n l_in_drop = lasagne.layers.DropoutLayer(l_in, p=0.2)\n l_hid1 = lasagne.layers.DenseLayer(\n l_in_drop, num_units=800,\n nonlinearity=lasagne.nonlinearities.rectify,\n W=lasagne.init.GlorotUniform())\n l_hid1_drop = lasagne.layers.DropoutLayer(l_hid1, p=0.5)\n\n l_hid2 = lasagne.layers.DenseLayer(\n l_hid1_drop, num_units=800,\n nonlinearity=lasagne.nonlinearities.rectify)\n\n l_hid2_drop = lasagne.layers.DropoutLayer(l_hid2, p=0.5)\n l_out = lasagne.layers.DenseLayer(\n l_hid2_drop, num_units=self.num_labels,\n nonlinearity=lasagne.nonlinearities.softmax)\n\n prediction = lasagne.layers.get_output(l_out)\n loss = lasagne.objectives.categorical_crossentropy(prediction, target_var).mean()\n params = lasagne.layers.get_all_params(l_out, trainable=True)\n updates = lasagne.updates.sgd(loss, params, learning_rate=0.01)\n\n f_learn = theano.function([input_var, target_var], loss, updates=updates, allow_input_downcast=True)\n self.model = theano.function([input_var], prediction, allow_input_downcast=True)\n\n # Training\n it = 5000\n for i in range(it):\n l = f_learn(X, y)", "title": "" }, { "docid": "d0df2277b6d77da7472751cf7636ab9a", "score": "0.58525497", "text": "def get_network(model_name, input_data, input_names, batch_size):\n if model_name == 'WRN-40-2':\n out_shape = (batch_size, 10)\n elif model_name == 'resnet34':\n out_shape = (batch_size, 1000)\n shape_dict = {input_names[0]: input_data.shape}\n model = onnx.load(f'{model_name}.onnx')\n mod, params = relay.frontend.from_onnx(model, shape_dict)\n\n return mod, params", "title": "" }, { "docid": "6662dbe00ba61c78e1be63ae0aa8dd6b", "score": "0.58361924", "text": "def main():\n bias = 1.0\n \n P1 = [1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, bias]\n\n P2 = [1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 
1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, bias]\n\n P3 = [1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, bias]\n\n weights = [uniform(-1, 1) for x in range(36)]\n coefficient = 0.002 # coefficient of learing \n marginE = 0.0001 # margin of error\n\n # intialization of object\n obj = net.Network(weights, coefficient)\n \n e = 0.0\n # process of learning. If our start variable e, as an error will be smaller than margin - then our net is quazi-intelligent \n while True:\n e = 0.0\n rnd = random() % 2\n \n # for each loop, e is increased in learning process.\n if rnd == 0:\n e += obj.learn(P1, 1.0)\n e += obj.learn(P2, 0.0) \n else:\n e += obj.learn(P2, 0.0)\n e += obj.learn(P1, 1.0)\n if e < marginE:\n break\n \n # output for learned network\n print(obj.calc(P1))\n print(obj.calc(P2))\n print(obj.calc(P3))", "title": "" }, { "docid": "04896b16b10d76ddc3026e0f2800c2b4", "score": "0.58346355", "text": "def __init__(self, input_features, hidden_dim1, hidden_dim2, output_dim):\n super(Regression, self).__init__()\n\n self.fc1 = nn.Linear(input_features, hidden_dim1)\n self.fc2 = nn.Linear(hidden_dim1, hidden_dim2)\n self.fc3 = nn.Linear(hidden_dim2, output_dim)\n self.drop = nn.Dropout(0.3)", "title": "" }, { "docid": "b0adb01e6c0f607dd073603f926c1bc8", "score": "0.5823028", "text": "def fancy_nn(weights_path=None):\n\n vanilla_input = layers.Input(shape=(21,))\n img_input = layers.Input(shape=(64, 64, 3))\n\n vanilla = layers.Dense(15, name='hidden')(vanilla_input)\n vanilla = models.Model(inputs=vanilla_input, outputs=vanilla, name='vanilla_hidden')\n\n x = layers.concatenate([vanilla.output, Xception(img_input).output], name='hidden_concatenated')\n\n x = layers.Dense(15, name='hidden_hidden')(x)\n\n # MOVED INTO ACTOR AND CRITIC\n\n binary_output = layers.Dense(8, name='binary_prediction')(x)\n linear_output = layers.Dense(7, activation='linear', name='linear_prediction')(x)\n\n binary_model = models.Model(inputs=[vanilla_input, img_input], outputs=binary_output)\n linear_model = models.Model(inputs=[vanilla_input, img_input], outputs=linear_output)\n\n model = models.Model(inputs=[vanilla_input, img_input], outputs=[binary_model.output, linear_model.output])\n\n # model = models.Model(inputs=[vanilla_input, img_input], outputs=x)\n\n from keras.utils import plot_model\n plot_model(model, to_file='fancy_model.png', show_shapes=True)\n\n model.compile(optimizer='adam', loss='mean_squared_error')\n if weights_path != None: model.load_weights(weights_path)\n\n return model", "title": "" }, { "docid": "88c1069f9d485c5c58f982b4af239236", "score": "0.580179", "text": "def __init__(self, input_dim=3 * 32 * 32, hidden_dim=100, num_classes=10,\r\n weight_scale=1e-3, reg=0.0):\r\n self.params = {}\r\n self.reg = reg\r\n\r\n ############################################################################\r\n # TODO: Initialize the weights and biases of the two-layer net. Weights #\r\n # should be initialized from a Gaussian with standard deviation equal to #\r\n # weight_scale, and biases should be initialized to zero. All weights and #\r\n # biases should be stored in the dictionary self.params, with first layer #\r\n # weights and biases using the keys 'W1' and 'b1' and second layer weights #\r\n # and biases using the keys 'W2' and 'b2'. 
#\r\n # See also: http://cs231n.github.io/neural-networks-2/#init #\r\n ############################################################################\r\n # Initialize the weights in the self.params dictionary, with random numbers\r\n # multiplied by the weight scales. Dimensions should follow network layer\r\n # dimensions, i.e. first layer dim [input x hidden], second layer [hiddenxoutput]\r\n self.params[\"W1\"] = weight_scale * np.random.randn(input_dim, hidden_dim)\r\n self.params[\"W2\"] = weight_scale * np.random.randn(hidden_dim, num_classes)\r\n # Initialize the biases with 0s corresponding to the right vector dimensions\r\n self.params[\"b1\"] = np.zeros(hidden_dim)\r\n self.params[\"b2\"] = np.zeros(num_classes)\r\n ############################################################################\r\n # END OF YOUR CODE #\r\n ############################################################################\r", "title": "" }, { "docid": "939315ccbdbbcb879be982da71382323", "score": "0.5801409", "text": "def train(X, y):\n from theano import shared\n from nolearn.lasagne import NeuralNet\n from nolearn.lasagne import TrainSplit\n from lasagne.layers import InputLayer\n from lasagne.layers import Conv2DLayer\n from lasagne.layers import DenseLayer\n from lasagne.layers import MaxPool2DLayer\n from lasagne.layers import DropoutLayer\n\n layers = [\n (InputLayer, {'shape': (None, X.shape[1], X.shape[2], X.shape[3])}),\n (Conv2DLayer, {'num_filters': 32, 'filter_size': 3, 'pad': 'same'}),\n (MaxPool2DLayer, {'pool_size': 2}),\n (DropoutLayer, {'p': 0.1}),\n (Conv2DLayer, {'num_filters': 64, 'filter_size': 3, 'pad': 'same'}),\n (MaxPool2DLayer, {'pool_size': 2}),\n (DropoutLayer, {'p': 0.2}),\n (Conv2DLayer, {'num_filters': 128, 'filter_size': 5, 'pad': 'same'}),\n (MaxPool2DLayer, {'pool_size': 2}),\n (DropoutLayer, {'p': 0.3}),\n (DenseLayer, {'num_units': 512}),\n (DropoutLayer, {'p': 0.5}),\n (DenseLayer, {'num_units': 512}),\n (DenseLayer, {'num_units': y.shape[1], 'nonlinearity': None}),\n ]\n model = NeuralNet(\n layers,\n # update=<function nesterov_momentum at 0x7f14b1fe9b70>,\n # loss=None,\n # objective=<function objective at 0x7f14b1f78510>,\n # objective_loss_function=None,\n # batch_iterator_train=<nolearn.lasagne.base.BatchIterator object at 0x7f14b1f745f8>,\n # batch_iterator_test=<nolearn.lasagne.base.BatchIterator object at 0x7f14b1f74668>,\n regression=True,\n max_epochs=300,\n #max_epochs=1000,\n train_split=TrainSplit(eval_size=0.2),\n # custom_score=None,\n # X_tensor_type=None,\n # y_tensor_type=None,\n # use_label_encoder=False,\n on_epoch_finished=[\n AdjustParameter('update_learning_rate', start=0.01, stop=0.00001),\n AdjustParameter('update_momentum', start=0.9, stop=0.999),\n EarlyStopping(patience=50),\n ],\n # on_training_started=None,\n # on_training_finished=None,\n verbose=2,\n update_learning_rate=shared(0.01),\n update_momentum=shared(0.9),\n )\n\n return model.fit(X, y)", "title": "" }, { "docid": "239b003ffdc2a7313116594ef16ada63", "score": "0.5799659", "text": "def get_mlp_network_regressor(num_inputs, num_hidden, num_outputs, apply_bias=True):\n hidden_layer = SigmoidLayer(num_inputs, num_hidden, apply_bias)\n output_layer = LinearLayer(num_hidden, num_outputs)\n network = Network(hidden_layer, output_layer)\n return network", "title": "" }, { "docid": "e84f329ebdab5bc15ca283d61b07c792", "score": "0.577642", "text": "def test_relu_00():\n\n class ReluTester(Base):\n def create_onnx(self) -> onnx.ModelProto:\n node = make_node(\"Relu\", inputs=[\"x\"], 
outputs=[\"y\"])\n inputs = [info(\"x\", TensorProto.FLOAT, (1, 3, 4, 5))]\n outputs = [info(\"y\", TensorProto.FLOAT, (1, 3, 4, 5))]\n\n graph = make_graph([node], \"add_graph\", inputs, outputs)\n return make_model(graph)\n\n inputs = {\"x\": (np.random.rand(1, 3, 4, 5).astype(np.float32) * 10.0)}\n outputs = [\"y\"]\n ReluTester(inputs, outputs).run()", "title": "" }, { "docid": "ec688fbbea83e3e2065601d2f3982953", "score": "0.5770218", "text": "def __init__(self, num_features, num_targets):\n\n model = Sequential()\n # First layer\n model.add(Dense(num_features + 2, input_dim=num_features))\n model.add(Activation('linear'))\n model.add(Dropout(0.1))\n\n # Second layer\n model.add(Dense(num_features - 6))\n model.add(Activation('linear'))\n model.add(Dropout(0.2))\n\n # Third layer\n model.add(Dense(num_features - 8))\n model.add(Activation('linear'))\n model.add(Dropout(0.12))\n\n # Final layer\n model.add(Dense(num_targets))\n\n model.compile(loss=mean_squared_error, optimizer='adam', metrics=['mse', 'accuracy'])\n\n self.model = model", "title": "" }, { "docid": "7f02dfd85f54730a3c215875f5c3b1f4", "score": "0.57700145", "text": "def nn_train(dataset, hidden_layers = 1, hidden_nodes = [10], targets = ['target'], verbose = True):\n alpha = 1\n epsilon = 1e-4\n iteration = 0\n \n ys = dataset[targets].values\n xs = copy.deepcopy(dataset)\n xs['intercept'] = 1\n xs = xs.drop(targets, axis = 1).values\n\n n, x_nodes, y_nodes = xs.shape[0], xs.shape[1], ys.shape[1]\n \n hidden_thetas = []\n lower_node_count = x_nodes \n\n for layer in range(hidden_layers): \n hidden_thetas += [[[random.random()-0.5 for x in range(lower_node_count)] \n for x in range(hidden_nodes[layer])]]\n lower_node_count = hidden_nodes[layer] + 1\n \n output_thetas = [[random.random()-0.5 for x in range(lower_node_count)] for x in range(y_nodes)]\n\n current_error, previous_error = 20.0, 10.0\n \n while abs(current_error - previous_error) > epsilon:\n for obs in range(n):\n hidden_thetas, output_thetas = forward_backprop(xs[obs], ys[obs,:], hidden_thetas, output_thetas, alpha)\n \n prev_layer = xs\n for thetas_layer in hidden_thetas: \n y_hid_hats = y_predict_layer(thetas_layer, prev_layer) \n prev_layer = [[1] + yh for yh in y_hid_hats] \n \n y_out_hats = y_predict_layer(output_thetas, prev_layer) \n \n iteration += 1\n previous_error = current_error\n current_error = calculate_error_multiclass(ys, y_out_hats)\n \n if current_error > previous_error: \n alpha /= 10.0\n if verbose: print(\"Increased Error; Shrinking Alpha to \" + str(alpha))\n\n if verbose and iteration%10 == 0: print(\"Iteration \" + str(iteration) + \": \\tError = \" + str(current_error))\n \n return((hidden_thetas, output_thetas))", "title": "" }, { "docid": "5ddfd0be5c0250d54f4fd21ddbe10359", "score": "0.5766219", "text": "def make_cnn_model():\n # inputs are v,vw,o,c,h,l,t,n (X) label is action (Y)\n input_layer = input_data(shape=[None, 7])\n conv_layer_1 = conv_2d(input_layer,\n nb_filter=12,\n filter_size=2,\n activation='relu',\n name='conv_layer_1')\n pool_layer_1 = max_pool_2d(conv_layer_1, 2, name='pool_layer_1')\n conv_layer_2 = conv_2d(pool_layer_1,\n nb_filter=10,\n filter_size=2,\n activation='sigmoid',\n name='conv_layer_2')\n pool_layer_2 = max_pool_2d(conv_layer_2, 2, name='pool_layer_2')\n fc_layer_1 = fully_connected(pool_layer_2, 64,\n activation='relu',\n name='fc_layer_1')\n fc_layer_1 = tflearn.dropout(fc_layer_1, 0.5)\n fc_layer_2 = fully_connected(fc_layer_1, 64,\n activation='relu',\n name='fc_layer_2')\n fc_layer_3 = 
fully_connected(fc_layer_2, 2,\n activation='softmax',\n name='fc_layer_3')\n network = regression(fc_layer_3, optimizer='sgd',\n loss='categorical_crossentropy',\n learning_rate=0.1)\n model = tflearn.DNN(network)\n return model", "title": "" }, { "docid": "19da26a1f78cd05ef0d89abdd220b9a3", "score": "0.5764147", "text": "def two_layer_net(X, model, y=None, reg=0.0, dropout=None,maxout=None,bn=None):\n\n # unpack variables from the model dictionary\n W1,b1,W2,b2 = model['W1'], model['b1'], model['W2'], model['b2']\n N, D = X.shape\n scores = None\n a = Relu(X.dot(W1)+b1)\n scores = a.dot(W2)+b2 \n if y is None:\n return scores\n\n loss = None\n soft = softmax(scores)\n loss = np.sum(-np.log(soft[range(N),y]))/N + 0.5*reg*(np.sum(W1**2)+np.sum(W2**2))\n\n grads = {}\n #############################################################################\n # TODO: Compute the backward pass, computing the derivatives of the weights #\n # and biases. Store the results in the grads dictionary. For example, #\n # grads['W1'] should store the gradient on W1, and be a matrix of same size #\n #############################################################################\n \n grads['W2'] = a.T.dot(soft - make_onehot(y,W2.shape[1]))/N + reg*W2\n grads['b2'] = np.sum(soft-make_onehot(y,W2.shape[1]),axis=0)/N \n grads['W1'] = reg*W1 + X.T.dot((soft-make_onehot(y,W2.shape[1])).dot(W2.T)*dRelu(a))/N\n grads['b1'] = np.sum((soft-make_onehot(y,W2.shape[1])).dot(W2.T)*dRelu(a),axis=0)/N\n return loss, grads", "title": "" }, { "docid": "2e63cf014ad9282d9a4ede86c9c26a17", "score": "0.5763414", "text": "def run_part3():\n # GET THE INPUT SPIRAL DATA AND THE LEARNING RATES\n X, T = twospirals()\n print(\"WHOLE SET: \", len(X))\n print(X)\n print(\"WHOLE SET TARGETS: \", len(T))\n print(T)\n\n learning_rates = [0.01]\n # , 0.03, 0.07, 0.01, 0.1, 0.05]\n\n # GET THE TRAINING AND TEST DATA WITH SIZE N\n test_size = 96\n train_size = 384\n test_X, test_T, train_X, train_T = get_input_data(X, T, len(X), train_size, test_size)\n print(\"TEST SET\")\n print(test_X)\n print(\"TEST SET TARGETS\")\n print(test_T)\n print(\"TRAIN SET\")\n print(train_X)\n print(\"TRAIN SET TARGETS\")\n print(train_T)\n\n\n # some variables for plotting\n runs_info = []\n train_plot = []\n test_plot = []\n # CREATE THE NEURAL NETWORK AND TRAIN IT\n nn = NeuralNetwork()\n plt.plot_boundary(nn, test_X, test_T)\n plt.plot_boundary(nn, train_X, train_T)\n\n # variables containing errors the initial values are to test convergence\n train_mse, test_mse = 2, 2\n old_train_mse = 1\n momentum = {}\n # variable to test convergence\n landa = 0.00000001\n # counter for iterations\n n = 0\n # predictions\n for learning_rate in learning_rates:\n # convergence testing\n while abs(old_train_mse - train_mse) >= landa or n <= 1:\n y = nn.forward(train_X)\n error = dMSE(y, train_T)\n if n < 1:\n # first iteration its batch gradient descent to generate momentum pre values\n weight_adjustments = nn.backward(error)\n nn.old_momentum = nn.scale_weights(weight_adjustments, learning_rate)\n nn.adjust_weights(weight_adjustments, learning_rate)\n else:\n # from second iteration on the network uses momentum with nesterov accellerated descent\n weight_adjustments = nn.backward_2(error)\n momentum = nn.momentum(weight_adjustments, learning_rate)\n nn.adjust_momentum(momentum)\n nn.old_momentum = momentum.copy()\n old_train_mse = train_mse\n # calculates mean squared error\n train_mse = MSE(y, train_T)\n train_plot.append(train_mse)\n\n # Do a run with the test set without 
changing weights\n y = nn.forward(test_X)\n test_mse = MSE(y, test_T)\n test_plot.append(test_mse)\n n += 1\n if(n > 100):\n if(test_plot[-1] - test_plot[-2] > 0.01):\n print(\"error increasing too much\")\n break\n\n print(\"Train MSE: \",train_mse,\" - Test MSE: \", test_mse)\n\n print(\"Iterations: \",n)\n plt.compare_plots(train_plot, test_plot, train_mse, test_mse, learning_rate)\n plt.plot_boundary(nn, test_X, test_T, 0.5)\n plt.plot_boundary(nn, train_X, train_T, 0.5)\n runs_info.append(run_info(train_plot, test_plot, nn.var.copy(), learning_rate))\n train_mse, test_mse = 1, 1\n n = 0\n train_plot = []\n test_plot = []\n nn.reset()", "title": "" }, { "docid": "c5582f6f53f4a55d307dd97598de6de2", "score": "0.5757965", "text": "def pre_train_nn(X_train, y_train, nodes_per_layer, epochs=100):\n layers = [tf.keras.layers.Dense(n, activation=\"relu\") for n in nodes_per_layer]\n layers[-1].activation = tf.identity # Make last layer linear.\n model = tf.keras.Sequential(layers)\n\n model.compile(loss=\"mse\", optimizer=\"adam\")\n model.fit(X_train, y_train, epochs=epochs, verbose=0)\n return model.get_weights(), model", "title": "" }, { "docid": "7b1521d46a36baa46c860ad5bb85e970", "score": "0.5757598", "text": "def transfer_learning_with_vggnet():\n #######\n\n #######\n return model", "title": "" }, { "docid": "0518d94453f1fc9ba9a9ad60c489d270", "score": "0.57219267", "text": "def reg():\n\n lambdas = np.logspace(-4, 2, 7)\n #lambdas = [0.01, 0.1]\n n = len(lambdas)\n train_mse = np.zeros((3,n))\n test_mse = np.zeros((3,n))\n regs = [None, 'l1', 'l2']\n\n for j in range(3):\n for i in range(n):\n nn = NeuralNet(X,Y, nodes=[X.shape[1], 1], activations=[None], regularization=regs[j], lamb=lambdas[i])\n nn.TrainNN(epochs = 1000, batchSize = 200, eta0 = 0.01, n_print = 10)\n\n ypred_test = nn.feed_forward(nn.xTest, isTraining=False)\n mse_test = nn.cost_function(nn.yTest, ypred_test)\n\n test_mse[j,i] = mse_test\n\n if j == 0:\n test_mse[0].fill(test_mse[0,0])\n break\n\n\n plt.semilogx(lambdas, test_mse.T)\n plt.xlabel(r'$\\lambda$')\n plt.ylabel('MSE')\n plt.title(\"MSE on Test Set, %i samples\" %(n_samples))\n plt.legend(['No Reg', 'L1', 'L2'])\n plt.grid(True)\n plt.ylim([0, 25])\n plt.show()", "title": "" }, { "docid": "c0dd6d8910a581d4d7f805b424607fe3", "score": "0.57211393", "text": "def run(self, input):\n input = np.array(input)\n if input.shape != self.nodes[0].shape:\n raise Exception(\n f'Input shape does not match the input shape of the network!\\n {input.shape} should be {self.nodes[0].shape}')\n self.nodes[0] = input\n\n for i in range(self.layers - 1):\n self.z[i+1] = self.weights[i] @ self.nodes[i] + self.biases[i]\n self.nodes[i+1] = sig(self.z[i+1])\n\n return self.nodes[-1]", "title": "" }, { "docid": "96269aba255add4cc8c4d262a0ddd29d", "score": "0.56962013", "text": "def test_model(network, data, labels, verbose=True):\n return network.evaluate(\n x=data,\n y=labels,\n verbose=verbose\n )", "title": "" }, { "docid": "95deec970aa5fea4798d2994ad4ac75b", "score": "0.5689069", "text": "def test_createNetwork(self):\n # DropOutRate = 1 --> all Nodes will be dropped\n nbrOfNodesArray = [2, 100, 100, 2]\n activationFunction = 'sigmoid'\n dropOutRate = 0.01\n lossFunction = 'binary_crossentropy'\n modelOptimizer = 'Adam'\n learningRate = 0.001\n decay = 1e-6\n\n hyperParamsObj = HyperParamsObj()\n hyperParamsObj.nbrOfNodesArray = nbrOfNodesArray\n hyperParamsObj.activationFunction = activationFunction\n hyperParamsObj.dropOutRate = dropOutRate\n hyperParamsObj.lossFunction = 
lossFunction\n hyperParamsObj.modelOptimizer = modelOptimizer\n hyperParamsObj.learningRate = learningRate\n hyperParamsObj.learningRateDecay = decay\n\n machineLearningModel = MachineLearningModel(hyperParamsObj, modelName='', modelId=0, model=tf.keras.models.Sequential())\n\n machineLearningModel.createNetwork()", "title": "" }, { "docid": "4bdf94842058c16b268ba097a2e99e14", "score": "0.5685773", "text": "def reconstruct( x, y, batchsize=50, numepochs=10, architecture=[784,1000,10]):\n\t\n\tsize = architecture\n\tassert len(size) == 3, \"Only able to do 3-layer reconstructions right now\"\n\t\n\t# set up options\n\topts = Opts()\n\topts.batchsize = batchsize\n\topts.numepochs = numepochs\n\topts.plot = 1\n\t\t\n\tdisp(\"Setting up initial network\")\n\t# train the original network\n\tnn = NeuralNetwork( size, output='softmax' )\n\tdisp(\"Training initial network\")\n\tnntrain( nn, x, y, opts )\n\tfeedforward(nn,x,y)\n\t\n\t# train the inversion network\n\tdisp(\"Training the inversion network\")\n\tsize_n = [size[1], size[0]]\n\tnn_i = NeuralNetwork( size_n, output='sigm' )\n\ty_n = nn.a[0][:,1:]\n\tx_n = nn.a[1][:,1:]\n\t\n\t# shift x\n\t# x_n = nproll( x_n, 1, 0 )\n\t\n\tnntrain( nn_i, x_n, y_n, opts )\t\n\t\t\n\tdisp(\"Compiling reconstruction network\")\n\t# compile into reconstruction network\n\tnn_r = NeuralNetwork( [size[0],size[1],size[0]], output='sigm' )\n\tnn_r.W[0] = nn.W[0]\n\tnn_r.W[1] = nn_i.W[0]\n\t\t\n\treturn nn_r, nn, nn_i", "title": "" }, { "docid": "b833e105a8cc843b65519db0d51d0f10", "score": "0.5683874", "text": "def fully_conn(x,\n num_output,\n name='fc',\n activation='lrelu',\n keep_prob=1.):\n with tf.variable_scope(name):\n weights = tf.get_variable(name='fc_w',\n shape=[x.get_shape().as_list()[-1], num_output],\n initializer=tf.random_normal_initializer(stddev=0.02))\n biases = tf.get_variable(name='fc_b',\n shape=[num_output],\n initializer=tf.zeros_initializer())\n\n output = tf.nn.bias_add(tf.matmul(x, weights), biases)\n output = tf.nn.dropout(output, keep_prob=keep_prob)\n\n if activation == 'sigmoid':\n output = tf.sigmoid(output)\n elif activation == 'lrelu':\n output = lrelu(output)\n else:\n pass\n\n return output", "title": "" }, { "docid": "65463fff5f4b05d23d6f4f01fc483dfa", "score": "0.5681806", "text": "def neural_network(x, g, sigma, alpha_0, alpha, beta_0, beta):\n if x.ndim == 1:\n w = np.dot(x, alpha) + alpha_0\n z = sigma(w)\n t = np.dot(z, beta) + beta_0\n y_tilde = g(t)\n return y_tilde, t, z, w\n else:\n w = np.dot(x, alpha) + alpha_0\n z = sigma(w)\n t = np.dot(z, beta) + beta_0\n y_tilde = g(t)\n return y_tilde, t, z, w", "title": "" }, { "docid": "406c0355ef66a0d2ad72a043ad8e13bd", "score": "0.56798667", "text": "def __init__(self, hidden_size, num_step=2000, print_interval=100):\n self.num_step = num_step\n self.print_interval = print_interval\n self.learning_rate = 0.01\n self.size = hidden_size\n\n # Model parameters initialization\n # Please initiate your network parameters here.\n \n # Init weight\n \n self.w1 = np.array([[random.random() for i in range(2)] for j in range(hidden_size)])\n self.w2 = np.array([[random.random() for i in range(hidden_size)] for j in range(hidden_size)])\n self.w3 = np.array([random.random() for i in range(hidden_size)])\n \n # Store the output of neurons for back propagation\n self.Z1 = [0.] * hidden_size\n self.Z2 = [0.] * hidden_size\n \n # Store the gradient of weights\n self.w1_gradient = np.array([[0.] * 2] * hidden_size)\n self.w2_gradient = np.array([[0.] 
* hidden_size] * hidden_size)\n self.w3_gradient = np.array([0.] * hidden_size)\n \n ...", "title": "" }, { "docid": "f50808d3a18913ee2b2096e58626b27c", "score": "0.5659395", "text": "def create_network(self):\n if self.vision:\n raise NotImplementedError\n else:\n state_input = tf.placeholder(tf.float32, [None, self.frameskip, self.observation_dim])\n net = tflearn.fully_connected(state_input, 40, activation='relu')\n net = tflearn.fully_connected(net, 30, activation ='relu')\n # net = tflearn.fully_connected(net, 300, activation = 'relu')\n output = tflearn.fully_connected(net, self.action_dim, activation = 'linear')\n return state_input, output", "title": "" }, { "docid": "a0254e86128b23ea491647538dbec315", "score": "0.5650168", "text": "def fully_connected_layer(self, input, num_in, num_out, name, relu=True):\r\n with tf.variable_scope(name) as scope:\r\n # Create tf variables for the weights and biases\r\n weights = tf.get_variable('weights', shape=[num_in, num_out], trainable=True)\r\n biases = tf.get_variable('biases', [num_out], trainable=True)\r\n\r\n # Matrix multiply weights and inputs and add bias\r\n act = tf.nn.xw_plus_b(input, weights, biases, name=scope.name)\r\n\r\n # tf histogram summay\r\n # tf.summary.histogram(\"weights\",weights)\r\n # tf.summary.histogram(\"biases\", biases)\r\n\r\n if relu:\r\n # Apply ReLu non linearity\r\n relu = tf.nn.relu(act)\r\n # tf.summary.histogram(\"activations\", relu)\r\n return relu\r\n else:\r\n # tf.summary.histogram(\"activations\", act)\r\n return act", "title": "" }, { "docid": "52f09956539d2e1e0d4dc439b0e2554c", "score": "0.56455237", "text": "def train_model(X, X_train, Y_train, n_hidden=10, plot=False):\n ann_input = theano.shared(X_train)\n ann_output = theano.shared(Y_train)\n\n # Initialize random weights between each layer\n init_1 = np.random.randn(X.shape[1], n_hidden)\n init_2 = np.random.randn(n_hidden, n_hidden)\n init_out = np.random.randn(n_hidden)\n\n with pm.Model() as neural_network:\n # Weights from input to hidden layer\n weights_in_1 = pm.Normal('w_in_1', 0, sd=1,\n shape=(X.shape[1], n_hidden),\n testval=init_1)\n\n # Weights from 1st to 2nd layer\n weights_1_2 = pm.Normal('w_1_2', 0, sd=1,\n shape=(n_hidden, n_hidden),\n testval=init_2)\n\n # Weights from hidden layer to output\n weights_2_out = pm.Normal('w_2_out', 0, sd=1,\n shape=(n_hidden,),\n testval=init_out)\n\n # Build neural-network using tanh activation function\n act_1 = T.tanh(T.dot(ann_input,\n weights_in_1))\n act_2 = T.tanh(T.dot(act_1,\n weights_1_2))\n act_out = T.nnet.sigmoid(T.dot(act_2,\n weights_2_out))\n\n # Binary classification -> Bernoulli likelihood\n out = pm.Bernoulli('out',\n act_out,\n observed=ann_output)\n\n # Set back to original data to retrain\n ann_input.set_value(X_train)\n ann_output.set_value(Y_train)\n\n # Tensors and RV that will be using mini-batches\n minibatch_tensors = [ann_input, ann_output]\n minibatch_RVs = [out]\n\n minibatches = zip(\n create_minibatch(X_train),\n create_minibatch(Y_train),\n )\n\n total_size = len(Y_train)\n\n # mean-field approximation so we ignore correlations in the posterior\n with neural_network:\n # Run advi_minibatch\n v_params = pm.variational.advi_minibatch(\n n=50000, minibatch_tensors=minibatch_tensors,\n minibatch_RVs=minibatch_RVs, minibatches=minibatches,\n total_size=total_size, learning_rate=1e-2, epsilon=1.0\n )\n\n # draw samples from the variational posterior\n with neural_network:\n trace = pm.variational.sample_vp(v_params, draws=5000)\n\n if plot:\n 
plt.plot(v_params.elbo_vals)\n plt.ylabel('ELBO')\n plt.xlabel('iteration')\n plt.show()\n\n return ann_input, ann_output, trace, neural_network", "title": "" }, { "docid": "9496f486d46a14e4815e55c2cb2bd386", "score": "0.5644325", "text": "def model(X_train, Y_train, X_test, Y_test, layer_dims, learning_rate = 0.0001, num_epochs = 1500, minibatch_size = 32,\n print_cost = True):\n\n tf.reset_default_graph() # to be able to rerun the model without overwriting tf variables\n tf.set_random_seed(1) # to keep consistent results\n seed = 3 # to keep consistent results\n (n_x, m) = X_train.shape # (n_x: input size, m : number of examples in the train set)\n n_y = Y_train.shape[0] # n_y : output size\n costs = [] # To keep track of the cost\n\n #with tf.Graph().as_default():\n #\ttf.reset_default_graph()\t\n\n #with tf.device(tf.train.replica_device_setter(cluster=cluster_spec)):\n\n # Create Placeholders of shape (n_x, n_y)\n X, Y = create_placeholders(n_x, n_y)\n\n # Initialize parameters\n parameters = initialize_parameters(layer_dims)\n\n # Forward propagation: Build the forward propagation in the tensorflow graph\n Z_l = forward_propagation(X, parameters)\n\n # Cost function: Add cost function to tensorflow graph\n cost = compute_cost(Z_l, Y)\n\n # Backpropagation: Define the tensorflow optimizer. Use an AdamOptimizer.\n optimizer = tf.train.AdamOptimizer(learning_rate = learning_rate).minimize(cost)\n\n # Initialize all the variables\n init = tf.global_variables_initializer()\n\n start = timeit.default_timer()\n\n # Start the session to compute the tensorflow graph\n with tf.Session(\"grpc://\"+ worker1, config = tf.ConfigProto(allow_soft_placement= True)) as sess:\n # Run the initialization\n sess.run(init)\n\n # Do the training loop\n for epoch in range(num_epochs):\n\n epoch_cost = 0. 
# Defines a cost related to an epoch\n num_minibatches = int(m / minibatch_size) # number of minibatches of size minibatch_size in the train set\n seed = seed + 1\n minibatches = random_mini_batches(X_train, Y_train, minibatch_size, seed)\n\n for minibatch in minibatches:\n\n # Select a minibatch\n (minibatch_X, minibatch_Y) = minibatch\n\n # Run the session to execute the \"optimizer\" and the \"cost\", the feedict should contain a minibatch for (X,Y).\n _ , minibatch_cost = sess.run([optimizer, cost], feed_dict = {X: minibatch_X, Y: minibatch_Y})\n #print (minibatch_cost)\n #exit()\n epoch_cost += minibatch_cost / num_minibatches\n # Print the cost every epoch\n #if print_cost == True and epoch % 100 == 0:\n f.write(\"Cost after epoch %i: %f\\n\" % (epoch, epoch_cost) )\n if print_cost == True and epoch % 5 == 0:\n costs.append(epoch_cost)\n stop = timeit.default_timer()\n\n # plot the cost\n #plt.plot(np.squeeze(costs))\n #plt.ylabel('cost')\n #plt.xlabel('iterations (per tens)')\n #plt.title(\"Learning rate =\" + str(learning_rate))\n #plt.show()\n\n # lets save the parameters in a variable\n parameters = sess.run(parameters)\n #print (\"Parameters have been trained!\")\n\n # Calculate the correct predictions\n correct_prediction = tf.equal(tf.argmax(Z_l), tf.argmax(Y))\n\n # Calculate accuracy on the test set\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\"))\n\n f.write(\"\\n\")\n f.write(\"Train Accuracy: {}%\\n\".format(accuracy.eval({X: X_train, Y: Y_train})*100))\n f.write(\"Test Accuracy: {}%\\n\".format(accuracy.eval({X: X_test, Y: Y_test})*100))\n f.write(\"Calculation time: {} secs\\n\".format(np.round(stop-start,2)))\n f.close()\n\n return parameters", "title": "" }, { "docid": "80919db7165c3a6be241097e8384f761", "score": "0.5642981", "text": "def create_network(network_input,high, look_back):\n inputs = Input(shape=(network_input.shape[1], network_input.shape[2]))\n x = LSTM(512, input_shape=(network_input.shape[1], network_input.shape[2]), return_sequences=True)(inputs)\n x = (Dropout(0.4)(x))\n x = (LSTM(1024, return_sequences=True)(x))\n x = (Dropout(0.4)(x))\n x = (LSTM(2048, return_sequences=True)(x))\n x = (Dropout(0.4)(x))\n x = (LSTM(1024, return_sequences=True)(x))\n x = (Dense(512)(x))\n x = (LSTM(512)(x))\n x = (Dense(256)(x))\n output1 = Dense(1)(x)\n output2 = (Dense(1)(x))\n model = Model(input=inputs, output=[output1, output2])\n model.compile(loss='mean_squared_error', optimizer='adam')\n\n model.load_weights('weights-improvement-0.06-full.hdf5')\n return model", "title": "" }, { "docid": "fa449e61ae35f46ed7a76e1b8ea5847b", "score": "0.5634746", "text": "def __init__(self, nnModel=None, nnTestData=None):\n self.nnModel = nnModel\n if (nnModel is None):\n l1 = NNLayer(3,activation=NNLayer.A_Linear, layerType=NNLayer.T_Input)\n l2 = NNLayer(2,activation=NNLayer.A_Linear, layerType=NNLayer.T_Hidden)\n l3 = NNLayer(1,activation=NNLayer.A_Linear, layerType=NNLayer.T_Output)\n c1 = NNConnection(l1,l2)\n c2 = NNConnection(l2,l3)\n #c1 = NNConnection(l1,l3)\n self.nnModel = NNModel()\n self.nnModel.addConnection(c1)\n self.nnModel.addConnection(c2)\n \n \"\"\"\n Create the test data using the number of nodes in the InputLayer for the col\n dimension of the X input\n \"\"\"\n # test data\n self.m = 100\n self.n = nnModel.getInputLayer().getNodeCnt()\n self.x = rng.rand(self.m,self.n)\n # the larger\n self.w = np.linspace(0,3,self.n) + 1.0\n self.b = 3.5\n #err = rng.randn(m)\n z = np.dot(self.x,np.transpose(self.w))\n self.y = np.asmatrix(z + 
self.b).reshape(self.m,1)\n else:\n if (nnTestData is None):\n raise ValueError(\"TestData must be supplied when not using a default nnModel value\")\n # transfer test data to gradient tester\n self.m = nnTestData.m \n self.n = nnTestData.n\n self.x = nnTestData.x\n self.w = nnTestData.w\n self.b = nnTestData.b\n self.y = nnTestData.y", "title": "" }, { "docid": "432d51315770d1ba9c3c997f5a52cd18", "score": "0.562187", "text": "def full_conn(self,input_tensor, input_dim, output_dim, layer_name,is_test, act=tf.nn.relu,apply_bn=True,add_summ=True,summ_list=None,stddev=5e-2,wd=None,bias=0.0):\n print(input_tensor, input_dim, output_dim, layer_name,is_test, act,apply_bn,add_summ,summ_list,stddev,wd,bias)\n # Adding a name scope ensures logical grouping of the layers in the graph.\n with tf.name_scope(layer_name):\n # This Variable will hold the state of the weights for the layer\n if apply_bn:\n with tf.name_scope('weights'):\n weights = self.weight_variable([input_dim, output_dim],'weights' + str(self._weights_cnt),stddev,wd)\n self._weights_cnt += 1\n self._weights_list.append(weights)\n if add_summ:\n self.variable_summaries(weights,summ_list)\n with tf.name_scope('WX'):\n preactivate = tf.matmul(input_tensor, weights)\n if add_summ:\n self.variable_summaries(preactivate,summ_list)\n \n \n scale = tf.Variable(tf.ones(preactivate.get_shape()[1:].as_list()))\n beta = tf.Variable(tf.zeros(preactivate.get_shape()[1:].as_list()))\n pop_mean = tf.Variable(tf.zeros(preactivate.get_shape()[1:].as_list()), trainable=False)\n pop_var = tf.Variable(tf.ones(preactivate.get_shape()[1:].as_list()), trainable=False)\n \n bn,batch_mean,batch_var = tf.cond(is_test,\n lambda:self.bn_wrap(False,preactivate,scale,beta,pop_mean,pop_var),\n lambda:self.bn_wrap(True,preactivate,scale,beta,pop_mean,pop_var)\n )\n '''\n bn = tf.cond(is_test,\n lambda:tf.contrib.layers.batch_norm(preactivate,decay=0.999,center=True,scale=True,is_training=False,updates_collections=None),\n lambda:tf.contrib.layers.batch_norm(preactivate,decay=0.999,center=True,scale=True,is_training=True,updates_collections=None)\n )\n '''\n activations = act(bn, name='activation')\n if add_summ:\n with tf.name_scope('activation'):\n self.variable_summaries(activations,summ_list)\n if apply_bn:\n self.variable_summaries(pop_mean, ['hist'])\n self.variable_summaries(pop_var, ['hist'])\n self.variable_summaries(batch_mean, ['hist'])\n self.variable_summaries(batch_var, ['hist']) \n else:\n with tf.name_scope('weights'):\n weights = self.weight_variable([input_dim, output_dim],'weights' + str(self._weights_cnt),stddev,wd)\n self._weights_cnt += 1\n self._weights_list.append(weights)\n if add_summ:\n self.variable_summaries(weights,summ_list)\n with tf.name_scope('biases'):\n biases = self.bias_variable([output_dim],'biases' + str(self._biases_cnt),bias)\n self._biases_cnt += 1\n if add_summ:\n self.variable_summaries(biases,summ_list)\n with tf.name_scope('Wx_plus_b'):\n preactivate = tf.matmul(input_tensor, weights) + biases\n self.variable_summaries(preactivate,summ_list)\n with tf.name_scope('activation'): \n activations = act(preactivate)\n if add_summ:\n with tf.name_scope('activation'):\n self.variable_summaries(activations,summ_list)\n return activations", "title": "" }, { "docid": "5d1a4c3b5328234beff77d3c8342ff23", "score": "0.56196576", "text": "def NN_classification(X_train, y_train, X_test, y_test):\n print(\"Neural Network\\n\")\n layers = [41, 100, 75, 50, 34, 1]\n cost = CrossEntropy()\n act_fns = [\"sigmoid\", \"sigmoid\", \"sigmoid\", 
\"sigmoid\", \"sigmoid\"]\n NN = NeuralNetwork(layers=layers, cost=cost, act_fns=act_fns)\n\n epochs = 10\n batch_size = 100\n eta = 0.1\n reg = 1e-5\n NN.SGD(\n X_train, y_train, validation_data=(X_test, y_test),\n epochs=epochs, batch_size=batch_size, eta=eta, reg=reg\n )\n # Error rates\n y_pred = NN.predict(X_train, binary=True)\n err_tr = 1 - np.sum(y_pred == y_train) / len(y_train)\n y_pred = NN.predict(X_test, binary=True)\n err_te = 1 - np.sum(y_pred == y_test) / len(y_test)\n\n y_pred = NN.predict(X_train, binary=False)\n area_ratio_tr = roc_curve(y_train, y_pred, show=False)\n y_pred = NN.predict(X_test, binary=False)\n area_ratio_te = roc_curve(y_test, y_pred, show=True)\n sorting_smoothing_method(y_pred, y_test)\n\n print(\"\\tTraining data:\")\n print(f\"\\t\\tBest err rate = {err_tr:2.2}, area ratio = {area_ratio_tr:2.2}\")\n print(\"\\tTest data:\")\n print(f\"\\t\\tBest err rate = {err_te:2.2}, area ratio = {area_ratio_te:2.2}\")\n \"\"\"\n Evaluating error rates and creating ROC curve\n\tTraining Neural Network with 10 epochs\n\t[##############################] 100.00 % Done.\n\tTraining data:\n\t\tBest err rate = 0.29, area ratio = 0.55\n\tTest data:\n\t\tBest err rate = 0.21, area ratio = 0.55\n \"\"\"", "title": "" }, { "docid": "eeef0da388cbfe423eba8faa95dee200", "score": "0.561596", "text": "def constructNetwork(self):\n ci = self.ni\n for l in self.ls:\n self.layers.append(Layer(ci,l, activation=sigmoid))\n ci=l\n\n self.layers.append(Layer(ci, self.no, activation=softmax))", "title": "" }, { "docid": "d66992cd6f3815b5dab97f13b0889e5b", "score": "0.5606849", "text": "def fit_cnn(x, y, model_weights='weights/model_weights.hdf5', network_type='simple', trials=1):\n print('=== Convolution Neural Network ===')\n test_accuracy = np.zeros(trials)\n running_time = np.zeros(trials)\n x = scale_input(x)\n x = grey_scale(x)\n x = add_dimension(x)\n # x, y = add_pictures_without_chars(x, y)\n y = to_categorical(y, int(np.max(y)+1))\n for i in range(trials):\n print('Training network ', i + 1)\n start = time.time()\n random_state = 100 + i\n x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2,\n random_state=random_state,\n stratify=y)\n network = CNN(num_classes=len(y[0]), sample=x_train[0], network_type=network_type)\n network.train(x_train, y_train, model_weights)\n test_accuracy[i] = network.test(x_test, y_test, model_weights)\n # network.plot_predictions(x_test, y_test, model_weights)\n running_time[i] = time.time() - start\n print('Running time: ', running_time[i])\n print('Average test accuracy over ', trials, ' trials: ', np.mean(test_accuracy))\n print('Average running time over ', trials, ' trials: ', np.mean(running_time))", "title": "" }, { "docid": "ea865ca33bdd0440f2a0c18d52821199", "score": "0.5603607", "text": "def __init__(self, n_input, n_output, hidden_layer_size, reg):\n self.reg = reg\n self.fc_layer_1 = FullyConnectedLayer(n_input, hidden_layer_size)\n self.relu_layer_1 = ReLULayer()\n self.fc_layer_2 = FullyConnectedLayer(hidden_layer_size, n_output)", "title": "" }, { "docid": "458ad0e335699a3cde5d488361a032e7", "score": "0.5595658", "text": "def create_network():\n l_rate = .0001\n model = Sequential()\n print(sequence_length)\n model.add(Dense(100, input_shape=(sequence_length,), kernel_initializer='random_normal', activation='relu'))\n model.add(Dense(60, kernel_initializer='random_normal', activation='relu'))\n model.add(Dropout(.5))\n model.add(Dense(60, kernel_initializer='random_normal', activation='relu'))\n # 
model.add(BatchNorm())\n model.add(Dense(45, kernel_initializer='random_normal', activation='relu'))\n model.add(Dropout(.5))\n model.add(Dense(20, kernel_initializer='random_normal', activation='relu'))\n model.add(Dense(2, kernel_initializer='random_normal', activation='softmax'))\n adam = optimizers.Adam(lr=l_rate, beta_1=0.9, beta_2=0.999, amsgrad=False)\n model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['categorical_accuracy'])\n # model.load_weights('classification-improvement-10.hdf5')\n\n return model", "title": "" }, { "docid": "d0dc2f378b7d9487912604f209a35250", "score": "0.55949515", "text": "def main():\n\n # get the data\n trainingSet = makeData(test=True)\n\n composition = [inputSize, 30,\n outputSize] # the network composition\n md.Network.eta = random.uniform(0.000001, 0.1)\n print(\"eta: \" + str(md.Network.eta))\n md.Network.alpha = 0.1\n\n nn = md.Network(composition)\n\n # train the network\n test(trainingSet[\"tData\"], trainingSet[\"tGoal\"], nn)\n train(trainingSet[\"data\"], trainingSet[\"goal\"], nn)\n test(trainingSet[\"tData\"], trainingSet[\"tGoal\"], nn)", "title": "" }, { "docid": "8dd8f1baf95f317495f4500f03555b84", "score": "0.5587424", "text": "def inference(self, g, x, device, args):\n\t\t# During inference with sampling, multi-layer blocks are very inefficient because\n\t\t# lots of computations in the first few layers are repeated.\n\t\t# Therefore, we compute the representation of all nodes layer by layer. The nodes\n\t\t# on each layer are of course splitted in batches.\n\t\t# TODO: can we standardize this?\n\t\tfor l, layer in enumerate(self.layers):\n\t\t\ty = th.zeros(g.num_nodes(), self.n_hidden if l!=len(self.layers) - 1 else self.n_classes)\n\n\t\t\tsampler = dgl.dataloading.MultiLayerFullNeighborSampler(1)\n\t\t\tdataloader = dgl.dataloading.NodeDataLoader(\n\t\t\t\tg,\n\t\t\t\tth.arange(g.num_nodes(),dtype=th.long),\n\t\t\t\tsampler,\n\t\t\t\tbatch_size=args.batch_size,\n\t\t\t\tshuffle=True,\n\t\t\t\tdrop_last=False,\n\t\t\t\tnum_workers=args.num_workers)\n\n\t\t\t# print('x')\n\t\t\t# print(type(x))\n\t\t\t# print(len(x))\n\t\t\t# print(x[0].size())\n\n\t\t\tfor input_nodes, output_nodes, blocks in tqdm.tqdm(dataloader):\n\t\t\t\tblock = blocks[0]\n\t\t\t\t# print('input_nodes')\n\t\t\t\t# print(type(input_nodes))\n\t\t\t\t# input_nodes=input_nodes.tolist()\n\t\t\t\t# print(type(input_nodes))\n\t\t\t\t# print(input_nodes)\n\n\n\t\t\t\tblock = block.int().to(device)\n\t\t\t\th = x[input_nodes].to(device)\n\t\t\t\th = layer(block, h)\n\t\t\t\tif l!=len(self.layers) - 1:\n\t\t\t\t\th = self.activation(h)\n\t\t\t\t\th = self.dropout(h)\n\n\t\t\t\ty[output_nodes] = h.cpu()\n\n\t\t\tx = y\n\t\treturn y", "title": "" }, { "docid": "68aed89305a1a0b0dfd1a6ccb0d9bb8a", "score": "0.5586728", "text": "def BuildInferenceNetwork(x, l2_reg_val, is_training):\n global EMBEDDING_VAR\n #EMBEDDING_VAR = None\n # ** TASK 4: Move and set appropriately.\n \n #<tf.Variable 'fully_connected/weights:0' shape=(1000, 40) dtype=float32_ref> \n ## Build layers starting from input.\n net = x\n #t_Var1 = tf.trainable_variables()\n #for v in t_Var1:\n \n l2_reg = tf.contrib.layers.l2_regularizer(l2_reg_val)\n\n ## First Layer\n print(\"After first Layer\")\n net = FirstLayer(net, l2_reg_val, is_training)\n t_Var2 = tf.trainable_variables()\n for v in t_Var2:\n if v.name == \"fully_connected/weights:0\":\n EMBEDDING_VAR = tf.Variable(tf.zeros(shape=v.shape))\n EMBEDDING_VAR = v\n \n \n ## Second Layer.\n net = tf.contrib.layers.fully_connected(\n net, 
10, activation_fn=None, weights_regularizer=l2_reg)\n net = tf.contrib.layers.dropout(net, keep_prob=0.5, is_training=is_training)\n net = tf.nn.relu(net)\n\n net = tf.contrib.layers.fully_connected(\n net, 2, activation_fn=None, weights_regularizer=l2_reg)\n\n return net", "title": "" }, { "docid": "ef2202bf2bb87e8b56b73d044b859e19", "score": "0.5579544", "text": "def __init__(self, layers, activation_functions = None, cost_function = None):\n\n # Network layer data - number of layers, number of inputs, number of outputs\n self.N = layers[0]\n self.M = layers[-1]\n self.numLayers = len(layers) \n\n\n # Initialize all the weights to zero. For simplicity, keep a \"dummy\" layer of \n # weights (which would be weights leading into the input layer...)\n self.weights = [np.zeros((0,0))]\n self.biases = [np.zeros((0,0))]\n\n for i in range(1,len(layers)):\n self.weights.append(np.zeros( (layers[i], layers[i-1]) ))\n self.biases.append(np.zeros( (layers[i], 1) ))\n\n\n # Store the activation and cost functions \n # Default is sigmoid activation functions, and squared error cost function\n self.activation_functions = [None]*self.numLayers\n self.gradient_functions = [None]*self.numLayers\n\n if activation_functions == None:\n for i in range(1, self.numLayers):\n self.activation_functions[i] = SIGMOID[0]\n self.gradient_functions[i] = SIGMOID[1]\n else:\n for i in range(1,self.numLayers):\n self.activation_functions[i] = activation_functions[i][0]\n self.gradient_functions[i] = activation_functions[i][1]\n\n if cost_function == None:\n self.cost_function = cost_squared_error\n self.cost_gradient = gradient_squared_error\n else:\n self.cost_function = cost_function[0]\n self.cost_gradient = cost_function[1]\n \n \n # Weight symmetry *MUST* be broken. User can set weights later if desired.\n self.randomize_weights()", "title": "" }, { "docid": "9e2ec71d34a390749d30a82a026833aa", "score": "0.55759704", "text": "def simple(input_size, output_size):\n # the layer sizes\n hidden_layer_1_size = 128\n hidden_layer_2_size = 128\n \n # create variables for each layers weights and biases\n layer1_weights = tf.Variable(tf.truncated_normal([input_size, hidden_layer_1_size]))\n layer1_biases = tf.Variable(tf.truncated_normal([hidden_layer_1_size]))\n layer2_weights = tf.Variable(tf.truncated_normal([hidden_layer_1_size, hidden_layer_2_size]))\n layer2_biases = tf.Variable(tf.truncated_normal([hidden_layer_2_size]))\n output_weights = tf.Variable(tf.truncated_normal([hidden_layer_2_size, output_size]))\n output_biases = tf.Variable(tf.truncated_normal([output_size]))\n \n # the placeholder for the input and output data\n x = tf.placeholder(tf.float32, shape=(None,input_size), name=\"X\")\n y = tf.placeholder(tf.float32, shape=(None,output_size), name=\"Y\")\n \n # construct the layers\n # the input is multiplied by layer1_weights, and then layer1_biases are added\n layer1 = tf.add(tf.matmul(x, layer1_weights), layer1_biases)\n # layer 1 non-linearity\n layer1 = tf.nn.relu(layer1)\n\n # layer 2 takes output of layer1\n layer2 = tf.add(tf.matmul(layer1, layer2_weights), layer2_biases)\n layer2 = tf.nn.relu(layer2)\n \n # output layer takes output of layer2\n output = tf.add(tf.matmul(layer2, output_weights), output_biases)\n return x, y, output", "title": "" }, { "docid": "1b2c053ecee0086f088378a215ceacc8", "score": "0.5574372", "text": "def forward(self, x):\n\n \"\"\" YOUR CODE HERE!\n Complete this function, based on the network architecture\n that you have chosen in Net.__init__\n \"\"\"\n # Hidden layer\n x = 
F.relu(self.fc1(x))\n\n # Output layer\n actions_value = self.out(x)\n return actions_value", "title": "" }, { "docid": "8d778ab8d0b94bba4de10ab05146d2e0", "score": "0.55703086", "text": "def print(self):\n G = nx.Graph()\n self.nodes = []\n\n # connecting the input to the first deep layer\n nodes = []\n for idx, i in enumerate(self.inputs.neurons):\n for j in self.layers_deep[0].neurons:\n G.add_edge(i, j, weight=round(j.prev_weight[idx], 5))\n nodes.append(i)\n self.nodes.append(nodes)\n\n # connecting the deep layers together\n for i in range(self.n_layers-1):\n for idx, neuron1 in enumerate(self.layers_deep[i].neurons):\n for neuron2 in self.layers_deep[i+1].neurons:\n G.add_edge(neuron1, neuron2,\n weight=round(neuron2.prev_weight[idx], 5))\n for layer in self.layers_deep:\n nodes = []\n for neuron in layer.neurons:\n nodes.append(neuron)\n self.nodes.append(nodes)\n\n # connecting the final deep layer to the output layer\n nodes = []\n for idx, neuron in enumerate(self.layers_deep[self.n_layers-1].neurons):\n for outputNeuron in self.outputs.neurons:\n G.add_edge(neuron, outputNeuron,\n weight=round(outputNeuron.prev_weight[idx], 5))\n for outputNeuron in self.outputs.neurons:\n nodes.append(outputNeuron)\n self.nodes.append(nodes)\n\n # stores the graph inside the object\n self.graph = G\n\n # defines each layer in the network by a priority number (count). This is to maintain the order of the layers when displaying the graph.\n nx.set_node_attributes(self.graph, 0, \"layer\")\n count = 0\n for layer in self.nodes:\n count += 1\n for neuron in layer:\n self.graph.nodes[neuron][\"layer\"] = count\n\n pos = nx.multipartite_layout(self.graph, subset_key=\"layer\")\n nx.draw(self.graph, pos, with_labels=False)\n labels = nx.get_edge_attributes(self.graph, \"weight\")\n nx.draw_networkx_edge_labels(self.graph, pos, labels)\n\n # The following two lines display the graph for only a moment to create an animation of the visual changing over time.\n # plt.pause(0.00001)\n # plt.ion()\n\n # plt.figure()\n # plt.show()", "title": "" }, { "docid": "9250381049acabe797d452fc8fae27d0", "score": "0.55691034", "text": "def logistic_regression(input_dim, output_dim):\r\n tf.reset_default_graph()\r\n\r\n x = tf.placeholder(tf.float32, [None, input_dim])\r\n y = tf.placeholder(tf.float32, [None, output_dim])\r\n learning_r = tf.placeholder(tf.float32, 1)[0]\r\n drop_out = tf.placeholder(tf.float32, 1)[0]\r\n\r\n w_init = tf.contrib.layers.xavier_initializer()\r\n b_init = tf.initializers.truncated_normal(mean=0.1, stddev=0.025)\r\n w = tf.get_variable('weights1', shape=[input_dim, output_dim], initializer=w_init)\r\n b = tf.get_variable('bias1', shape=[output_dim], initializer=b_init)\r\n\r\n logits = tf.matmul(tf.nn.dropout(x, keep_prob=drop_out), w) + b\r\n y_ = tf.nn.softmax(logits)\r\n\r\n [print(var) for var in tf.trainable_variables()]\r\n return x, y, logits, y_, learning_r, drop_out", "title": "" }, { "docid": "30d2546a424967a37664ca5e84799905", "score": "0.556901", "text": "def train(self, inputs, outputs):\n pass", "title": "" }, { "docid": "fbdf629d5e056ef2625c4bcc75e5d869", "score": "0.5566767", "text": "def __init__(self, meta):\n \n super(TailNetwork, self).__init__()\n \n self.net = Sequential(\n [Dense(units, \n activation=meta['activation'], \n kernel_initializer=meta['initializer'])\n for units in meta['units']] +\n [Dense(1, kernel_initializer=meta['initializer'], activation='relu')]\n )", "title": "" }, { "docid": "a60b807a01b49f93800b9cbce40a312e", "score": "0.55589193", "text": 
"def one_forward_regression(data,target,model,X):\n features = data.drop(target,axis=1)\n target = data[target]\n \n model.fit(features,target)\n result = model.predict(X)\n\n return result", "title": "" }, { "docid": "bea7f8f5228744a7d67c6734fc37052c", "score": "0.5558364", "text": "def main():\n weights = np.ndarray(conf.LAYERS_NUM - 1, dtype=np.matrix)\n bias = np.ndarray(conf.LAYERS_NUM - 1, dtype=np.ndarray)\n inp = input(\"Continue training from saved parameters? (y/n)\\n\").strip()\n if inp == \"y\":\n weights[0] = np.load(\"neural_network/weights_1.npy\")\n weights[1] = np.load(\"neural_network/weights_2.npy\")\n bias[0] = np.load(\"neural_network/bias_1.npy\")\n bias[1] = np.load(\"neural_network/bias_2.npy\")\n else:\n rand_init_weights(weights, bias)\n weights, bias = gradient_descent(weights, bias)\n inp = input(\"Save trained parameters? (y/n)\\n\").strip()\n if inp == \"y\":\n np.save(\"neural_network/weights_1.npy\", weights[0])\n np.save(\"neural_network/bias_1.npy\", bias[0])\n np.save(\"neural_network/weights_2.npy\", weights[1])\n np.save(\"neural_network/bias_2.npy\", bias[1])", "title": "" }, { "docid": "0deffc3cfa9ae62b4058ff3ee7ed1e13", "score": "0.55567425", "text": "def training2(inputs,training_outputs,activation_function):\r\n\r\n #weights = np.random.random((1, 1))*100 # on randomise le poid de chaques synapses\r\n weights = np.random.uniform(-2.0,2.0, size=(1,1))\r\n\r\n \r\n if (activation_function == 0): #NUL\r\n \r\n def function(x):\r\n \r\n return 0\r\n \r\n if (activation_function == 1): # Sigmoid\r\n \r\n def function(x):\r\n \r\n return 1 /(1 + np.exp(-x))\r\n \r\n if (activation_function == 2): # Tangent hyperbolic\r\n \r\n def function(x):\r\n \r\n return np.tanh(x)\r\n \r\n if (activation_function == 3): # Cosinus\r\n \r\n def function(x):\r\n \r\n return np.cos(x)\r\n \r\n if (activation_function == 4): # Gaussian\r\n \r\n def function(x):\r\n \r\n return np.exp(-(x**2)/2)\r\n \r\n \r\n error_total = 0\r\n\r\n\r\n for iteration in range(1):\r\n \r\n input_layer = inputs\r\n \r\n outputs = function(np.dot(input_layer,weights)) # maintenant on va faire la somme des inputs * weight \r\n \r\n error = training_outputs - outputs\r\n \r\n for i in range(len(error)):\r\n \r\n error_total += error[i]**2\r\n error_total=error_total/len(inputs)\r\n \r\n return weights[0][0], error_total[0]\r\n #print (synaptic_weights)\r", "title": "" }, { "docid": "8a9b89c800b9e699f25ee659344f2bb3", "score": "0.55555725", "text": "def nn_4_layer(hl_neuron=10, decay=1e-6):\n learning_rate = 0.01\n\n # theano expressions\n x_mat = T.matrix() # features\n y_mat = T.matrix() # output\n\n # weights and biases from input to hidden layer 1\n weight_1, bias_1 = init_weights(36, hl_neuron), init_bias(hl_neuron)\n # weights and biases from hidden layer 1 to hidden layer 2\n weight_2, bias_2 = init_weights(hl_neuron, hl_neuron), init_bias(hl_neuron)\n # weights and biases from hidden layer 2 to output layer\n weight_3, bias_3 = init_weights(hl_neuron, 6, logistic=False), init_bias(6)\n\n hidden_1 = T.nnet.sigmoid(T.dot(x_mat, weight_1) + bias_1)\n hidden_2 = T.nnet.sigmoid(T.dot(hidden_1, weight_2) + bias_2)\n output_1 = T.nnet.softmax(T.dot(hidden_2, weight_3) + bias_3)\n\n y_x = T.argmax(output_1, axis=1)\n\n cost = T.mean(T.nnet.categorical_crossentropy(output_1, y_mat)) + \\\n decay * (T.sum(T.sqr(weight_1) + T.sum(T.sqr(weight_2) +\n T.sum(T.sqr(weight_3)))))\n params = [weight_1, bias_1, weight_2, bias_2, weight_3, bias_3]\n updates = sgd(cost, params, learning_rate)\n\n # 
compile\n train = theano.function(\n inputs=[x_mat, y_mat], outputs=cost, updates=updates, allow_input_downcast=True)\n predict = theano.function(\n inputs=[x_mat], outputs=y_x, allow_input_downcast=True)\n\n return train, predict", "title": "" }, { "docid": "a860a1b4ba4019621b39d7826f280841", "score": "0.55511045", "text": "def model(X, Y, layers_dims, optimizer, learning_rate = 0.0007, mini_batch_size = 64, beta = 0.9, beta1 = 0.9, beta2 = 0.999, epsilon = 1e-8, num_epochs = 10000, print_cost = True):\n\n L = len(layers_dims) # Number of layers in the neural networks\n costs = []\n t = 0\n seed = 10\n\n # Initialize parameters\n parameters = initialize_parameters(layers_dims)\n\n # Initialize the optimizer\n if optimizer == \"gd\":\n pass\n elif optimizer == \"momentum\":\n v = initialize_velocity(parameters)\n elif optimizer == \"adam\":\n v, s = initialize_adam(parameters)\n\n # Optimization loop\n for i in range(num_epochs):\n seed = seed + 1 # Random for every epoch\n minibatches = random_mini_batches(X, Y, mini_batch_size, seed)\n\n for minibatch in minibatches:\n (minibatch_X, minibatch_Y) = minibatch # Select a minibatch\n a3, caches = forward_propagation(minibatch_X, parameters) # Forward propagation\n cost = compute_cost(a3, minibatch_Y) # Compute cost\n grads = backward_propagation(minibatch_X, minibatch_Y, caches) # Backward propagation\n\n # Update parameters\n if optimizer == \"gd\":\n parameters = update_parameters_with_gd(parameters, grads, learning_rate)\n elif optimizer == \"momentum\":\n parameters, v = update_parameters_with_momentum(parameters, grads, v, beta, learning_rate)\n elif optimizer == \"adam\":\n t = t + 1 # Adam counter\n parameters, v, s = update_parameters_with_adam(parameters, grads, v, s, t, learning_rate, beta1, beta2, epsilon)\n\n # Print cost every 1000 epoch\n if i % 100 == 0:\n costs.append(cost)\n if print_cost and i % 1000 == 0:\n print(\"Cost after epoch %i: %f\" %(i, cost))\n\n # Plot the cost\n plt.figure(2)\n plt.plot(costs)\n plt.xlabel('epochs (per 100)')\n plt.ylabel('cost')\n plt.title('Learning rate = ' + str(learning_rate))\n\n return parameters", "title": "" }, { "docid": "e0e9a1067bba6342932c298e806ec892", "score": "0.5547932", "text": "def constructNetwork(self):\n input_layer = Layer(self.ni, self.ls, sigmoid)\n self.layers = [input_layer]\n for i in range(0, self.nhl):\n hidden_layer = Layer(self.ls, self.ls, sigmoid)\n self.layers.append(hidden_layer)\n output_layer = Layer(self.ls, self.no, softmax)\n self.layers.append(output_layer)", "title": "" }, { "docid": "f7aa248dd90ac1cb0f8fd9c10154c7a1", "score": "0.5547431", "text": "def forward(self):\n self.out = self.network(self.batch_x)", "title": "" }, { "docid": "ffe93954b8243d60f167c1088f9ee4eb", "score": "0.55402225", "text": "def test(self):\n with torch.no_grad():\n # Disable the Backward\n self.set_requires_grad([self.network], False)\n # Set network in eval mode\n self.network.eval()\n self.forward()", "title": "" }, { "docid": "f47567ed1de72e212e6432b97777e1b8", "score": "0.5533958", "text": "def __init__(self, n_in, n_out):\n\n batch_size=32\n state_length=n_in\n action_length=n_out\n # data types for model\n State = T.dmatrix(\"State\")\n State.tag.test_value = np.random.rand(batch_size,state_length)\n ResultState = T.dmatrix(\"ResultState\")\n ResultState.tag.test_value = np.random.rand(batch_size,state_length)\n Reward = T.col(\"Reward\")\n Reward.tag.test_value = np.random.rand(batch_size,1)\n Action = T.dmatrix(\"Action\")\n Action.tag.test_value = 
np.random.rand(batch_size, action_length)\n # create a small convolutional neural network\n inputLayerActA = lasagne.layers.InputLayer((None, state_length), State)\n l_hid2ActA = lasagne.layers.DenseLayer(\n inputLayerActA, num_units=64,\n nonlinearity=lasagne.nonlinearities.leaky_rectify)\n \n l_hid3ActA = lasagne.layers.DenseLayer(\n l_hid2ActA, num_units=32,\n nonlinearity=lasagne.nonlinearities.leaky_rectify)\n \n self._l_outActA = lasagne.layers.DenseLayer(\n l_hid3ActA, num_units=n_out,\n nonlinearity=lasagne.nonlinearities.linear)\n \n inputLayerA = lasagne.layers.InputLayer((None, state_length), State)\n\n concatLayer = lasagne.layers.ConcatLayer([inputLayerA, self._l_outActA])\n l_hid2A = lasagne.layers.DenseLayer(\n concatLayer, num_units=64,\n nonlinearity=lasagne.nonlinearities.leaky_rectify)\n \n l_hid3A = lasagne.layers.DenseLayer(\n l_hid2A, num_units=32,\n nonlinearity=lasagne.nonlinearities.leaky_rectify)\n \n self._l_outA = lasagne.layers.DenseLayer(\n l_hid3A, num_units=1,\n nonlinearity=lasagne.nonlinearities.linear)\n # self._b_o = init_b_weights((n_out,))\n\n # self.updateTargetModel()\n inputLayerActB = lasagne.layers.InputLayer((None, state_length), State)\n l_hid2ActB = lasagne.layers.DenseLayer(\n inputLayerActB, num_units=64,\n nonlinearity=lasagne.nonlinearities.leaky_rectify)\n \n l_hid3ActB = lasagne.layers.DenseLayer(\n l_hid2ActB, num_units=32,\n nonlinearity=lasagne.nonlinearities.leaky_rectify)\n \n self._l_outActB = lasagne.layers.DenseLayer(\n l_hid3ActB, num_units=n_out,\n nonlinearity=lasagne.nonlinearities.linear)\n\n inputLayerB = lasagne.layers.InputLayer((None, state_length), State)\n concatLayerB = lasagne.layers.ConcatLayer([inputLayerB, self._l_outActB])\n l_hid2B = lasagne.layers.DenseLayer(\n concatLayerB, num_units=64,\n nonlinearity=lasagne.nonlinearities.leaky_rectify)\n \n l_hid3B = lasagne.layers.DenseLayer(\n l_hid2B, num_units=32,\n nonlinearity=lasagne.nonlinearities.leaky_rectify)\n \n self._l_outB = lasagne.layers.DenseLayer(\n l_hid3B, num_units=1,\n nonlinearity=lasagne.nonlinearities.linear)\n \n # print (\"Initial W \" + str(self._w_o.get_value()) )\n \n self._learning_rate = 0.001\n self._discount_factor= 0.8\n self._rho = 0.95\n self._rms_epsilon = 0.001\n \n self._weight_update_steps=5\n self._updates=0\n \n self._states_shared = theano.shared(\n np.zeros((batch_size, state_length),\n dtype=theano.config.floatX))\n\n self._next_states_shared = theano.shared(\n np.zeros((batch_size, state_length),\n dtype=theano.config.floatX))\n\n self._rewards_shared = theano.shared(\n np.zeros((batch_size, 1), dtype=theano.config.floatX),\n broadcastable=(False, True))\n\n self._actions_shared = theano.shared(\n np.zeros((batch_size, n_out), dtype=theano.config.floatX),\n )\n \n self._q_valsActA = lasagne.layers.get_output(self._l_outActA, State)\n self._q_valsActB = lasagne.layers.get_output(self._l_outActB, ResultState)\n self._q_valsActB2 = lasagne.layers.get_output(self._l_outActB, State)\n inputs_ = {\n State: self._states_shared,\n Action: self._q_valsActA,\n }\n self._q_valsA = lasagne.layers.get_output(self._l_outA, inputs_)\n inputs_ = {\n ResultState: self._next_states_shared,\n Action: self._q_valsActB,\n }\n self._q_valsB = lasagne.layers.get_output(self._l_outB, inputs_)\n \n \n self._q_func = self._q_valsA\n self._q_funcAct = self._q_valsActA\n self._q_funcB = self._q_valsB\n self._q_funcActB = self._q_valsActB\n \n # self._q_funcAct = theano.function(inputs=[State], outputs=self._q_valsActA, allow_input_downcast=True)\n \n 
self._target = (Reward + self._discount_factor * self._q_valsB)\n self._diff = self._target - self._q_valsA\n self._loss = 0.5 * self._diff ** 2 + (1e-4 * lasagne.regularization.regularize_network_params(\n self._l_outA, lasagne.regularization.l2))\n self._loss = T.mean(self._loss)\n \n self._params = lasagne.layers.helper.get_all_params(self._l_outA)[-6:]\n self._actionParams = lasagne.layers.helper.get_all_params(self._l_outActA)\n self._givens_ = {\n State: self._states_shared,\n # ResultState: self._next_states_shared,\n Reward: self._rewards_shared,\n # Action: self._actions_shared,\n }\n self._actGivens = {\n State: self._states_shared,\n # ResultState: self._next_states_shared,\n # Reward: self._rewards_shared,\n # Action: self._actions_shared,\n }\n \n # SGD update\n #updates_ = lasagne.updates.rmsprop(loss, params, self._learning_rate, self._rho,\n # self._rms_epsilon)\n # TD update\n # minimize Value function error\n self._updates_ = lasagne.updates.rmsprop(T.mean(self._q_func) + (1e-4 * lasagne.regularization.regularize_network_params(\n self._l_outA, lasagne.regularization.l2)), self._params, \n self._learning_rate * -T.mean(self._diff), self._rho, self._rms_epsilon)\n \n \n # actDiff1 = (Action - self._q_valsActB) #TODO is this correct?\n # actDiff = (actDiff1 - (Action - self._q_valsActA))\n # actDiff = ((Action - self._q_valsActB2)) # Target network does not work well here?\n #self._actDiff = ((Action - self._q_valsActA)) # Target network does not work well here?\n #self._actLoss = 0.5 * self._actDiff ** 2 + (1e-4 * lasagne.regularization.regularize_network_params( self._l_outActA, lasagne.regularization.l2))\n #self._actLoss = T.mean(self._actLoss)\n \n # actionUpdates = lasagne.updates.rmsprop(actLoss + \n # (1e-4 * lasagne.regularization.regularize_network_params(\n # self._l_outActA, lasagne.regularization.l2)), actionParams, \n # self._learning_rate * 0.01 * (-actLoss), self._rho, self._rms_epsilon)\n \n # Maximize wrt q function\n \n # theano.gradient.grad_clip(x, lower_bound, upper_bound) # // TODO\n actionUpdates = lasagne.updates.rmsprop(T.mean(self._q_func) + \n (1e-4 * lasagne.regularization.regularize_network_params(\n self._l_outActA, lasagne.regularization.l2)), self._actionParams, \n self._learning_rate * 0.1, self._rho, self._rms_epsilon)\n \n \n \n self._train = theano.function([], [self._loss, self._q_func], updates=self._updates_, givens=self._givens_)\n # self._trainActor = theano.function([], [actLoss, self._q_valsActA], updates=actionUpdates, givens=actGivens)\n self._trainActor = theano.function([], [self._q_func], updates=actionUpdates, givens=self._actGivens)\n self._q_val = theano.function([], self._q_valsA,\n givens={State: self._states_shared})\n self._q_action = theano.function([], self._q_valsActA,\n givens={State: self._states_shared})\n inputs_ = [\n State, \n Reward, \n # ResultState\n ]\n self._bellman_error = theano.function(inputs=inputs_, outputs=self._diff, allow_input_downcast=True)\n # self._diffs = theano.function(input=[State])", "title": "" }, { "docid": "7b7c63427a67e25971410bef48437040", "score": "0.55306226", "text": "def train(self, inputs: list[int], targets: list[int]) -> None:\n # Feedforward the inputs\n input_matrix = Matrix.fromList(inputs)\n\n # Generating the hidden layer \n hidden_matrix = Matrix.dot(self.weights_input_hidden, input_matrix)\n hidden_matrix = Matrix.add_matrix(hidden_matrix, self.bias_hidden)\n hidden_matrix.map(sigmoid)\n\n # Generating the outputs\n output_matrix = 
Matrix.dot(self.weights_hidden_output, hidden_matrix)\n output_matrix = Matrix.add_matrix(output_matrix, self.bias_output)\n output_matrix.map(sigmoid)\n \n # Calculate the output errors\n targets_matrix = Matrix.fromList(targets) \n output_errors = Matrix.subtract_matrix(targets_matrix, output_matrix)\n\n # Calculate output gradient\n output_matrix.map(derivative_sigmoid)\n output_matrix = Matrix.multiply_matrix(output_matrix, output_errors)\n output_matrix.scale(self.learning_rate)\n \n # Calculate hidden -> outputs deltas \n hidden_transpose = Matrix.transpose(hidden_matrix)\n weights_hidden_output_deltas = Matrix.dot(output_matrix, hidden_transpose)\n\n # Adjust the weights and biases\n self.weights_hidden_output = Matrix.add_matrix(self.weights_hidden_output, weights_hidden_output_deltas)\n self.bias_output = Matrix.add_matrix(self.bias_output, output_matrix)\n\n # Calculate the hidden error\n weights_hidden_output_transpose = Matrix.transpose(self.weights_hidden_output)\n hidden_errors = Matrix.dot(weights_hidden_output_transpose, output_errors)\n \n # Calculate hidden gradient\n hidden_matrix.map(derivative_sigmoid)\n hidden_matrix = Matrix.multiply_matrix(hidden_matrix, hidden_errors)\n hidden_matrix.scale(self.learning_rate)\n\n # Calculate inputs -> hiddens deltas \n inputs_transpose = Matrix.transpose(input_matrix)\n weights_input_hidden_deltas = Matrix.dot(hidden_matrix, inputs_transpose)\n\n # Adjust the weights and biases\n self.weights_input_hidden = Matrix.add_matrix(self.weights_input_hidden, weights_input_hidden_deltas)\n self.bias_hidden = Matrix.add_matrix(self.bias_hidden, hidden_matrix)", "title": "" }, { "docid": "000a715333863d38880c8ffe6142ebd0", "score": "0.5529184", "text": "def test_matmul_00():\n\n shape = (3, 4)\n w_shape = (4, 5)\n out_shape = (3, 5)\n\n class MatMulTester(Base):\n def create_onnx(self) -> onnx.ModelProto:\n node = make_node(\"MatMul\", inputs=[\"x\", \"W\"], outputs=[\"y\"])\n inputs = [info(\"x\", TensorProto.FLOAT, shape)]\n outputs = [info(\"y\", TensorProto.FLOAT, out_shape)]\n\n init_W = from_array(np.random.rand(*w_shape).astype(np.float32), \"W\")\n\n graph = make_graph([node], \"add_graph\", inputs, outputs, initializer=[init_W])\n model = make_model(graph)\n return model\n\n x = np.random.rand(*shape).astype(np.float32)\n\n outputs = [\"y\"]\n MatMulTester({\"x\": x}, outputs).run()", "title": "" }, { "docid": "cdd3e2065c012b258ae0ab7acdb0f0a8", "score": "0.5528658", "text": "def nn_2_5_1(X, y, l1_activation_f, l1_activation_f_deriv):\n # topology of our Neural Network: 2-5-1\n # 2: input layer\n # 5: layer 1\n # 1: layer 2\n input_size = 2\n layer1 = 5\n layer2 = 1\n # initialize parameters\n W1 = 2 * np.random.random((input_size, layer1)) - 1 # weights matrix of the first layer\n b1 = 2 * np.random.random((1, layer1)) - 1 # bias vector of the first layer\n W2 = 2 * np.random.random((layer1, layer2)) - 1 # weights matrix of the second layer\n b2 = 2 * np.random.random((1, layer2)) - 1 # bias vector of the second layer\n\n # lr: learning rate for parameters updating\n lr = 0.1\n for j in range(20000):\n # FEED FORWARD PROPAGATION\n z1 = np.dot(X, W1) + b1 # first layer perceptrons\n a1 = l1_activation_f(z1) # first layer activation\n z2 = np.dot(a1, W2) + b2 # second layer perceptrons\n a2 = sigmoid(z2) # second layer activation\n\n # BACKWARD PROPAGATION\n # C = 1/2 * (a2-y)^2 <-- loss function\n # ∂C/∂a2 = a2 - y <-- simple calculus exercise\n # partial derivative of sigmoid = a2*(1-a2)\n # other partial derivatives are described 
above\n a2_delta = (a2 - y) * sigmoid_derivative(a2)\n a1_delta = a2_delta.dot(W2.T) * l1_activation_f_deriv(a1)\n\n # UPDATE PARAMETERS\n W1 = W1 - lr * X.T.dot(a1_delta)\n b1 = b1 - lr * a1_delta.sum(axis=0, keepdims=True)\n W2 = W2 - lr * a1.T.dot(a2_delta)\n b2 = b2 - lr * a2_delta.sum(axis=0, keepdims=True)\n\n def predict(X):\n \"\"\"\n feedforward\n input: X\n output: final layer output\n W1, b1: parameters of the first layer\n W2, b2: parameters of the second layer\n z: perceptron = ∑w*x + b\n a: activation of perceptron = sigmoid(z) or other for the first layer\n \"\"\"\n z1 = np.dot(X, W1) + b1 # first layer perceptrons\n a1 = l1_activation_f(z1) # first layer activation\n z2 = np.dot(a1, W2) + b2 # second layer perceptrons\n a2 = sigmoid(z2) # second layer activation\n return a2\n\n _, axes = plt.subplots(1, 2, figsize=(9, 4), sharey=True)\n\n axes[0].scatter(X[:, 0], X[:, 1], c=labels, cmap=plt.cm.Spectral)\n axes[0].set_xlim((-1.5, 2.5))\n axes[0].set_ylim((-1, 1.5))\n\n test_x1 = np.linspace(-1.5, 2.5, 20)\n test_x2 = np.linspace(-1, 1.5, 20)\n for x1 in test_x1:\n for x2 in test_x2:\n y = predict([[x1, x2]])\n color = 'blue' if y > 0.5 else 'red'\n axes[1].scatter(x1, x2, c=color)\n plt.title(\"2-5-1 with {} \".format(l1_activation_f.__name__))\n plt.show()", "title": "" }, { "docid": "e7240edccafdf2c25f43bb031e587131", "score": "0.5526551", "text": "def nn_2_5_5_1(X, y, l1_activation_f, l1_activation_f_deriv):\n # topology of our Neural Network: 2-5-5-1\n # 2: input layer\n # 5: layer 1\n # 5: layer 2\n # 1: layer 3\n input_size = 2\n layer1 = 5\n layer2 = 5\n layer3 = 1\n\n # initialize parameters\n W1 = 2 * np.random.random((input_size, layer1)) - 1 # weights matrix of the first layer\n b1 = 2 * np.random.random((1, layer1)) - 1 # bias vector of the first layer\n W2 = 2 * np.random.random((layer1, layer2)) - 1 # weights matrix of the second layer\n b2 = 2 * np.random.random((1, layer2)) - 1 # bias vector of the second layer\n W3 = 2 * np.random.random((layer2, layer3)) - 1 # weights matrix of the third layer\n b3 = 2 * np.random.random((1, layer3)) - 1 # bias vector of the third layer\n\n # lr: learning rate for parameters updating\n lr = 0.1\n for j in range(20000):\n # FEED FORWARD PROPAGATION\n z1 = np.dot(X, W1) + b1 # first layer perceptrons\n a1 = l1_activation_f(z1) # first layer activation\n z2 = np.dot(a1, W2) + b2 # second layer perceptrons\n a2 = sigmoid(z2) # second layer activation\n z3 = np.dot(a2, W3) + b3 # third layer perceptrons\n a3 = sigmoid(z3) # third layer activation\n\n # BACKWARD PROPAGATION\n # C = 1/2 * (a2-y)^2 <-- loss function\n # ∂C/∂a2 = a2 - y <-- simple calculus exercise\n # partial derivative of sigmoid = a2*(1-a2)\n # other partial derivatives are described above\n a3_delta = (a3 - y) * sigmoid_derivative(a3)\n a2_delta = a3_delta.dot(W3.T) * sigmoid_derivative(a2)\n a1_delta = a2_delta.dot(W2.T) * l1_activation_f_deriv(a1)\n\n # UPDATE PARAMETERS\n W1 = W1 - lr * X.T.dot(a1_delta)\n b1 = b1 - lr * a1_delta.sum(axis=0, keepdims=True)\n W2 = W2 - lr * a1.T.dot(a2_delta)\n b2 = b2 - lr * a2_delta.sum(axis=0, keepdims=True)\n W3 = W3 - lr * a2.T.dot(a3_delta)\n b3 = b3 - lr * a3_delta.sum(axis=0, keepdims=True)\n\n def predict(X):\n \"\"\"\n feedforward\n input: X\n output: final layer output\n W1, b1: parameters of the first layer\n W2, b2: parameters of the second layer\n W3, b3: parameters of the third layer\n z: perceptron = ∑w*x + b\n a: activation of perceptron = sigmoid(z) or other for the first layer\n \"\"\"\n z1 = np.dot(X, W1) + 
b1 # first layer perceptrons\n a1 = l1_activation_f(z1) # first layer activation\n z2 = np.dot(a1, W2) + b2 # second layer perceptrons\n a2 = sigmoid(z2) # second layer activation\n z3 = np.dot(a2, W3) + b3 # third layer perceptrons\n a3 = sigmoid(z3) # third layer activation\n return a3\n\n _, axes = plt.subplots(1, 2, figsize=(9, 4), sharey=True)\n\n axes[0].scatter(X[:, 0], X[:, 1], c=labels, cmap=plt.cm.Spectral)\n axes[0].set_xlim((-1.5, 2.5))\n axes[0].set_ylim((-1, 1.5))\n\n test_x1 = np.linspace(-1.5, 2.5, 20)\n test_x2 = np.linspace(-1, 1.5, 20)\n for x1 in test_x1:\n for x2 in test_x2:\n y = predict([[x1, x2]])\n color = 'blue' if y > 0.5 else 'red'\n axes[1].scatter(x1, x2, c=color)\n plt.title(\"2-5-5-1 with {} \".format(l1_activation_f.__name__))\n plt.show()", "title": "" }, { "docid": "6b6c605c9480998526edbfcc1ec3353a", "score": "0.5524383", "text": "def __init__(self, n_input, n_output, hidden_layer_size, reg):\n self.reg = reg\n # TODO Create necessary layers\n self.hidden_layer = FullyConnectedLayer(n_input, hidden_layer_size)\n self.relu_hidden = ReLULayer()\n self.output_layer = FullyConnectedLayer(hidden_layer_size, n_output)", "title": "" }, { "docid": "0406d0c6a48cf822e6afcb9f605f832e", "score": "0.55190295", "text": "def do_a_propagation(self):\n alpha=1.0\n print(alpha)\n random.seed(self.seeding)\n #random.shuffle(self.nodes)\n print(self.y)\n #lst = [None]* len(self.nodes)\n print(len(self.nodes))\n i=0\n for node in tqdm(self.nodes):\n print(i)\n self.lst[node]=self.y.result[node-1]\n #print(lst[node])\n i=i+1\n print(i)\n print(self.lst)\n #degree calcuation\n for node in tqdm(self.nodes): \n self.degree[node]=self.graph.degree[node]\n self.labels[node]=node\n #node influence calculation \n #self.ni=[None]* len(self.nodes)\n ni=[None]* len(self.nodes)\n for node in tqdm(self.nodes):\n self.ni[node]=self.lst[node]\n # ni[node]=self.lst[node]\n neighbors = nx.neighbors(self.graph, node)\n for neighbor in tqdm(neighbors): \n self.ni[node]=self.ni[node]+((alpha*self.lst[neighbor])/self.degree[neighbor])\n #ni[node]=ni[node]+self.lst[neighbor]\n for node in tqdm(self.nodes):\n ni[node-1]=self.ni[node]\n print(len(set(self.labels.values())))\n \n print(ni[0])\n \n #print(ni)\n sort_value = np.sort(ni)\n #print(sort_value[3489])\n #print(sort_value[3488])\n #print(sort_value[3487])\n sort_index = np.argsort(ni)\n #print(sort_index[3489])\n #print(sort_index[3488])\n #print(sort_index[3487])\n \n \n #label influence calculation \n for node in tqdm(self.nodes):\n self.li[node]=(self.ni[node]/self.degree[node])\n print(self.li[node])\n #print(self.li[3489])\n #print(self.li[3488]) \n \n \n \n for node in tqdm(self.nodes): \n self.label_countarr[node]=1\n \n \n #best label to update\n for i in reversed(sort_index):\n print(i)\n j=i+1\n print(\"first\")\n neighbors = nx.neighbors(self.graph, j)\n print(self.li[j])\n pick = self.make_a_pick(j, neighbors)\n print(pick)\n self.labels[j] = pick\n print(self.labels[j])\n self.label_countarr[j]=self.label_countarr[j]+1\n current_label_count = len(set(self.labels.values())) \n print(len(set(self.labels.values())))\n if self.label_count == current_label_count:\n self.flag = False\n else:\n self.label_count = current_label_count", "title": "" }, { "docid": "9581fed8571c77ded9160f8d6cb9f248", "score": "0.5517217", "text": "def lolnn(x):\n\n # First fully connected layer\n with tf.name_scope('fc1'):\n W_fc1 = weight_variable([18, 50])\n b_fc1 = bias_variable([50])\n\n h_fc1 = tf.nn.relu(tf.matmul(x, W_fc1) + b_fc1)\n\n # Second fully 
connected layer\n with tf.name_scope('fc2'):\n W_fc2 = weight_variable([50,100])\n b_fc2 = bias_variable([100])\n\n h_fc2 = tf.nn.relu(tf.matmul(h_fc1, W_fc2) + b_fc2)\n\n # Third fully connected layer\n with tf.name_scope('fc3'):\n W_fc3 = weight_variable([100,20])\n b_fc3 = bias_variable([20])\n\n h_fc3 = tf.nn.relu(tf.matmul(h_fc2, W_fc3) + b_fc3)\n\n # Dropout - minimizes overfitting\n with tf.name_scope('dropout'):\n keep_prob = tf.placeholder(tf.float32)\n h_fc3_drop = tf.nn.dropout(h_fc3, keep_prob)\n\n # Third fully connected layer\n with tf.name_scope('fc4'):\n W_fc4 = weight_variable([20,2])\n b_fc4 = bias_variable([2])\n\n h_fc4 = tf.matmul(h_fc3_drop, W_fc4) + b_fc4\n\n return h_fc4, keep_prob", "title": "" }, { "docid": "ec9537d0bf974b5dcecffd95957a1406", "score": "0.5515118", "text": "def nnPredict(w1,w2,data):\n\n print(\"Predict start\")\n\n data = np.array(data)\n data.resize(len(data),785)\n data[:,784] = 1\n ffOutputOfFirstLayer = np.array(sigmoid(np.dot(data,np.transpose(w1))))\n ffOutputOfFirstLayer.resize(len(ffOutputOfFirstLayer),len(ffOutputOfFirstLayer[0])+1)\n ffOutputOfFirstLayer[:,len(ffOutputOfFirstLayer[0])-1] = 1\n ffOutputOfSecondLayer = np.array(sigmoid(np.dot(ffOutputOfFirstLayer,np.transpose(w2))))\n # data = data[:15]\n # doing a feed forward using the updated weights\n #ffOutputOfFirstLayer = []\n labels = []\n for output in ffOutputOfSecondLayer:\n #finding the max index of the second layer output\n maxVlaueIndex = np.argmax(output)\n #print(maxVlaue)\n #labels.append(getLabelArray(str(maxVlaueIndex)))\n labels.append(maxVlaueIndex)\n\n labels = np.array(labels)\n print(\"Predict end\")\n #print(labels)\n return labels", "title": "" }, { "docid": "e17a061ca810bdb67da74e6824692780", "score": "0.55147004", "text": "def convnet_model():\n\n #return model", "title": "" }, { "docid": "302fff52cd4ce62195ea72d4125b4261", "score": "0.55135024", "text": "def forward(self, x):\n output = self.network(x)\n return output", "title": "" }, { "docid": "b8a29494605c64d0de57f5d5f716f2ba", "score": "0.5512923", "text": "def linear_model_eval(X_train, y_train, X_test, y_test):\n clf = LogisticRegression(random_state=0, max_iter=10000, solver='lbfgs', C=1.0)\n clf.fit(X_train, y_train)\n print(\"Logistic Regression feature eval\")\n print(\"Train score:\", clf.score(X_train, y_train))\n print(\"Test score:\", clf.score(X_test, y_test))\n print(\"-------------------------------\")\n # consider KNN to slow on my computer, so remove it\n # neigh = KNeighborsClassifier(n_neighbors=10)\n # neigh.fit(X_train, y_train)\n # print(\"KNN feature eval\")\n # print(\"Train score:\", neigh.score(X_train, y_train))\n # print(\"Test score:\", neigh.score(X_test, y_test))", "title": "" }, { "docid": "38d1001de3747fc0134d2811494ebed9", "score": "0.5507859", "text": "def __call__(self, inputs: jnp.ndarray) -> jnp.ndarray:\n return self._network(inputs)", "title": "" }, { "docid": "430e486f95799657c40639809199e6f4", "score": "0.550263", "text": "def forward_prop(self):\n # Forward propagation to generate model output\n self.fwd_neurons = []\n\n def sigmoid_activation(neurons):\n # Activation for the input, basically 1/(1 + e^-x)\n return 1 / (1 + exp(-neurons))\n\n def fwd_input_hl(self):\n # Forward to Hidden Layer\n self.fwd_neurons.append(self.X.dot(self.weights[\"Input-HL\"]))\n self.fwd_neurons[-1].columns = self.weights[\"HL-HL\"][0].index\n self.fwd_neurons[-1] = (\n self.fwd_neurons[-1],\n sigmoid_activation(self.fwd_neurons[-1]),\n )\n\n def fwd_hl_hl(self):\n # Hidden layer to 
hidden layer\n for weight_1, weight_2 in zip(\n self.weights[\"HL-HL\"][:-1],\n self.weights[\"HL-HL\"][1:],\n ):\n self.fwd_neurons.append(self.fwd_neurons[-1][1].dot(weight_1))\n self.fwd_neurons[-1].columns = weight_2.index\n self.fwd_neurons[-1] = (\n self.fwd_neurons[-1],\n sigmoid_activation(self.fwd_neurons[-1]),\n )\n\n def fwd_hl_output(self):\n # Hidden layer to output\n self.fwd_neurons.append(self.fwd_neurons[-1][1].dot(\n self.weights[\"HL-HL\"][-1]))\n self.fwd_neurons[-1].columns = self.weights[\"HL-Output\"].index\n self.fwd_neurons[-1] = (\n self.fwd_neurons[-1],\n sigmoid_activation(self.fwd_neurons[-1]),\n )\n\n def fwd_output(self):\n # Finalize output\n self.fwd_neurons.append(self.fwd_neurons[-1][1].dot(\n self.weights[\"HL-Output\"]))\n self.fwd_neurons[-1].columns = [\"Output Neuron\"]\n self.fwd_neurons[-1] = (\n self.fwd_neurons[-1],\n sigmoid_activation(self.fwd_neurons[-1]),\n )\n\n def fwd_input_hl_single(self):\n # If single layer, the weight multiplications finish here\n self.fwd_neurons.append(self.X.dot(self.weights[\"Input-HL\"]))\n self.fwd_neurons[-1].columns = self.weights[\"HL-Output\"].index\n self.fwd_neurons[-1] = (\n self.fwd_neurons[-1],\n sigmoid_activation(self.fwd_neurons[-1]),\n )\n\n def fwd_hl_output_single(self):\n # If single layer, output is finalized here\n self.fwd_neurons.append(self.fwd_neurons[-1][1].dot(\n self.weights[\"HL-Output\"]))\n self.fwd_neurons[-1].columns = [\"Output Neuron\"]\n self.fwd_neurons[-1] = (\n self.fwd_neurons[-1],\n sigmoid_activation(self.fwd_neurons[-1]),\n )\n\n def fwd_check_single(self):\n # Check if single or multilayered and perform forward propagation\n if \"HL-HL\" in self.weights:\n fwd_input_hl(self)\n fwd_hl_hl(self)\n fwd_hl_output(self)\n fwd_output(self)\n else:\n fwd_input_hl_single(self)\n fwd_hl_output_single(self)\n\n fwd_check_single(self)", "title": "" }, { "docid": "64263574b964e571cb9c98603b260ca7", "score": "0.550111", "text": "def build_network(model_cfg,voxel_generator,target_assigner):\r\n # print(\"++++++++++++++++++++++++++++++++++++START TRAIN CONFIG++++++++++++++++++++++++++++++++++++++++++++++++\")\r\n pfn_num_filters = model_cfg.PILLAR_FEATURE_EXTRACTOR.num_filters\r\n grid_size = voxel_generator.grid_size\r\n pc_range= voxel_generator.point_cloud_range\r\n voxel_size = voxel_generator.voxel_size\r\n\r\n # dense_shape = [1] + grid_size[::-1].tolist() + [pfn_num_filters[-1]]\r\n dense_shape = [1] + grid_size[::-1].tolist() + [pfn_num_filters[-1]]\r\n # print('dense_shape,',dense_shape)\r\n num_input_features = model_cfg.NUM_POINT_FEATURES\r\n if model_cfg.WITHOUT_REFLECTIVITY:\r\n num_input_features = 3\r\n # loss config\r\n loss_norm_type = model_cfg.LOSS.loss_norm_type\r\n pos_cls_weight = model_cfg.LOSS.pos_class_weight\r\n neg_cls_weight = model_cfg.LOSS.neg_class_weight\r\n encode_rad_error_by_sin = model_cfg.ENCODE_RAD_ERROR_BY_SIN\r\n direction_loss_weight = model_cfg.LOSS.direction_loss_weight\r\n # print(\"++++++++++++++++++++++++++++++++++++START BULID LOSS++++++++++++++++++++++++++++++++++++++++++++++++\")\r\n losses = build_losses(model_cfg.LOSS)\r\n # print(\"++++++++++++++++++++++++++++++++++++OVER BULID LOSS++++++++++++++++++++++++++++++++++++++++++++++++\")\r\n cls_loss_ftor, loc_loss_ftor, cls_weight, loc_weight, _ = losses\r\n\r\n # print(\"++++++++++++++++++++++++++++++++++++START MODEL CFG++++++++++++++++++++++++++++++++++++++++++++++++\")\r\n model_cfg.update({\r\n 'pc_range' : pc_range,\r\n 'voxel_size' : voxel_size,\r\n 'pfn_num_filters' : 
pfn_num_filters,\r\n 'num_input_features': num_input_features,\r\n 'loss_norm_type': loss_norm_type,\r\n 'pos_cls_weight': pos_cls_weight,\r\n 'neg_cls_weight': neg_cls_weight,\r\n 'direction_loss_weight': direction_loss_weight,\r\n 'cls_loss_ftor': cls_loss_ftor,\r\n 'loc_loss_ftor': loc_loss_ftor,\r\n 'cls_weight': cls_weight,\r\n 'loc_weight': loc_weight,\r\n })\r\n # print(\"++++++++++++++++++++++++++++++++++++OVER MODEL CFG++++++++++++++++++++++++++++++++++++++++++++++++\")\r\n # print(\"++++++++++++++++++++++++++++++++++++START POINTPILLARS++++++++++++++++++++++++++++++++++++++++++++++++\")\r\n model = PointPillars(output_shape = dense_shape,\r\n model_cfg=model_cfg,\r\n target_assigner= target_assigner)\r\n # print(\"++++++++++++++++++++++++++++++++++++OVER POINTPILLARS++++++++++++++++++++++++++++++++++++++++++++++++\")\r\n\r\n return model", "title": "" }, { "docid": "b259a7e6ffaa9fde353e20cdc4d73251", "score": "0.5498966", "text": "def create_network(self, x, name, initializer=\"he_init\", activation=\"tanh\"):\n # regularizer to use:\n # self.l1_reg = tf.contrib.layers.l1_regularizer(self.reg_scale)\n self.l2_reg = tf.contrib.layers.l2_regularizer(self.reg_scale)\n\n if initializer == \"he_init\":\n # he/MRSA initialization:\n init = tf.contrib.layers.variance_scaling_initializer()\n elif initializer == \"xavier_init\":\n # Xavier initialization:\n init = tf.contrib.layers.xavier_initializer()\n\n if activation == \"tanh\":\n activation = tf.nn.tanh\n elif activation == \"relu\":\n activation = tf.nn.relu\n\n with tf.variable_scope(name):\n fc1 = tf.layers.dense(inputs=x, units=64, activation=activation, kernel_initializer=init,\n kernel_regularizer=self.l2_reg, )\n dropout1 = tf.layers.dropout(fc1, rate=self.dropout_rate, training=True)\n fc2 = tf.layers.dense(inputs=dropout1, units=128, activation=activation, kernel_initializer=init,\n kernel_regularizer=self.l2_reg, )\n dropout2 = tf.layers.dropout(fc2, rate=self.dropout_rate, training=True)\n fc3 = tf.layers.dense(inputs=dropout2, units=128, activation=activation, kernel_initializer=init,\n kernel_regularizer=self.l2_reg, )\n dropout3 = tf.layers.dropout(fc3, rate=self.dropout_rate, training=True)\n output = tf.layers.dense(inputs=dropout3, units=self.n_action, kernel_initializer=init)\n return output", "title": "" }, { "docid": "5c9531221e33a21c233cc11405d53ecb", "score": "0.54989505", "text": "def sigmoidANDNetwork():\n expected_output_and = (([1, 1], [True, True]),\n ([1, 0], [False, False]),\n ([0, 1], [False, False]),\n ([0, 0], [False, False]))[::-1]\n print(f\"And Gate expectation = {expected_output_and}\")\n table = makeTruthTable(2)[::-1]\n AND1 = im.Neuron(inputWeight=[-0.5, 0.5], bias=1.5, idNeuron='AND 1')\n AND2 = im.Neuron(inputWeight=[-1, 1], bias=3, idNeuron='AND 2')\n\n layerone = ptl.NeuronLayer([AND1,AND2], idLayer='FirstLayer')\n networkOneAND1 = ptn.NeuronNetwork([layerone])\n\n print(networkOneAND1)\n print(f\"total loss = {networkOneAND1.calc_total_loss(expected_output_and)}\")\n networkOneAND1.train(expected_output_and,1,10000,10)\n print(networkOneAND1)\n print(f\"total loss = {networkOneAND1.calc_total_loss(expected_output_and)}\")", "title": "" }, { "docid": "0880f488f910850c2b85e82b81f5de52", "score": "0.5495728", "text": "def multilayer_perceptron(self):\n # Hidden layer with RELU activation\n layer_1 = tf.add(tf.matmul(self.x, self.weights['h1']), self.biases['b1'])\n layer_1 = tf.nn.relu(layer_1)\n # Hidden layer with RELU activation\n layer_2 = tf.add(tf.matmul(layer_1, self.weights['h2']), 
self.biases['b2'])\n layer_2 = tf.nn.relu(layer_2)\n # Output layer with linear activation\n for i in range(self.n_heads):\n head_output = tf.matmul(layer_2, self.weights['out'][i]) + self.biases['out'][i]\n self.out_layer.append(head_output)", "title": "" }, { "docid": "fe8f8e687604fbffc12dca9059e04cac", "score": "0.5495723", "text": "def __init__(self):\n super(Net, self).__init__()\n resnet = models.resnet50()\n self.features = nn.Sequential(*list(resnet.children())[:-1])\n self.fc = nn.Sequential(\n nn.Linear(2048, 1024),\n nn.Dropout(0.4),\n nn.LeakyReLU(),\n nn.Linear(1024, 256),\n nn.Dropout(0.4),\n nn.LeakyReLU(),\n )\n\n self.fc_s = nn.Sequential(nn.Linear(256, 1),\n nn.LeakyReLU())", "title": "" }, { "docid": "3b78e1ee511f4de9f554283a14fb8703", "score": "0.54942685", "text": "def train_neural_net(x_train, y_train, root, tk, output_text_area):\n output_text_area.insert(5.0, 'Training neural net...\\n\\n')\n root.update_idletasks()\n neural_net = MLPClassifier()\n neural_net.fit(x_train, y_train) # train neural net\n\n output_text_area.insert(7.0, 'Saving trained neural net to file...\\n\\n')\n root.update_idletasks()\n pickle.dump(neural_net, open('trained_neural_net', 'wb'))\n\n # visualize(pd.Series(neural_net.loss_curve_), graph_type='area') # plot loss curve\n\n return neural_net", "title": "" }, { "docid": "c36621af6e858abc1d254ff4b8acc52b", "score": "0.5490769", "text": "def test_matmul_01():\n\n shape = (1, 2, 3, 4)\n w_shape = (1, 2, 4, 3)\n out_shape = (1, 2, 3, 3)\n\n class MatMulTester(Base):\n def create_onnx(self) -> onnx.ModelProto:\n node = make_node(\"MatMul\", inputs=[\"x\", \"W\"], outputs=[\"y\"])\n inputs = [info(\"x\", TensorProto.FLOAT, shape)]\n outputs = [info(\"y\", TensorProto.FLOAT, out_shape)]\n\n init_W = from_array(np.random.rand(*w_shape).astype(np.float32), \"W\")\n\n graph = make_graph([node], \"add_graph\", inputs, outputs, initializer=[init_W])\n model = make_model(graph)\n return model\n\n x = np.random.rand(*shape).astype(np.float32)\n\n outputs = [\"y\"]\n MatMulTester({\"x\": x}, outputs).run()", "title": "" }, { "docid": "436a1cf3732cebaaadee37ab25a3459f", "score": "0.54896486", "text": "def test_serialize():\n input_dims = np.random.randint(1, 32)\n output_dims = np.random.randint(1, 64)\n\n weights = np.random.uniform(size=(input_dims, output_dims))\n biases = np.random.uniform(size=(output_dims))\n\n fullyconnected_layer = FullyConnectedLayer(weights, biases)\n relu_layer = ReluLayer()\n\n network = Network([fullyconnected_layer, relu_layer])\n serialized = network.serialize()\n assert len(serialized.layers) == 2\n assert serialized.layers[0] == fullyconnected_layer.serialize()\n assert serialized.layers[1] == relu_layer.serialize()\n\n deserialized = Network.deserialize(serialized)\n assert deserialized.serialize() == serialized", "title": "" }, { "docid": "74a0390ac4769d4c8304acdf7dfd58e9", "score": "0.5487436", "text": "def model(data, train=False):\n def nn_layer(data, weights, biases, name):\n \"\"\"Reusable code for making a simple neural net layer.\n It sets up name scoping so that the resultant graph is easy to read,\n and adds a number of summary ops.\"\"\"\n conv = tf.nn.conv2d(data, # 4d tensor [batch, in_height, in_width, in_channels]\n weights,\n strides=[1, 1, 1, 1],\n padding='SAME')\n # Bias and rectified linear non-linearity.\n relu = tf.nn.relu(tf.nn.bias_add(conv, biases))\n # Max pooling. The kernel size spec {ksize} also follows the layout of\n # the data. 
Here we have a pooling window of 2, and a stride of 2.\n pool = tf.nn.max_pool(relu,\n ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1],\n padding='SAME')\n return pool\n\n layer1 = nn_layer(data, conv1_weights, conv1_biases, 'layer1')\n layer2 = nn_layer(layer1, conv2_weights, conv2_biases, 'layer2')\n\n # Reshape the feature map cuboid into a 2D matrix to feed it to the\n # fully connected layers.\n pool_shape = layer2.get_shape().as_list()\n reshape = tf.reshape(\n layer2,\n [pool_shape[0], pool_shape[1] * pool_shape[2] * pool_shape[3]])\n # Fully connected layer. Note that the '+' operation automatically\n # broadcasts the biases.\n hidden = tf.nn.relu(tf.matmul(reshape, fc1_weights) + fc1_biases)\n tf.histogram_summary('fc1/activations', hidden)\n # Add a 50% dropout during training only. Dropout also scales\n # activations such that no rescaling is needed at evaluation time.\n if train:\n hidden = tf.nn.dropout(hidden, 0.5, seed=SEED)\n return tf.matmul(hidden, fc2_weights) + fc2_biases", "title": "" }, { "docid": "7703d0f5e41c3d64a90e068e054daf90", "score": "0.54865193", "text": "def SimpleModel(train_data, optimizer=None):\r\n \r\n model = keras.Sequential([\r\n keras.layers.Dense(train_data.shape[1], name='Input', activation='linear',\r\n input_shape=(train_data.shape[1],)),\r\n keras.layers.Dense(1024,name='Layer1', activation='relu'),\r\n keras.layers.Dense(1024,name='Layer2', activation='relu'),\r\n keras.layers.Dense(1024,name='Layer3', activation='relu'),\r\n keras.layers.Dense(1024,name='Layer4', activation='relu'),\r\n keras.layers.Dense(1024,name='Layer5', activation='relu'),\r\n keras.layers.Dense(1024,name='Layer6', activation='relu'),\r\n keras.layers.Dense(1024,name='Layer7', activation='relu'),\r\n keras.layers.Dense(4, name='Output')])\r\n\r\n if optimizer is None:\r\n optimizer = keras.optimizers.Adam(lr=0.001, beta_1=0.95, beta_2=0.999,\r\n epsilon=1e-4)\r\n \r\n model.compile(loss=my_loss, optimizer=optimizer, metrics=['mae','mape'])\r\n model.summary()\r\n return model", "title": "" }, { "docid": "d01102fa0167f670b6d7e6b6be86e0fc", "score": "0.54829895", "text": "def train_flat_NN(data, val_data, batch_size=64, n_epochs=50, \n out_path = 'output/', dropout = 0.2, \n n_layers=3, layer_nodes=100, batch_norm=False, activation='relu'):\n\n try:\n X_train = data['feature_matrix']\n y_train = data['target']\n train_data = {'feature_matrix': X_train}\n except KeyError:\n raise KeyError('The data-dictionary provided does not contain' \\\n 'all necessary keys for the selected run-mode (run_mode_user)')\n\n print('X_train shape {}'.format(X_train.shape))\n\n if 99 in y_train:\n raise ValueError('The data contains more than 2 different targets!')\n\n input_train = Input(shape=(X_train.shape[-1],), name='feature_matrix')\n x = Dense(layer_nodes, kernel_initializer='glorot_normal')(input_train)\n x = (getattr(keras.layers, activation)())(x)\n\n for i in range(0,n_layers-1):\n x = Dense(layer_nodes, kernel_initializer='glorot_normal')(x)\n x = (getattr(keras.layers, activation)())(x)\n if batch_norm:\n x = BatchNormalization()(x)\n if dropout > 0.0:\n x = Dropout(dropout)(x)\n\n main_output = Dense(1, \n activation = 'sigmoid', \n name = 'main_output', \n kernel_initializer = 'glorot_normal')(x)\n model = Model(inputs=input_train, outputs=main_output)\n\n model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])\n \n print(model.summary())\n try:\n checkpointer = ModelCheckpoint(filepath=out_path+'best_model.h5',\n verbose=0,\n save_best_only=True)\n\n 
history = model.fit(train_data,\n y_train, \n epochs = n_epochs, \n batch_size = batch_size,\n validation_data = val_data,\n callbacks = [checkpointer]).history\n # ,callbacks = [callback_ROC(train_data_dic, \n # output_targets, \n # output_prefix=out_path)])\n\n except KeyboardInterrupt:\n print('Training ended early.')\n\n return history", "title": "" } ]
c937572e1392a3d6b5d31b56b7b8c048
Checks if the packet is of a certain type
[ { "docid": "4e73f806677fd2173749ceb58404fe15", "score": "0.8320609", "text": "def check_packet_type(packet, target_packet):\n\n # Grabs the first section of the Packet\n packet_string = str(packet)\n split = packet_string.split(' ')\n\n # Checks for the target packet type\n if target_packet == split[0]:\n return True\n else:\n return False", "title": "" } ]
[ { "docid": "28a3b0fb6b49327dc364d8b14122cbd8", "score": "0.76044613", "text": "def check_type(cls, packet_type=None, data=None):\n if data is None:\n raise ValueError('Not data supplied. Data cannot be None')\n elif packet_type is None:\n raise ValueError('Not packet_type supplied.')\n else:\n return packet_type.upper() == OP_CODES[data[:2]]", "title": "" }, { "docid": "489d582d6f4a7359dd46c6aaf219e9ea", "score": "0.7022754", "text": "def validate_packet(packet):\n if packet.magicno != magicno:\n return False\n if packet.packet_type != 0:\n return False\n return True", "title": "" }, { "docid": "ebadbb82f85045236962cb496b8771ab", "score": "0.6816787", "text": "def is_valid(cls, packet_type=None, data=None):\n op_code = data[:2]\n if op_code not in OP_CODES:\n raise MalformedPacketException('Unidentified Packet Type - {}'.format(op_code))\n elif packet_type is None:\n raise ValueError('Packet Type cannot be None')\n else:\n if packet_type.upper() == 'INITRQ':\n return (len(data) >= MIN_PACKET_SIZE) and (len(data) <= MAX_PACKET_SIZE)\n elif packet_type.upper() == 'DATA':\n return (len(data) >= 2) and (len(data) <= MAX_PACKET_SIZE)\n elif packet_type.upper() == 'ACK':\n return len(data) == 4\n else:\n # must be error. Error hasn't been fully implemented yet :-(\n raise NotImplementedError('Error packet type is being encoded. '\n 'Err packet is currently not supported :-(')", "title": "" }, { "docid": "1559bce0f29d1a184145fe63fded2bcf", "score": "0.66085273", "text": "def is_type_of(self, data):\n return False", "title": "" }, { "docid": "8926e342fefe579105a352055df6685d", "score": "0.6559139", "text": "def can_handle_packet(self, packet):\n\n return True", "title": "" }, { "docid": "f36df11f77ef5f3cdf52df742e4b8308", "score": "0.6453469", "text": "def is_type(self, instance: Any, type: str) -> bool:", "title": "" }, { "docid": "697d82f85df1e36120cb997f46822977", "score": "0.6389738", "text": "def _is_socket(self, item):\n return 'type' in item", "title": "" }, { "docid": "14031c0870f934b2f8379bb2a0d31a12", "score": "0.6382936", "text": "def _typeok(self, value):\n return isinstance(value, self.type)", "title": "" }, { "docid": "b8cfc76a29bd9dc460840d141d1a5386", "score": "0.63719887", "text": "def is_clean_packet(packet): # pragma: no cover\n if not packet['src_port'].isdigit(): return False\n if not packet['dest_port'].isdigit(): return False\n\n if packet['src_ip'].isalpha(): return False\n if packet['dest_ip'].isalpha(): return False\n\n if 'data' in packet:\n try:\n int(packet['data'], 16)\n except:\n return False\n\n return True", "title": "" }, { "docid": "7deb73cba00ec1e4ce172bd56f375bb0", "score": "0.63291854", "text": "def is_type(self, xtyp):\n return self.type == xtyp", "title": "" }, { "docid": "4f99f0b77d6744e1fbcf74bca7e504ee", "score": "0.63139844", "text": "def verify_type(type):\n types = ['INTEGER','INT','BOOLEAN','REAL','TEXT','BOOL','BLOB','NOTYPE']\n if type.upper() not in types:\n raise TypeError('%s is not a legal type.'%type)\n return True", "title": "" }, { "docid": "80808caa0db09c3edb690ce9d9a54eb8", "score": "0.6287318", "text": "def is_valid_traffic_type(self, traffic_type_name):\n pass", "title": "" }, { "docid": "70da8cc2574f7de86e757cf58290cca4", "score": "0.62346315", "text": "def __isValid(self):\r\n return _libvncxx.Packet___isValid(self)", "title": "" }, { "docid": "cd838fa8f0cf3f57e85d2b9a2e947de1", "score": "0.61914986", "text": "def is_valid_type(self):\n\n if self.datatype == \"type\":\n if self.value == \"int\" or self.value == \"bool\" or self.value 
== \"string\" or self.value == \"float\":\n return True\n return False", "title": "" }, { "docid": "439249dcbe431037f0737c3af5543e64", "score": "0.6123811", "text": "def can_handle(cls, data_type: str) -> bool:\n return cls.data_type == data_type", "title": "" }, { "docid": "361f8997decaf0bd078002898f7ecad7", "score": "0.60639745", "text": "def _check_valid_packet(self, header):\n return bool(header & self._packet_header_checkbit)", "title": "" }, { "docid": "e661ec90e10f069023d3b0d11f80a953", "score": "0.605191", "text": "def _is_type(self, instance, type):\n\n py_type = self._types.get(type)\n\n if py_type is None:\n return self._schema_error(\n self._unknown_type, u\"%r is not a known type\" % (type,)\n )\n\n # the only thing we're careful about here is evading bool inheriting\n # from int, so let's be even dirtier than usual\n\n elif (\n # it's not a bool, so no worries\n not isinstance(instance, bool) or\n\n # it is a bool, but we're checking for a bool, so no worries\n (\n py_type is bool or\n isinstance(py_type, tuple) and bool in py_type\n )\n\n ):\n return isinstance(instance, py_type)", "title": "" }, { "docid": "b253e76c6cdb7787a5ff161b329ca62a", "score": "0.60478026", "text": "def _check_type(self):\n pass", "title": "" }, { "docid": "13d64d91c85233a14abc3ed99a57c2e0", "score": "0.6029596", "text": "def affect_packet(packet):\n\n # Saving packets\n if save_active:\n pktdump.write(IP(packet.get_payload()))\n\n if target_packet_type == \"ALL\":\n return True\n else:\n return check_packet_type(packet, target_packet_type)", "title": "" }, { "docid": "f154d1223da70db36fa37eb3f69204ef", "score": "0.60175306", "text": "def check_type(x: str, inp_type: type) -> bool:\n try:\n inp_type(x)\n return True\n except:\n return False", "title": "" }, { "docid": "6bc0d9701f9414fa4977221a1d556391", "score": "0.6006593", "text": "def is_type(node, typename):\n return typename in mc.nodeType(node, inherited=True)", "title": "" }, { "docid": "77d0f26f0ae48c52d37e3b35584b0952", "score": "0.5988992", "text": "def handle_packet(self, packet):\n\n # Default implementation: use can_handle_packet to determine if we\n # handle the given type of packet; and call `.consume_packet` to\n # consume the given packet.\n\n if self.can_handle_packet(packet):\n try:\n self.consume_packet(packet)\n return True\n except UnhandledPacket:\n return False\n else:\n return False", "title": "" }, { "docid": "4323108e88899a0cfe417df8e6276b22", "score": "0.5930523", "text": "def check_ptype( self, api_dct ):\n return_val = False\n patron_ptype = api_dct['response']['p_type']['value']\n if patron_ptype in self.PATRON_API_LEGIT_PTYPES:\n return_val = True\n log.debug( 'ptype check, `%s`' % return_val )\n return return_val", "title": "" }, { "docid": "073f6a0220c10cb36e86e7f2909cef4e", "score": "0.58817995", "text": "def _class_d(packet):\n source = packet.source_ip.split('.')\n dest = packet.source_ip.split('.')\n\n return int(source[0]) in range(224,240) or int(dest[0]) in range(224, 240)", "title": "" }, { "docid": "8cdb8574ddcd56e34817912731b835ae", "score": "0.5881224", "text": "def check_data_type(): #ANDREW", "title": "" }, { "docid": "f9703a2042d62cc17f62cbad799ed36a", "score": "0.58746505", "text": "def payload_valid(self, payload):\n return isinstance(payload, DPTBinary)", "title": "" }, { "docid": "d015e2a9d2acf0252623051384e05a28", "score": "0.58741766", "text": "def is_type(self, *seg_type):\n return self.class_is_type(*seg_type)", "title": "" }, { "docid": "255b21884d6fb36e571d6e7df7323536", "score": "0.5835274", 
"text": "def matches_type(cls, type_name):\n return type_name and type_name[-1] in CardinalityField.pattern_chars", "title": "" }, { "docid": "6afc30ccdd6877613e1d86b1cd3c3bb7", "score": "0.58159953", "text": "def is_valid_ethertype(logger, value_to_check):\n if value_to_check[:2] == '0x':\n #*** Looks like hex:\n try:\n if not (int(value_to_check, 16) > 0 and \\\n int(value_to_check, 16) < 65536):\n logger.debug(\"Check of \"\n \"is_valid_ethertype as hex on %s returned false\",\n value_to_check)\n return 0\n except:\n logger.debug(\"Check of \"\n \"is_valid_ethertype as hex on %s raised an exception\",\n value_to_check)\n return 0\n else:\n #*** Perhaps it's decimal?\n try:\n if not (int(value_to_check) > 0 and \\\n int(value_to_check) < 65536):\n logger.debug(\"Check of \"\n \"is_valid_ethertype as decimal on %s returned false\",\n value_to_check)\n return 0\n except:\n logger.debug(\"Check of \"\n \"is_valid_ethertype as decimal on %s raised an exception\",\n value_to_check)\n return 0\n return 1", "title": "" }, { "docid": "b068412fdf55901faa365e06387f3159", "score": "0.5807751", "text": "def check_type(cls, typestr):\n return typestr == cls.MESSAGE_TYPE_ALIAS or typestr == \"EVENT-RESUME\"", "title": "" }, { "docid": "43633154bd114fadd4ef0eee524bf293", "score": "0.578616", "text": "def verify(self, packet):\n if not packet:\n return False\n\n data_bytes = packet[3:]\n data_sum = sum(data_bytes)\n result = data_sum & 0xff\n if result == 0xff:\n return True\n return False", "title": "" }, { "docid": "2fc6c6bb5e450a75b35be02cab765be5", "score": "0.57804406", "text": "def _check_packet(packet, wait_id):\n len_packet = len(packet)\n if len_packet < HEADER_SIZE:\n raise RbcpError(\"RBCP header too short(%d/%d)\" %\n (len_packet, HEADER_SIZE))\n if packet[0] != HEADER_VERTYPE:\n raise RbcpError(\"RBCP Header Version Mismatch\")\n if (packet[1] & 0x1) == 1:\n raise RbcpBusError()\n if packet[2] != wait_id:\n raise RbcpError(\"RBCP Packet ID Mismatch\")", "title": "" }, { "docid": "905e24f6f7e835446b9888c7c5510098", "score": "0.5733033", "text": "def isType(node, typename):\r\n return typename in node.nodeType(inherited=True)", "title": "" }, { "docid": "7efc6a445b5a42eac37a197165b3c77a", "score": "0.57161754", "text": "def typecheck(self, types):\r\n if any(types in val for val in self.types):\r\n return True\r\n return False", "title": "" }, { "docid": "55364c7aed8c48699aa5c9d7df33d730", "score": "0.5707482", "text": "def __contains__(self, x):\n from pcapkit.protocols.protocol import Protocol # pylint: disable=import-outside-toplevel\n\n try:\n flag = issubclass(x, Protocol)\n except TypeError:\n flag = issubclass(type(x), Protocol)\n\n if flag or isinstance(x, Protocol):\n x = x.id()\n if isinstance(x, tuple):\n x = r'|'.join(x)\n\n with contextlib.suppress(Exception):\n for data in self.__data__:\n if re.fullmatch(x, data, re.IGNORECASE):\n return True\n return False", "title": "" }, { "docid": "8703af054545cdc56d09946be58033b2", "score": "0.5702271", "text": "def _is_event_of_type(\n event: Union[Dict[Text, Any], Text, None], target_type: Type[\"Event\"]\n) -> bool:\n if not event:\n return False\n\n if isinstance(event, str):\n return event == target_type.type_name\n\n return event.get(\"event\") == target_type.type_name", "title": "" }, { "docid": "4895c339c207d67ab91732e2a0dea6e8", "score": "0.56853765", "text": "def is_kind_of(self, tp):\n # Check if required type is the same\n return self.type.is_kind_of(tp)", "title": "" }, { "docid": "56c3c1d7e8f3c98bc2f16a8bda196c17", "score": 
"0.56736887", "text": "def check_type(self, *types):\r\n if self.decoded.get('typ', '') not in types:\r\n log_info('Receipt type not in %s' % ','.join(types))\r\n raise InvalidReceipt('WRONG_TYPE')", "title": "" }, { "docid": "65b6642e652e9d823afac3a0e2a32072", "score": "0.56266636", "text": "def packet_type(self):\n packet_type = \"\"\n if self.flags == 2:\n packet_type = \"SYN\"\n elif self.flags == 16:\n packet_type = \"ACK\"\n elif self.flags == 1:\n packet_type = \"FIN\"\n elif self.flags == 18:\n packet_type = \"SYN-ACK\"\n elif self.flags == 17:\n packet_type = \"FIN-ACK\"\n elif self.data != b\"\":\n packet_type = \"DATA\"\n return packet_type", "title": "" }, { "docid": "a590ec90a37b733abc3b6f2cedf1f6ea", "score": "0.560724", "text": "def analyze_packet(timestamp, packet, nth):\n\n eth = dpkt.ethernet.Ethernet(packet)\n if isinstance(eth.data, dpkt.ip.IP):\n parse_ip_packet(eth, nth, timestamp)", "title": "" }, { "docid": "3cfacabe3a939a98e15f06c4cea4d611", "score": "0.5593315", "text": "def _parse_packet_header(self, header, handle):\n # Highest bit set?\n if not self._check_valid_packet(header):\n raise Exception(\"Not a valid packet: highest bit not set: %s\" %\n hex(header))\n\n # Lets find out if its an old or a new packet\n if _check_new_packet(header):\n packet_type = self._get_new_packet_type(header)\n else: # we have an old packet\n packet_type = self._get_old_packet_type(header)\n return packet_type", "title": "" }, { "docid": "a06ffa518c44ba603e76b667ceb3455c", "score": "0.5557629", "text": "def is_client(_packet):\n t = _packet[TCP]\n if t.flags & 0x02 and not t.flags & 0x10:\n return True\n tcp = _packet.getlayer(TCP)\n if tcp.sport > tcp.dport: # if the sport is higher then likely it is the client\n return True\n return False", "title": "" }, { "docid": "b4a8e8e606f3832a0d5d6c4671f85e1e", "score": "0.5548917", "text": "def __contains__(self, x):\n from pcapkit.protocols.protocol import Protocol # pylint: disable=import-outside-toplevel\n\n try:\n flag = issubclass(x, Protocol)\n except TypeError:\n flag = issubclass(type(x), Protocol)\n if flag or isinstance(x, Protocol):\n return x in self.__data__\n\n with contextlib.suppress(Exception):\n for data in self.__data__:\n index = data.id()\n if isinstance(index, tuple):\n index = r'|'.join(index)\n if re.fullmatch(index, x, re.IGNORECASE):\n return True\n return False", "title": "" }, { "docid": "a69762b504a5f55c559cd405131f75e6", "score": "0.5536771", "text": "def is_proto(self) -> bool:\n return hasattr(self, '_data') and self._data.type_url != ''", "title": "" }, { "docid": "a6c000589b6f0a25279b37bf45701b48", "score": "0.55284816", "text": "def _valid(self, val):\n return isinstance(val, self._type)", "title": "" }, { "docid": "9e57baf9b2bb74753d5ef9309554f596", "score": "0.55147696", "text": "def verify_request(request_line):\n is_request = True\n if request_line.count(b'\\x00') != 3:\n is_request = False\n req_type = request_line[0: 2]\n if req_type != b'\\x00\\x01':\n is_request = False\n return is_request", "title": "" }, { "docid": "722a695bf4bfde83f0abffd5929a6feb", "score": "0.5493177", "text": "def _checker(self):\n if self.field_type not in range(1, 8):\n raise ValueError(\"bad field type\")", "title": "" }, { "docid": "53e5398dc6d10c426f6b80aed1f1051b", "score": "0.5490391", "text": "def is_type_unknown(type_: Optional[Type]) -> bool:\n return type_ is None", "title": "" }, { "docid": "fabd47ed69d1ac93984a28b88142692a", "score": "0.54847324", "text": "def check(self, packet, logger):\n return 
self.trigger.is_applicable(packet, logger)", "title": "" }, { "docid": "d2388917975f58005a639a3b285e851e", "score": "0.54845226", "text": "def check_json_type(self) -> bool:\n return isinstance(self.server, requests)", "title": "" }, { "docid": "827eeda386b10e5d64d87a8194bdf14a", "score": "0.5474683", "text": "def is_ip_frame(cls, eth_frame):\n return eth_frame.type in [dpkt.ethernet.ETH_TYPE_IP,\n dpkt.ethernet.ETH_TYPE_IP6]", "title": "" }, { "docid": "77cf1b761d55d89aa0e22f9b89e1993e", "score": "0.54708797", "text": "def filter_packet(self, rawmsg, hdr):\r\n # XXX didn't actually check for packet-in...\r\n return False\r\n # Add check for packet in and rate limit\r\n if self.filter_packet_in:\r\n # If we were dropping packets, report number dropped\r\n # TODO dont drop expected packet ins\r\n if self.pkt_in_run > self.pkt_in_filter_limit:\r\n self.logger.debug(\"Dropped %d packet ins (%d total)\"\r\n % ((self.pkt_in_run -\r\n self.pkt_in_filter_limit),\r\n self.pkt_in_dropped))\r\n self.pkt_in_run = 0\r\n\r\n return False", "title": "" }, { "docid": "1f3f0677f6f6d3812f357be16fe82862", "score": "0.5462087", "text": "def validate_type(self):\n if not (self.is_physical or self.is_physical):\n self.is_postal = True\n self.is_physical = True", "title": "" }, { "docid": "f81394e1ae1e4d1d3415958def2713c7", "score": "0.54476583", "text": "def check_type(self, persistent: bool) -> bool:\n if self.match_type == \"p\" and not persistent:\n return False\n if self.match_type == \"f\" and persistent:\n return False\n return True", "title": "" }, { "docid": "c467167b5103db53cb852436d839e473", "score": "0.54293334", "text": "def check_type(df, type_rule):\n if type_rule not in [\"integer\", \"float\", \"numeric\", \"string\"]:\n raise ValueError(\"type_rule {} not supported, please choose one \"\n \"of integer, float, numeric, or string\")\n # Integer: Check residual from rounding\n if type_rule == \"integer\":\n nonint_indices = np.arange(len(df))[(df != np.round(df))]\n if nonint_indices.size > 0:\n value = df.iloc[nonint_indices[0]]\n return False, \"integer type check failed at index {} with value {}\".format(\n nonint_indices[0], value\n )\n # Float: just check numpy dtyping\n elif type_rule == \"float\":\n if not np.issubdtype(df.dtype, np.floating):\n return False, \"float type check failed, type is {}\".format(df.dtype)\n\n # Numeric: check numpy number dtyping\n elif type_rule == \"numeric\":\n if not np.issubdtype(df.dtype, np.number):\n return False, \"number type check failed, type is {}\".format(df.dtype)\n\n # String: check string/unicode subdtype\n elif type_rule == \"string\":\n if not (np.issubdtype(df.dtype, np.object_) or np.issubdtype(df.dtype, np.unicode_)):\n return False, \"string type check failed, type is {}\".format(df.dtype)\n return True, \"\"", "title": "" }, { "docid": "3197377c782625e3a14fd32a21ed18bf", "score": "0.54289776", "text": "def has_type(self, ctype):\n return self.type_counts.get(ctype.__name__, 0)", "title": "" }, { "docid": "882959d494ca4c996d7a5d8ea18c7661", "score": "0.5420818", "text": "def _checkType(self, node: Node):\n\n if type(node) != Node:\n raise TypeError(f\"Input must be a Node - {type(node)} != Node\")", "title": "" }, { "docid": "58cec1feab220e4bb8488c19a50ac3ad", "score": "0.54124177", "text": "def has_at(self, type_, pos):\n return type_ in self[pos]", "title": "" }, { "docid": "d2de1c98218705d4906ca5930b7e408c", "score": "0.5408658", "text": "def _check_changetype(self, dn, changetype, attr_value):\n if dn is None:\n self._error(\"Read 
changetype: before getting valid dn: line.\")\n if changetype is not None:\n self._error(\"Two lines starting with changetype: in one record.\")\n if attr_value not in CHANGE_TYPES:\n self._error(\"changetype value %s is invalid.\" % attr_value)", "title": "" }, { "docid": "cac18cdc86ee6bf8ef6cceea367a94ad", "score": "0.5406169", "text": "def TypeInformation(self) -> bool:", "title": "" }, { "docid": "86474792adec778f0514125662d2e728", "score": "0.5404357", "text": "def isType(nodeName, nodeType):\n # Check object exists\n if not cmds.objExists(nodeName): return False\n # Check node type\n if cmds.objectType(nodeName) != nodeType: return False\n # Return result\n return True", "title": "" }, { "docid": "fa7d49fafcbea3bc2f70779465c904ad", "score": "0.5396368", "text": "def type_filter(type_: Optional[str], b: Dict) -> bool:\n if type_ is None:\n return True\n return b[\"info\"].strip() == type_ if b[\"info\"] is not None else False", "title": "" }, { "docid": "c8a4abc765b0adc47ed7d05bd178efa9", "score": "0.53957266", "text": "def is_bytes(obj: Any) -> bool:\n return isinstance(obj, bytes)", "title": "" }, { "docid": "a09094f2d6a922f75407982c3159019e", "score": "0.5392501", "text": "def type_check(input, desired_type, convert=False):\n if convert: # toggle casting stuff\n try: # catch all the errors\n return desired_type(input) # naming conventions are awwwesome\n except TypeError: # should only happen when types dont convert as want\n return False # we couldn't convert\n # could put a blanket except here, but I want to see all other errors\n # if you're here, convert is False\n return isinstance(input, desired_type) # python handles this natively", "title": "" }, { "docid": "ca873f9733c56a47ffc6385ca98756fb", "score": "0.5391475", "text": "def valid_response(self, data):\n if len(data) >= 4 and data[0] == ord('R') and data[1] == self._addr and data[2] == 1:\n # Extract payload\n data = data[3:]\n return self.valid_device_response(data)\n if len(data) == 2 and data[0] == ord('Y') and data[1] == self._pkt_id:\n return True\n if len(data) == 3 and data[0] == ord('N') and data[1] == self._pkt_id:\n raise DeviceUnreachable(data[2])", "title": "" }, { "docid": "423a894d10b614b9cad4a24e1dd95462", "score": "0.538151", "text": "def check_data_type(theFeatureClass):\n## arcpy.AddMessage(\"funcs.check_data_type\")\n desc = arcpy.Describe(theFeatureClass)\n## arcpy.AddMessage(str(desc))\n isM = desc.hasM\n return isM", "title": "" }, { "docid": "c2ea083c5b2a16b301dbf2a8f31e4e6e", "score": "0.5372137", "text": "def acceptPacket(self, direction, protocol, port, ip_address):\r\n x = direction + \"_\" + protocol + \"_\" + ip_address\r\n if self.__checkInWithoutIPRange(x,port):\r\n return True\r\n elif self.__checkInWithIPRange(direction+\"_\"+protocol,port,ip_address):\r\n return True\r\n else:\r\n return False", "title": "" }, { "docid": "74b787ddcee7839d575db0e612834e5b", "score": "0.5357657", "text": "def has_attribute_type(self, attribute: str, typ: Type) -> bool:\n if not self.has_attribute(attribute):\n return False\n\n attr_node = self.get_attribute(attribute).yaml_node\n\n if typ in scalar_type_to_tag:\n tag = scalar_type_to_tag[typ]\n return attr_node.tag == tag\n elif typ == list:\n return isinstance(attr_node, yaml.SequenceNode)\n elif typ == dict:\n return isinstance(attr_node, yaml.MappingNode)\n\n raise ValueError('Invalid argument for typ attribute')", "title": "" }, { "docid": "40c322727816989e8a5a09802f2b0a75", "score": "0.5353223", "text": "def check_type(\n instance_context: 
InstanceContext,\n) -> bool:\n return all([\n _check_matching_types(\n instance_context.runtime_type,\n instance_context.instance_type,\n instance_context.delegate,\n instance_context.ctx,\n ),\n _check_runtime_protocol(\n instance_context.runtime_type,\n is_protocol=instance_context.is_protocol,\n ctx=instance_context.ctx,\n ),\n _check_delegate_type(\n instance_context.runtime_type,\n instance_context.instance_signature,\n instance_context.delegate,\n instance_context.ctx,\n ),\n _check_concrete_generics(\n instance_context.runtime_type,\n instance_context.instance_type,\n instance_context.delegate,\n instance_context.ctx,\n ),\n _check_tuple_size(\n instance_context.instance_type,\n instance_context.delegate,\n instance_context.ctx,\n ),\n ])", "title": "" }, { "docid": "96360790455ed69774ea69c1278441e7", "score": "0.53506273", "text": "def is_packet_caught(scanner, delay=0):\n\n #the packet will get to the scanner at time delay + depth\n packet_arrival = delay + scanner.depth\n\n #the scanner will be in spot 0 at multiples of its cycle length, so\n #figure out if the packet's arrival time is one\n return packet_arrival % scanner.cycle_length == 0", "title": "" }, { "docid": "c9439c8805e736d8ae33474a417f1379", "score": "0.5345006", "text": "def validate_type(node):\n if not isinstance(node, Node):\n raise TypeError('Unrecognized node type: %s' % type(node))", "title": "" }, { "docid": "771648701df59c04acd17f52aaca6333", "score": "0.53357565", "text": "def _IsTypeCheck(interface, node, args):\n if len(args) == 0 or args[0][0] != 'PP_Resource':\n return False\n return node.GetName() == 'Is%s' % _GetShortName(interface, ['Dev', 'Private'])", "title": "" }, { "docid": "86bb1fa515fa1018a3937f88bb40747a", "score": "0.5331884", "text": "def is_valid_type(element, cls):\n return isinstance(element, cls) and element.id is not None", "title": "" }, { "docid": "ae10a2bdc08acd94856b2e19424d42d3", "score": "0.5318519", "text": "def check_type_support(self, input_data: OrderedDict) -> tuple:\n # look for the supported data type input format, and validate if the data type is currently supported in the system or not\n if (\"type\" in input_data) and (input_data[\"type\"] in self.support_type_functions):\n return True, input_data[\"type\"]\n return False, None", "title": "" }, { "docid": "c8915ac702f81bf20168d53599e69262", "score": "0.52955633", "text": "def _check_response_type(self,\n response: Response,\n proper_type: str) -> None:\n content_type = response['Content-Type']\n self.assertEqual(\n content_type,\n proper_type,\n f'Incorrect content type ({content_type}) in {response.content}',\n )", "title": "" }, { "docid": "319a09d9e1d58189be24ecc914808a14", "score": "0.52888775", "text": "def isImageType(t):\r\n return hasattr(t, \"im\")", "title": "" }, { "docid": "3ae2718532239d472b455c341db03d56", "score": "0.5270244", "text": "def is_type(user_type, enum_type):\n try:\n return user_type == UserType[enum_type]\n except KeyError:\n return", "title": "" }, { "docid": "f5d818cfca8c367a02152978a4380e70", "score": "0.52701366", "text": "def check_expected_street_types(output_type, street_name, language):\n if language == 'English':\n match = street_type_re_english.search(street_name)\n\n if language == 'French':\n match = street_type_re_french.search(street_name)\n\n if match:\n street_type = match.group()\n\n if street_type in expected:\n if output_type == 'type':\n street_types_set[street_type].add(street_name)\n\n elif output_type == 'frequency':\n street_types_frequency[street_type] += 1", "title": 
"" }, { "docid": "275d64037dd10e892ff03e271139b115", "score": "0.52666026", "text": "def _ValidateType(self, universe):\n\n if universe.GetEntityTypeNamespace(self.namespace) is None:\n print('Invalid namespace: ', self.namespace)\n return False\n\n entity_type = universe.GetEntityType(self.namespace, self.type_name)\n if entity_type is None:\n print('Invalid entity type: ', self.type_name)\n return False\n elif entity_type.is_abstract:\n print('Abstract types cannot be instantiated: ', self.type_name)\n return False\n\n return True", "title": "" }, { "docid": "9ab575adb8c81f4059188b8c87707fbf", "score": "0.5265259", "text": "def get_packet_type(kls, data, protocol_register):\n if isinstance(data, str):\n data = binascii.unhexlify(data)\n\n protocol, pkt_type = PacketTypeExtractor.packet_type(data)\n\n prot = protocol_register.get(protocol)\n if prot is None:\n raise BadConversion(\n \"Unknown packet protocol\", wanted=protocol, available=list(protocol_register)\n )\n Packet, messages_register = prot\n\n mkls = None\n for k in messages_register:\n if pkt_type in k.by_type:\n mkls = k.by_type[pkt_type]\n break\n\n return protocol, pkt_type, Packet, mkls, data", "title": "" }, { "docid": "c286e7399a0a776e19f9092d818e6a68", "score": "0.52628005", "text": "def validateType(type):\n if type and (type.lower() == 'puts' or type.lower() == 'put' or type.lower() == 'p'):\n return 'put'\n else:\n return 'call'", "title": "" }, { "docid": "c10df11f0996e0abe70fb29dfeab7d82", "score": "0.52614105", "text": "def check_rule(packet, rule, ip):\n # print('* Rule state: ' + str(rule[rk.STATE]))\n\n if rule[rk.STATE]:\n self.output_list.append((rule[rk.DEST_PORT], packet))\n colored_print_switch(' packet forwarded into output interface ' + str(rule[rk.DEST_PORT]))\n return True\n else:\n self.dropped_packets.append(('DROPPED for suspension of the rule', packet))\n colored_print_switch(' dropped packet, rule for ip address ' + str(ip) + ' is suspended')\n return False", "title": "" }, { "docid": "33a4e6d6bda7d44c97587b6989f59a4a", "score": "0.5259094", "text": "def parse_packet(self, packet):\n opcode = packet[0:2]\n if opcode == b'\\x00\\x03':\n self.handle_data(packet)\n elif opcode == b'\\x00\\x04':\n self.handle_ack(packet)\n elif opcode == b'\\x00\\x05':\n self.handle_error(packet)\n else:\n sys.exit(\"Opcode não tratado\")", "title": "" }, { "docid": "246ef7ed8babfba8e879aeb2c9d214ed", "score": "0.52568394", "text": "def __check_type(self, make_advance=True):\n # checks for builtin types\n if self.__check_keyword_symbol(KEYWORD_TYPE, TYPE_LIST, make_advance):\n return True\n # checks for user-defined class types\n if not self.__check_keyword_symbol(IDENTIFIER_TYPE, make_advance=False):\n return False\n\n return True", "title": "" }, { "docid": "e78465167f524a9afe0c91577abae22b", "score": "0.5255103", "text": "def valid_for_io(self) -> bool:\n return self.value in {\n 'void',\n 'integer', 'boolean', 'string', 'float', 'script',\n 'vector', 'target_destination', 'color255'\n }", "title": "" }, { "docid": "56658b07adc2431fce4f2e41c1a1f86b", "score": "0.5253012", "text": "def typeFromNick ( nick ) :\n for t in _types_ :\n if nick == _types_[t] : return t\n for t in _types_ :\n print 'checking: %s == %s : %s ' % ( nick, _types_[t], nick==_types_[t] )\n raise AttributeError, \" No type is defined for nick '%s'\"%nick", "title": "" }, { "docid": "510eae038540bb3d3e253da0398d266c", "score": "0.52483547", "text": "def is_type(self, *args):\n return _gnsdk.GnDataObject_is_type(self, *args)", "title": "" }, { 
"docid": "8e9477bd6e378ad1943bdc333fece350", "score": "0.5245575", "text": "def is_bytes(obj):\n return isinstance(obj, bytes)", "title": "" }, { "docid": "d8f189bdd345427c5206b693c7136841", "score": "0.5234898", "text": "def test_supports(data_type, expected):\n assert my_len.supports(data_type) is expected\n assert type(data_type) in my_len._dispatch_cache # noqa: WPS437, WPS516", "title": "" }, { "docid": "165720b3754935e0535ea9612033b25a", "score": "0.5233237", "text": "def supportsType(self, type_uri):\r\n return (\r\n (type_uri in self.type_uris) or \r\n (type_uri == OPENID_2_0_TYPE and self.isOPIdentifier())\r\n )", "title": "" }, { "docid": "e9f6f5fa2728d4d8369b6bcc51e31eb5", "score": "0.5232234", "text": "def check_rtt_event(self, tcp, last_ack, event, zero_k):\n # if [seq num, seq_num + payload len) contains xi, where xi is any\n # number containing K zeros in the least significant bits, then\n # this packet should be considered for sequence number based RTT event.\n if self.check_rtt_event_tcp_seq(tcp.seq, len(tcp.data), zero_k):\n event.event_type_rtt_seq = True\n if last_ack is not None:\n if self.check_rtt_event_tcp_ack(last_ack, tcp.ack, zero_k):\n event.event_type_rtt_ack = True\n return event.event_type_rtt_seq or event.event_type_rtt_ack", "title": "" }, { "docid": "952ecc066326d72574f863338ec0a295", "score": "0.5229475", "text": "def takes_allocator(type):\n return not type in ['byte', 'int8', 'int16', 'int32', 'int64',\n 'char', 'uint8', 'uint16', 'uint32', 'uint64',\n 'float32', 'float64', 'bool', 'time', 'duration']", "title": "" }, { "docid": "55042d9d68cf75525d8859d515a35508", "score": "0.5224879", "text": "def _validability(self, ability):\n return (isinstance(ability, PhysicalAbility) or\n isinstance(ability, MentalAbility))", "title": "" }, { "docid": "836177c3b4317a5ca4728f35a55539e7", "score": "0.52228564", "text": "def kind_is(self, kind: str) -> bool:\n return self.kind == kind", "title": "" }, { "docid": "a6aec3c078f12eac3bf84506b048f810", "score": "0.52210104", "text": "def validate_resource_type(self, resource_type):\n if isinstance(self.resource_type, (list, tuple)):\n return resource_type in self.resource_type\n return resource_type == self.resource_type", "title": "" }, { "docid": "03081d126a78d3d1a8aaf8e2e69fb8de", "score": "0.52183247", "text": "def type_test(data, structure):\n\n if type(data) is list:\n # LIST CASE\n # Check every element in a list, if list of structure and data is equal\n # Then finaly check if the lists is equal in size.\n return list_test(data, structure)\n elif type(data) is dict:\n # DICT CASE\n # Checks every key in structure against data. 
If equal, returns True\n return dict_test(data, structure)\n else:\n # Basic type case.\n if type(data) is structure:\n return True\n else:\n return False", "title": "" }, { "docid": "4323707eebb7cdb8c1358526afbccbab", "score": "0.52093524", "text": "def check(self, body_type_id):\n for i in self.list_of_descriptors:\n if i == body_type_id:\n return True\n return False", "title": "" }, { "docid": "a51aca88032184f5dbc7d7cd3cfc108b", "score": "0.52088016", "text": "def type (self, value):\n\n try:\n value = str(value)\n\n if self._regex.match(value):\n return (True, value)\n\n except:\n pass\n\n error = self.error(self.type_err, value, \"type ip address\")\n\n return (False, value, error)", "title": "" }, { "docid": "df55ae80eb84e220db80067ca86be73a", "score": "0.52037734", "text": "def check(self, stanza):\n # Could optimize this by replacing this at init time, based on\n # the arguments\n\n if (self.name):\n if (self.name != stanza.getname()):\n return\n if (self.xmlns):\n if (self.xmlns != stanza.getnamespace()):\n return\n if (self.resource):\n tostr = stanza.getattr('to')\n if (not tostr):\n return\n pos = tostr.find('/')\n if (pos < 0):\n return\n if (self.resource != tostr[ pos+1 : ]):\n return\n if (self.typ):\n if (type(self.typ) == tuple):\n if (not (stanza.getattr('type') in self.typ)):\n return\n else:\n if (self.typ != stanza.getattr('type')):\n return\n if (self.id):\n if (self.id != stanza.getattr('id')):\n return\n\n self.op(stanza)\n if (self.autoaccept):\n raise interface.StanzaHandled", "title": "" }, { "docid": "fc931f6389e2fe1c00ec7b093642e312", "score": "0.51972634", "text": "def check(self, event) -> bool:\n return event.type in self.event_types", "title": "" } ]
840baef06fe1889c2a7a3f59e70fdb1c
Starts a scan and returns a waitable for when the scan completes
[ { "docid": "d8337579a571ba249ad925553b7e5870", "score": "0.65212065", "text": "def start_scan(self, scan_parameters: ScanParameters = None, clear_scan_reports=True) -> scan_waitable.ScanFinishedWaitable:\r\n self.stop()\r\n # Cache the device's address on scan start\r\n self._own_address = self.ble_device.address\r\n if clear_scan_reports:\r\n self.scan_report = ScanReportCollection()\r\n if not scan_parameters:\r\n scan_parameters = self._default_scan_params\r\n else:\r\n # Make sure the scan parameters are valid\r\n scan_parameters.validate()\r\n self.ble_device.ble_driver.ble_gap_scan_start(scan_parameters)\r\n self._is_scanning = True\r\n return scan_waitable.ScanFinishedWaitable(self.ble_device)", "title": "" } ]
[ { "docid": "f992fe331afed5a1f4fcc791d974e11d", "score": "0.7121392", "text": "def start(self):\n\n print(\"Starting scan task...\")\n if self.t_scan is None:\n self.t_scan = Timer(-1)\n self.t_scan.init(\n period=self.scan_period,\n mode=Timer.PERIODIC,\n callback=lambda t: self.scan(),\n )\n self.ready.value(0)\n self.flag_running = True\n print(\"Finished starting scan task.\")", "title": "" }, { "docid": "d36279741ee9c15e5ba8123306feeada", "score": "0.69425035", "text": "def start_scan(self):\n try:\n out = self.get_output(\"scan on\")\n print(\"out\")\n print(out)\n except BluetoothctlError as e:\n print(e)\n return None", "title": "" }, { "docid": "ac000bb311d971c69e231a8f1f729c05", "score": "0.64885527", "text": "def scan(self, timeout=5.0):\n # Run the loop for 5 seconds\n scan_loop = asyncio.new_event_loop()\n scan_loop.run_until_complete(self.scan_run())\n\n return self.scan_devices", "title": "" }, { "docid": "20e7aacd62df2042f106947781fb0487", "score": "0.6398141", "text": "def spawn_scanning_thread(self):\n _thread.start_new_thread(HostDiscovery.scan,(self, None))", "title": "" }, { "docid": "da4e89520869603a4dfa2939b5492fcc", "score": "0.6365697", "text": "def start_scan(self):\n url = f'{self.path}api/interception/scan/start'\n LOGGER.info(f'Sending request to start system scan: {url}')\n response = self.session.get(url, verify=False)\n # Verifying operation status\n if response.status_code != 200:\n LOGGER.error(f'Did no receive status 200 as expected: {response.status_code}')\n return False\n json = response.json()\n if json['status'] == 'Success':\n LOGGER.info(f'System scan started successfully: {response.json()}')\n return True\n else:\n LOGGER.error(f'Failed to start System scan: {response.json()}')\n return False", "title": "" }, { "docid": "18c6ed030450d010cf566848c042d1c4", "score": "0.6363469", "text": "def Scan(self):\n LOG.info('-------------------------------------------------------')\n LOG.info('Scan (%s)', self.get_name())\n LOG.info('-------------------------------------------------------')\n\n # Check obsState is READY\n self._require_obs_state([ObsState.READY])\n\n # self.set_state(DevState.ON)\n\n # Set obsState to SCANNING\n self._set_obs_state(ObsState.SCANNING)\n\n LOG.info('-------------------------------------------------------')\n LOG.info('Scan Successful')\n LOG.info('-------------------------------------------------------')", "title": "" }, { "docid": "6affb09affc852990451e2dade297ccc", "score": "0.6276067", "text": "def start_scanning():\n while SCAN_RUNNING:\n # For some reason pyautogui.press() doesn't play nice with the EVE\n # client so the following are used instead\n pyautogui.keyDown(WRITE_KEY)\n pyautogui.keyUp(WRITE_KEY)\n time.sleep(SLEEP_TIME)", "title": "" }, { "docid": "ef6618cfd4448dbd4122d4cfcd0d8ad3", "score": "0.6239873", "text": "def perform_scan(self):\n self.scan_success = self.sem.acquire_frame(\n self.acq.base_dir + '\\\\' + self.file_name + '.tif')\n self.finish_trigger.signal.emit()", "title": "" }, { "docid": "d4a0dfb281ba113f29818102075706bd", "score": "0.6099379", "text": "def _Acquire(self):\r\n if not self.SD:\r\n self.OpenScanner()\r\n if not self.SD: return\r\n try:\r\n self.SD.SetCapability(twain.ICAP_YRESOLUTION, twain.TWTY_FIX32, 100.0) \r\n except:\r\n pass\r\n self.SD.RequestAcquire(1, 1) # 1,1 to show scanner user interface\r\n self.AcquirePending=True\r\n self.LogMessage(self.ProductName + ':' + 'Waiting for Scanner')", "title": "" }, { "docid": "2db725b85c1f86d54b0e3f05223d9f36", "score": "0.6054723", 
"text": "def start_scan_ui(self):\n try:\n LOGGER.info(f'Starting system scan...')\n self.driver.find_element_by_css_selector('[data-test=\"start_scan\"]').click()\n time.sleep(1)\n return True\n\n except Exception as error:\n LOGGER.error(f'Was unable to run system scan. Got error: {error}')\n return False", "title": "" }, { "docid": "4be79fe8bbf674f1278166827a95222d", "score": "0.59363073", "text": "def _launch_scans(self, session):\n if not self.scans:\n self._initialize_new_scans(session)\n\n scan_id_list = [scan.id for scan in self.scans if scan.status not in [status['FINISHED'], status['FAILED']]]\n\n if not scan_id_list:\n return\n\n # with thr_pool(max_workers=6, thread_name_prefix='-'.join(['req', str(self.id), 'scan'])) as executor:\n # future_to_id = {executor.submit(process_scan, id): id for id in scan_id_list}\n # for future in concurrent.futures.as_completed(future_to_id):\n # try:\n # future.result()\n # except:\n # logger.error('scan id {} failed'.format(future_to_id[future]))\n\n with thr_pool(max_workers=6, thread_name_prefix='-'.join(['req', str(self.id), 'scan'])) as executor:\n # future_to_id = {}\n # future_to_id[executor.submit(process_scan, id)] = id\n future_to_id = {executor.submit(process_scan, id): id for id in scan_id_list}\n # for future, id in [future_id for future_id in future_to_id.items() if future_id[0].done()]:\n done = [future_id for future_id in future_to_id.items() if future_id[0].done()]\n logger.info('threads done: {}'.format(done))\n for future, id in done:\n try:\n future.result()\n except BaseException:\n # maybe set request to failed\n print('sna raised exception !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')\n logger.exception('scan {} process raised exception'.format(id))\n else:\n logger.info('scan {} thread finished correctly'.format(id))\n finally:\n future_to_id.pop(future)\n session.commit()", "title": "" }, { "docid": "eda1424419fd86b75b8942bcdd7f80b6", "score": "0.5884435", "text": "def start(self):\n _LOGGER.info(\"Starting scanner thread on interface %s\", self._interface)\n self.hci_events.clear()\n self.thread = ScannerThread(\n hci_events=self.hci_events,\n interface=self._interface,\n )\n self.thread.start()", "title": "" }, { "docid": "6ed43c28fb3246c52e7dd0af168fc7a4", "score": "0.58226144", "text": "def scan():\n scanner = Scanner()\n devices = scanner.scan(10.0)\n return filter_keyble(devices)", "title": "" }, { "docid": "1d0a6e7e584fd4e6ecf18a7e566f44b1", "score": "0.5721437", "text": "def wait_for_scan(self):\n while self.driver.get_elements(self.barcode_element):\n sleep(1)", "title": "" }, { "docid": "761122e4b772b7a0d76dd0fcb3b5419e", "score": "0.566818", "text": "def scan(self):\n\n print(\"Start scanning t={}\".format(time.time()))\n self.scan_results = []\n found = False\n\n nets = self.nic.scan()\n for ssid, bssid, channel, rssi, authmode, hidden in nets:\n ssid = ssid.decode()\n bssid = ubinascii.hexlify(bssid).decode()\n network = (ssid, bssid, channel, rssi, authmode, hidden)\n print(\n 'Checking ssid \"{}\" bssid \"{}\" channel {} rssi {}'.format(\n ssid, bssid, channel, rssi\n )\n )\n if bssid in self.targets:\n print('Found ssid \"{}\" bssid \"{}\"'.format(ssid, bssid))\n found = True\n self.scan_results.append((True, network))\n else:\n self.scan_results.append((False, network))\n\n if found:\n if not self.flag_silent:\n self.alarm.value(0)\n self.flag_alarm = True\n else:\n self.alarm.value(1)\n self.flag_alarm = False\n\n print(\"Finished scanning t={}\".format(time.time()))", "title": "" }, { "docid": 
"d1460d8c31751c1bdd5f3543978a808f", "score": "0.56485915", "text": "def poll_scan(api_client, id =\"\"):\n status = \"\"\n\n logging.debug('Checking status of scan {}'.format(id))\n while status != 'finished':\n api_instance = swagger_client.ScanApi(api_client)\n\n try:\n api_response = api_instance.get_scan(id)\n status = api_response.status\n logging.info('Scan {} is in {} status'.format(id, status))\n time.sleep(5)\n except ApiException as e:\n print(\"Exception when calling ScanApi->get_scan: %s\\n\" % e)\n \n if api_response.assets == 0:\n logging.error(\"No assets were scanned in scan {}\".format(api_response.id))\n sys.exit()\n\n return api_response", "title": "" }, { "docid": "721633765466f3f96764e0975bdf187b", "score": "0.5631903", "text": "def __startScan(self) -> None:\n failed = True\n\n try:\n self.__setStatus(\"STARTING\", time.time() * 1000, None)\n self.__sf.status(f\"Scan [{self.__scanId}] for '{self.__target.targetValue}' initiated.\")\n\n self.eventQueue = queue.Queue()\n\n self.__sharedThreadPool.start()\n\n # moduleList = list of modules the user wants to run\n self.__sf.debug(f\"Loading {len(self.__moduleList)} modules ...\")\n for modName in self.__moduleList:\n if not modName:\n continue\n\n # Module may have been renamed or removed\n if modName not in self.__config['__modules__']:\n self.__sf.error(f\"Failed to load module: {modName}\")\n continue\n\n try:\n module = __import__('modules.' + modName, globals(), locals(), [modName])\n except ImportError:\n self.__sf.error(f\"Failed to load module: {modName}\")\n continue\n\n try:\n mod = getattr(module, modName)()\n mod.__name__ = modName\n except Exception:\n self.__sf.error(f\"Module {modName} initialization failed\", exc_info=True)\n continue\n\n # Set up the module options, scan ID, database handle and listeners\n try:\n # Configuration is a combined global config with module-specific options\n self.__modconfig[modName] = deepcopy(self.__config['__modules__'][modName]['opts'])\n for opt in list(self.__config.keys()):\n self.__modconfig[modName][opt] = deepcopy(self.__config[opt])\n\n # clear any listener relationships from the past\n mod.clearListeners()\n mod.setScanId(self.__scanId)\n mod.setSharedThreadPool(self.__sharedThreadPool)\n mod.setDbh(self.__dbh)\n mod.setup(self.__sf, self.__modconfig[modName])\n except Exception:\n self.__sf.error(f\"Module {modName} initialization failed\", exc_info=True)\n mod.errorState = True\n continue\n\n # Override the module's local socket module to be the SOCKS one.\n if self.__config['_socks1type'] != '':\n try:\n mod._updateSocket(socket)\n except Exception as e:\n self.__sf.error(f\"Module {modName} socket setup failed: {e}\")\n continue\n\n # Set up event output filters if requested\n if self.__config['__outputfilter']:\n try:\n mod.setOutputFilter(self.__config['__outputfilter'])\n except Exception as e:\n self.__sf.error(f\"Module {modName} output filter setup failed: {e}\")\n continue\n\n # Give modules a chance to 'enrich' the original target with aliases of that target.\n try:\n newTarget = mod.enrichTarget(self.__target)\n if newTarget is not None:\n self.__target = newTarget\n except Exception as e:\n self.__sf.error(f\"Module {modName} target enrichment failed: {e}\")\n continue\n\n # Register the target with the module\n try:\n mod.setTarget(self.__target)\n except Exception as e:\n self.__sf.error(f\"Module {modName} failed to set target '{self.__target}': {e}\")\n continue\n\n # Set up the outgoing event queue\n try:\n mod.outgoingEventQueue = 
self.eventQueue\n mod.incomingEventQueue = queue.Queue()\n except Exception as e:\n self.__sf.error(f\"Module {modName} event queue setup failed: {e}\")\n continue\n\n self.__moduleInstances[modName] = mod\n self.__sf.status(f\"{modName} module loaded.\")\n\n self.__sf.debug(f\"Scan [{self.__scanId}] loaded {len(self.__moduleInstances)} modules.\")\n\n if not self.__moduleInstances:\n self.__setStatus(\"ERROR-FAILED\", None, time.time() * 1000)\n self.__dbh.close()\n return\n\n # sort modules by priority\n self.__moduleInstances = OrderedDict(sorted(self.__moduleInstances.items(), key=lambda m: m[-1]._priority))\n\n # Now we are ready to roll..\n self.__setStatus(\"RUNNING\")\n\n # Create a pseudo module for the root event to originate from\n psMod = SpiderFootPlugin()\n psMod.__name__ = \"SpiderFoot UI\"\n psMod.setTarget(self.__target)\n psMod.setDbh(self.__dbh)\n psMod.clearListeners()\n psMod.outgoingEventQueue = self.eventQueue\n psMod.incomingEventQueue = queue.Queue()\n\n # Create the \"ROOT\" event which un-triggered modules will link events to\n rootEvent = SpiderFootEvent(\"ROOT\", self.__targetValue, \"\", None)\n psMod.notifyListeners(rootEvent)\n firstEvent = SpiderFootEvent(self.__targetType, self.__targetValue,\n \"SpiderFoot UI\", rootEvent)\n psMod.notifyListeners(firstEvent)\n\n # Special case.. check if an INTERNET_NAME is also a domain\n if self.__targetType == 'INTERNET_NAME' and self.__sf.isDomain(self.__targetValue, self.__config['_internettlds']):\n firstEvent = SpiderFootEvent('DOMAIN_NAME', self.__targetValue, \"SpiderFoot UI\", rootEvent)\n psMod.notifyListeners(firstEvent)\n\n # If in interactive mode, loop through this shared global variable\n # waiting for inputs, and process them until my status is set to\n # FINISHED.\n\n # Check in case the user requested to stop the scan between modules\n # initializing\n scanstatus = self.__dbh.scanInstanceGet(self.__scanId)\n if scanstatus and scanstatus[5] == \"ABORT-REQUESTED\":\n raise AssertionError(\"ABORT-REQUESTED\")\n\n # start threads\n self.waitForThreads()\n failed = False\n\n except (KeyboardInterrupt, AssertionError):\n self.__sf.status(f\"Scan [{self.__scanId}] aborted.\")\n self.__setStatus(\"ABORTED\", None, time.time() * 1000)\n\n except BaseException as e:\n self.__sf.error(\n f\"Unhandled exception ({e.__class__.__name__}) encountered during scan. 
Please report this as a bug\",\n exc_info=True\n )\n self.__sf.status(f\"Scan [{self.__scanId}] failed: {e}\")\n self.__setStatus(\"ERROR-FAILED\", None, time.time() * 1000)\n\n finally:\n if not failed:\n self.__setStatus(\"FINISHED\", None, time.time() * 1000)\n self.runCorrelations()\n self.__sf.status(f\"Scan [{self.__scanId}] completed.\")\n self.__dbh.close()", "title": "" }, { "docid": "7adce6b6755108fce9a54ece81308bdd", "score": "0.55974334", "text": "def scan(\n self,\n completed: queue.Queue,\n path: str,\n hostname: str,\n machine: Machine,\n service: Service,\n silent=False,\n ) -> None:\n return", "title": "" }, { "docid": "4201a5bd7b9bf429981571f7d600a87b", "score": "0.5593043", "text": "def DoScans(self):\n now = gettime()\n if not self.did_initial_scan:\n log.Log('startup on %s (initial_scans=%d).', self.vdevname,\n opt.initial_scans)\n self._ReadArpTable()\n RunProc(callback=self._PhyResults,\n args=['iw', 'phy', self.phyname, 'info'])\n RunProc(callback=self._DevResults,\n args=['iw', 'dev', self.vdevname, 'info'])\n # channel scan more than once in case we miss hearing a beacon\n for _ in range(opt.initial_scans):\n if self.flags & wgdata.ApFlags.Can2G:\n band = '2.4'\n elif self.flags & wgdata.ApFlags.Can5G:\n band = '5'\n\n RunProc(\n callback=self._ScanResults,\n args=['wifi', 'scan', '-b', band, '--scan-ap-force',\n '--scan-passive'])\n self.UpdateStationInfo()\n self.next_scan_time = now\n self.did_initial_scan = True\n elif not self.allowed_freqs:\n self.Log('%s: no allowed frequencies.', self.vdevname)\n elif self.next_scan_time and now > self.next_scan_time:\n self.scan_idx = (self.scan_idx + 1) % len(self.allowed_freqs)\n scan_freq = list(sorted(self.allowed_freqs))[self.scan_idx]\n self.Log('scanning %d MHz (%d/%d)', scan_freq, self.scan_idx + 1,\n len(self.allowed_freqs))\n RunProc(callback=self._ScanResults,\n args=['wifi', 'scan', '-b', BandForFreq(scan_freq),\n '--scan-freq', str(scan_freq), '--scan-ap-force',\n '--scan-passive'])\n chan_interval = opt.scan_interval / len(self.allowed_freqs)\n # Randomly fiddle with the timing to avoid permanent alignment with\n # other nodes also doing scans. 
If we're perfectly aligned with\n # another node, they might never see us in their periodic scan.\n chan_interval = random.uniform(chan_interval * 0.5, chan_interval * 1.5)\n self.next_scan_time += chan_interval\n if not self.scan_idx:\n log.WriteEventFile('%s.scanned' % self.vdevname)\n if not opt.scan_interval:\n self.next_scan_time = None", "title": "" }, { "docid": "557fd8e9534d6476e15ad1abc04c098f", "score": "0.5581169", "text": "async def async_wait_scancompleted(self):\n cnts = []\n while True:\n await self.connection.async_request(_CMD_FINDMSGDEFS)\n cnt = sum([1 async for line in self.connection.async_read()])\n cnts.append(cnt)\n if len(cnts) < self.scans or not all(cnt == cnts[-1] for cnt in cnts[-self.scans : -1]):\n await asyncio.sleep(self.scaninterval)\n else: # pragma: no cover\n # not properly collected during coverage analysis\n break", "title": "" }, { "docid": "813d8e0ac0905f25e51416d78cade015", "score": "0.55727017", "text": "def scan(self):\n return", "title": "" }, { "docid": "53944268808638ace56ffd88ea7adc4f", "score": "0.55607027", "text": "async def scan(self, timeout=1):\n adapters = await self.loop.run_in_executor(None, ifaddr.get_adapters)\n ips = [ip.ip for adapter in ifaddr.get_adapters() for ip in adapter.ips if ip.is_IPv4]\n\n if not ips:\n return []\n\n tasks = []\n discoveries = []\n for ip in ips:\n manager = ScanManager(ip)\n lifx_discovery = LifxDiscovery(self.loop, manager)\n discoveries.append(lifx_discovery)\n lifx_discovery.start(listen_ip=ip)\n tasks.append(self.loop.create_task(manager.lifx_ip()))\n\n (done, pending) = await aio.wait(tasks, timeout=timeout)\n\n for discovery in discoveries:\n discovery.cleanup()\n\n for task in pending:\n task.cancel()\n\n return [task.result() for task in done]", "title": "" }, { "docid": "661dfe05f9606f7734a313020edbb2fd", "score": "0.5509679", "text": "def wait_for_start(self, timeout=None):\n return self._running.wait(timeout)", "title": "" }, { "docid": "2cbc6c29e28460e547c7a1a49e673a73", "score": "0.54912955", "text": "def scan(self, scan_code: str):\n log.info('emit the scan event')\n scan_event_payload = {\n 'qrcode': scan_code,\n 'status': 3\n }\n response = MockerResponse(\n type=int(EventType.EVENT_TYPE_SCAN),\n payload=json.dumps(scan_event_payload)\n )\n self.emit('stream', response)", "title": "" }, { "docid": "8f9ba33680db418f3197b13ba07cb73b", "score": "0.5469953", "text": "def scan(browser, token, node=None, group=None, environment=None, wait=False, label=\"\"):\n job = {}\n found_obj = False\n if node:\n nodes = getNodes(browser, token)\n for n in nodes:\n if n[\"name\"].lower() == node:\n found_obj = True\n status, data = APICall(browser, token, \"POST\", \"/api/v2/nodes/{}/start_scan.json\".format(n[\"id\"]), params={\"label\": label})\n result = json.loads(data)\n job[\"id\"] = result[\"job_id\"]\n elif group:\n pass\n elif environment:\n pass\n else:\n raise AttributeError(\"One of node, group, or environment must be provided.\")\n\n if found_obj:\n if \"id\" in job:\n job[\"status\"] = 0\n while job[\"status\"] in [jobStatus[\"Pending\"], jobStatus[\"Processing\"]]:\n time.sleep(5)\n job = getJob(browser, token, job[\"id\"])\n else:\n raise AttributeError(\"Job ID was not found from the scan job.\")\n else:\n raise AttributeError(\"Object (node, group, or environment) was not found to start the scan.\")\n return job", "title": "" }, { "docid": "1ee624d185db6b7edbeea10cb08a14cf", "score": "0.5468336", "text": "def run_scan(self):\n\n if self.preview is not None:\n preview_was_running = 
self.preview.is_running\n self.preview.stop()\n\n if self.sc is not None:\n self.sc.run_scan()\n else:\n print(f\"Asked to run a scan but there is no active session to run.\")\n\n if self.preview is not None and preview_was_running:\n self.preview.start()", "title": "" }, { "docid": "8021f0bfb396caa3912782ac208676b1", "score": "0.5462548", "text": "def scan(self):\n setdefaulttimeout(self.timeout / 10.0) # 10%\n for server in self.srv:\n while active_count() > self.threads * 16:\n pass # do not exceed number of threads\n if self.switch: # scan them\n # now call the method in a separate thread\n Thread(target=self.scan_server, args=[server.strip()]).start()\n else: # or skip the scan\n self.good.append(server.strip())\n while active_count() > 1:\n pass # join all", "title": "" }, { "docid": "1fc577f6c5fb1f9150e6d703612caae7", "score": "0.5439658", "text": "async def scan_worker(self):\n log.debug(\"scan_worker started\")\n while True:\n with self.db.ensure_transaction():\n for i in range(50000):\n if self.scan_queue.empty():\n self.queue_unscanned()\n if self.scan_queue.empty():\n break\n sr = await self.scan_queue.get()\n if D_QUEUE: log.debug('Popped %r', sr)\n self.process_sr(sr)\n self.total_scanned += 1\n if self.total_scanned % 10000 == 0:\n log.info(\"Scanned %d items\", self.total_scanned)\n # If the queue is long (e.g. during a full rescan), we need to give\n # the event loop a chance to run.\n else:\n log.debug(\"Finished complete scan batch, clearing cache\")\n #self.db.clear_cache()\n if self.scan_queue.empty():\n break\n await asyncio.sleep(0) # https://github.com/python/asyncio/issues/284\n self.scan_task = None", "title": "" }, { "docid": "ad78091ebe498fa0520de5adefab8f95", "score": "0.54358596", "text": "async def scan(self, wait_for: int = 0, bcast_ifaces=None):\n self.scan_count += 1\n _LOGGER.info(\"CALLED SCAN %d TIMES\", self.scan_count)\n\n mock_infos = [x.device_info for x in self.mock_devices]\n\n new_infos = []\n updated_infos = []\n for info in mock_infos:\n if not [i for i in self.last_mock_infos if info.mac == i.mac]:\n new_infos.append(info)\n else:\n last_info = next(i for i in self.last_mock_infos if info.mac == i.mac)\n if info.ip != last_info.ip:\n updated_infos.append(info)\n\n self.last_mock_infos = mock_infos\n for listener in self._listeners:\n [await listener.device_found(x) for x in new_infos]\n [await listener.device_update(x) for x in updated_infos]\n\n if wait_for:\n await asyncio.sleep(wait_for)\n\n return new_infos", "title": "" }, { "docid": "bc539ada5fb4724e05baf0eb5c9fd14c", "score": "0.5417794", "text": "def scan(self, **kwargs):", "title": "" }, { "docid": "90dd13b6fba07931bbdaafafdf26f337", "score": "0.5409409", "text": "def scan():\n\tglobal address, devices\n\toutput_to_file(\"Scanning for devices\")\n\t# devices = ble.getCrownstonesByScanning(2)\n\tdevice = ble.getNearestCrownstone()\n\taddress = device['address']\n\tgreen('Found device\"')\n\toutput_to_file(\"Found device: \" + address)", "title": "" }, { "docid": "7a61737bb3f3c59bd909a75d63e0969f", "score": "0.539436", "text": "def scan(self, callback=None):\n def scan_finished():\n \"\"\"\n Callback when scan is finished\n \"\"\"\n time.sleep(3)\n logging.info('Scan finished')\n self._nb_of_modules_loaded = 0\n def module_loaded():\n self._nb_of_modules_loaded += 1\n if self._nb_of_modules_loaded >= len(self._modules):\n callback()\n for module in self._modules:\n self._modules[module].load(module_loaded)\n for address in range(0, 256):\n message = 
velbus.ModuleTypeRequestMessage(address)\n if address == 255:\n self.send(message, scan_finished)\n else:\n self.send(message)", "title": "" }, { "docid": "118956b5bebb429c9ee97e37534eff37", "score": "0.5356555", "text": "def openScanner(self, table, tscan):\n self.send_openScanner(table, tscan)\n return self.recv_openScanner()", "title": "" }, { "docid": "a98c34eb8dd1a51944e4f84f2441f77c", "score": "0.5326774", "text": "def startScannerThread():\n global modelScannerThread\n modelScannerThread = ModelScannerThread()\n modelScannerThread.start()", "title": "" }, { "docid": "850fdafafc3f67289e4b9a5a8b7a771d", "score": "0.53246695", "text": "def test_run_active_scan(self):\n def status_result():\n \"\"\"Return value of the status property.\"\"\"\n if status.call_count > 2:\n return '100'\n return '50'\n\n class_mock = MagicMock()\n class_mock.scan.return_value = '1'\n status = Mock(side_effect=status_result)\n class_mock.status = status\n self.zap_helper.zap.ascan = class_mock\n\n self.zap_helper.run_active_scan('http://localhost')", "title": "" }, { "docid": "07c773e7963acb7d9bf5facdff4513f9", "score": "0.52861387", "text": "def run_scan_service(self):\n self.rc = 1 # code de retour de l'application\n\n#--------------lancement du scan \n for i in range(self.ns.nb_scan) :\n \n self.nmb_tests = str(i+1)\n self.logger.info(\"-----------Scan %s----------------\\n\"%self.nmb_tests)\n #----------------verification du resultat de lancement du scan\n if self.scan_server.run() == 1 :\n self.fonc_message = self.scan_server.error_message.get_error_message()\n return self.rc\n #initialisation du fichier Nexus du resultat du scan\n self.logger.info(\"-----------Check Nexus file of the Scan result----------------\")\n \n nexus_file = self.NexusFile(self.scan_server.get_current_nexus_file(),self.logger)\n self.logger.info(\"-----------Scan %s----------------\\n\"%self.nmb_tests)\n \n #-------------initialisation de la liste des données à vérifier dans le fichier Nexus\n self.list_data = [\"/%s/scan_data/actuator_1_1\"%self.scan_server.get_data_root_name(),\n \"/%s/scan_data/data_01\"%self.scan_server.get_data_root_name(),\n \"/%s/scan_data/trajectory_1_1\"%self.scan_server.get_data_root_name()] \n #verification du contenu du fichier\n if not nexus_file.check_data(self.list_data):\n self.fonc_message = nexus_file.error_message.get_error_message() \n return self.rc\n # l'application est finie avec succès\n self.rc = 0\n \n return self.rc", "title": "" }, { "docid": "0491f0531f6b7118f7d298724fd45cea", "score": "0.5253621", "text": "def scan(self, scan_prefix='', timeout_s=5, revisit=0):\n # Parse inputs\n if scan_prefix == '':\n scan_prefix = self.scan_prefix\n if timeout_s == 0:\n timeout_s = self.timeout_s\n# scan_file = Path(f\"{scan_prefix}_{datetime.now():%Y%m%dT%H%M%S}.csv\")\n scan_file = Path(f\"{scan_prefix}.csv\")\n # Start scanning\n# self.__logger.info(f\"Starting beacon scanner with timeout {timeout_s}.\")\n## GPIO.output(6,GPIO.HIGH);\n run = True \n timestamps = []\n scans = []\n scan_count = 0\n start_time = time.monotonic()\n while run:\n scan_count += 1\n# self.__logger.debug(f\"Performing scan #{scan_count} at revisit \"\n# f\"{self.revisit}.\")\n timestamps.append(datetime.now())\n scans.append(self.__service.scan(self.revisit))\n # Stop scanning based on timeout \n if timeout_s is not None:\n if (time.monotonic()-start_time) > timeout_s:\n# self.__logger.debug(\"Beacon scanner timed out.\")\n run = False\n# self.__logger.info(\"Stopping beacon scanner.\")\n## GPIO.output(6,GPIO.LOW);\n # 
Cleanup\n # Process, filter, and output received scans\n advertisements = self.process_scans(scans, timestamps)\n advertisements = self.filter_advertisements(advertisements)\n\n # if file does not exist write header\n if not os.path.isfile(scan_file):\n advertisements.to_csv(scan_file, index=False, index_label=False)\n else:\n advertisements.to_csv(scan_file, mode='a', index=False, index_label=False, header=False);\n return advertisements", "title": "" }, { "docid": "5b3c0479fcf436524e086da842526cbf", "score": "0.5251714", "text": "def _get_scan(self):\n rospy.Subscriber(self._laserTopic,\n sensor_msgs.msg.LaserScan, self._laser_callback)\n\n rospy.Subscriber(self._objectTopic, Objects, self._object_callback)\n rospy.spin()", "title": "" }, { "docid": "04c9535809f007bcd8856e851f5c0808", "score": "0.52431834", "text": "def do_scan(self, _line):\n # Initial mock-up of scan\n self.handle_property(\"15\", SPINEL.PROP_MAC_SCAN_MASK)\n self.handle_property(\"4\", SPINEL.PROP_MAC_SCAN_PERIOD, 'H')\n self.handle_property(\"1\", SPINEL.PROP_MAC_SCAN_STATE)\n time.sleep(5)\n self.handle_property(\"\", SPINEL.PROP_MAC_SCAN_BEACON, 'U')", "title": "" }, { "docid": "967ad702dfe04a595aa31d046e7cd369", "score": "0.52196664", "text": "def scan(self):\n\n # TODO QCArchive/Fractal search; don't do a calc that has been done!\n\n # TODO\n # if the molecule has multiple scans to do they should all start at the same time as this is slow\n # We must also make sure that we don't exceed the core limit when we do this!\n # e.g. user gives 6 cores for QM and we run two drives that takes 12 cores!\n\n for scan in self.molecule.scan_order:\n try:\n os.mkdir(f'SCAN_{scan[0]}_{scan[1]}')\n except FileExistsError:\n # If the folder has only been used to test the torsions then use that folder\n if os.listdir(f'SCAN_{scan[0]}_{scan[1]}') == ['testing_torsion']:\n pass\n # However, if there is a full run in the folder, back the folder up and start again\n else:\n print(f'SCAN_{scan[0]}_{scan[1]} folder present backing up folder to SCAN_{scan[0]}_{scan[1]}_tmp')\n # Remove old backups\n try:\n rmtree(f'SCAN_{scan[0]}_{scan[1]}_tmp')\n except FileNotFoundError:\n pass\n os.system(f'mv SCAN_{scan[0]}_{scan[1]} SCAN_{scan[0]}_{scan[1]}_tmp')\n os.mkdir(f'SCAN_{scan[0]}_{scan[1]}')\n pass\n os.chdir(f'SCAN_{scan[0]}_{scan[1]}')\n os.mkdir('QM_torsiondrive')\n os.chdir('QM_torsiondrive')\n\n # Start the torsion drive if psi4 else run native separate optimisations using g09\n self.start_torsiondrive(scan)\n # Get the scan results and load into the molecule\n os.chdir(self.home)", "title": "" }, { "docid": "a5cbc655a81c870ae01ff1f90e96f33e", "score": "0.5169693", "text": "def _start(self):\n\n try:\n if \"oid\" in self.conf:\n return self._get_result()\n else:\n return self._get_result_set()\n except:\n return errors.Failure()", "title": "" }, { "docid": "0b610d004859c681613b9bfbd4cc2cc5", "score": "0.5151566", "text": "def scan_host(api_client, site_id, asset):\n\n # TODO - check for a running scan\n api_instance = swagger_client.ScanApi(api_client)\n body = swagger_client.AdhocScan() # AdhocScan | The details for the scan. 
(optional)\n\n try:\n api_response = api_instance.start_scan(site_id, body=body)\n logging.debug('Response: {}'.format(api_response))\n except ApiException as e:\n logging.error(\"Exception when calling ScanApi->start_scan: %s\\n\" % e)\n\n scan_id = api_response.id\n poll_scan(api_client, scan_id)\n\n return api_response", "title": "" }, { "docid": "a154f7cf738aae3654d3cec92179da32", "score": "0.5146481", "text": "def scan(self):\r\n\r\n self.__connectionDecorator.scan(self, [self._controller, self._controller.mainWindow, self])", "title": "" }, { "docid": "8fad9f4813a884b364f6e865feea2cf6", "score": "0.51250666", "text": "def on_press(key):\n global SCAN_RUNNING\n # Change 'f7' to whichever key you want to start\n if key == keyboard.Key.f7 and not SCAN_RUNNING:\n SCAN_RUNNING = True\n t = threading.Thread(target=start_scanning)\n print('\\nGScan running')\n t.start()\n # Change 'f8' to whichever key you want to stop\n elif key == keyboard.Key.f8:\n print('\\nGScan paused')\n SCAN_RUNNING = False", "title": "" }, { "docid": "cd8687d82180c86aa5812583ef222d27", "score": "0.51245385", "text": "def portscanloop(self):\n\t\tprintFunc = self.printJSONScan if self.conf.JSON else self.printScan\n\t\ttry:\n\t\t\twith ports.Scanner(self.host) as scanner:\n\t\t\t\twhile True:\n\t\t\t\t\tprintFunc(scanner.scan())\n\t\t\t\t\ttime.sleep(self.conf.SCAN / 1000)\n\t\texcept KeyboardInterrupt:\n\t\t\tpass", "title": "" }, { "docid": "1932be65c18e7cb54983b53906871904", "score": "0.5121655", "text": "def scan():\n click.echo('Scanning network for devices')\n network_ping_results = os.popen('arp -a').read()\n ip_list = [parse_ip_from_output(i) for i in network_ping_results.split('?')]\n with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:\n {executor.submit(query_ip_address_for_device_info, ip): ip for ip in ip_list}\n\n if __name__ == '__main__':\n pprint.pprint([device for device in device_pool if device is not None])\n else:\n return [device for device in device_pool if device is not None]", "title": "" }, { "docid": "07738c87e03a9f68478f3e21a57ec103", "score": "0.5121139", "text": "def searching(self) -> None:\n if self.target_estimator.is_ready():\n # print(f\"searching -> tracking {self.vision.get_vision_data()}\")\n self.time_target_lost = None\n self.next_state(\"tracking\")\n else:\n # Scan starting straight downrange. TODO: remove this if the above\n # seems worthwhile\n time_now = time.monotonic()\n # Start a scan only if it's been a minimum time since we lost the target\n if (\n self.time_target_lost is None\n or (time_now - self.time_target_lost) > self.TARGET_LOST_TO_SCAN\n ):\n # Start a scan only if it's been a minimum time since we started\n # a scan. 
This allows the given scan to grow enough to find the\n # target so that we don't just start the scan over every cycle.\n if (\n self.time_of_last_scan is None\n or (time_now - self.time_of_last_scan) > self.MIN_SCAN_PERIOD\n ):\n self.turret.scan(-self.chassis.get_heading())\n self.time_of_last_scan = time_now", "title": "" }, { "docid": "2f6f005a8ccf22e40ae82c07a097f164", "score": "0.51191515", "text": "async def test_scanner_by_source(hass: HomeAssistant, enable_bluetooth: None) -> None:\n\n hci2_scanner = FakeScanner(hass, \"hci2\", \"hci2\")\n cancel_hci2 = bluetooth.async_register_scanner(hass, hci2_scanner, True)\n\n assert async_scanner_by_source(hass, \"hci2\") is hci2_scanner\n cancel_hci2()\n assert async_scanner_by_source(hass, \"hci2\") is None", "title": "" }, { "docid": "18673d1e33018bac263be7688782f165", "score": "0.5113279", "text": "async def start(self):\n await self.wait()\n\n return self.program", "title": "" }, { "docid": "e6eb94a2677d3b8a2252edf0de4c0cf5", "score": "0.5097238", "text": "def scan(self, *args, **kwargs):\n raise NotImplementedError()", "title": "" }, { "docid": "1f0a5dd683bf65d28db35dd223965781", "score": "0.5093909", "text": "def do_scan(self, line):\n if line:\n num_ids = parse_int(line, \"number of ids\")\n else:\n num_ids = 32\n self.scan_idx = 0\n if num_ids < 100:\n servo_id_found = self.bus.scan(0, num_ids,\n self.cb_scan_dev_found,\n self.cb_scan_dev_missing)\n sensor_id_found = self.bus.scan(100, num_ids,\n self.cb_scan_dev_found,\n self.cb_scan_dev_missing)\n if not servo_id_found and not sensor_id_found:\n self.log.info(\"No devices found\")\n else:\n if not self.bus.scan(0, num_ids,\n self.cb_scan_dev_found,\n self.cb_scan_dev_missing):\n self.log.info(\"No devices found\")", "title": "" }, { "docid": "adcd2fa015a448293be0caefc5bcaccf", "score": "0.5091508", "text": "def on_scan_received(self) -> Event[Scanner, ScanReport]:\r\n return self._on_scan_received", "title": "" }, { "docid": "cba06b6be2c3b0cacf1f073481ee1426", "score": "0.5068169", "text": "def scan_callback(self,scan):\n self.scan = scan", "title": "" }, { "docid": "abb9174cd0639e02d894be660e0ad251", "score": "0.5056701", "text": "def do_scanning (self, event) :\n\t\t# Extracting which button was clicked\n\t\ttry :\n\t\t\tbutton = event.GetEventObject()\n\t\t\t# Mouse double clicking stops scanning\n\t\t\tif event.GetEventType() == wx.wxEVT_LEFT_DCLICK : button.SetLabel (button.__stop_label__)\n\t\texcept AttributeError : button = event\n\n\t\tif button.GetLabel() == button.__start_label__ :\n\t\t\t\n\t\t\tself.StopAllJobs ()\n\t\t\t\n\t\t\tif button.command in [CMD_SCAN, CMD_ADAPTIVE_SCAN, CMD_BOUNDARY_SCAN, CMD_BLEACHING_STUDY] :\n\t\t\t\t# Prepare for scanning the sample\n\t\t\t\n\t\t\t\t# Initiate Pico Harp\n\t\t\t\tself.input_pico_harp_queue.put( (CMD_INITIATE_PICO_HARP, self.get_scan_parametrs()) ) \n\t\t\t\tresult = self.output_pico_harp_queue.get ()\n\t\t\t\t# Exit, if initialization failed\n\t\t\t\tif result == RETURN_FAIL : return\n\t\t\t\t# otherwise, save the resolution\n\t\t\t\tself.PicoHarp_Resolution.SetValue (result)\n\t\t\telif button.command in [CMD_TEST_SCANNING, CMD_TAKE_SCANNING_VIDEO] :\n\t\t\t\t# Turn on camera\n\t\t\t\tself.use_ccd_camera ()\n\t\t\t\n\t\t\t# Getting all scanning parameters \n\t\t\tScanParameters = self.get_scan_parametrs()\n\t\t\t\n\t\t\t# Create list where the disabled objects will be saved\n\t\t\tself.__to_be_enabled__ = []\n\n\t\t\t# Disable all controls so that no parameters can be changed interactively\n\t\t\tfor key in dir(self) 
:\n\t\t\t\tobj = getattr (self, key)\n\t\t\t\tif isinstance(obj, (wx.SpinCtrl, wx.TextCtrl, wx.RadioButton)) and obj.IsEnabled() : \n\t\t\t\t\tobj.Disable (); self.__to_be_enabled__.append (obj) \n\t\t\t\n\t\t\t# Event indicating that scanning is continuing\n\t\t\tself.scannig_event = multiprocessing.Event()\n\t\t\tself.scannig_event.set ()\n\t\t\t\n\t\t\t# Event for pausing the scanning\n\t\t\tself.pause_scannig_event = multiprocessing.Event()\n\t\t\tself.pause_scannig_event.clear()\n\n\t\t\tself.scan_sample_process = multiprocessing.Process(target=scan_sample, \\\n\t\t\t\targs=(self.scannig_event, self.pause_scannig_event, \\\n\t\t\t\t\tself.input_pico_harp_queue, self.output_pico_harp_queue, self.histogram_buffer, \\\n\t\t\t\t\tself.write_serial_port_queue, self.read_serial_port_queue, \n\t\t\t\t\tself.input_camera_queue, self.output_camera_queue, self.camera_img_buffer, \n\t\t\t\t\tself.input_shutter_queue, self.output_shutter_queue, ScanParameters, button.command) )\n\t\t\t\n\t\t\tself.scan_sample_process.start ()\n\t\t\t\n\t\t\t# Start timer to monitor weather scanning is over\n\t\t\tTIMER_ID = wx.NewId()\n\t\t\tself.scanning_timer = wx.Timer (self, TIMER_ID)\n\t\t\tself.scanning_timer.Start (2000) # check every 2 seconds\n\t\t\t\n\t\t\tdef check_weather_scanning_finished (event) : \n\t\t\t\tif not self.scannig_event.is_set () : \n\t\t\t\t\tbutton.SetLabel (button.__stop_label__); self.do_scanning (button)\n\t\t\t\n\t\t\twx.EVT_TIMER (self, TIMER_ID, check_weather_scanning_finished)\n\t\t\t\n\t\t\t# Changing the button's label \n\t\t\tbutton.SetLabel (button.__pause_label__)\n\n\t\telif button.GetLabel() == button.__pause_label__ :\n\t\t# Pause scanning the sample\n\t\t\tself.pause_scannig_event.set()\n\t\t\tbutton.SetLabel (button.__resume_label__)\n\n\t\telif button.GetLabel() == button.__resume_label__ :\n\t\t# Resume scanning \n\t\t\tself.pause_scannig_event.clear()\n\t\t\tbutton.SetLabel (button.__pause_label__)\n\n\t\telif button.GetLabel() == button.__stop_label__ : \n\t\t# Stop scanning the sample\n\t\t\tself.scanning_timer.Stop ()\n\t\t\tself.scannig_event.clear ()\n\t\t\tself.pause_scannig_event.clear ()\n\t\t\tself.scan_sample_process.join ()\n\t\t\tdel self.scannig_event\n\t\t\tdel self.pause_scannig_event\n\t\t\tdel self.scan_sample_process\n\t\t\tdel self.scanning_timer\n\t\t\n\t\t\t# Enable all previously disabled controls except few controls are always disabled\n\t\t\tfor obj in self.__to_be_enabled__ : obj.Enable ()\n\t\t\tdel self.__to_be_enabled__\n\n\t\t\tbutton.SetLabel (button.__start_label__)\n\n\t\telse : raise ValueError (\"Unrecognized button label\")", "title": "" }, { "docid": "dfe2577457d6a7dc0c9ec728b79de7c8", "score": "0.50336295", "text": "async def discover(\n self, retry: int = DEFAULT_RETRY_COUNT, scan_timeout: int = DEFAULT_SCAN_TIMEOUT\n ) -> dict:\n\n devices = None\n devices = bleak.BleakScanner(\n # TODO: Find new UUIDs to filter on. For example, see\n # https://github.com/OpenWonderLabs/SwitchBotAPI-BLE/blob/4ad138bb09f0fbbfa41b152ca327a78c1d0b6ba9/devicetypes/meter.md\n adapter=self._interface,\n )\n devices.register_detection_callback(self.detection_callback)\n\n async with CONNECT_LOCK:\n await devices.start()\n await asyncio.sleep(scan_timeout)\n await devices.stop()\n\n if devices is None:\n if retry < 1:\n _LOGGER.error(\n \"Scanning for Switchbot devices failed. Stop trying\", exc_info=True\n )\n return self._adv_data\n\n _LOGGER.warning(\n \"Error scanning for Switchbot devices. 
Retrying (remaining: %d)\",\n retry,\n )\n await asyncio.sleep(DEFAULT_RETRY_TIMEOUT)\n return await self.discover(retry - 1, scan_timeout)\n\n return self._adv_data", "title": "" }, { "docid": "68c63484f78cf7c19e9f48f814184df3", "score": "0.50268036", "text": "def _do_background_scan(\n self,\n completed: queue.Queue,\n path: str,\n hostname: str,\n machine: Machine,\n service: Service,\n ) -> None:\n self.scan(path, hostname, machine, service, silent=True)\n completed.put((self, service))", "title": "" }, { "docid": "0c8ad3b446e5c29c57483558cb8489f9", "score": "0.49977133", "text": "def run_scans(self,scanlist):\r\n\t\tpresent_scans=self.present_scans\r\n\t\tmatched_scans=self.match(present_scans,scanlist)\r\n\t\t#print 'present_scans', present_scans\r\n\t\t#print 'matched_scans', matched_scans\r\n\t\tfor scan in matched_scans:\r\n\t\t\tprint 'running ',scan\r\n\t\t\tsc=ScanRunCommand(scan)\r\n\t\t\tsc.run()\r\n\t\t\tprint scan,' finished'\r\n\t\t#for scan in scanlist:\r\n\t\t# print 'running scan', scan\r", "title": "" }, { "docid": "3ceb2def133f209261538768717ba872", "score": "0.49691147", "text": "async def acquire(self) -> None:", "title": "" }, { "docid": "4496d01afa11d746bf4ce7c1e6a27e93", "score": "0.4967835", "text": "def test_zap_active_scan(self):\n scan_id = 1\n zap = Mock()\n zap.ascan.scan.return_value = scan_id\n zap.ascan.status.side_effect = [\"0\", \"50\", \"100\"]\n target = \"http://target.example.com\"\n scan_policy_name = \"MyScanPolicy.policy\"\n\n with patch(\"time.sleep\"):\n zap_common.zap_active_scan(zap, target, scan_policy_name)\n\n zap.ascan.scan.assert_called_once_with(target, recurse=True, scanpolicyname=scan_policy_name, contextid=None)\n zap.ascan.status.assert_called_with(scan_id)\n self.assertEqual(3, zap.ascan.status.call_count)", "title": "" }, { "docid": "34f13a8d7ae34b7b4030bcf4714c14ca", "score": "0.49592897", "text": "def scanIsPresent(self,scan):\r\n\t\tispresent=False\r\n\t\t\r\n\t\treturn ispresent", "title": "" }, { "docid": "b9d3d7868d8a3e93553043177c3f6ab4", "score": "0.49390975", "text": "def acquire(self, int_blocking=True, timeout=None): # real signature unknown; restored from __doc__\n return False", "title": "" }, { "docid": "69bb9dfac8f78fd2ffd0095035303e05", "score": "0.49352267", "text": "def scan(self, duration, number_of_results, number_of_dash_agents):\n results = range(0, number_of_results)\n hits = range(0, number_of_dash_agents)\n\n for i in results:\n results[i] = self.create_random_bluetooth_device()\n for _ in hits:\n results[randint(0, number_of_dash_agents-1)] = self.create_random_bluetooth_device(create_dash_agent=True)\n if duration > 0:\n sleep(duration)\n return results", "title": "" }, { "docid": "72aa83b41c4e1f09a8fdff9a7b43f1b5", "score": "0.49225113", "text": "def wait(fs, timeout=..., return_when=...):\n ...", "title": "" }, { "docid": "f3176850272c5d749e7af7a0d8d3c3f7", "score": "0.49205276", "text": "def discovery_ip(self, ip):\n logger.info(colored.cyan(\"Started scan.\"))\n time.sleep(self.scan_timeout)\n\n for node in self.graph.nodes:\n if Node(ip) == node:\n logger.info(colored.green(\"Successful scan.\"))\n return node.running\n\n logger.info(\"Failed scan.\")\n return {}", "title": "" }, { "docid": "748d5da2cf1718194f02b22ca2870d90", "score": "0.49182427", "text": "def scan(self, cursor=0, match=None, count=None):\n args = []\n if match is not None:\n args += [b'MATCH', match]\n if count is not None:\n args += [b'COUNT', count]\n fut = self.execute(b'SCAN', cursor, *args)\n return wait_convert(fut, lambda 
o: (int(o[0]), o[1]))", "title": "" }, { "docid": "bf2187d63b648b55113e07e7846d915a", "score": "0.49116322", "text": "def _wait_for_devpi_to_start(cls, input_dict, clientdir):\n retries_left = 30\n while retries_left > 0:\n try:\n run_devpi.select_server(\n input_dict['vargs']['server'], clientdir=clientdir)\n except SystemExit:\n retries_left -= 1\n time.sleep(1)\n continue\n return", "title": "" }, { "docid": "e7f239f707d6686e76b29782423c724b", "score": "0.49067584", "text": "def waitStart():\n global waiting\n waiting = True\n ans = \"\"\n while(ans!=\"start\"):\n ans = input()\n waiting = False", "title": "" }, { "docid": "ba9ccd98704bd05381ae3eda3d55aa94", "score": "0.49066204", "text": "def test_run_active_scan_as_user(self):\n def status_result():\n \"\"\"Return value of the status property.\"\"\"\n if status.call_count > 2:\n return '100'\n return '50'\n\n class_mock = MagicMock()\n class_mock.scan_as_user.return_value = '1'\n status = Mock(side_effect=status_result)\n class_mock.status = status\n self.zap_helper.zap.ascan = class_mock\n self.zap_helper.zap.context.context = Mock(return_value={'id': '1'})\n self.zap_helper.zap.users.users_list = Mock(return_value=[{'name': 'Test', 'id': '1'}])\n\n self.zap_helper.run_active_scan('http://localhost', False, 'Test', 'Test')", "title": "" }, { "docid": "6bddc6e481c3ad7f4d2171e25f5c0311", "score": "0.48884153", "text": "def _scan_resource(location, scanners):\n # `rid` is not needed in this context, yet required in the scan_resource args\n location_rid = location, 0\n _, _, errors, _, results, _ = scancode_cli.scan_resource(location_rid, scanners)\n return results, errors", "title": "" }, { "docid": "14a852acc8b16f5f57551947e8d1940b", "score": "0.4884079", "text": "def scan_devices(self):\n self._update_info()\n return self.last_results", "title": "" }, { "docid": "4a899d5d3a8e772200f9ad079f1c1118", "score": "0.48821905", "text": "async def scan(self, cursor=0, match=None, count=None):\n async def scan_coroutine(address, cur=cursor):\n \"\"\"\n\n :param address - address tuple\n :param cur\n Usage example:\n \"\"\"\n if not cur:\n cur = b'0'\n ks = []\n while cur:\n fut = await self._execute_node(\n address, b'SCAN', cursor=cur, match=match, count=count)\n cur, values = fut\n ks.extend(values)\n return ks\n\n result = await asyncio.gather(*[\n scan_coroutine(e, cur=cursor) for e in self._get_nodes_entities()\n ])\n\n flatten_result = []\n list(map(flatten_result.extend, result))\n return flatten_result", "title": "" }, { "docid": "1fc82419e05041624da0e90b22401ce3", "score": "0.48814642", "text": "def verify_start_scan_button(self):\n self.mouse_click_action_on_element_present(self.number_secondpage)\n result = self.is_element_present(self.start_scan_button, max_time_out=30)\n\n if not result:\n self.log.error(\"Unable to find start scan button\")\n\n return result", "title": "" }, { "docid": "cdc6cf71454e6eb12a0b3fdc9f06d314", "score": "0.4870563", "text": "async def test_async_scanner_devices_by_address_connectable(\n hass: HomeAssistant, enable_bluetooth: None\n) -> None:\n manager = _get_manager()\n\n class FakeInjectableScanner(BaseHaRemoteScanner):\n def inject_advertisement(\n self, device: BLEDevice, advertisement_data: AdvertisementData\n ) -> None:\n \"\"\"Inject an advertisement.\"\"\"\n self._async_on_advertisement(\n device.address,\n advertisement_data.rssi,\n device.name,\n advertisement_data.service_uuids,\n advertisement_data.service_data,\n advertisement_data.manufacturer_data,\n advertisement_data.tx_power,\n 
{\"scanner_specific_data\": \"test\"},\n MONOTONIC_TIME(),\n )\n\n new_info_callback = manager.scanner_adv_received\n connector = (\n HaBluetoothConnector(MockBleakClient, \"mock_bleak_client\", lambda: False),\n )\n scanner = FakeInjectableScanner(\n hass, \"esp32\", \"esp32\", new_info_callback, connector, False\n )\n unsetup = scanner.async_setup()\n cancel = manager.async_register_scanner(scanner, True)\n switchbot_device = generate_ble_device(\n \"44:44:33:11:23:45\",\n \"wohand\",\n {},\n rssi=-100,\n )\n switchbot_device_adv = generate_advertisement_data(\n local_name=\"wohand\",\n service_uuids=[\"050a021a-0000-1000-8000-00805f9b34fb\"],\n service_data={\"050a021a-0000-1000-8000-00805f9b34fb\": b\"\\n\\xff\"},\n manufacturer_data={1: b\"\\x01\"},\n rssi=-100,\n )\n scanner.inject_advertisement(switchbot_device, switchbot_device_adv)\n assert async_scanner_devices_by_address(\n hass, switchbot_device.address, connectable=True\n ) == async_scanner_devices_by_address(hass, \"44:44:33:11:23:45\", connectable=False)\n devices = async_scanner_devices_by_address(\n hass, switchbot_device.address, connectable=False\n )\n assert len(devices) == 1\n assert devices[0].scanner == scanner\n assert devices[0].ble_device.name == switchbot_device.name\n assert devices[0].advertisement.local_name == switchbot_device_adv.local_name\n unsetup()\n cancel()", "title": "" }, { "docid": "75d7546c4f57359ec9c285deabb25520", "score": "0.48697945", "text": "def setup_scanner(\n hass: HomeAssistant,\n config: ConfigType,\n see: SeeCallback,\n discovery_info: DiscoveryInfoType | None = None,\n) -> bool:\n scanner = GoogleMapsScanner(hass, config, see)\n return scanner.success_init", "title": "" }, { "docid": "bec7007fb92897dbeb84791841369029", "score": "0.48676497", "text": "def begin_scan(self):\n\n log.info('begin scan')\n # Call the base class method\n super().begin_scan()\n \n # Set angles for the interlaced scan\n if(self.epics_pvs['InterlacedScan'].get(as_string=True)=='Yes'):\n interlacedfilename = self.epics_pvs['InterlacedFileName'].get(as_string=True)\n try:\n self.theta = np.load(interlacedfilename) \n # update angles and number of frames to be captured and collected\n self.total_images -= self.num_angles-len(self.theta)\n self.num_angles = len(self.theta) \n # print some information about angles\n stheta = np.sort(self.theta)\n log.info('file with interlaced angles %s', interlacedfilename)\n log.info('loaded %d interlaced angles',self.num_angles)\n log.info('min angle %f',stheta[0])\n log.info('max angle %f',stheta[-1])\n log.info('min distance between neigbouring sorted angles %f',np.amin(np.abs(stheta[1::2]-stheta[::2])))\n log.info('max distance between neigbouring sorted angles %f',np.amax(np.abs(stheta[1::2]-stheta[::2]))) \n except:\n log.error('%s file with interlaced angles is not valid', interlacedfilename)\n self.theta = []\n self.abort_scan()\n return \n else:\n self.theta = self.rotation_start + np.arange(self.num_angles) * self.rotation_step\n\n self.epics_pvs['FPNumCapture'].put(self.total_images, wait=True)\n self.epics_pvs['FPCapture'].put('Capture')", "title": "" }, { "docid": "c3971dc94e9e214fd364a3600e584286", "score": "0.48579764", "text": "async def __aenter__(self):\n while not self.bus.try_lock():\n await sleep()\n return self.bus", "title": "" }, { "docid": "0b3f20e21c2f8608a6b19eb1e2f94bd9", "score": "0.4854141", "text": "def Scan(self):\n if self.ports_bool == False:\n log.send_log(\"No ports specified for scanning\")\n exit(0)\n\n for item in self.sets:\n BU_SO = 
scanobject.ScanObject()\n # populate fields based on line input\n BU_SO.CreateCommand(item, self.exclude_string, self.ports, self.nmap_dir)\n self.scan_objs.append(BU_SO)\n self.machine_count = self.machine_count + BU_SO.GetMachineCount()\n\n pids = []\n for obj in self.scan_objs:\n pid = os.fork()\n if pid != 0:\n pids.append(pid)\n else:\n log.send_log(obj.command)\n os.system(obj.command)\n exit(0)\n for i in pids:\n os.waitpid(i, 0)", "title": "" }, { "docid": "bb04ed40bcbbe7d203e661adf1c1dc32", "score": "0.4849093", "text": "async def wait_until_ready(self):\n await self._monitor_ready.wait()", "title": "" }, { "docid": "9218b2e6ef97511f25d0664779fc0ca2", "score": "0.48467833", "text": "async def scan_start(\n handle: StoreHandle,\n profile: Optional[str],\n category: str,\n tag_filter: Union[str, dict] = None,\n offset: int = None,\n limit: int = None,\n) -> ScanHandle:\n if isinstance(tag_filter, dict):\n tag_filter = json.dumps(tag_filter)\n tag_filter = encode_str(tag_filter)\n return await do_call_async(\n \"askar_scan_start\",\n handle,\n encode_str(profile),\n encode_str(category),\n tag_filter,\n c_int64(offset or 0),\n c_int64(limit if limit is not None else -1),\n return_type=ScanHandle,\n )", "title": "" }, { "docid": "9659989b9afb577dcaee3bc25027d010", "score": "0.4839204", "text": "def do_rs(self,arg,opts=None):\r\n\t\t#should we also support rs scan1 scan2?\r\n\t\tdevices=Devices()\r\n\t\targs=arg.split()\r\n\t\tif len(args)==0:\r\n\t\t\tprint 'I need to know which scan to run!'\r\n\t\telse:\r\n\t\t\tdevices.run_scans(args)", "title": "" }, { "docid": "16c1556ff2b79a04e5ab1a55bccef389", "score": "0.4838845", "text": "def requires(self):\n args = {\n \"results_dir\": self.results_dir,\n \"rate\": self.rate,\n \"target_file\": self.target_file,\n \"top_ports\": self.top_ports,\n \"interface\": self.interface,\n \"ports\": self.ports,\n \"db_location\": self.db_location,\n }\n return MasscanScan(**args)", "title": "" }, { "docid": "5c7419eaf878d66d27f6b912066f7dab", "score": "0.48349673", "text": "async def bluetooth_scan():\n if not subnet:\n import netifaces\n\n gateway = netifaces.gateways().get(\"default\", {})\n ipscope = gateway.get(netifaces.AF_INET, ())[0][:-1] + \"0/24\"\n else:\n ipscope = subnet\n async with gdh_session() as session:\n googledevices = NetworkScan(loop, session)\n result = await googledevices.scan_for_units(ipscope)\n for host in result:\n if host[\"bluetooth\"]:\n await get_device_info(host)\n print(format_json(devices))", "title": "" }, { "docid": "b60a5c58a06aeaf6b1ab55c0a1bc8f0d", "score": "0.4830016", "text": "def scan(self, *args, **kw):\n return self.layer2.scan(self, *args, **kw)", "title": "" }, { "docid": "8c95c995ae3e2cea9c3c30abe5beb048", "score": "0.48260528", "text": "def start(self):\n return self.first.xfer", "title": "" }, { "docid": "cf05c3bd43786243c3b9c84bc9969b85", "score": "0.48143834", "text": "def scan(cls):\n scan_request = cls._index.search().query(MatchAll())\n response = scan_request.scan()\n return response", "title": "" }, { "docid": "26179803dd562afe33646bd23d7626a5", "score": "0.48140636", "text": "def run(self,scan_config):\n return self.RE(self.tomo_scan(scan_config))", "title": "" }, { "docid": "a898c34b7e238b3659b42a7680990c8d", "score": "0.48025686", "text": "def scan(one_start_range):\r\n for i in one_start_range:\r\n s = None\r\n try:\r\n s = serial.Serial(i-1,timeout=1,writeTimeout=1)\r\n s.timeout = 1\r\n if s.isOpen():\r\n print i, s.portstr\r\n check(s)\r\n except KeyboardInterrupt:\r\n print 'ctrl-c 
stop'\r\n world_stop = True\r\n except: pass\r\n finally:\r\n if s:s.close()\r\n return", "title": "" }, { "docid": "d425e8636a2a66577d6aa3bd48c37dcb", "score": "0.4797599", "text": "def start(self):\n self.input_drv.start()", "title": "" }, { "docid": "011881585e33acc848e3862cf902642b", "score": "0.47913036", "text": "def setup_scanner(\n hass: HomeAssistant,\n config: ConfigType,\n see: SeeCallback,\n discovery_info: DiscoveryInfoType | None = None,\n) -> bool:\n scanner = FleetGoDeviceScanner(config, see)\n if not scanner.login(hass):\n _LOGGER.error(\"FleetGO authentication failed\")\n return False\n return True", "title": "" }, { "docid": "7307e592f452a367b394c1c1de15d712", "score": "0.47891298", "text": "def wait(self) -> None:\n pass", "title": "" }, { "docid": "dff88b95d38cf0b83c9dffe28d768c07", "score": "0.4787214", "text": "def search_s(self, query, timeout=60):\n response = self.search(query)\n wait_timeout = time.time() + timeout\n search_id = response[1]\n while True:\n status, record_count = self.status(search_id)\n if status in ('CANCELED', 'ERROR'):\n raise ArielSearchError(query,\n \"Ariel search_id {0} failed; {1}\"\n .format(search_id, status))\n if status == 'COMPLETED':\n return (search_id, record_count)\n\n if time.time() < wait_timeout:\n time.sleep(10)\n continue\n raise ArielSearchError(query,\n \"Ariel search_id {0} did not complete within {1}s!\"\n .format(search_id, timeout))", "title": "" }, { "docid": "e49a21c127f0d27580438ded122b1d32", "score": "0.4785026", "text": "def scan_status():\n try:\n r = requests.get(\n \"{}:{}/burp/scanner/status\".format(\n proxy_url,\n api_port\n )\n )\n r.raise_for_status()\n except requests.exceptions.RequestException as e:\n print(\"Error getting the scan status: {}\".format(e))\n else:\n resp = r.json()\n sys.stdout.write(\"\\r[-] Scan in progress: %{}\".format(\n resp['scanPercentage'])\n )\n sys.stdout.flush()\n return resp['scanPercentage']", "title": "" }, { "docid": "5bae6fbee21b99e7d5b90eb401bc83c9", "score": "0.47750822", "text": "def do(self):\n device_data = self.target\n device_data.is_release_resources = False\n device_data.is_abort_command_executed = False\n device_data.is_obsreset_command_executed = False\n device_data.is_restart_command_executed = False\n this_server = TangoServerHelper.get_instance()\n try:\n if device_data.scan_timer_handler.is_scan_running():\n device_data.scan_timer_handler.stop_scan_timer() # stop timer when EndScan command is called\n device_data.isScanRunning = False\n device_data.is_scan_completed = True\n mccs_subarray_ln_fqdn = \"\"\n property_val = this_server.read_property(\"MccsSubarrayLNFQDN\")\n mccs_subarray_ln_fqdn = mccs_subarray_ln_fqdn.join(property_val)\n mccs_subarray_ln_client = TangoClient(mccs_subarray_ln_fqdn)\n mccs_subarray_ln_client.send_command(const.CMD_END_SCAN)\n self.logger.debug(const.STR_MCCS_END_SCAN_INIT)\n this_server.write_attr(\n \"activityMessage\", const.STR_MCCS_END_SCAN_INIT, False\n )\n this_server.write_attr(\"scanID\", \"\")\n self.logger.info(const.STR_SCAN_COMPLETE)\n this_server.write_attr(\n \"activityMessage\", const.STR_END_SCAN_SUCCESS, False\n )\n return (ResultCode.OK, const.STR_END_SCAN_SUCCESS)\n\n except DevFailed as dev_failed:\n log_msg = f\"{const.ERR_END_SCAN_CMD_ON_MCCS}{dev_failed}\"\n self.logger.exception(dev_failed)\n tango.Except.throw_exception(\n const.STR_END_SCAN_EXEC,\n log_msg,\n \"SubarrayNode.EndScanCommand\",\n tango.ErrSeverity.ERR,\n )", "title": "" }, { "docid": "2b4cd7a194e11dcfe4e75d9c827d1b3b", "score": 
"0.47731847", "text": "def start_runner(self) -> None:\n with self._start_lock:\n self._stop.clear()\n if not self.reader_running():\n self._read_thread.start()", "title": "" }, { "docid": "cc0f836dcd7f3c026ca35f79f0380a84", "score": "0.47717094", "text": "async def start():", "title": "" }, { "docid": "2406923d84614bc35514c53cf50871a9", "score": "0.47600037", "text": "def start(self):\n @contextmanager\n def running():\n self._running = True\n try:\n yield\n finally:\n self._running = False\n\n with running():\n self._refresh_providers()\n\n try:\n with self._context as ctx:\n self._iterate(ctx)\n except AttributeError:\n self._iterate(self._context)", "title": "" }, { "docid": "bf0c524d0f0b1dbc0d32f8f60409aa85", "score": "0.47581", "text": "def begin_scan(self):\n log.info('begin scan')\n\n # Set data directory\n file_path = self.epics_pvs['DetectorTopDir'].get(as_string=True) + self.epics_pvs['ExperimentYearMonth'].get(as_string=True) + os.path.sep + self.epics_pvs['UserLastName'].get(as_string=True) + os.path.sep\n self.epics_pvs['FilePath'].put(file_path, wait=True)\n\n # set TomoScan xml files\n self.epics_pvs['CamNDAttributesFile'].put('TomoScanDetectorAttributes.xml')\n self.epics_pvs['FPXMLFileName'].put('TomoScanLayout.xml')\n\n # Call the base class method\n super().begin_scan()\n # Opens the front-end shutter\n self.open_frontend_shutter()", "title": "" }, { "docid": "01de47bf82b89f37b292128726391721", "score": "0.47452384", "text": "async def __aenter__(self):\n self.start()\n return self", "title": "" }, { "docid": "83351e5675adfafdc84b61d5f66f9faf", "score": "0.47272253", "text": "def result(self, wait=0):\n if self.started:\n return QUtilities.get_result(self.id, wait=wait, cached=self.cached)", "title": "" }, { "docid": "0037a08412502f3fc896d631d12211b5", "score": "0.47211677", "text": "def bportscan(*args, fork=False):\n\n return engine.call('bportscan', args, fork=fork)", "title": "" } ]
007d503ca4fa5e3e5cacc0a036098e48
Opens list column to rows Attributes
[ { "docid": "b212ced1fba0651b841b10e760e0a8b0", "score": "0.0", "text": "def listToRows(self, column:str, id:str, column_final:str):\n import pandas as pd\n import numpy as np\n # Creates new column converting the current list into a string\n self.data[column+'_str'] = self.data[column].apply(lambda x: ','.join(x))\n # Uses the new column and creates a new series with id as index for opening rows\n new_df = pd.DataFrame(self.data[column+'_str'].str.split(',').tolist(), index=self.data[id]).stack()\n # Resets the index using the id\n new_df = new_df.reset_index([0, id])\n # Renames columns\n new_df.columns = [id, column_final]\n # Drops the new column str and the column that was previewsly a list\n self.data.drop([column+'_str', column], axis=1, inplace=True)\n # Merges the two DataFrames\n self.data = pd.merge(self.data, new_df, how='inner', left_on=[id], right_on=[id])", "title": "" } ]
[ { "docid": "68dfd020f3fa8e0141a597e0e7a6ceae", "score": "0.642196", "text": "def column2list():", "title": "" }, { "docid": "bbf1d89fde156270f86018b1828732dc", "score": "0.59959775", "text": "def __init__(self, row, kw):\n super().__init__([], **kw)\n\n self._columns = []\n for column in row:\n self._columns.append(StructuredTextColumn(column[0],\n column[1],\n column[2],\n column[3],\n column[4],\n kw))", "title": "" }, { "docid": "b6d7eb28357976b3eada8e1311c65fa7", "score": "0.59312886", "text": "def _populate(self, row, item):", "title": "" }, { "docid": "39cacb55918aaae23bf407d7cdbdffb5", "score": "0.5875306", "text": "def __setup_row_list(self):\n default_data_funcs = ('text','toggle','pixbuf', 'progress')\n default_cell_renderers = (gtk.CellRendererText, gtk.CellRendererToggle,\n gtk.CellRendererPixbuf, gtk.CellRendererProgress)\n\n for ix, val in enumerate(self.__search_fields):\n field, title = val\n draw_type = ColumnDraw.TEXT\n drawer_name = '_draw_%s_as' % field\n # Check the type of column drawer\n if hasattr(self, drawer_name):\n draw_type = getattr(self, drawer_name)\n if draw_type == ColumnDraw.CUSTOM:\n custom_draw_func = \"_custom_draw_%s\" % field\n if hasattr(self, custom_draw_func):\n col = getattr(self, custom_draw_func).__call__(title)\n else:\n cr = default_cell_renderers[int(draw_type)]()\n col = gtk.TreeViewColumn(title, cr)\n #col.pack_start(cr, True)\n col.set_resizable(True)\n #\n if draw_type in [ColumnDraw.TOGGLE, ColumnDraw.PIXBUF]:\n cr.set_property('xalign', 0.0)\n\n # Get First the default data func\n data_func = getattr(self, \"_draw_data_func_%s\" %\n default_data_funcs[int(draw_type)])\n\n # Check for a custom data func\n data_func_name = '_draw_data_func_%s' % field\n if hasattr(self, data_func_name):\n data_func = getattr(self, data_func_name)\n col.set_cell_data_func(cr, data_func, field)\n col.connect('clicked', self.__tv_data_column_clicked, field)\n col.set_sort_column_id(ix)\n self.base_tv_data.append_column(col)\n self.__tree_model = self._get_tree_model()\n #self.__tree_model.set_sort_func(self.__tv_data_compare_method)\n self.base_tv_data.set_model(self.__tree_model)", "title": "" }, { "docid": "d01f5ff432c060412194273f73f6f5d7", "score": "0.5800063", "text": "def add_attr(self, attr, parent):\n row = parent.rowCount()\n for label, column in self.columns.items():\n item = NodeDataItem()\n item.set_data(self.default_value[label])\n parent.setChild(row, column, item)\n # end for label, column in self.columns.items()\n nameitem = NodeDataItem()\n nameitem.setText(attr)\n nameitem.set_data(attr)\n nameitem.setEditable(False)\n parent.setChild(row, self.columns['name'], nameitem)\n return row", "title": "" }, { "docid": "6623ed38e8b72a398b362626cfe52e37", "score": "0.56468517", "text": "def listAttributes():", "title": "" }, { "docid": "fca6b8c86d535578376492123c056d9d", "score": "0.563202", "text": "def setUpAttributeList(self):\n \n self.AttrList={}\n self.AttrList['add_offset'] = ['N', 'D']\n self.AttrList['ancillary_variables'] = ['S', 'D']\n self.AttrList['axis'] = ['S', 'C']\n self.AttrList['bounds'] = ['S', 'C']\n self.AttrList['calendar'] = ['S', 'C']\n self.AttrList['cell_measures'] = ['S', 'D']\n self.AttrList['cell_methods'] = ['S', 'D']\n self.AttrList['climatology'] = ['S', 'C']\n self.AttrList['comment'] = ['S', ('G', 'D')]\n self.AttrList['compress'] = ['S', 'C']\n self.AttrList['Conventions'] = ['S', 'G']\n self.AttrList['coordinates'] = ['S', 'D']\n self.AttrList['_FillValue'] = ['D', 'D']\n self.AttrList['flag_meanings'] = ['S', 
'D']\n self.AttrList['flag_values'] = ['D', 'D']\n self.AttrList['formula_terms'] = ['S', 'C']\n self.AttrList['grid_mapping'] = ['S', 'D']\n self.AttrList['history'] = ['S', 'G']\n self.AttrList['institution'] = ['S', ('G', 'D')]\n self.AttrList['leap_month'] = ['N', 'C']\n self.AttrList['leap_year'] = ['N', 'C']\n self.AttrList['long_name'] = ['S', ('C', 'D')]\n self.AttrList['missing_value'] = ['D', 'D']\n self.AttrList['month_lengths'] = ['N', 'C']\n self.AttrList['positive'] = ['S', 'C']\n self.AttrList['references'] = ['S', ('G', 'D')]\n self.AttrList['scale_factor'] = ['N', 'D']\n self.AttrList['source'] = ['S', ('G', 'D')]\n self.AttrList['standard_error_multiplier'] = ['N', 'D']\n self.AttrList['standard_name'] = ['S', ('C', 'D')]\n self.AttrList['title'] = ['S', 'G']\n self.AttrList['units'] = ['S', ('C', 'D')]\n self.AttrList['valid_max'] = ['N', ('C', 'D')]\n self.AttrList['valid_min'] = ['N', ('C', 'D')]\n self.AttrList['valid_range'] = ['N', ('C', 'D')]\n\n if self.version >= vn1_3:\n self.AttrList['flag_masks'] = ['D', 'D']\n\n if self.version >= vn1_6:\n self.AttrList['cf_role'] = ['S', 'C']\n self.AttrList['_FillValue'] = ['D', ('C', 'D')]\n self.AttrList['featureType'] = ['S', 'G']\n self.AttrList['instance_dimension'] = ['S', 'D']\n self.AttrList['missing_value'] = ['D', ('C', 'D')]\n self.AttrList['sample_dimension'] = ['S', 'D']\n\n if self.version >= vn1_7:\n self.AttrList['actual_range'] = ['N', ('C', 'D')]\n self.AttrList['add_offset'] = ['N', ('C', 'D')]\n self.AttrList['comment'] = ['S', ('G', 'C', 'D')]\n self.AttrList['computed_standard_name'] = ['S', 'C']\n self.AttrList['external_variables'] = ['S', 'G']\n self.AttrList['instance_dimension'] = ['S', '-']\n self.AttrList['sample_dimension'] = ['S', '-']\n self.AttrList['scale_factor'] = ['N', ('C', 'D')]\n\n if self.version >= vn1_8:\n self.AttrList['coordinates'] = ['S', ('D', 'M')]\n self.AttrList['geometry'] = ['S', ('C', 'D')]\n self.AttrList['geometry_type'] = ['S', 'M']\n self.AttrList['grid_mapping'] = ['S', ('D', 'M')]\n self.AttrList['history'] = ['S', ('G', 'Gr')]\n self.AttrList['interior_ring'] = ['S', 'M']\n self.AttrList['node_coordinates'] = ['S', 'M']\n self.AttrList['node_count'] = ['S', 'M']\n self.AttrList['nodes'] = ['S', 'C']\n self.AttrList['part_node_count'] = ['S', 'M']\n self.AttrList['title'] = ['S', ('G', 'Gr')]\n \n return", "title": "" }, { "docid": "fd981f9e80a11f2839fb8917a734c0f3", "score": "0.56145525", "text": "def getRowWithColumns(self, tableName, row, columns, attributes):\r\n pass", "title": "" }, { "docid": "00df0423208e91c391ae3b70861dbf7d", "score": "0.54713804", "text": "def getRowsWithColumns(self, tableName, rows, columns, attributes):\r\n pass", "title": "" }, { "docid": "d60bf7ff04b3078242d2c8523210788d", "score": "0.54467803", "text": "def as_list_data(self):\n element = ElementTree.Element(self.list_type)\n id_ = ElementTree.SubElement(element, \"id\")\n id_.text = self.id\n name = ElementTree.SubElement(element, \"name\")\n name.text = self.name\n return element", "title": "" }, { "docid": "72f9970f844db7c7d97782dd7107d8c3", "score": "0.5432713", "text": "def fillItemNamesColumn(item_list, ws):\n ws.column_dimensions['A'].width = 35\n ws[\"A1\"] = \"Item\"\n for i in range(len(item_list)):\n ws[\"A\" + str(i+2)] = item_list[i]", "title": "" }, { "docid": "be9b1ebc0877e8be78e09b5305a6e857", "score": "0.54287624", "text": "def defineColumns(self):\n\t\tself.aTypeCol = self.ui.assetType_listWidget\n\t\tself.aNameCol = 
self.ui.assetName_listWidget\n\t\tself.aSubTypeCol = self.ui.assetSubType_listWidget\n\t\tself.aVersionCol = self.ui.assetVersion_listWidget", "title": "" }, { "docid": "cc7dee7acbda568a1be5c179ad9a55ad", "score": "0.54081786", "text": "def __init__(self, data_list, name, *args, **kwargs):\n super(ListTextWidget, self).__init__(*args, **kwargs)\n self._name = name\n self._list = data_list\n self.attrs.update({'list': 'list__%s' % self._name})", "title": "" }, { "docid": "fdcccbaed6c428a9798cb87a27d111f7", "score": "0.53960145", "text": "def _get_row_map(self, model_name, list_attrs=None): # получаем row_map из заголовка\n row_map = list()\n attr_dic = self.model_meta[model_name][ATTRIBUTE_KEY]\n for num in range(len(attr_dic)):\n row_map.append(attr_dic[num + 1][ATTRIBYTE_TYPE_KEY]) # range возвращает знач. от 0 поэтому num + 1\n if list_attrs is not None:\n if attr_dic[num + 1][ATTRIBUTE_NAME_KEY] not in list_attrs:\n row_map[num - 1] = OPTION_DEFAULT_KEY\n return row_map", "title": "" }, { "docid": "cfcf8a926e179abe8ffa075001092d76", "score": "0.5392685", "text": "def __init__(self, rows):\n self.rows = rows", "title": "" }, { "docid": "fa85a0e74eaf4ae15cd48ce9dde1ed64", "score": "0.5363205", "text": "def list_to_row(obj):\n f_de = lambda keys, **kwargs: d_extract(obj, keys, warn=True, **kwargs)\n return {\n 'list_id': f_de('id', required=True),\n 'list_name': f_de('name'),\n 'list_color': f_de('properties.watchlistColor')\n }", "title": "" }, { "docid": "f7b45ba949885ab33cc41878c32fc93e", "score": "0.5359614", "text": "def _init_struct(self, row: List[str]) -> None:\n for i in range(len(row) - 1):\n self.__new_dict()", "title": "" }, { "docid": "8bfb570c2beacd484e04dee74face41b", "score": "0.53587586", "text": "def row(self, row):\n return [self[x] for x in row]", "title": "" }, { "docid": "6f54d7219ac92c713950b2ce4359f00d", "score": "0.53516746", "text": "def setup_data(self):\n xml = example_xml()\n self.data = data_structure(xml)\n self.index = self.data.keys()\n self.current_ind = 0\n self.model = QtGui.QStandardItemModel()\n for item in self.index:\n qt_item = QtGui.QStandardItem(item) \n self.model.appendRow(qt_item)\n self.listView.setModel(self.model)", "title": "" }, { "docid": "204cb6d02816c2ddf3532a80f7e1deff", "score": "0.5338121", "text": "def _set_columns(self):\n for column_item in sorted(self.columns.values()):\n self.InsertColumn(column_item[0], column_item[1], width=wx.LIST_AUTOSIZE_USEHEADER)\n\n # If the column width obtained from wxLIST_AUTOSIZE_USEHEADER\n # is smaller than the minimum allowed column width\n # then set the column width to the minumum allowed size\n if self.GetColumnWidth(column_item[0]) < column_item[2]:\n self.SetColumnWidth(column_item[0], column_item[2])\n\n # Set auto-resize if enabled\n if column_item[3]:\n self.setResizeColumn(column_item[0])", "title": "" }, { "docid": "828359bcf79e4536f342a6c381a6d92d", "score": "0.5311207", "text": "def listExo(self):\n list_wid = QTableWidget()\n list_wid.setColumnCount(2)\n list_wid.setHorizontalHeaderLabels([\"Dot\", \"Expression\"])\n list_wid.setColumnWidth(0, 40)\n list_wid.horizontalHeader().setStretchLastSection(True)\n list_wid.setSortingEnabled(False)\n list_wid.setSelectionMode(QAbstractItemView.SingleSelection)\n list_wid.setEditTriggers(QAbstractItemView.AllEditTriggers)\n\n return list_wid", "title": "" }, { "docid": "4e09c1e60596e9f98896158b5b70576b", "score": "0.52980566", "text": "def __init__(self):\n self.myList = []\n self.lenth = None", "title": "" }, { "docid": 
"6971f3794f09112d5d9c4ff28dbad435", "score": "0.5281885", "text": "def displayItems(self, itemList):\r\n for i, column in enumerate(self.items, start=0):\r\n itemList.insert(\"\", 0, values=(self.items[i]))", "title": "" }, { "docid": "78a8dcfb4c91204dcb07de5a0c576c51", "score": "0.5248293", "text": "def initListView(self):\n self.lvData.InsertColumn(0, self.labels[self.axis])\n self.lvData.InsertColumn(1, \"Number of elements\")\n self.lvData.InsertColumn(2, \"frequency\")\n self.lvData.SetColumnWidth(0, wx.LIST_AUTOSIZE_USEHEADER)\n self.lvData.SetColumnWidth(1, wx.LIST_AUTOSIZE_USEHEADER)\n i = 0\n n = len(self.nonDrawn)\n name = \"\"\n for data in self.nonDrawn:\n if self.category[self.axis] == 1:\n k = 0\n for val in self.values:\n if data[0] == val:\n name = self.names[k]\n break\n k += 1\n else:\n name = str(data[0])\n pos = self.lvData.InsertItem(i, name)\n self.lvData.SetItem(pos, 1, str(data[1]))\n f = '{:.1f}%'.format((data[1] / self.N) * 100)\n self.lvData.SetItem(pos, 2, f)\n if i < 10:\n self.lvData.SetItemBackgroundColour(pos, wx.Colour(self.colors[-1-i][0] * 255, self.colors[-1-i][1] * 255, self.colors[-1-i][2] * 255))\n i += 1", "title": "" }, { "docid": "56805d1e6c8dbf57f70980f19dcd9734", "score": "0.52423525", "text": "def column(self, list_or_name):\n if not isinstance(list_or_name, basestring):\n for c in list_or_name:\n self.column(c)\n else:\n self._columns.append(list_or_name)\n return self", "title": "" }, { "docid": "e664bd52447fe39eb1c2b8def1850df5", "score": "0.5241283", "text": "def set_with_list(self, data):\r\n n = len(data)\r\n dumpable_attributes = self.dumpable_attributes\r\n if n != len(self.dumpable_attributes):\r\n if hasattr(self, \"alt_dumpable_attributes\") and len(self.alt_dumpable_attributes) == n:\r\n dumpable_attributes = self.alt_dumpable_attributes\r\n else:\r\n print(data)\r\n raise Exception(\"Wrong number of fields in data.\")\r\n for i, value in enumerate(data):\r\n key = dumpable_attributes[i]\r\n if value == \"None\":\r\n value = None\r\n elif key in self.int_attributes:\r\n value = int(value)\r\n elif key in self.float_attributes:\r\n value = float(value)\r\n elif key in self.list_attributes:\r\n value = value.split(\",\")\r\n value = [self.list_attributes_types[key](x) for x in value]\r\n setattr(self, key, value)", "title": "" }, { "docid": "6b88255e46821f58923615852d731207", "score": "0.5240201", "text": "def expand_list(df,lst_col):\n df = pd.DataFrame({\n col:np.repeat(df[col].values, df[lst_col].str.len())\n for col in df.columns.drop(lst_col)}\n ).assign(**{lst_col:np.concatenate(df[lst_col].values)})[df.columns]\n return df", "title": "" }, { "docid": "b0a29fa92cd7450c224c861fef14f7df", "score": "0.5220704", "text": "def to_frame(self, attrs=None, repr_col=None):\n if attrs is None and self.attrs is None:\n test = self[0]\n attrs = test.repr_attrs\n return super(EventList, self).to_frame(attrs, repr_col)", "title": "" }, { "docid": "1c6d86ff639ae38018be26ab0d2ce504", "score": "0.5210029", "text": "def get_attribute(data, rows, column, attribute_structure, return_list=False) -> dict or list:\r\n for row in rows:\r\n node = data[row][column]\r\n if return_list and node not in attribute_structure:\r\n attribute_structure.append(node)\r\n if not return_list:\r\n if node not in attribute_structure:\r\n attribute_structure[node] = 1\r\n else:\r\n attribute_structure[node] += 1\r\n return attribute_structure", "title": "" }, { "docid": "28276d8e37969e165b6c697f930c60b6", "score": "0.52023053", "text": "def GetAttrByRow(*args, 
**kwargs):\n return _dataview.DataViewIndexListModel_GetAttrByRow(*args, **kwargs)", "title": "" }, { "docid": "e87500058924fdeccdfc0dc42917c9e0", "score": "0.5194039", "text": "def get(self, tableName, row, column, attributes):\r\n pass", "title": "" }, { "docid": "89fb72695b177e63f778d18ab59a34e9", "score": "0.5184377", "text": "def __make_columns_safe(self):", "title": "" }, { "docid": "30e85f4f302ae07ef4285dec7ba5bd67", "score": "0.517678", "text": "def add_attribute_list(self, attrList):\n\t\tfor kvPair in attrList:\n\t\t\tself.add_attribute(kvPair[0], kvPair[1])", "title": "" }, { "docid": "64f02d857dfb5f1e576323a3475631f4", "score": "0.5173055", "text": "def set_column(self, new_column_name, new_list):\n if len(new_list) != self.len():\n raise UnequalLengthError\n else:\n self.dict[new_column_name] = new_list.copy()", "title": "" }, { "docid": "27369731c400e89e4c040284437ffd5d", "score": "0.5171891", "text": "def __init__(self, columns):\n self.columns = columns", "title": "" }, { "docid": "3ef61ae32a596bd44e349e2fe89cad19", "score": "0.5150856", "text": "def __init__(self, data):\n assert len(data) == len(self.cols)\n for colname, dat in zip(self.cols, data):\n setattr(self, colname, dat)", "title": "" }, { "docid": "468ee2d4f1d24d7cdce9317520733c79", "score": "0.5148919", "text": "def _create_list(self):\n pass", "title": "" }, { "docid": "468ee2d4f1d24d7cdce9317520733c79", "score": "0.5148919", "text": "def _create_list(self):\n pass", "title": "" }, { "docid": "5d484cc5bc7ed3f015137bd862d751c0", "score": "0.51475316", "text": "def _populate_entry_list_objects(self, rows):\n\t\tif rows:\n\n # def __init__(self, entry_number, entry_text, entry_keywords, entry_date, entry_db):\n\n\t#\tmembers = (entry_number, entry_text, entry_keywords, entry_date)\n\t\t\tfor row in rows:\n\t\t\t\tself.entry_list.append(entry.Entry(row[0], row[1], row[2], row[3], self.db))", "title": "" }, { "docid": "5213003e662dad5e762ac878628d1f27", "score": "0.5146679", "text": "def list():\n return [\n AttributeTypes.INTEGER,\n AttributeTypes.ENTITY,\n AttributeTypes.NAMED_REF,\n AttributeTypes.DATE,\n AttributeTypes.STRING\n ]", "title": "" }, { "docid": "db4def695a43b4cc3a1f315b3c7e279e", "score": "0.51431173", "text": "def add_property_row(self, formatted_row):\n raise NotImplementedError", "title": "" }, { "docid": "53bf9c71788bdf44cb1143ce9b40d237", "score": "0.5131842", "text": "def transform_row_data(self, row: List, *args, **kwargs):\n return NotImplemented", "title": "" }, { "docid": "4483d00dfc6d1d3e1e54cff74eeea336", "score": "0.51229817", "text": "def setup_row(self, item):\n cols = []\n append = cols.append\n colspan_counter = 0\n countdown = len(self.columns)\n for col in self.columns:\n countdown -= 1\n colspan = 0\n if colspan_counter == 0:\n colspan = colspan_counter = col.get_colspan(item)\n # adjust colspan because we define 0, 2, 3, etc.\n if colspan_counter > 0:\n colspan_counter -= 1\n\n if colspan == 0 and colspan_counter > 0:\n # override col if colspan is 0 and colspan coutner not 0\n colspan_counter -= 1\n colspan = 0\n # now we are ready to setup dummy colspan cells\n col = NoneCell(self.context, self.request, self)\n\n # we reached the end of the table and have still colspan\n if (countdown - colspan) < 0:\n raise ValueError(\"Colspan for column '%s' is larger than the table.\" % col)\n\n append((item, col, colspan))\n return cols", "title": "" }, { "docid": "f12df480cb9c59bb55891c1a5419470f", "score": "0.5113582", "text": "def fetch_row(self):\n row = []\n value = None\n 
m2m_row_values = []\n m2m_headers = []\n for field_name in self.field_names:\n value = self.get_row_value_from_attr(self.row_instance, field_name)\n row.append(self.strip_value(value))\n if self.show_all_fields:\n m2m_headers, m2m_row_values = self.get_row_values_from_m2m(self.row_instance) # add m2m fields if show_all_fields -- they are not listed in field_names generated from the model\n row.extend(m2m_row_values)\n if self.header_row == self.field_names:\n self.header_row = self.header_row + m2m_headers\n return row", "title": "" }, { "docid": "6caa769ce9cd1e0a941038c3863eeeba", "score": "0.50928223", "text": "def write_column(self, from_row, col_no, value_list):\n row_no = from_row\n for val in value_list:\n self.sheet.cell(row=row_no, column=col_no).value = val\n row_no += 1", "title": "" }, { "docid": "0c8e6a647adbf647fe107a58347ad299", "score": "0.5077369", "text": "def feature_selection (data, topAttributeList):\n df = data.loc[:,topAttributeList] #primo parametro: righe secondo parametro: lista di coloonne\n df[LABEL_NAME] = data[LABEL_NAME]\n return df", "title": "" }, { "docid": "62c7d95773572250d856f16d476f68a2", "score": "0.50762016", "text": "def add_attribute(self, col, attr_name):\n for i in range(len(self._objects)):\n self._table[i].append(col[i])\n self._attributes.append(attr_name)", "title": "" }, { "docid": "96295685ee2ed0a614a9a675bb38f5b0", "score": "0.5070912", "text": "def __init__(self, column_names):\n self.column_names = column_names\n self.column_position_map = {}\n\n for column_index, column_name in enumerate(column_names):\n self.column_position_map[column_name] = column_index\n\n self.position_column_map = {}\n\n for column_name, column_index in self.column_position_map.items():\n self.position_column_map[column_index] = column_name\n\n self.rows = []", "title": "" }, { "docid": "b1e72475cf866c9f613c9c349fb7c78f", "score": "0.50646996", "text": "def AppendColumn(*args, **kwargs):\n return _dataview.DataViewListCtrl_AppendColumn(*args, **kwargs)", "title": "" }, { "docid": "2b325a1069dddfccf682a4bf44e34511", "score": "0.5061595", "text": "def RefreshRows(self):\r\n selected_ids, selected = [], self.GetFirstSelected()\r\n while selected >= 0:\r\n selected_ids.append(self.GetItemData(selected))\r\n selected = self.GetNextSelected(selected)\r\n\r\n # Store row colour attributes\r\n for i in range(self.ItemCount):\r\n t, b = self.GetItemTextColour(i), self.GetItemBackgroundColour(i)\r\n id = self.GetItemData(i)\r\n for func, value in [(self.SetItemTextColour, t),\r\n (self.SetItemBackgroundColour, b)]:\r\n if wx.NullColour != value:\r\n self._row_colours[id][func] = value\r\n elif func in self._row_colours[id]:\r\n del self._row_colours[id][func]\r\n if id in self._row_colours and not self._row_colours[id]:\r\n del self._row_colours[id]\r\n\r\n self.Freeze()\r\n wx.ListView.DeleteAllItems(self)\r\n # To map list item data ID to row, ListView allows only integer per row\r\n row_data_map = {} # {item_id: {row dict}, }\r\n item_data_map = {} # {item_id: [row values], }\r\n # For measuring by which to set column width: header or value\r\n header_lengths = {} # {col_name: integer}\r\n col_lengths = {} # {col_name: integer}\r\n for col_name, col_label in self._columns:\r\n col_lengths[col_name] = 0\r\n # Keep space for sorting arrows.\r\n width = self.GetTextExtent(col_label + \" \")[0] + self.COL_PADDING\r\n header_lengths[col_name] = width\r\n index = 0\r\n for item_id, row in self._id_rows:\r\n if not self._RowMatchesFilter(row):\r\n continue # continue for index, 
(item_id, row) in enumerate(..)\r\n col_name = self._columns[0][0]\r\n col_value = self._formatters[col_name](row, col_name)\r\n col_lengths[col_name] = max(col_lengths[col_name],\r\n self.GetTextExtent(col_value)[0] + self.COL_PADDING)\r\n self.InsertStringItem(index, col_value)\r\n self.SetItemData(index, item_id)\r\n self.SetItemImage(index, -1)\r\n self.SetItemColumnImage(index, 0, -1)\r\n item_data_map[item_id] = {0: row[col_name]}\r\n row_data_map[item_id] = row\r\n col_index = 1 # First was already inserted\r\n for col_name, col_label in self._columns[col_index:]:\r\n col_value = self._formatters[col_name](row, col_name)\r\n col_width = self.GetTextExtent(col_value)[0] + self.COL_PADDING\r\n col_lengths[col_name] = max(col_lengths[col_name], col_width)\r\n self.SetStringItem(index, col_index, col_value)\r\n item_data_map[item_id][col_index] = row.get(col_name)\r\n col_index += 1\r\n index += 1\r\n self._data_map = row_data_map\r\n self.itemDataMap = item_data_map\r\n if self._id_rows and not self._col_widths:\r\n if self._col_maxwidth > 0:\r\n for col_name, width in col_lengths.items():\r\n col_lengths[col_name] = min(width, self._col_maxwidth)\r\n for col_name, width in header_lengths.items():\r\n header_lengths[col_name] = min(width, self._col_maxwidth)\r\n for i, (col_name, col_label) in enumerate(self._columns):\r\n col_width = max(col_lengths[col_name], header_lengths[col_name])\r\n self.SetColumnWidth(i, col_width)\r\n self._col_widths[i] = col_width\r\n #wx.LIST_AUTOSIZE, wx.LIST_AUTOSIZE_USEHEADER\r\n elif self._col_widths:\r\n for col, width in self._col_widths.items():\r\n self.SetColumnWidth(col, width)\r\n if self.GetSortState()[0] >= 0:\r\n self.SortListItems(*self.GetSortState())\r\n\r\n if selected_ids or self._row_colours:\r\n idindx = dict((self.GetItemData(i), i)\r\n for i in range(self.ItemCount))\r\n for item_id, attrs in self._row_colours.items(): # Re-colour rows\r\n if item_id not in idindx: continue\r\n [func(idindx[item_id], value) for func, value in attrs.items()]\r\n if selected_ids: # Re-select the previously selected items\r\n [self.Select(idindx[i]) for i in selected_ids if i in idindx]\r\n\r\n self.Thaw()", "title": "" }, { "docid": "67fd7c20e32d888a451eaef8d0f90262", "score": "0.5054875", "text": "def list():\n return [\n AttributeFields.VALUE,\n AttributeFields.SID,\n AttributeFields.SOURCE,\n AttributeFields.ID_TYPE,\n AttributeFields.ID_SEED,\n AttributeFields.NAME\n ]", "title": "" }, { "docid": "5d1eb0d179eabcc0ae485db33a9c785a", "score": "0.5050718", "text": "def _add_row(self, row: List[str], instance_num: int) -> None:\n # Skip the last column which is the class\n for i in range(len(row) - 1): \n \n self.__incr_cell(row[CLASS_CELL], row[i], i, instance_num)", "title": "" }, { "docid": "b6d16183294986fb81c8521d246f6bec", "score": "0.5049435", "text": "def _build_attributes(self):\r\n if self._attribute_list:\r\n for attribute_name in self._attribute_list:\r\n self._add_attribute(attribute_name)\r\n else:\r\n raise ValueError(\"{}._attribute_list is empty. 
unable to build attributes.\".format(self))", "title": "" }, { "docid": "ab120252b5e43041af2ae7ce750d10c3", "score": "0.50471723", "text": "def __init__(self, data):\n super(PickOrganizationToConnectList, self).__init__(data)\n self._list_config = lists.ListConfiguration()\n self._list_config.addSimpleColumn('org_id', 'Organization ID', hidden=True)\n self._list_config.addPlainTextColumn(\n 'name', 'Name', lambda e, *args: e.name.strip())", "title": "" }, { "docid": "f976c6ff556466c71c7594355e69a23e", "score": "0.50345093", "text": "def tablefy_to_row(self, *args):\n self.__class__._tablefy_check_init()\n return [get_dict_attr(self, value)(self) if callable(get_dict_attr(self, value)) else\n get_dict_attr(self, value).fget(self) for key, value in registry[self.__class__.__name__].items()\n if len(args) == 0 or len(args) >= 1 and key in args]", "title": "" }, { "docid": "48697cfce3d6107b838b93b4819dcac0", "score": "0.5031156", "text": "def listExo(self):\n list_wid = QTableWidget()\n list_wid.setColumnCount(1)\n list_wid.setHorizontalHeaderLabels([\"Expression\"])\n list_wid.horizontalHeader().setStretchLastSection(True)\n list_wid.setSortingEnabled(False)\n list_wid.setSelectionMode(QAbstractItemView.SingleSelection)\n list_wid.itemDoubleClicked.connect(self.openEditGraph)\n\n return list_wid", "title": "" }, { "docid": "07cd9135bdf3684e06f890f572eee99d", "score": "0.5028249", "text": "def __attrs_post_init__(self):\n if not self.list.id:\n self.list.id = self.list_id", "title": "" }, { "docid": "7896a9306addd6597a07061e22badb6b", "score": "0.5022821", "text": "def build(self):\n return self.columns", "title": "" }, { "docid": "9477163d2b6d079623ddd34c9253ed1b", "score": "0.5008137", "text": "def cols_from_paths(self):\r\n df = self.read_lb_list()\r\n for index, value in df.head(self.sample_size).iterrows():\r\n url = df.loc[index, 'EDG - URL - XML Filing']\r\n data_xml = ET.parse(urlopen(url))\r\n root = data_xml.getroot()\r\n self.path_of_elems(root, root.tag)\r\n self.columns_list = list(OrderedDict.fromkeys(self.columns_list))", "title": "" }, { "docid": "6689be6e376f83e6b48ae36c71c2db40", "score": "0.5002992", "text": "def getRow(self, tableName, row, attributes):\r\n pass", "title": "" }, { "docid": "6dec383af7c0eac404a5a6d456605250", "score": "0.5000461", "text": "def __getattr__(cls, attr):\n return Column(path=[attr], scope=False)", "title": "" }, { "docid": "15718eaa74a303f50b72a63f514bc3c7", "score": "0.49996278", "text": "def items(self):\r\n for elem in self._col:\r\n yield elem", "title": "" }, { "docid": "1217ad049080e8d465c9202e0363d03c", "score": "0.49973914", "text": "def __init__(self, rows: Iterable, columns: Iterable[str]):\n\n self.rows = rows\n self.columns = columns", "title": "" }, { "docid": "ab061919f728dc1d21f6fa416b7fcb74", "score": "0.4992462", "text": "def GetAttrByRow(*args, **kwargs):\n return _dataview.DataViewVirtualListModel_GetAttrByRow(*args, **kwargs)", "title": "" }, { "docid": "359b5468cbeb7569cabbf17d283642ed", "score": "0.49888524", "text": "def fillLC(self):\n self.listCtrl.DeleteAllItems() #since we're sorting, must delete all\n #then get a list of tuples of all the data\n data = self.con.execute(self.command).fetchall()\n for i in data:\n #loop through and add it\n self.listCtrl.Append(i[1:])", "title": "" }, { "docid": "b42c3e702086025964c0b7c10ac2aa3b", "score": "0.49769345", "text": "def columns(self):\n\t\treturn [\"name\", \"maj:min\", \"size\", \"ro\", \"type\", \"mountpoint\"]", "title": "" }, { "docid": 
"6c5040ec579d209c060f6561b8f58f8d", "score": "0.4972266", "text": "def _extendRow(self, row):\n rowWidth = self._rowWidth(row)\n extendWidth = self._width - rowWidth\n # skip first element, and extend\n return list(row[1:]) + [''] * extendWidth", "title": "" }, { "docid": "ff1185b0988e66652a348ed174dce32f", "score": "0.49699107", "text": "def addColumnsToRows(self, xml):\n self.xml_reader = FPrimeXmlUtils.PrimeXmlReader( xml )\n columnElem = self.xml_reader.columns()\n columns = columnElem.getiterator(\"ColumnId\")\n\n col = []\n for i in columns:\n col.append(i.text)\n \n rowElem = self.xml_reader.rows()\n rowIter = rowElem.getiterator(\"Row\")\n for r in rowIter:\n k = 0\n cells = r.find(\"Cells\")\n cellIter = cells.getiterator(\"Cell\") \n for c in cellIter:\n FPrimeXmlUtils.SubElement(c, \"ColumnId\").text = col[k]\n k = k+1\n \n return self.xml_reader.element", "title": "" }, { "docid": "b725979195f6c950a9902c93eafb9b15", "score": "0.49576393", "text": "def __init__(self, header=\"column\", *args, **kwargs):\n Table.__init__(self, *args, **kwargs)\n self._items = []\n self._width=0\n self.header = header", "title": "" }, { "docid": "a84a0bf1ccabdcc75083a983ed611bff", "score": "0.49467865", "text": "def construct_row(schema_obj, key):\n\treturn [schema_obj.version, \n\t\t\tschema_obj.form_type, \n\t\t\tschema_obj.elements[key].get('description'),\n\t\t\tschema_obj.elements[key].get('linenumber')]", "title": "" }, { "docid": "3a6546a1fdec3e8a7d7220feaa0d35bc", "score": "0.49465826", "text": "def __init__(self, row):\n\n self.first = row[1]\n self.last = row[2]\n self.email = row[0]", "title": "" }, { "docid": "fad291e9750d1f5e372f3a23baf653ce", "score": "0.49350542", "text": "def __init__(self, _list):\n self.text_ = \"\"\n self.list_ = _list\n self.fg_colors_ = [\"\" for item in _list]\n self.bg_colors_ = [\"\" for item in _list]\n self.delimiter_ = False\n\n self._append(\"\\\\section{Auto Generated}\\n\")\n self._append(\"\\\\begin{tabular}{\")\n self._append(\"| c \" * len(self.list_))\n self._append(\"|| l |}\\n\")", "title": "" }, { "docid": "6bbe97d48ec0657d2064c3e059a155b8", "score": "0.49337697", "text": "def add_row_to_list(product, price, list_of_rows):\r\n dicRow = {\"Product\": product, \"Price\": price}\r\n list_of_rows.append(dicRow)", "title": "" }, { "docid": "e6c05483fe3ab5ed902fbe272c89d860", "score": "0.4923342", "text": "def build_row(self, item):\n return [\n item['score'],\n item['website'] if 'website' in item else '',\n item['about'],\n item['category'],\n item['likes'],\n item['phone'],\n item['emails'],\n item['address'],\n item['city'],\n item['country'],\n datetime.datetime.now().strftime(\"%m%d%Y\")\n ]", "title": "" }, { "docid": "08b287ef2bec29a2558bff793ff46562", "score": "0.49190173", "text": "def activate(self):\n # Read the csv file if it exists and initialise the list\n self.l = []\n try:\n with open('todolist.csv', 'r') as csv_file:\n csv_reader = csv.reader(csv_file)\n for row in csv_reader:\n temp_entry = Entry(row[0], row[1])\n temp_entry.description = row[2]\n temp_entry.timestamp = float(row[3])\n temp_entry.assignees = [a for a in row[4::]]\n self.l.append(temp_entry)\n except IOError:\n logging.debug(\"Failed to load the list from the csv file. 
Creating a new list.\")\n\n super(TodoList, self).activate()", "title": "" }, { "docid": "d7940d3f64f9326c34703a5c5647f0ea", "score": "0.4915853", "text": "def listFieldHolder(cls, label, guts):\n\t\t__pychecker__ = 'unusednames=cls'\n\t\treturn row(cell(label) + cell(guts))", "title": "" }, { "docid": "c042fe548b7288a47b86a8d654bdcfa5", "score": "0.49006215", "text": "def _update_rows(self):\n for row,item in enumerate(self._items):\n item.row = row # Row is the Parent item\n item.column = 0\n \n for column,item in enumerate(self._columns):\n item.row = self.row # Row is the Parent item\n item.column = column", "title": "" }, { "docid": "e86516ab81ea04f8ee4be1a1135ec91d", "score": "0.4896097", "text": "def write_row(self, row_no, from_column, value_list, xlsstyle, column_width):\n col_no = from_column\n if type(value_list) == list:\n for val in value_list:\n self.sheet.cell(row=row_no, column=col_no).value = val\n sheet_obj = self.sheet.cell(row=row_no, column=col_no)\n XlsReportBuilder.set_style(sheet_obj, xlsstyle)\n col_no += 1\n else:\n self.sheet.cell(row=row_no, column=col_no).value = value_list\n self.column_width(value_list, column_width)", "title": "" }, { "docid": "93f9b4772bc4bc9f66fcd3640fa547a1", "score": "0.48954192", "text": "def extend2(self, lists):\n for list in lists:\n row = TableRow()\n row.extend(list)\n self.append(row)", "title": "" }, { "docid": "ff52ce3dd13a2647c20722b7934c3165", "score": "0.48921967", "text": "def set_table_column(table,item_list,column,bck_color = 'normal',enabled_flag = True,\n format_string = \"%3.1f\",checked = True):\n for row,new_item in enumerate(item_list):\n if format_string != None:\n new_item = format_string % new_item\n table_item = make_item(new_item,bck_color,enabled_flag,checked = True)\n table.setItem(row,column,table_item)", "title": "" }, { "docid": "fff4f981437f8059dabd6e579c1ea856", "score": "0.48914742", "text": "def columns(self):\n raise NotImplementedError()", "title": "" }, { "docid": "fff4f981437f8059dabd6e579c1ea856", "score": "0.48914742", "text": "def columns(self):\n raise NotImplementedError()", "title": "" }, { "docid": "88c053441d38455f4a1a43f24e651121", "score": "0.48875272", "text": "def __init__(self):\n self.data = []\n self.columnMap = {}\n\n with open(DATA_PATH, 'r', newline='') as file:\n for row in file:\n if not '#' in row and row:\n row = row.replace('\\n', '')\n self.data.append(row.split(','))\n\n for index, value in enumerate(self.data[0]):\n self.columnMap[index] = value\n\n self.data = self.data[1:]\n pass", "title": "" }, { "docid": "cb312beea4e6245b1493167dd5622735", "score": "0.48831278", "text": "def getRows(self, tableName, rows, attributes):\r\n pass", "title": "" }, { "docid": "6a9d2bd2fe793d8005dd069fbecb7845", "score": "0.48814768", "text": "def factory(cls, item: analysis.Data) -> Gtk.ListBoxRow:\n entry = cls()\n entry.set_item(item)\n row = Gtk.ListBoxRow(activatable=False, selectable=False)\n row.get_style_context().add_class('data-data-row')\n row.add(entry)\n return row", "title": "" }, { "docid": "814a98f714a670c2fd122d91767ee614", "score": "0.48783645", "text": "def listExo(self):\n list_wid = QTableWidget()\n list_wid.setColumnCount(1)\n list_wid.setHorizontalHeaderLabels([\"Expression\"])\n list_wid.horizontalHeader().setStretchLastSection(True)\n list_wid.setSortingEnabled(False)\n\n list_wid.setSelectionMode(QAbstractItemView.SingleSelection)\n list_wid.setEditTriggers(QAbstractItemView.AllEditTriggers)\n\n list_wid.itemChanged.connect(self.verify)\n\n return list_wid", 
"title": "" }, { "docid": "bc83999cb397a082fcff3f51da0db200", "score": "0.48758298", "text": "def __init__(self):\n self.selected = pd.DataFrame(columns=self.columns)", "title": "" }, { "docid": "c6e4603be34d0edd169ccbc64cc10121", "score": "0.48696882", "text": "def _obj_attr_line(self, content):\n return [\n self.get_content_string(content, name)\n for name in self._attribute_name_map\n ]", "title": "" }, { "docid": "300ccafe395f843d10d67ce8065e82aa", "score": "0.48663872", "text": "def __store_column_width(self):\n self.header_width = []\n for i in range(0, self.view.header().count()):\n self.header_width.append(self.view.columnWidth(i))", "title": "" }, { "docid": "03bdeba05094389fe4e8acac21030156", "score": "0.4853928", "text": "def list_attrs():\n return []", "title": "" }, { "docid": "8c94b62e4d3031843d913384c75f6e05", "score": "0.48533943", "text": "def _add_custom_attributes(csvloader: CSVLoader, list_users: list):\n list_attributes = csvloader.template[Template.CUSTOM_ATTRIBUTES]\n if list_attributes:\n def assign_one_attribute(user: KCUser):\n \"\"\"sub function used by map\n\n Args:\n user (KCUser): a user instance\n\n Raises:\n KCUserError: Exception raised for errors in the KCUser\n \"\"\"\n for attribute in list_attributes:\n try:\n user.attributes[attribute['key']] = attribute['value']\n except KeyError:\n raise KCUser.KCUserError(\n f'custom_attributes only have attribute key and value.')\n list(map(assign_one_attribute, list_users))\n logger.info(f'Assign {list_attributes} to users')", "title": "" }, { "docid": "0e7fbc39acdc25186067aba0651ff6e4", "score": "0.48452422", "text": "def parameters_construct_table_view(self):", "title": "" }, { "docid": "6525650c445975c6ae474cf861b3b760", "score": "0.4844517", "text": "def columns(self):\n from .metafiles import Column\n\n cols = [None] * len(self.geo()['LOGRECNO'][0]) + self._columns\n\n # Geez, what a mess ...\n short_descriptions_map = { c.unique_id:c.description for c in self.table.columns.values() }\n\n for i, (f, d, c) in enumerate(zip(self.file_headers, self.descriptions, cols)):\n\n if c is None:\n c = Column(None, f, i, d, f, )\n\n c.col_no = i\n c.description = d\n c.short_description = short_descriptions_map.get(c.unique_id)\n\n yield c", "title": "" }, { "docid": "638788af517c1de992e6752fe7741706", "score": "0.48340222", "text": "def add_to_nested_li(li, attributes, date, models=None):\n idx = generate_list_idx(attributes[\"timescale\"], attributes[\"timeidx\"])\n\n if li[idx] is None:\n obj = create_data_object(attributes[\"mean\"], attributes[\"timescale\"], date)\n li[idx] = obj\n else:\n li[idx][\"values\"].append(attributes[\"mean\"])\n\n if models is not None:\n models[idx] |= {attributes[\"model\"]}\n\n return li, models", "title": "" }, { "docid": "b02651c3ae86c91a20cf00aecc98de3f", "score": "0.4822738", "text": "def row_factory(row):\n return row", "title": "" }, { "docid": "88622dbdcacac20713a9773356a05ca4", "score": "0.48219866", "text": "def __getitem__(self, *args):\n return self._ctable.__getitem__(*args) # TODO should return a Column and not a carray", "title": "" }, { "docid": "311be43ed28df2a9e6b7d35829783c11", "score": "0.48199978", "text": "def __init__(self, width, height):\n self.width = width\n self.height = height\n column = []\n for i in range(height):\n column.append(None)\n self.grid = []\n for i in range(width):\n self.grid.append(column.copy())", "title": "" }, { "docid": "eb5170326b94b02295e6f29ea57f189f", "score": "0.48153964", "text": "def make_headless_row(self, html_args, html_el, 
column_count, col_attr='', row_attr=''):\n row_tag, col_head_tag, col_tag, single_col_tag, as_type, all_fieldsets = html_args\n if as_type == 'table' and column_count > 0:\n colspan = column_count * 2 if col_head_tag else column_count\n col_attr += f' colspan=\"{colspan}\"' if colspan > 1 else ''\n if single_col_tag:\n html_el = self._html_tag(single_col_tag, html_el, col_attr)\n else:\n row_attr += col_attr\n html_el = self._html_tag(row_tag, html_el, row_attr)\n return html_el", "title": "" }, { "docid": "b46305d7f51b46a80147776a88f3b7d7", "score": "0.48128572", "text": "def add_rows(self):\n raise NotImplementedError()", "title": "" }, { "docid": "6823df425d74682b2edd78fc48029e50", "score": "0.48127732", "text": "def _init_from_list(self, data, names, dtype, n_cols, copy):\n # Special case of initializing an empty table like `t = Table()`. No\n # action required at this point.\n if n_cols == 0:\n return\n\n cols = []\n default_names = _auto_names(n_cols)\n\n for col, name, default_name, dt in zip(data, names, default_names, dtype):\n col = self._convert_data_to_col(col, copy, default_name, dt, name)\n\n cols.append(col)\n\n self._init_from_cols(cols)", "title": "" }, { "docid": "cf9e38c8a2dd3915fd4166869b96eb68", "score": "0.48121068", "text": "def __init__(self, records):\n self.dataframe = pd.DataFrame.from_records(records)\n self.dataframe.columns = self.dataframe.loc[0].tolist()\n self.dataframe = self.dataframe[1:]", "title": "" } ]
6a5f4d250ef0388ef9618f6955fc436c
_insertStream_ Insert a stream into the stream table in T0AST. This will work even if the stream already exists inside the table.
[ { "docid": "98f7b4eae4e6b60dca02f4125dc85195", "score": "0.7189676", "text": "def insertStream(dbConn, streamName):\n sqlQuery = \"\"\"INSERT INTO stream (ID, NAME) SELECT stream_SEQ.nextval, :p_1\n FROM DUAL WHERE NOT EXISTS\n (SELECT NAME FROM stream WHERE NAME = :p_1)\"\"\"\n \n bindVars = {\"p_1\": streamName}\n dbConn.execute(sqlQuery, bindVars)\n return", "title": "" } ]
[ { "docid": "918e1ed716c3d91d297b2e53f580057e", "score": "0.64942634", "text": "def insert_datastream(self, stream):\n for flight in stream:\n # ArrDelay and DepDelay and TailNum can be 'NA' when cancelled is true\n if (flight.year != 'NA'\n and flight.month != 'NA'\n and flight.day_month != 'NA'\n and flight.day_week != 'NA'\n and flight.hour != 'NA'\n and ((flight.cancelled == False and flight.ArrDelay != 'NA' and flight.DepDelay != 'NA') or flight.cancelled == True)):\n for q in INSERTS_Q:\n query = q(flight)\n self._session.execute(query)", "title": "" }, { "docid": "c6aa3fe8a4ffc20a748289dc68ca7fc0", "score": "0.5919242", "text": "def stream(table, rows_to_insert, unique_ids):\n\n\trow_ids = []\n\tfor row in rows_to_insert:\n\t\tidx = ''\n\t\tfor col in unique_ids:\n\t\t\tidx += str(row[col]) + '_'\n\t\trow_ids.append(idx[:-1])\n\tlogging.info(\"BigQuery Streaming indexIds/uniqueIds/row_ids: {}\".format(row_ids))\n\n\terrors = client.insert_rows_json(table, rows_to_insert, row_ids=row_ids)\n\tif errors == []:\n\t\treturn True\n\telse:\n\t\traise Exception(errors)\n\t\treturn False", "title": "" }, { "docid": "8efbca3bbe12ebfd09c38d86e6b2ea6d", "score": "0.57515997", "text": "def insertStreamConfig(dbConn, runNumber, streamName, streamConfig):\n insertProcessingStyle(dbConn, runNumber, streamName,\n streamConfig.ProcessingStyle)\n\n if streamConfig.ProcessingStyle != \"Express\":\n return\n\n sqlQuery = \"\"\"INSERT INTO express_config\n (run_id, stream_id, processing_config_url,\n splitInProcessing, proc_version,\n alcamerge_config_url, global_tag)\n VALUES (:RUN_ID,\n (SELECT id FROM stream WHERE name = :STREAM),\n :PROC_URL, :SPLIT_IN_PROC, :PROC_VER,\n :ALCAMERGE_URL, :GLOBAL_TAG)\"\"\"\n\n bindParams = {\"RUN_ID\": runNumber, \"STREAM\": streamName,\n \"PROC_URL\": streamConfig.ProcessingConfigURL,\n \"ALCAMERGE_URL\": streamConfig.AlcaMergeConfigURL,\n \"GLOBAL_TAG\": streamConfig.GlobalTag,\n \"SPLIT_IN_PROC\": int(streamConfig.SplitInProcessing),\n \"PROC_VER\": streamConfig.ProcessingVersion}\n dbConn.execute(sqlQuery, bindParams)\n\n sqlQuery = \"\"\"INSERT INTO run_stream_tier_assoc\n (run_id, stream_id, data_tier_id)\n VALUES (:RUN_ID,\n (SELECT id FROM stream WHERE name = :STREAM),\n (SELECT id FROM data_tier WHERE name = :DATA_TIER))\"\"\"\n\n bindVars = []\n for data_tier in streamConfig.DataTiers:\n bindVars.append({\"RUN_ID\": runNumber,\n \"STREAM\": streamName,\n \"DATA_TIER\": data_tier})\n dbConn.execute(sqlQuery, bindVars)\n\n return", "title": "" }, { "docid": "7febb1678dfbc3cb3a74f7f2f37bfe88", "score": "0.5606126", "text": "def add_stream(self, stream):\n stream_id, input_ids, output_ids = self._get_identifiers(stream)\n self._add_vertices(stream, stream_id, input_ids, output_ids)\n self._add_edges(stream_id, input_ids, output_ids)\n return Node(self, stream_id, input_ids, output_ids)", "title": "" }, { "docid": "a05cea1ff270aa9f77c8220c51cf7c53", "score": "0.5322678", "text": "def addFileToStream(dataset, stream_name, time_file_pair):\n\t\ttry:\n\t\t\tdataset.addToStream(\n\t\t\t stream_name,\n\t\t\t time_file_pair\n\t\t\t)\n\t\texcept KeyError:\n\t\t\tdataset.createNewStream(\n\t\t\t stream_name,\n\t\t\t [time_file_pair]\n\t\t\t)", "title": "" }, { "docid": "1ed1cf32b9519585f0ce342499e2c5ae", "score": "0.5230901", "text": "def insert_into_podcast_table(db: Connection, podcast: PodcastIn) -> None:\n db.cursor().execute(\n \"INSERT INTO podcasts(title, url) VALUES (?, ?)\",\n (podcast.title, podcast.url),\n )\n db.commit()", "title": "" }, { "docid": 
"b9fcf3493e25506755bb0c970e6c5514", "score": "0.5228415", "text": "def tst_insert(dut):\n cocotb.fork(Clock(dut.clk, 6.4, 'ns').start())\n tb = axistream_fifo_TB(dut)\n yield tb.async_rst()\n tb.insertContinuousBatch(5, 5)\n tb.stream_in.append(895, tlast=1)\n dut.stream_out_tready <= 0\n yield ClockCycles(dut.clk, 10)", "title": "" }, { "docid": "637b10975bddee437c67d09fb0bdf16c", "score": "0.5215235", "text": "def insertProcessingStyle(dbConn, runNumber, streamName, processingStyle):\n if processingStyle == \"Express\":\n datasetName = \"Stream%s\" % streamName\n InsertDataset.insertPrimaryDataset(dbConn, datasetName)\n InsertDataset.assocPrimaryDatasetRunStream(dbConn, datasetName,\n streamName, runNumber)\n\n sqlQuery = \"\"\"INSERT INTO run_stream_style_assoc\n (run_id, stream_id, style_id)\n VALUES (:p_1,\n (SELECT id FROM stream WHERE name = :p_2),\n (SELECT id FROM processing_style WHERE name = :p_3))\"\"\"\n\n bindVars = {\"p_1\": runNumber, \"p_2\": streamName, \"p_3\": processingStyle}\n dbConn.execute(sqlQuery, bindVars)\n return", "title": "" }, { "docid": "f8141835cf42d0d7b56800d9a3297330", "score": "0.52029455", "text": "def step_insert_into_table(context):\n context.cli.sendline(f\"\"\"insert into a(x) values('{INITIAL_DATA}');\"\"\")", "title": "" }, { "docid": "765b06936f2ad6f426f1bf75560ed9eb", "score": "0.5171453", "text": "def insert_tx(self, tx):\n pass", "title": "" }, { "docid": "5912d8fa5345a4fbbf25222db00a12fe", "score": "0.51507765", "text": "def post(self):\n blockchain_name = data_stream_ns.payload[BLOCKCHAIN_NAME_FIELD_NAME]\n stream_name = data_stream_ns.payload[STREAM_NAME_FIELD_NAME]\n is_open = data_stream_ns.payload[IS_OPEN_FIELD_NAME]\n\n if not blockchain_name or not blockchain_name.strip():\n raise ValueError(\"The blockchain name can't be empty!\")\n\n if not stream_name or not stream_name.strip():\n raise ValueError(\"The stream name can't be empty!\")\n\n blockchain_name = blockchain_name.strip()\n stream_name = stream_name.strip()\n DataStreamController.create_stream(blockchain_name, stream_name, is_open)\n return {\"status\": stream_name + \" created!\"}, status.HTTP_200_OK", "title": "" }, { "docid": "0d5c6ad4df27eed7ca5029d01bb56b55", "score": "0.5142196", "text": "def create_stream(self, stream_id, sandbox=None):\n raise NotImplementedError", "title": "" }, { "docid": "6ccf227e7695188eda189d467eca0d44", "score": "0.5093264", "text": "def __newStream(self, args):\n name = getRequiredParam(args, 'stream')\n st = Stream(name)\n self.registerSource(name, st)", "title": "" }, { "docid": "f7356c8a1643f31ee692ba19b0d03e20", "score": "0.50628066", "text": "def stream_commit(self, stream_object):\n\t\tself.send_msg(stream_object.port_commands)\n\t\tfor message, expected_string in stream_object.port_queries.iteritems():\n\t\t\tself.socket.send(message)\n\t\t\ttry:\n\t\t\t\tdata = self.socket.recv(1024)\n\t\t\texcept socket.timeout:\n\t\t\t\traise error.AnritsuTimeout(message)\n\t\t\tif data != expected_string:\n\t\t\t\traise error.AnritsuCommandError(message, expected_string, data)\n\t\tstream_object.commands.append(':TSTReam:TABLe:WRITe\\n')\n\t\tself.send_msg(stream_object.commands)\n\t\t#First test with queries the variables of the 'stream setting'\n\t\tfor message, expected_string in stream_object.stream_queries.iteritems():\n\t\t\tself.socket.send(message)\n\t\t\ttry:\n\t\t\t\tdata = self.socket.recv(1024)\n\t\t\texcept socket.timeout:\n\t\t\t\traise error.AnritsuTimeout(message)\n\t\t\tif data != expected_string:\n\t\t\t\traise 
error.AnritsuQueryError(message, expected_string, data)\n\t\t#Then tests with queries the variables of the 'frame settings'.\n\t\t#This seperation of queries was required, as frame settings can't\n\t\t#be tested if the stream setting is untested.\n\t\tfor message, expected_string in stream_object.frame_queries.iteritems():\n\t\t\tself.socket.send(message)\n\t\t\ttry:\n\t\t\t\tdata = self.socket.recv(1024)\n\t\t\texcept socket.timeout:\n\t\t\t\traise error.AnritsuTimeout(message)\n\t\t\tif data != expected_string:\n\t\t\t\traise error.AnritsuQueryError(message, expected_string, data)", "title": "" }, { "docid": "d139fcd6dda3c691ad9839377255978b", "score": "0.50578445", "text": "def upload(self, sample_id: str, sample_name: str, group_id: str, group_name: str,\n bed_stream: io.TextIOWrapper):\n source = str(Path(bed_stream.name).absolute())\n result = load_transcripts(bed_stream, sample_id=sample_id, group_id=group_id,\n source=source, threshold=10)\n result.sample.name = sample_name\n result.sample.group_name = group_name\n\n try:\n self.add(result.sample)\n with click.progressbar(result.models, length=result.count,\n label=f\"loading {sample_id}\") as progress_bar:\n for tx_model in progress_bar:\n self.add(tx_model)\n self.save()\n except IntegrityError as error:\n self.session.rollback()\n raise error", "title": "" }, { "docid": "fd667e3d1ea44addfed798889356079c", "score": "0.5049418", "text": "async def insert(self, table_name: str, data: Dict[str, Any]) -> None:", "title": "" }, { "docid": "8580f55c767f699ade50fa0b5cdcdfa6", "score": "0.504652", "text": "def writeTS (tsid,ts, replace_table = False):\n\n try:\n cur = dbconn.cursor()\n if replace_table == True:\n try:\n cur.execute (\"DROP TABLE \"+tsid)\n except:\n pass\n cur.execute (\"CREATE TABLE IF NOT EXISTS \"+tsid+\"(DateTime INTEGER PRIMARY KEY, value FLOAT)\")#, flag INTEGER)\")\n for line in ts.data:\n sqltxt = \"INSERT OR REPLACE INTO \"+tsid+\" VALUES(%d,%f)\" % (int(time.mktime(line[0].timetuple())),line[1])#,int(line[2]))\n cur.execute(sqltxt)\n dbconn.commit()\n except Exception, e:\n status = \"\\nCould not store \"+tsid\n status += \"\\n%s\" % str(e)\n print status\n cur.close()", "title": "" }, { "docid": "459706020d2fbe9aa03965f70df76007", "score": "0.50246567", "text": "def insert_query(self, dump):\n try:\n if (self.connection is None):\n self.connection = self.connect()\n\n self.cursor = self.connection.cursor()\n # self.cursor.execute(\"PRAGMA key = '%s'\" % settings.key)\n self.cursor.execute(\"\"\"\n INSERT INTO ParsedHTTP (timestmp, page_id, tcp_session_id, browsing_session_id, source, dest, http_type,\n host, url, referer, cookie, content_type, no_children, payload, hrefs, iframes, images)\n VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\"\"\", dump)\n self.connection.commit()\n\n except sqlite3.Error as e:\n try:\n self.connection = self.connect()\n self.cursor = self.connection.cursor()\n self.cursor.execute(\"\"\"\n INSERT INTO ParsedHTTP (timestmp, page_id, tcp_session_id, browsing_session_id, source, dest, http_type,\n host, url, referer, cookie, content_type, no_children, payload, hrefs, iframes, images)\n VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\"\"\", dump)\n self.connection.commit()\n except sqlite3.OperationalError as e:\n db_logger.debug(str(e) + \" : \" + dump)", "title": "" }, { "docid": "da908c8a6dfc7f61fda178a11535392f", "score": "0.49905112", "text": "def InsertRecords(query):\n with conn.cursor() as curr:\n curr.execute(query)", "title": "" }, { "docid": 
"ab0096de679e38d7678ba4d02730021a", "score": "0.49737117", "text": "def insert_data(self):\n\n formatted_data = self.get_endpoint_data()\n table = self.endpoint_name.replace(' ', '_')\n\n if table in BULK_INSERT_LIST:\n self.execute_bulk_insert(table, formatted_data)\n else:\n self.execute_single_insert(table, formatted_data)\n\n print('Committing transaction...')\n self.conn.commit()\n\n self.cur.close()\n print('Database connection closed.')", "title": "" }, { "docid": "fc424e693ca03a4c37e23e8d8af81da1", "score": "0.49470034", "text": "def produce(self, stream_url, params, auth):\n response = requests.get(\n url = stream_url,\n params = params,\n auth = auth,\n stream = True\n )\n\n for line in response.iter_lines():\n if line:\n line = json.loads(line)\n\n # Storing tweets' language\n data_point = [{\n # \"timestamp\":\n \"measurement\": self.influxdb_database,\n \"tags\": {\n \"language\": line['data']['lang'],\n },\n \"fields\": {\n \"id\": line['data']['id']\n }\n }]\n\n try:\n self.influxdb_client.write_points(data_point)\n logging.info(\"Successfully stored ID '{}'.\".format(line['data']['id']))\n except (InfluxDBServerError, InfluxDBClientError) as e:\n logging.info(\"Failed at storing ID '{}'. Error: {}\".format(line['data']['id'], e))\n\n # Queueing tweets into Kafka for further processing\n if line['data']['lang'] == 'en':\n self.send(\n self.topic,\n json.dumps({\n 'id': line['data']['id'],\n 'tweet': line['data']['text']\n }).encode())\n\n logging.info(\"Queued tweet '{}'.\".format(line['data']['id']))\n # logging.info(self.metrics())", "title": "" }, { "docid": "0fc6cb0904948c23628076196699ce9e", "score": "0.49255326", "text": "def _insert_statistics(\n session: Session,\n table: type[StatisticsBase],\n metadata_id: int,\n statistic: StatisticData,\n) -> None:\n try:\n session.add(table.from_stats(metadata_id, statistic))\n except SQLAlchemyError:\n _LOGGER.exception(\n \"Unexpected exception when inserting statistics %s:%s \",\n metadata_id,\n statistic,\n )", "title": "" }, { "docid": "75be5c89e42bd040b7ceedbed58082f7", "score": "0.49032173", "text": "def insert(self, subject: str, predicate: str, obj: str) -> None:\n # do warmup if necessary\n self.open()\n # start transaction\n self.start_transaction()\n if subject is not None and predicate is not None and obj is not None:\n insert_query = get_insert_query(self._table_name)\n self._update_cursor.execute(insert_query, (subject, predicate, obj))", "title": "" }, { "docid": "2c2051883fe2b7c19c1c46343b1d860d", "score": "0.48823112", "text": "def __init__(self, stream):\n _WritelnDecorator.__init__(self, stream)\n self.tput = TPut()", "title": "" }, { "docid": "0ee130d00e4cd76125da6f3ccfccc5b7", "score": "0.4881039", "text": "def insert_table(self, table_maker):\n document = self.window.get_active_document()\n document.insert_at_cursor(table_maker.table())", "title": "" }, { "docid": "ec259349561c35652dc4272882f76312", "score": "0.48757243", "text": "def insert(self, table, data):\n self._crsr.insert(table, data)", "title": "" }, { "docid": "d847363f1d82a07df04fe5372af08087", "score": "0.48705325", "text": "def test_update_streams(self):\n hash_key = DynamoKey(\"id\", data_type=STRING)\n table = self.dynamo.create_table(\n \"foobar\",\n hash_key=hash_key,\n )\n assert table is not None\n self.assertIsNone(table.stream_type)\n table = self.dynamo.update_table(\"foobar\", stream=NEW_AND_OLD_IMAGES)\n assert table is not None\n self.assertEqual(table.stream_type, NEW_AND_OLD_IMAGES)\n table = self.dynamo.update_table(\"foobar\", 
stream=False)\n assert table is not None\n self.assertIsNone(table.stream_type)", "title": "" }, { "docid": "2c52a7c43b7da9f279d7fd26ab26a719", "score": "0.48672694", "text": "def insert_record(self, table, record):\n placeholder = '?'\n placeholders = ', '.join(placeholder * len(record))\n query = 'INSERT INTO %s VALUES (%s);' %(table, placeholders)\n self.__cursor.execute(query, record)", "title": "" }, { "docid": "3811a7ca5adaa770b4d316d750663fac", "score": "0.48520356", "text": "def register(self, stream: TextIO):\n self._redirected_streams[threading.get_ident()] = stream", "title": "" }, { "docid": "7c62fd0b002d7d43fec9ab4b27de66be", "score": "0.48481423", "text": "def singer_data(url, singer_schema, stream):\n data = requests.get(url)\n singer.write_schema(schema=singer_schema, stream_name=stream, key_properties=[\"\"])\n singer.write_record(stream_name=stream, record=data.json())", "title": "" }, { "docid": "c8a73700586fb0079d0d7e0653085808", "score": "0.48470247", "text": "def execute_single_insert(self, table, data):\n sql = Runner.build_insert_sql(table, data)\n query = self.cur.mogrify(sql, data)\n print(f'Executing insert on table {table}')\n self.cur.execute(query)", "title": "" }, { "docid": "3ecd8fd3266ff69cd9b1fa583d542395", "score": "0.48312595", "text": "def insert_tx_in(self, tx):\n pass", "title": "" }, { "docid": "68a983bb226023529d0c5666e2cf579b", "score": "0.4806483", "text": "def put(self, filename, stream, is_draft=False, metadata_only=False):\n self.non_transactional_put(\n filename, stream, is_draft=is_draft, metadata_only=metadata_only)", "title": "" }, { "docid": "944c12f8b54a0ad22894a644aeb09f46", "score": "0.4804865", "text": "def insert_mytable():\r\n\r\n insert_query = \"\"\"\r\n \tINSERT INTO public.my_table\r\n\t SELECT * FROM source_schema.my_table limit 5;\r\n \"\"\"\r\n\r\n pg_hook = PostgresHook(postgres_conn_id=\"postgre_dwh_test\")\r\n pg_hook.run(insert_query)", "title": "" }, { "docid": "cd1b087a5de06e1b95d3031862b464c9", "score": "0.48015147", "text": "def associateWithStream(self, xs):\n self.xmlstream = xs\n self.xmlstream.addOnetimeObserver(xmlstream.STREAM_START_EVENT,\n self.streamStarted)", "title": "" }, { "docid": "cd1b087a5de06e1b95d3031862b464c9", "score": "0.48015147", "text": "def associateWithStream(self, xs):\n self.xmlstream = xs\n self.xmlstream.addOnetimeObserver(xmlstream.STREAM_START_EVENT,\n self.streamStarted)", "title": "" }, { "docid": "bafed1d471afcbff3c79fc197ff28725", "score": "0.47948053", "text": "def handleTcpStream(self, tcp_stream):\n stream_id=tcp_stream.addr\n \n\n \n if self.debug: print \"Handling a TCP stream.\", \"Time: \", time, \" \",stream_id\n \n if tcp_stream.nids_state == nids.NIDS_JUST_EST: # New connection/stream\n if self.debug: print \"New stream.\", stream_id\n #self.stream_list = self.stream_list+[tcp_stream]\n tcp_stream.client.collect=1 # Signal to collect this data\n tcp_stream.server.collect=1\n \n # Store our own metadata -- twice.\n new_ct=self.certainty_table(self.all_idents)\n self.index_table[self.next_index]=(tcp_stream,new_ct,stream_id,'unknown') #TODO: These tuples should probably be the same.\n self.stream_table[stream_id]=(self.next_index,new_ct,tcp_stream,'unknown')\n self.next_index+=1\n self.identifyStream(stream_id)\n self.f_cb_new_tcp(tcp_stream) #Call back.\n elif tcp_stream.nids_state == nids.NIDS_DATA: # Established connection receiving new data\n index=self.stream_table[stream_id][0]\n ct = self.index_table[index][1]\n proto=self.index_table[index][3]\n if proto is 
'unknown':\n self.identifyStream(stream_id)\n elif tcp_stream.nids_state in end_states: #TODO: This doesn't seem to work. Except sometimes.\n self.f_cb_end_tcp(tcp_stream)", "title": "" }, { "docid": "2ea130d795ca49539c1f2c1f5b10388a", "score": "0.47901478", "text": "def insert(self, rows):\n insert_query = self.location_table.insert()\n result = self.connection.execute(insert_query, rows)\n logger.debug('%d rows inserted', result.rowcount)", "title": "" }, { "docid": "051a86ae4aebc745359935696c5ba4a1", "score": "0.47884494", "text": "def insert(self, state):\n self.db_int.document.insert(did=self.doc_id, state=state, kind=self.doc_kind, repId=self.doc_id,\n batch_uuid=str(self.batch_uuid), direction_process=self.record_object['func'].data)\n self.db_int.commit()", "title": "" }, { "docid": "af3df6b79b92fabb2a8a325eaaf364f4", "score": "0.4766177", "text": "def df2stream(stream,df):\n for trace in range(len(df.columns)):\n stream[trace].data=df[trace].values\n return(stream)", "title": "" }, { "docid": "32441d696b6eb3648ef912a2ac741f80", "score": "0.47625312", "text": "def insert_tweet(tweet, cls, db_conn, annotations_table):\n tweet_id = tweet['id']\n config.logging.info(\"Inserting tweet_id '{}'\".format(tweet_id))\n tweet_text = unicode(tweet['text'])\n user_id = tweet['user']['id']\n user_name = tweet['user']['name'].lower()\n tweet_created_at = dt_parser.parse(tweet['created_at'])\n insert_tweet_details(tweet_id, tweet_text, tweet_created_at, cls, user_id, user_name, db_conn, annotations_table)", "title": "" }, { "docid": "b1c244c91b48aaaf33d8875c18fd4b51", "score": "0.47621", "text": "def insert(self, hash, sid, offset):\n with self.cursor(charset=\"utf8\") as cur:\n cur.execute(self.INSERT_FINGERPRINT, (hash, sid, offset))", "title": "" }, { "docid": "303eeeb5b6e7f9a0e7f14a8d0abb3b5f", "score": "0.47595763", "text": "def write_data_to_pravega(\n controller_uri: str, scope: str, stream: str,\n table_data: Generator[TripRecord, None, None]) -> None:\n manager = pravega_client.StreamManager(controller_uri)\n manager.create_scope(scope_name=scope)\n manager.create_stream(scope_name=scope,\n stream_name=stream,\n initial_segments=3)\n\n uncapitalize = lambda s: f'{s[0].lower()}{s[1:]}' if s else ''\n\n writer = manager.create_writer(scope, stream)\n for row in table_data:\n event = {\n # convert dataclass to dict with key in camel case\n uncapitalize(''.join(w.title() for w in k.split('_'))): v\n for k, v in dataclasses.asdict(row).items()\n }\n print(event)\n writer.write_event(json.dumps(event),\n routing_key=str(row.start_location_id))", "title": "" }, { "docid": "9996e7791f3110652f0e4c59bea35f37", "score": "0.47527763", "text": "def insert(self, dat, table):\n \n fields = ', '.join(dat.viewkeys())\n values = ')s, %('.join(dat.viewkeys())\n insert_sql = \"INSERT INTO \" + table + \" (\" + fields + \") VALUES (%(\" + values + \")s)\"\n \n self.cur.execute(insert_sql, dat)\n \n # shall we commit to the db? 
-- Oh the irony...\n if self.n >= 1000:\n self.con.commit()\n #self.logger.info(\" added %s records to '%s' table...\" % (format(self.n, \",d\"), table))\n self.n = 0\n self.n += 1\n self.N += 1", "title": "" }, { "docid": "cfbbcf58c82e3da1c983d440e40356af", "score": "0.4751986", "text": "def insert_tweet(company_symbol, tweet_text):\r\n company_id = get_company_id(company_symbol)\r\n cur = con.cursor()\r\n cur.execute('INSERT INTO \"Tweets\"(company_id, time, text) VALUES(%s, %s, %s)',\r\n (company_id, datetime.datetime.now(), tweet_text))\r\n con.commit()\r\n cur.close()", "title": "" }, { "docid": "c5ab4ecd1229009b3b7b4a3aeb89891c", "score": "0.47480544", "text": "def insert_file(db_file):\n # Convert raw bytes to bytes-stream object (Box SDK expects this obj type)\n client.folder(folder_id).upload_stream(BytesIO(db_file.bin), db_file.name)", "title": "" }, { "docid": "5d9d977a5ff3def37ffda253acac60f1", "score": "0.4741828", "text": "def insert_screening(screening_time, screening_date, hall_number, title, year_of_production):\n sql = \"\"\"INSERT INTO screenings\n VALUES(nextval('screening_id_seq'),%s,%s,%s,%s,%s);\"\"\"\n conn = None\n try:\n params = config()\n conn = psycopg2.connect(**params)\n cur = conn.cursor()\n cur.execute(sql, (screening_time, screening_date, hall_number, title, year_of_production))\n conn.commit()\n cur.close()\n except (Exception, psycopg2.DatabaseError) as error:\n print(error)\n finally:\n if conn is not None:\n conn.close()", "title": "" }, { "docid": "5369b6d5bbdd0d5be6bc340f6975583c", "score": "0.47406104", "text": "def add_streams (self, streams, ports = None):\n\n\n ports = ports if ports is not None else self.get_acquired_ports()\n ports = self._validate_port_list(ports)\n\n if isinstance(streams, STLProfile):\n streams = streams.get_streams()\n\n # transform single stream\n if not isinstance(streams, list):\n streams = [streams]\n\n # check streams\n if not all([isinstance(stream, STLStream) for stream in streams]):\n raise STLArgumentError('streams', streams)\n\n self.logger.pre_cmd(\"Attaching {0} streams to port(s) {1}:\".format(len(streams), ports))\n rc = self.__add_streams(streams, ports)\n self.logger.post_cmd(rc)\n\n if not rc:\n raise STLError(rc)\n\n return [stream.get_id() for stream in streams]", "title": "" }, { "docid": "2d31e17ec9584bdd4e23a40fc127a8de", "score": "0.4739796", "text": "def test_stream(self, tg):\n iface = tg.ports[0]\n packet_count = 100\n src_mac = PACKET_DEFINITION[0][\"Ethernet\"][\"src\"]\n stream_id = tg.set_stream(PACKET_DEFINITION, count=packet_count,\n iface=iface, adjust_size=True, required_size=1450)\n tg.start_sniff([iface], sniffing_time=2, src_filter=src_mac)\n tg.send_stream(stream_id)\n data = tg.stop_sniff([iface])\n packets = data.get(iface, [])\n\n assert len(packets) == packet_count, \\\n \"Captured packets count {0} does not match expected {1}\".format(len(packets), packet_count)", "title": "" }, { "docid": "6739cc548e180f69389857d1ed358f8e", "score": "0.47362235", "text": "def insert_or_update_table(**context):\n try:\n sql = format(context[\"dag_run\"].conf[\"sql\"])\n table_name = format(context[\"dag_run\"].conf[\"table_name\"])\n logging.info('trying the task')\n logging.info('connecting to source')\n src = MySqlHook(mysql_conn_id='openemis')\n logging.info('connecting to destination')\n print(\"Remotely received value of {} for key=sql\".sql)\n print(\"Remotely received value of {} for key=table_name\".table_name)\n dest = MySqlHook(mysql_conn_id='analytics')\n src_conn = src.get_conn()\n 
cursor = src_conn.cursor()\n dest_conn = dest.get_conn()\n cursor.execute(sql)\n dest.insert_rows(table=table_name, rows=cursor, replace=True)\n except Exception as e3:\n logging.error('Table update is failed, please refer the logs more details')\n logging.exception(context)\n logging.exception(e3)", "title": "" }, { "docid": "6d18611666f6e035e4e3b00a3e54a520", "score": "0.4724483", "text": "def insert_batch(cls, batch):\n if batch:\n cls.get_database(for_write=True).insert_tuples(cls, batch, formatted=cls.sync_formatted_tuples)", "title": "" }, { "docid": "c5cb34ec2d4b44013262cc8047c3e78d", "score": "0.47227415", "text": "def insert(self, packet, time=None, **kwargs):\n pass", "title": "" }, { "docid": "c3aec11cddfa6029dfc1e46d54b165d3", "score": "0.47106296", "text": "def insert_or_update(\n self,\n source,\n signal,\n time_type,\n geo_type,\n time_value,\n geo_value,\n value,\n stderr,\n sample_size):\n\n sql = '''\n INSERT INTO `covidcast` VALUES\n (0, %s, %s, %s, %s, %s, %s, UNIX_TIMESTAMP(NOW()), %s, %s, %s, 0, NULL)\n ON DUPLICATE KEY UPDATE\n `timestamp1` = VALUES(`timestamp1`),\n `value` = VALUES(`value`),\n `stderr` = VALUES(`stderr`),\n `sample_size` = VALUES(`sample_size`)\n '''\n\n args = (\n source,\n signal,\n time_type,\n geo_type,\n time_value,\n geo_value,\n value,\n stderr,\n sample_size,\n )\n\n self._cursor.execute(sql, args)", "title": "" }, { "docid": "641ce1fa7f9d0db4fd21720a0cfc95cd", "score": "0.4709791", "text": "def start_stream(self) -> None:\n pass", "title": "" }, { "docid": "2915ac81e6787190ecf4c630c8dd55ac", "score": "0.47078827", "text": "def insert_into_tables(cur, conn):\n print('Inserting data from staging tables into analytics tables')\n for query in insert_commands:\n print('Running ' + query)\n cur.execute(query)\n conn.commit()", "title": "" }, { "docid": "e134d203dcc3cae12628d3cd776934f4", "score": "0.47064388", "text": "def write_to(self, stream: StreamWrapper):\n stream.write_int(self.TAG)\n self.data.write_to(stream)", "title": "" }, { "docid": "4357c2b719db1d1707be49d149728db1", "score": "0.4698762", "text": "def open_stream(self, priority, associated_stream=None):\n assoc_id = associated_stream.stream_id if associated_stream else None\n\n syn = SYNStreamFrame()\n syn.version = self.version\n syn.stream_id = self.stream_id\n syn.assoc_stream_id = assoc_id\n syn.priority = priority\n\n # Assume this will be the last frame unless we find out otherwise.\n syn.flags.add(FLAG_FIN)\n\n self._queued_frames.append(syn)", "title": "" }, { "docid": "8b6ef36f6426b62d3144f267992afa01", "score": "0.46894556", "text": "def test_upload_stream(self):\n db = FileSystemFilestore(SERVER_DIR)\n file = FileStorage(filename=CSV_FILE)\n fh = db.upload_stream(file=file, file_name=os.path.basename(CSV_FILE))\n self.assertEqual(fh.file_name, os.path.basename(CSV_FILE))\n self.assertEqual(fh.mimetype, fs.FORMAT_CSV)\n self.assertTrue(os.path.isfile(fh.filepath))\n self.assertEqual(fh.identifier, db.get_file(fh.identifier).identifier)", "title": "" }, { "docid": "ddd57d93d8c11f7eaba656c126d627a1", "score": "0.46882677", "text": "def insert_tweet(tweet: str, cursor: pg8000.Cursor) -> TwResp:\n cursor.execute(f'''\n INSERT INTO tweets (tweet) VALUES (%s)\n RETURNING {TWEET_COLUMN_ORDER};\n ''', (tweet,))\n return cursor.fetchone()", "title": "" }, { "docid": "04c38d73df7c5e2555e230bfdd29f8e4", "score": "0.4685548", "text": "def insertSource(mutations, database):\n # Connect to MySQL Database (kinesin on kenobi)\n if database == 'kenobi':\n cnx = 
pymysql.connect(host=config_kinesin.database_config['dbhost'],\n user=config_kinesin.database_config['dbuser'],\n passwd=config_kinesin.database_config['dbpass'],\n db=config_kinesin.database_config['dbname'])\n else:\n ## if database is home mysql database\n cnx = pymysql.connect(host=config_home.database_config['dbhost'],\n port=config_home.database_config['port'],\n user=config_home.database_config['dbuser'],\n passwd=config_home.database_config['dbpass'],\n db=config_home.database_config['dbname'])\n cursor = cnx.cursor(pymysql.cursors.DictCursor)\n\n sql_source = \"INSERT IGNORE INTO source_info (source_id, source_db, mutation_id) VALUES(%s,%s,%s)\"\n gdc_source = mutations[1]\n i = 0\n for x in gdc_source:\n if x[2] != \"None\": # and x[4]==\"missense_variant\":\n rows = cursor.execute(sql_source, x)\n i += 1\n cnx.commit()\n cnx.close()\n return i", "title": "" }, { "docid": "8ae6aa5e4c44c3bf6f927249eea19433", "score": "0.46848434", "text": "def data_entry(c, conn, tablename, cont):\n unix = int(time.time())\n date = str(datetime.datetime.fromtimestamp(unix).strftime('%Y-%m-%d %H:%M:%S'))\n name = str(cont['name'])\n address = str(cont['address'])\n mobile = str(cont['mobile'])\n email = str(cont['email'])\n github = str(cont['github'])\n linkedin = str(cont['linkedin'])\n expertise = str(cont['expertise'])\n similarity_score = float(cont['similarity_score'])\n c.execute(\"\"\"\n Insert Into {}\n (unix,\n datestamp,\n name,\n address,\n mobile,\n email,\n github,\n linkedin,\n expertise,\n similarity_score)\n VALUES\n (\n ?, ?, ?, ?, ?, ?, ?, ?, ?, ?\n )\n \"\"\".format(tablename), (unix, date, name, address, mobile, email, github, linkedin, expertise, similarity_score))\n conn.commit()\n c.close()\n conn.close()", "title": "" }, { "docid": "be98df7995c1b86f0cc31eb26160d1e3", "score": "0.46844554", "text": "def _insert_transaction_into_a_db(self, txobj):\n #print(\"_insert_transaction_into_a_db: for txid =\", txobj.transaction_id.hex())\n if txobj.transaction_data is None:\n txobj.serialize()\n ret = self.exec_sql(sql=\"INSERT INTO transaction_table VALUES (%s,%s)\" % (self.db_adaptor.placeholder,\n self.db_adaptor.placeholder),\n args=(txobj.transaction_id, txobj.transaction_data), commit=True)\n if ret is None:\n return False\n\n for asset_group_id, asset_id, user_id in self.get_asset_info(txobj):\n self.exec_sql(sql=\"INSERT INTO asset_info_table(transaction_id, asset_group_id, asset_id, user_id) \"\n \"VALUES (%s, %s, %s, %s)\" % (\n self.db_adaptor.placeholder, self.db_adaptor.placeholder,\n self.db_adaptor.placeholder, self.db_adaptor.placeholder),\n args=(txobj.transaction_id, asset_group_id, asset_id, user_id), commit=True)\n for base, point_to in self._get_topology_info(txobj):\n self.exec_sql(sql=\"INSERT INTO topology_table(base, point_to) VALUES (%s, %s)\" %\n (self.db_adaptor.placeholder, self.db_adaptor.placeholder),\n args=(base, point_to), commit=True)\n #print(\"topology: base:%s, point_to:%s\" % (base.hex(), point_to.hex()))\n return True", "title": "" }, { "docid": "cb5e3648b0f12db0a595df9c0f1f195b", "score": "0.46823323", "text": "def insert(dbc, ref, txt, seq):\n qry = INS.format(ref=ref, txt=txt, seq=seq)\n dbc.execute(qry)\n dbc.commit()", "title": "" }, { "docid": "01e970a2efa52108fd9b5eff274743c2", "score": "0.46813023", "text": "def start_stream():\r\n print(\"\\nStarting data acquisition.\")\r\n board.prepare_session()\r\n board.start_stream(45000, input)\r\n key_marker()", "title": "" }, { "docid": "26202c55ea591d5ec9cc5fcae9443186", "score": "0.4679128", 
"text": "def addStreamInfo(self, type, values):\n\t\tprint \"*** addStreamInfo ***\", [type, values]\n\t\tvalues['type'] = type\n\t\tself.data['streams'].append({k.lower():values[k] for k in values.keys()})", "title": "" }, { "docid": "e7cfdd768f2677b10811f48f3056d9e5", "score": "0.46705902", "text": "def send_stream(self, stream_id):\n pass", "title": "" }, { "docid": "001dca6eca77c0538aab1910a1ca8a60", "score": "0.46537566", "text": "def insert_db_topic(self, topic_id, nb_posts):", "title": "" }, { "docid": "14df4fec91d65d401aaccea9d4cf54a7", "score": "0.46522933", "text": "def attach_stream(self, streamid, circid, hop=None):\n if hop:\n plog(\"DEBUG\", \"Attaching stream: \"+str(streamid)+\" to hop \"+str(hop)+\" of circuit \"+str(circid))\n return self.sendAndRecv(\"ATTACHSTREAM %d %d HOP=%d\\r\\n\"%(streamid, circid, hop))\n else:\n plog(\"DEBUG\", \"Attaching stream: \"+str(streamid)+\" to circuit \"+str(circid))\n return self.sendAndRecv(\"ATTACHSTREAM %d %d\\r\\n\"%(streamid, circid))", "title": "" }, { "docid": "6b4112db7192b91fd005a5101ed36f0b", "score": "0.46500057", "text": "async def stream(self, ctx, op : str = \"\"):\r\n if len(self.bot.stream['content']) == 0:\r\n await ctx.send(embed=self.bot.buildEmbed(title=\"No event or stream available\", color=self.color))\r\n elif op == \"raw\":\r\n msg = \"\"\r\n for c in self.bot.stream['content']:\r\n msg += c + '\\n'\r\n await ctx.send('`' + msg + '`')\r\n else:\r\n title = self.bot.stream['content'][0]\r\n msg = \"\"\r\n current_time = self.bot.getJST()\r\n if self.bot.stream['time'] is not None:\r\n if current_time < self.bot.stream['time']:\r\n d = self.bot.stream['time'] - current_time\r\n cd = \"{}\".format(self.bot.getTimedeltaStr(d, 2))\r\n else:\r\n cd = \"On going!!\"\r\n else:\r\n cd = \"\"\r\n for i in range(1, len(self.bot.stream['content'])):\r\n if cd != \"\" and self.bot.stream['content'][i].find('{}') != -1:\r\n msg += self.bot.stream['content'][i].format(cd) + \"\\n\"\r\n else:\r\n msg += self.bot.stream['content'][i] + \"\\n\"\r\n \r\n if cd != \"\" and title.find('{}') != -1:\r\n title = title.format(cd) + \"\\n\"\r\n\r\n await ctx.send(embed=self.bot.buildEmbed(title=title, description=msg, color=self.color))", "title": "" }, { "docid": "7347d3e1a85cf3382640dd55170bc132", "score": "0.4648191", "text": "def __init__(self, stream, callback, timestampFormat=\"[%x %X] \"):\r\n super(StreamRedirector, self).__init__()\r\n self._timestampFormat = timestampFormat\r\n self._buffer = \"\"\r\n self._stream = stream\r\n self._callback = callback", "title": "" }, { "docid": "0e31c13f7f1ec5fdf4733001f461b58d", "score": "0.4647839", "text": "def insert_stat_row(conn, to_db):\n return insert_row(conn, STAT_TABLE, STAT_FIELDS, to_db)", "title": "" }, { "docid": "3df70a1e9ed58e3382be7218dba75d0c", "score": "0.46471345", "text": "def insert(self, data):\n pass", "title": "" }, { "docid": "3c3937b1bfdfb384955b93e9d9e1ddb8", "score": "0.4634329", "text": "def create_mos_row(conn,MOS_row):\n print(MOS_row)\n sql_command = \"\"\"INSERT INTO MOS_Sites(stnident, stnname) VALUES (?,?)\"\"\"\n crsr=conn.cursor()\n crsr.execute(sql_command,MOS_row)", "title": "" }, { "docid": "414304bc9d5d5e8bf19ff0eeff0ce542", "score": "0.46296465", "text": "async def stream(self, ctx, op : str = \"\"):\r\n if len(self.bot.stream['content']) == 0:\r\n await ctx.send(embed=self.bot.buildEmbed(title=\"No event or stream available\", color=self.color))\r\n elif op == \"raw\":\r\n await ctx.send('`' + str(self.bot.stream['content']) + '`')\r\n else:\r\n 
title = self.bot.stream['content'][0]\r\n msg = \"\"\r\n current_time = self.bot.getJST()\r\n if self.bot.stream['time'] is not None:\r\n if current_time < self.bot.stream['time']:\r\n d = self.bot.stream['time'] - current_time\r\n cd = \"{}\".format(self.bot.getTimedeltaStr(d, True))\r\n else:\r\n cd = \"On going!!\"\r\n else:\r\n cd = \"\"\r\n for i in range(1, len(self.bot.stream['content'])):\r\n if cd != \"\" and self.bot.stream['content'][i].find('{}') != -1:\r\n msg += self.bot.stream['content'][i].format(cd) + \"\\n\"\r\n else:\r\n msg += self.bot.stream['content'][i] + \"\\n\"\r\n \r\n if cd != \"\" and title.find('{}') != -1:\r\n title = title.format(cd) + \"\\n\"\r\n\r\n await ctx.send(embed=self.bot.buildEmbed(title=title, description=msg, color=self.color))", "title": "" }, { "docid": "779a4d6843866874947bdbba59098162", "score": "0.46240315", "text": "def new_chunk(self, stream_data: np.array, stream_attrs: dict, chunk_idx: int):\n # Generate a time-base and a data model\n if self.cfg[\"name\"] == \"kstarecei\":\n # Adapt configuration file parameters for use in timebase_streaming constructor\n self.logger.info(f\"New chunk: attrs = {stream_attrs}, chunk_idx = {chunk_idx}\")\n tb_chunk = timebase_streaming(stream_attrs[\"TriggerTime\"][0],\n stream_attrs[\"TriggerTime\"][1],\n stream_attrs[\"SampleRate\"],\n self.chunk_size, chunk_idx)\n chunk = self.data_type(stream_data, tb_chunk, stream_attrs)\n\n # Determine whether we need to normalize the data\n tidx_norm = [tb_chunk.time_to_idx(t) for t in self.t_norm]\n\n if (tidx_norm[0] is not None) and (tidx_norm[1] is not None) and self.normalize is None:\n # TODO: Here we create a normalization object using explicit values for the\n # normalization It may be better to just pass the data and let the normalization\n # object figure out how to calculate the needed constants. 
This would be the best\n # way to allow different normalization.\n data_norm = stream_data[:, tidx_norm[0]:tidx_norm[1]]\n self.normalize = normalize_mean(data_norm)\n self.logger.info(f\"Calculated normalization using\\\n {tidx_norm[1] - tidx_norm[0]} samples.\")\n\n elif self.normalize is not None:\n self.normalize(chunk)\n else:\n self.logger.info(f\"new_chunk: {chunk_idx}: self.normalize has not been initialized\")\n\n return chunk\n\n elif self.cfg[\"name\"] == \"nstxgpi\":\n raise NotImplementedError(\"NSTX chunk generation not implemented\")\n\n else:\n raise NameError(f\"Data model name not understood: {self.cfg['diagnostic']['name']}\")", "title": "" }, { "docid": "1e1c706b542a6ceddef87cd757c10cd0", "score": "0.4619213", "text": "def insert_feed(self, feed):\n if not feed.is_valid():\n logger.error(\"Invalid feed.\")\n return\n\n stmt = sqlalchemy.text(\"\"\"\n INSERT INTO {mode}_feeds\n (url_key, url, feed_type, title, changerate, label,\n language, description, generator, popularity, first_fetched_time,\n latest_fetched_time, latest_item_url, latest_item_title,\n scheduled_fetch_time)\n VALUES \n (:url_key, :url, :feed_type, :title, :changerate, :label,\n :language, :description, :generator, :popularity,\n :first_fetched_time, :latest_fetched_time, :latest_item_url,\n :latest_item_title, :scheduled_fetch_time)\n \"\"\".format(mode=self.mode)\n )\n\n logger.info(stmt)\n\n try:\n with self.db_instance.connect() as conn:\n conn.execute(\n stmt, url_key=feed.url_key, url=feed.url,\n title=feed.title, changerate=feed.changerate,\n feed_type=feed.feed_type, label=feed.label,\n language=feed.language, description=feed.description,\n generator=feed.generator, popularity=feed.popularity,\n first_fetched_time=feed.first_fetched_time,\n latest_fetched_time=feed.latest_fetched_time,\n latest_item_url=feed.latest_item_url,\n latest_item_title=feed.latest_item_title,\n scheduled_fetch_time=feed.scheduled_fetch_time)\n except self.db_instance.Error as ex:\n logger.exception(ex)\n return", "title": "" }, { "docid": "52f1acc60fa7e398b70f545108068fd1", "score": "0.46171257", "text": "def insert_scan(db, start, stop, tid, plg_id):\n meta, conn = db\n tbl = meta.tables[__table_scans]\n\n with conn.begin():\n ins = tbl.insert().values(\n start=start,\n stop=stop,\n tid=tid,\n plg_id=plg_id\n )\n res = conn.execute(ins)\n conn.execute(\"commit\")\n\n return res.inserted_primary_key[0]", "title": "" }, { "docid": "f3c6dba5258694ec7e647024e62f307e", "score": "0.46168438", "text": "def insert(self, table, columns, values, schema='ableton'):\n query = \"\"\"INSERT INTO %s.\"%s\" (%s) VALUES (%s) \"\"\" \\\n % (schema, table, columns, values)\n try:\n self.cursor.execute(query)\n self.conn.commit()\n except StandardError, err:\n print 'Unable to execute:\\n', err", "title": "" }, { "docid": "7144a7d19119817a763981c00893dd82", "score": "0.4612851", "text": "def temp_insert(values):\n\n try:\n conn = sqlite3.connect(TEMP_DB_PATH)\n cur = conn.cursor()\n cur.execute('INSERT INTO temp (url, embedding, content) VALUES (?, ?, ?)', values)\n conn.commit() \n except sqlite3.Error as error:\n print(error)\n finally:\n if (conn): conn.close()", "title": "" }, { "docid": "feddfad0587911c92638c39a0db43d57", "score": "0.46064958", "text": "def line_to_sample(self, filename, stream_id, stream_owner_id, stream_owner_name, stream_name,\n data_descriptor, influxdb_insert, influxdb_client, nosql_insert):\n\n grouped_samples = []\n line_number = 1\n current_day = None # used to check boundry condition. 
For example, if half of the sample belong to next day\n last_start_time = None\n datapoints = []\n line_count = 0\n line_protocol = \"\"\n fields = \"\"\n\n if self.influx_blacklist:\n blacklist_streams = self.influx_blacklist.values()\n\n if data_descriptor:\n total_dd_columns = len(data_descriptor)\n data_descriptor = data_descriptor\n else:\n data_descriptor = []\n total_dd_columns = 0\n\n try:\n with gzip.open(filename) as lines:\n for line in lines:\n line_count += 1\n line = line.decode('utf-8')\n\n try:\n ts, offset, sample = line.split(',', 2)\n bad_row = 0 # if line is not properly formatted then rest of the code shall not be executed\n except:\n bad_row = 1\n\n if bad_row == 0:\n start_time = int(ts) / 1000.0\n offset = int(offset)\n # TODO: improve the performance of sample parsing\n if nosql_insert == True:\n values = convert_sample(sample, stream_name)\n elif influxdb_insert == True and stream_name not in blacklist_streams and line_count < self.influx_day_datapoints_limit:\n values = convert_sample(sample, stream_name)\n\n ############### START INFLUXDB BLOCK\n if influxdb_insert and line_count < self.influx_day_datapoints_limit:\n if stream_name not in blacklist_streams:\n measurement_and_tags = '%s,owner_id=%s,owner_name=%s,stream_id=%s' % (\n str(stream_name.replace(\" \", \"_\")), str(stream_owner_id), str(stream_owner_name),\n str(stream_id))\n\n try:\n if isinstance(values, list):\n for i, sample_val in enumerate(values):\n if isinstance(sample_val, str):\n ptrn = '%s=\"%s\",'\n else:\n ptrn = '%s=%s,'\n if len(values) == total_dd_columns:\n dd = data_descriptor[i]\n if \"NAME\" in dd:\n fields += ptrn % (\n str(dd[\"NAME\"]).replace(\" \", \"-\"), sample_val)\n else:\n fields += ptrn % ('value_' + str(i), sample_val)\n else:\n fields += ptrn % ('value_' + str(i), sample_val)\n elif len(data_descriptor) > 0:\n dd = data_descriptor[0]\n if isinstance(values, str):\n ptrn = '%s=\"%s\",'\n else:\n ptrn = '%s=%s,'\n if \"NAME\" in dd:\n fields = ptrn % (\n str(dd[\"NAME\"]).replace(\" \", \"-\"), values)\n else:\n fields = ptrn % ('value_0', values)\n else:\n if isinstance(values, str):\n ptrn = '%s=\"%s\",'\n else:\n ptrn = '%s=%s,'\n fields = ptrn % ('value_0', values)\n except Exception as e:\n try:\n values = json.loads(values)\n fields = '%s=\"%s\",' % ('value_0', values)\n except Exception as e:\n if isinstance(values, str):\n ptrn = '%s=\"%s\",'\n else:\n ptrn = '%s=%s,'\n fields = ptrn % ('value_0', values)\n line_protocol += \"%s %s %s\\n\" % (measurement_and_tags, fields.rstrip(\",\"), str(\n int(ts) * 1000000)) # line protocol requires nanoseconds accuracy for timestamp\n measurement_and_tags = \"\"\n fields = \"\"\n elif influxdb_client is not None and influxdb_insert and line_count > self.influx_day_datapoints_limit:\n try:\n influxdb_client.write_points(line_protocol, protocol=\"line\")\n line_protocol = \"\"\n line_count=0\n except:\n self.logging.log(\n error_message=\"STREAM ID: \" + str(stream_id) + \"Owner ID: \" + str(stream_owner_id) + \"Files: \" + str(\n filename) + \" - Error in writing data to influxdb. 
\" + str(\n traceback.format_exc()), error_type=self.logtypes.CRITICAL)\n\n\n ############### END INFLUXDB BLOCK\n\n ############### START OF NO-SQL (HDFS) DATA BLOCK\n start_time_dt = datetime.datetime.utcfromtimestamp(start_time)\n\n grouped_samples.append(DataPoint(start_time_dt, None, offset, values))\n\n ############### END OF NO-SQL (HDFS) DATA BLOCK\n\n if influxdb_client is not None and influxdb_insert and line_protocol is not None and line_protocol!=\"\":\n try:\n influxdb_client.write_points(line_protocol, protocol=\"line\")\n line_protocol = \"\"\n line_count=0\n except:\n self.logging.log(\n error_message=\"STREAM ID: \" + str(stream_id) + \"Owner ID: \" + str(stream_owner_id) + \"Files: \" + str(\n filename) + \" - Error in writing data to influxdb. \" + str(\n traceback.format_exc()), error_type=self.logtypes.CRITICAL)\n\n return grouped_samples\n except:\n self.logging.log(error_message=\"STREAM ID: \" + str(stream_id) + \" - Cannot process file data. \" + str(\n traceback.format_exc()), error_type=self.logtypes.MISSING_DATA)\n if line_count > self.influx_day_datapoints_limit:\n line_protocol = \"\"\n return grouped_samples", "title": "" }, { "docid": "e367602dad730a00ebab8724cc8f9180", "score": "0.46054593", "text": "def _append_stream_output(self, text, stream_name):\n self.outputs += (\n {'output_type': 'stream', 'name': stream_name, 'text': text},\n )", "title": "" }, { "docid": "3c5b62022288ab9d64e9a75ac50e415d", "score": "0.4605314", "text": "def insert(self, data: dict):\n table = self._storage._read()\n # Handles empty file case\n if table == None:\n table = {self._name: {\"rows\": []}}\n # Handles first usage of table case\n if self._name not in table:\n table[self._name] = {\"rows\": []}\n table[self._name][\"rows\"].append(data)\n self._storage._write(table)", "title": "" }, { "docid": "a113c792b39d4e8a0ff61f771d12a244", "score": "0.4605034", "text": "def test_send_stream_several_times(self, tg):\n iface = tg.ports[0]\n packet_count = 10000\n stream_id_1 = tg.set_stream(PACKET_DEFINITION, count=packet_count, rate=0.01, iface=iface)\n tg.clear_statistics([iface])\n\n tg.send_stream(stream_id_1)\n end_sent_statistics = tg.get_sent_frames_count(iface)\n assert end_sent_statistics == packet_count\n\n # Send stream again and verify all packets were sent\n tg.send_stream(stream_id_1)\n end_sent_statistics = tg.get_sent_frames_count(iface)\n assert end_sent_statistics == 2 * packet_count", "title": "" }, { "docid": "a3319f64ca716eb92e545b2416c435ed", "score": "0.46040666", "text": "def insert_tweets(self, data):\n if data[\"id_str\"] not in self.index_of_tweets:\n self.index_of_tweets[data[\"id_str\"]] = len(self.list_of_tweets)\n self.list_of_tweets.append(data)\n log.debug(\"INSERT SUCCESSFUL\")\n else:\n self.list_of_tweets[self.index_of_tweets[data[\"id_str\"]]] = data\n log.debug(\"UPDATE SUCCESSFUL ON BULK INSERT\")", "title": "" }, { "docid": "d027f24cf09e847ef34dd32c19614dbd", "score": "0.45874837", "text": "def start_stream(self):\n # self.watchdog.start()\n self.thrd.start()", "title": "" }, { "docid": "b48180a046c410615cd54799a4b06652", "score": "0.45830166", "text": "def beginStreamSession(self, sessionId, ttl, type, allowedClientIpAddresses, deviceId, channelId, startTimestamp, endTimestamp):\n pass", "title": "" }, { "docid": "9ac26e713cdec5c810950878d4621e56", "score": "0.45823434", "text": "def start_tweet_stream(self):\n print('Starting Tweet Stream Listener for location: %s' % self.location)\n\n if not debug_use_fake_twitter:\n # use the real twitter 
stream\n tw_stream = tweepy.Stream(auth=twitter.tw_auth,\n listener=twitter.TweetStreamListener(routing_key=self.ws_id),\n timeout=90)\n tw_stream.filter(locations=self.get_location_twitter_format, async=True)\n else:\n # use a fake twitter stream\n tw_stream = twitter.FakeTwitterStreamThread(routing_key=self.ws_id, location=self.location)\n tw_stream.setDaemon(True)\n tw_stream.start()\n self.tw_stream = tw_stream", "title": "" }, { "docid": "0b6381f281c5be9b29be1c1eb0b1e737", "score": "0.4579258", "text": "def TestSaveUpstream():\n msg = UpstreamMsg(\"2013-1-1 10:00:00\", \"8613601844147\", \"hello world!\")\n msg.insert_upstream_table()\n\n result = UpstreamMsg.select_upstream_table(\"8613601844147\")\n\n assert result.fetchone() is not None", "title": "" }, { "docid": "0a1dad7edc92daec9278bc197fcd600b", "score": "0.4561382", "text": "def insert_songplay_data(all_log_data, conn, cur):\n for entry in all_log_data:\n # find the song ID and artist ID based on the title, artist name, and duration of a song.\n # timestamp, user ID, level, song ID, artist ID, session ID, location, and user agent \n song_title = artist_name = duration = None\n song_title = entry['song_title'] \n artist_name = entry['artist_name']\n duration = entry['length']\n if song_title and artist_name and duration:\n # escape apostrophes in song titles\n song_title = re.sub(\"'\", \"''\", song_title)\n # and escape apostrophes in artist names\n artist_name = re.sub(\"'\", \"''\", artist_name) \n query = song_select_qry.format(song_title, artist_name, duration)\n try:\n song_id = artist_id = None\n cur.execute(query)\n row = cur.fetchone()\n if row:\n song_id, artist_id = row\n \n _, timestamp = get_timestamp(entry['ts'])\n user_id = entry['user_id']\n level = entry['level']\n session_id = entry['session_id']\n location = entry['location']\n user_agent = entry['user_agent']\n insert_vals = (timestamp, user_id, level, song_id, artist_id, \n session_id, location, duration)\n cur.execute(songplay_table_insert, insert_vals)\n\n except psycopg2.Error as e:\n logging.warning('caught psycopg2 exception!')\n logging.warning(e.pgerror)\n logging.warning(e.diag.message_primary)\n continue \n except KeyError as e:\n logging.warning(f'Key Error: {str(e)}')\n continue\n # end of for loop \n conn.commit()", "title": "" }, { "docid": "7b6c5af78b5772050855da23e40f3245", "score": "0.45580605", "text": "def insert_block(self, block):\n pass", "title": "" }, { "docid": "b010b82acf97d1c86208045760c703d9", "score": "0.45548534", "text": "def stream(self, stream):\n\n self._stream = stream", "title": "" }, { "docid": "61137d785fc97870208c561185dc0fb4", "score": "0.45526087", "text": "def handle_internal_stream(self, stream, address):\n self.__handle_stream(stream, address, from_ethernet = False)", "title": "" }, { "docid": "5fc93cd0a3250883846692e72e34ee9a", "score": "0.4551882", "text": "def insert_tweet(tweet):\n\n db = sqlite3.connect(db_filename)\n\n tweet_query = \"INSERT INTO Tweet (created_at, id, id_str, text, source,\" \\\n \" truncated, quoted_status_id, quoted_status_id_str, is_quote_status,\" \\\n \" quote_count, reply_count, retweet_count, favorite_count, favorited,\" \\\n \" retweeted, filter_level, lang, timestamp_ms, lat, lon, sentiment_score, place_id, user_id)\" \\\n \" VALUES \" \\\n \"(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?); \"\n lat = None\n lon = None\n coords = get_value(tweet, 'geo')\n if coords:\n coords = coords['coordinates']\n lat = coords[0]\n lon = coords[1]\n\n global content\n if 
'extended_tweet' in tweet:\n content = tweet['extended_tweet']['full_text']\n else:\n content = tweet['text']\n place_id = None\n if 'place' in tweet:\n place = tweet['place']\n if place:\n place_id = tweet['place']['id']\n \"Thu Nov 08 19:16:38 +0000 2018\"\n\n created_at = datetime.strptime(tweet['created_at'], \"%a %b %d %H:%M:%S %z %Y\")\n tweet_params = [\n int(created_at.strftime('%Y%m%d%H%M%S')),\n get_value(tweet, 'id'),\n get_value(tweet, 'id_str'),\n content,\n get_value(tweet, 'source'),\n get_value(tweet, 'truncated'),\n get_value(tweet, 'quoted_status_id'),\n get_value(tweet, 'quoted_status_id_str'),\n get_value(tweet, 'is_quote_status'),\n get_value(tweet, 'quote_count'),\n get_value(tweet, 'reply_count'),\n get_value(tweet, 'retweet_count'),\n get_value(tweet, 'favorite_count'),\n get_value(tweet, 'favorited'),\n get_value(tweet, 'retweeted'),\n get_value(tweet, 'filter_level'),\n get_value(tweet, 'lang'),\n get_value(tweet, 'timestamp_ms'),\n lat,\n lon,\n get_value(tweet, 'sentiment_score'),\n place_id,\n tweet['user']['id']\n ]\n\n user_query = \"INSERT INTO User (id, id_str, name, screen_name, location, url, description, verified,\" \\\n \" followers_count, friends_count, favourites_count, statuses_count, profile_image_url, lang, created_at) \" \\\n \" VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?);\"\n\n user_params = [\n tweet['user']['id'],\n tweet['user']['id_str'],\n tweet['user']['name'],\n tweet['user']['screen_name'],\n tweet['user']['location'],\n tweet['user']['url'],\n tweet['user']['description'],\n tweet['user']['verified'],\n tweet['user']['followers_count'],\n tweet['user']['friends_count'],\n tweet['user']['favourites_count'],\n tweet['user']['statuses_count'],\n tweet['user']['profile_image_url'],\n tweet['user']['lang'],\n tweet['user']['created_at']\n ]\n\n cursor = db.cursor()\n try:\n cursor.execute(tweet_query, tweet_params)\n cursor.execute(user_query, user_params)\n except sqlite3.IntegrityError:\n pass\n try:\n if place_id:\n place_query = \"INSERT INTO Place (id, url, place_type, name, full_name, country_code, country, bounding_box)\" \\\n \" values (?, ?, ?, ?, ?, ?, ?, ?);\"\n place_params = [\n tweet['place']['id'],\n tweet['place']['url'],\n tweet['place']['place_type'],\n tweet['place']['name'],\n tweet['place']['full_name'],\n tweet['place']['country_code'],\n tweet['place']['country'],\n json.dumps(tweet['place']['bounding_box'])\n ]\n cursor.execute(place_query, place_params)\n except sqlite3.IntegrityError as e:\n pass\n db.commit()", "title": "" }, { "docid": "cfda0cc61b41bf1915ff2b158391edd2", "score": "0.45461604", "text": "def sql_transit_stmt(id, station):\n # YOUR CODE HERE\n return (f\"DUMMY INSERT with \" +\n f\"{id}, {station['name']}, {station['types'][0]}, {station['place_id']}\")", "title": "" }, { "docid": "e1d3c4630a00ca12a9ea4004c21fe950", "score": "0.45457023", "text": "def insert(self, packet, time=None, **kwargs):\n if isinstance(time, dt.datetime):\n time = time.strftime(dmc.RFC3339_Format)\n\n sql = f'INSERT INTO \"{packet._defn.name}\" (PKTDATA, time) VALUES (?, ?)' \\\n if time \\\n else f'INSERT INTO \"{packet._defn.name}\" (PKTDATA) VALUES (?)'\n values = (sqlite3.Binary(packet._data), time) \\\n if time \\\n else (sqlite3.Binary(packet._data))\n\n self._conn.execute(sql, values)\n self._conn.commit()", "title": "" }, { "docid": "e2772f386aaa0b876b7a9e86305bb8b8", "score": "0.45416203", "text": "def insert(connection):\n hooks.DMTABLE.put_item(\n Item={\n 'PK': 'connection',\n 'SK': 
connection['connectionId'],\n 'TP': connection['chatRoom'],\n 'connectionId': connection['connectionId'],\n 'userName': connection['userName']\n }\n )", "title": "" }, { "docid": "d4355eeb8a8ef84d676cd0fd54d69c55", "score": "0.4538734", "text": "def insertQueue(self,nodeId, data):\n\t\tself.openDb()\n\t\tself.c.execute(\"INSERT INTO can_data(id,message) VALUES ({0},{1})\".format(nodeId,data))\n\t\tself.closeDb()", "title": "" }, { "docid": "ea5492ce2abd47b42e50dd7fb7dfd8b1", "score": "0.45342115", "text": "def handle_stream(self, stream, address):\n raise NotImplementedError()", "title": "" }, { "docid": "f277440b79212303036cfd300041295f", "score": "0.45339483", "text": "def insert2(self, record):\n self.commit()", "title": "" }, { "docid": "f467047c9d7b49b6774f21fee9aa2a27", "score": "0.45315972", "text": "def _prepare_stream(self,bstream,offset):\n if offset == None:\n offset = bstream.offset\n\n if issubclass(offset.__class__,(types.IntType,)):\n self.offset = BinData(4)\n self.offset.init_data_from_int(offset)\n elif issubclass(offset.__class__,(BinData,)):\n self.offset = offset\n elif (issubclass(offset.__class__,(Entry,)) and\n '__int__' in dir(offset)):\n self.offset = offset\n else:\n raise Exception('Invalid type for EntryList offset (%s) in class %s' % \n (offset,self))\n\n bstream.seek(int(self.offset))", "title": "" } ]
d30191c262bc57e94f5ce39f6d349885
smartctl d scsi a
[ { "docid": "3c72ac6085e8b7a17cc81f23544665b8", "score": "0.64197683", "text": "def intel_scsi_cmd_all():\n stderr = \"\"\n stdout = \"\"\"\nsmartctl 6.2 2013-07-26 r3841 [x86_64-linux-3.10.0-327.3.1.el7.x86_64] (local build)\nCopyright (C) 2002-13, Bruce Allen, Christian Franke, www.smartmontools.org\n\nUser Capacity: 480,103,981,056 bytes [480 GB]\nLogical block size: 512 bytes\nRotation Rate: Solid State Device\nForm Factor: 2.5 inches\nLogical Unit id: 0x55cd2e404b6cd88d\nSerial number: BTWL435203C0480QGN\nDevice type: disk\nLocal Time is: Fri Jul 1 10:56:20 2016 CST\nSMART support is: Available - device has SMART capability.\nSMART support is: Enabled\nTemperature Warning: Disabled or Not Supported\n\n=== START OF READ SMART DATA SECTION ===\nSMART Health Status: OK\nCurrent Drive Temperature: 19 C\n\nError Counter logging not supported\n\n\n[GLTSD (Global Logging Target Save Disable) set. Enable Save with '-S on']\nNo self-tests have been logged\n \"\"\"\n return stdout, stderr", "title": "" } ]
[ { "docid": "149a9828f502d9dd58b679576e1b7ba9", "score": "0.67724967", "text": "def intel_scsi_cmd_sasphy():\n stderr = \"\"\n stdout = \"\"\"\nsmartctl 6.2 2013-07-26 r3841 [x86_64-linux-3.10.0-327.3.1.el7.x86_64] (local build)\nCopyright (C) 2002-13, Bruce Allen, Christian Franke, www.smartmontools.org\n\n=== START OF READ SMART DATA SECTION ===\nscsiPrintSasPhy Log Sense Failed [unsupported field in scsi command]\n \"\"\"\n return stdout, stderr", "title": "" }, { "docid": "73dda42f9f7a07d0020d2ade01c50e57", "score": "0.6467441", "text": "def intel_scsi_cmd_sataphy():\n stderr = \"\"\n stdout = \"\"\"\nsmartctl 6.2 2013-07-26 r3841 [x86_64-linux-3.10.0-327.3.1.el7.x86_64] (local build)\nCopyright (C) 2002-13, Bruce Allen, Christian Franke, www.smartmontools.org\n\nSCSI device successfully opened\n\nUse 'smartctl -a' (or '-x') to print SMART (and more) information\n \"\"\"\n return stdout, stderr", "title": "" }, { "docid": "640bf336eea01808264cc014bbb17198", "score": "0.6104268", "text": "def query_scsi_id(blk):\n if platform.dist()[0].lower() == 'redhat' and float(platform.dist()[1]) < 6:\n e,sid=process_call('scsi_id -g -x -a -s /block/'+blk, log=False)\n else:\n e,sid=process_call('scsi_id --whitelisted --export --device=/dev/'+blk, log=False)\n return e,sid.lower()", "title": "" }, { "docid": "0c0cad8d897bf07791f62aef4a39dd21", "score": "0.60098445", "text": "def get_scsi_disk(session=None):\n if session is None:\n scsi_disk_info = process.system_output(\"lsscsi\").splitlines()\n scsi_debug = [_ for _ in scsi_disk_info if 'scsi_debug' in _][-1]\n else:\n scsi_disk_info = session.cmd_output('lsscsi').splitlines()\n scsi_debug = [_ for _ in scsi_disk_info][-1]\n scsi_debug = scsi_debug.split()\n scsi_id = scsi_debug[0][1:-1]\n device_name = scsi_debug[-1]\n return scsi_id, device_name", "title": "" }, { "docid": "00de7445aac3453fefa2fd271ae05808", "score": "0.5689727", "text": "def set_sd(self, sd_image_path):\n self.append_arg([\n '-drive',\n 'file=%s,if=none,format=raw,id=hd1,index=1' % sd_image_path,\n '-device',\n 'virtio-blk-device,drive=hd1'\n ])", "title": "" }, { "docid": "02ed3f6a9f1ecbbc500cae6fb5b989ab", "score": "0.53380543", "text": "def format_dasds(self, intf, askUser, dasdlist):\n out = \"/dev/tty5\"\n err = \"/dev/tty5\"\n\n c = len(dasdlist)\n\n if intf and askUser:\n devs = ''\n for dasd, bypath in dasdlist:\n devs += \"%s\\n\" % (bypath,)\n\n rc = intf.questionInitializeDASD(c, devs)\n if rc == 1:\n log.info(\" not running dasdfmt, continuing installation\")\n return\n\n # gather total cylinder count\n argv = [\"-t\", \"-v\"] + self.commonArgv\n for dasd, bypath in dasdlist:\n buf = iutil.execWithCapture(self.dasdfmt, argv + [\"/dev/\" + dasd],\n stderr=err)\n for line in buf.splitlines():\n if line.startswith(\"Drive Geometry: \"):\n # line will look like this:\n # Drive Geometry: 3339 Cylinders * 15 Heads = 50085 Tracks\n cyls = long(filter(lambda s: s, line.split(' '))[2])\n self.totalCylinders += cyls\n break\n\n # format DASDs\n argv = [\"-P\"] + self.commonArgv\n update = self._updateProgressWindow\n\n title = P_(\"Formatting DASD Device\", \"Formatting DASD Devices\", c)\n msg = P_(\"Preparing %d DASD device for use with Linux...\" % c,\n \"Preparing %d DASD devices for use with Linux...\" % c, c)\n\n if intf:\n if self.totalCylinders:\n pw = intf.progressWindow(title, msg, 1.0)\n else:\n pw = intf.progressWindow(title, msg, 100, pulse=True)\n\n for dasd, bypath in dasdlist:\n log.info(\"Running dasdfmt on %s\" % (bypath,))\n arglist = argv + [\"/dev/\" + 
dasd]\n\n try:\n if intf and self.totalCylinders:\n rc = iutil.execWithCallback(self.dasdfmt, arglist,\n stdout=out, stderr=err,\n callback=update,\n callback_data=pw,\n echo=False)\n elif intf:\n rc = iutil.execWithPulseProgress(self.dasdfmt, arglist,\n stdout=out, stderr=err,\n progress=pw)\n else:\n rc = iutil.execWithRedirect(self.dasdfmt, arglist,\n stdout=out, stderr=err)\n except Exception as e:\n raise DasdFormatError(e, bypath)\n\n if rc:\n raise DasdFormatError(\"dasdfmt failed: %s\" % rc, bypath)\n\n if intf:\n pw.pop()", "title": "" }, { "docid": "a7628a6ea6635ceaba231388a7e8cdac", "score": "0.5331458", "text": "def enable_hdd():\n # If hardrives already on, get outta here!\n hdd_status = check_hdd()\n\n if hdd_status[\"HDD 1\"][\"status\"] != 0 and hdd_status[\"HDD 2\"][\"status\"] != 0:\n return \"Hard drives already powered.\\n\"\n\n # Do command\n __import__(\"enable_ext-hd\")\n\n time.sleep(25)\n\n # For EXT, re-scan SATA/SCSI hotswap drives\n if \"EXT\" in misc.get_hostname():\n exec_console_command(\"for i in $(find /sys/class/scsi_host/ -name host* ); do echo '- - -' > $i/scan\")\n time.sleep(2)", "title": "" }, { "docid": "fa7dc230290660eff231a0ffe931f204", "score": "0.5310524", "text": "def list_didaticoss_cmd():\n return ListDidaticosCommand()", "title": "" }, { "docid": "f0b16f358e8f8942b87bd87d21968cf4", "score": "0.5293264", "text": "def device():", "title": "" }, { "docid": "89f2a1b68093857da94092be285f8132", "score": "0.5234992", "text": "def getscsiproductid(self):\n ## show system\n #System Information\n #------------------\n #System Name: DAMC002-3\n #System Contact: Uninitialized Contact\n #System Location: B1 Lab station 46\n #System Information: pitg\n #Midplane Serial Number: 00C0FF1B50B6\n #Vendor Name:\n #Product ID: DH4544\n #Product Brand:\n #SCSI Vendor ID: DotHill\n #SCSI Product ID: DH4544\n #Enclosure Count: 2\n #Health: OK\n #Health Reason:\n #Other MC Status: Operational\n #PFU Status: Idle\n #Supported Locales: English (English), Spanish (espaol), French (franais), German (Deutsch), Italian (italiano), Japanese (), Dutch (Nederlands), Chinese-Simplified (), Chinese-Traditional (), Korean ()\n\n\n #Success: Command completed successfully. 
(2016-06-14 16:23:36)\n\n cmd = self.getrshcmd('show system')\n if debug >= 2: print \"cmd = \" + cmd\n\n p = Popen(cmd, shell=True, stdout=PIPE, stderr=STDOUT)\n stdout, stderr = p.communicate()\n for line in stdout.splitlines():\n if debug >= 2: print line\n m = self.rpi.match(line) #search for SCSI Product ID:\n if m:\n self.scsiproductid = m.group('id')\n if (self.scsiproductid):\n return self.scsiproductid\n else:\n raise RuntimeError(\"Couldn't get SCSI Product ID.\")", "title": "" }, { "docid": "14fd0949f571bfc1b739f0be3497234e", "score": "0.52033657", "text": "def mount_hdd():\n outputDict = {'/data1': \"Drive #1\", '/data2': \"Drive #2\", '/data3': \"Drive #3\"}\n feedbackOutput = {}\n\n hostname = misc.get_hostname()\n\n if 'EXT' in hostname:\n drives = ['/data1', '/data2', '/data3']\n else:\n drives = ['/data1', '/data2']\n\n #Get current status of HDDs\n hdd_status = check_hdd()\n poweredArray = [hdd_status[\"HDD 1\"][\"status\"], hdd_status[\"HDD 2\"][\"status\"], hdd_status[\"HDD 3\"][\"status\"]]\n\n for idx, drive in enumerate(drives):\n # Do command for drive\n try:\n consoleOutput = exec_console_command(\"mount {0}\".format(drive))\n\n if poweredArray[idx] == 0:\n feedbackOutput[\"HDD {0}\".format(idx+1)] = \"{0} mount error: {1}\\n\".format(outputDict[drive], \"Hard drives need to be powered.\")\n else:\n feedbackOutput[\"HDD {0}\".format(idx+1)] = \"{0} mounted successfully.\\n\".format(outputDict[drive])\n\n except CommandError as error:\n feedbackOutput[\"HDD {0}\".format(idx+1)] = \"{0} mount error: {1}\\n\".format(outputDict[drive], \"Hard drives may have already been mounted. See status for confirmation.\")\n\n return feedbackOutput", "title": "" }, { "docid": "15a8359f840988d9e4eb495e6a224f91", "score": "0.5197712", "text": "def duthost2(ansible_adhoc, request):\n tbname = request.config.getoption(\"--testbed\")\n tbfile = request.config.getoption(\"--testbed_file\")\n tbinfo = TBInfo(tbfile)\n hostname2 = tbinfo.testbed_topo[tbname+'-dut2']['dut']\n return SonicHost(ansible_adhoc, hostname2, gather_facts=True)", "title": "" }, { "docid": "758f139cc09bddd7e2f9744355efc288", "score": "0.516498", "text": "def get_disk(chat_id, bot):\n\n try:\n session = Session(hostname=host_details.get_host(), community=host_details.get_community(),\n version=host_details.get_version())\n\n disk = session.get(\".1.3.6.1.4.1.2021.9.1.9.1\")\n\n bot.sendMessage(chat_id, 'Actual Disk usage on: ' + host_details.get_host() + ' = ' + str(disk.value) + \"%\")\n bot.sendMessage(chat_id, 'Back to command list', reply_markup=bot_command.keyboard)\n\n except exce.EasySNMPError as error:\n bot.sendMessage(chat_id, 'Error during interrogation, agent: <' + host_details.get_host() + ' '\n + host_details.get_community() + '> on disk request')\n bot_command.back_home(chat_id, bot)\n print(error)", "title": "" }, { "docid": "167c62291f772fd5d297e30ae283f77a", "score": "0.515343", "text": "def device_services_command(address):\n try:\n # check the mac address\n if is_valid_address(address): \n global devices\n \n # check that the device exists\n if address in devices:\n click.echo('Getting services for device {}...'.format(address))\n for key, value in devices[address][0].getServices().items():\n click.echo(' {} - {}'.format(key, value))\n click.echo('All discovered services for devices {} listed.'.format(address))\n else:\n click.secho('Command cannot be run. No such device with address {} found!'.format(address), fg='red')\n else:\n click.secho('Command cannot be run. 
Device MAC {} not valid!'.format(address), fg='red')\n except btle.BTLEException as e:\n click.secho('A BTLEException has occured!\\n{}'.format(e), fg='red')", "title": "" }, { "docid": "f7e5519baae7d1098e77630eb2e11a91", "score": "0.51386154", "text": "def sshd( network, cmd='/usr/sbin/sshd', opts='-D',\n ip='10.123.123.1/32', routes=None, switch=None ):\n if not switch:\n switch = network[ '1001' ] # switch to use\n if not routes:\n routes = [ '10.0.0.0/24' ]\n connectToRootNS( network, switch, ip, routes )\n for host in network.hosts:\n host.cmd( cmd + ' ' + opts + '&' )\n print \"*** Waiting for ssh daemons to start\"\n for server in network.hosts:\n waitListening( server=server, port=22, timeout=5 )\n\n print\n print \"*** Hosts are running sshd at the following addresses:\"\n print\n for host in network.hosts:\n print host.name, host.IP()\n print\n print \"*** Running NameNode and DataNode:\"\n print\n i = 0\n for host in network.hosts:\n if host is Pod_host[0]:\n host.cmd( './namenode.sh start' )\n print \"NN start\"\n else:\n i+=1\n host.cmd( './additional_datanode.sh start ' + str(i) + ' &' )\n print \"DN\"+str(i)+\" start\"\n print\n print \"*** Type 'exit' or control-D to shut down network\"\n CLI( network )\n i = 0\n for host in network.hosts:\n host.cmd( 'kill %' + cmd )\n if host is Pod_host[0]:\n host.cmd( './namenode.sh stop' )\n print \"NN stop\"\n else:\n i+=1\n host.cmd( './additional_datanode.sh stop ' + str(i) + ' &' )\n print \"DN\"+str(i)+\" stop\"\n network.stop()", "title": "" }, { "docid": "8077617c9df6e121333d4845ffa3a50c", "score": "0.5119707", "text": "def __init__(self,name):\n SanBase.__init__(self,name,'Virtual HBA')\n self.vsan=0\n self.priority=0\n self.avgload=50\n self.port=''\n self.provider=None\n self.fchost=''\n self.transport=Transport.iscsi\n self.targets={}", "title": "" }, { "docid": "dcf65611e06d062f8ddcca998b3da4fe", "score": "0.50913846", "text": "def get_drives():\n drives = multi_pipe_command(\n \"sudo smartctl --scan-open|cut -d ' ' -f 1,3|grep -v scsi|grep -v sat\")\n\n # Do some cleanup.\n drives = drives.split('\\n')\n raid_drives_tmp = [drive for drive in drives if drive]\n raid_drives = []\n for drive in raid_drives_tmp:\n raid_drives.append(tuple(drive.split(' ')))\n\n # This finds block devices that aren't behind a raid controller.\n drives = multi_pipe_command(\"lsblk -d|cut -d ' ' -f 1\")\n\n # Cleanup, split the output into an array and remove rados block devices and the headers.\n # Also append '/dev/' in front of the device name\n drives = drives.split('\\n')\n other_drives_tmp = [\n '/dev/' + drive for drive in drives if drive and drive != \"NAME\" and \"rbd\" not in drive]\n other_drives = []\n for drive in other_drives_tmp:\n other_drives.append(('no-raid', drive))\n\n # With this filter, we get drives that aren't a part of the raid controller.\n drives_not_in_raid = []\n for drive in other_drives:\n try:\n process_smartctl = subprocess.check_output(\n ['sudo', 'smartctl', '-i', drive[1]], shell=False)\n if \"device lacks SMART\" in process_smartctl:\n continue\n drives_not_in_raid.append(drive)\n except subprocess.CalledProcessError:\n pass\n\n all_drives = []\n for drive in raid_drives + drives_not_in_raid:\n if '/dev/' in drive[1]:\n serial = multi_pipe_command(\n \"sudo smartctl -i \" + drive[1] + \"|grep -i serial|cut -d : -f 2\")\n else:\n serial = multi_pipe_command(\n \"sudo smartctl -i \" + drive[0] + \" -d \" + drive[1] + \"|grep -i serial|cut -d : -f 2\")\n all_drives.append({\"{#DRIVENAME}\": drive[1], 
\"{#DRIVESERIAL}\": serial.strip(\n ), \"{#RAIDCONTROLLER}\": drive[0]})\n\n zabbix_output = {}\n zabbix_output[\"data\"] = all_drives\n\n # print(json.dumps(zabbix_output, sort_keys=True, indent=4, separators=(',', ': ')))\n sys.stdout.write(json.dumps(zabbix_output))\n return", "title": "" }, { "docid": "c34143287eab62b77d4a5e824c6fea59", "score": "0.5087748", "text": "def smartinfo():\n devices = smartctl.get_devices()\n header, data = smartctl.get_information_on_drives(devices)\n for device in data:\n if device[header.index('Health')] != 'PASS':\n # There's something wrong with 'Health'\n message = smartctl.format_drive_info(header, device)\n # TODO: log this entry\n pushover.send_message(settings, message, title=f'[{HOSTNAME}] Drive health warning')", "title": "" }, { "docid": "8a3f4bd962fe8431b33fedd826e2374b", "score": "0.50798655", "text": "def scsi_volume_without_paths(self):\n result = AutoinstallMachineModel.ZfcpVolume(\n 'cd0f0000', 20_000_000, multipath=True,\n wwid='36005076309ffd435000000000000cd0f')\n result.set_partitions('msdos', [{\n 'mount_point': '/data',\n 'size': 18_000,\n 'filesystem': 'ext4',\n 'part_type': 'primary',\n 'mount_opts': None,\n }])\n yield result", "title": "" }, { "docid": "bf37dc7ea56f05369e51eb6158d7db76", "score": "0.5063132", "text": "def sh_virsh_detach_domblks(self, log, vm_hostname):\n command = (\"virsh domblklist --inactive --details %s\" %\n (vm_hostname))\n retval = self.sh_run(log, command)\n if retval.cr_exit_status:\n log.cl_error(\"failed to run command [%s] on host [%s], \"\n \"ret = [%d], stdout = [%s], stderr = [%s]\",\n command,\n self.sh_hostname,\n retval.cr_exit_status,\n retval.cr_stdout,\n retval.cr_stderr)\n return -1\n\n for line in retval.cr_stdout.splitlines():\n fields = line.split()\n if (len(fields) != 4 or\n (fields[0] == \"Type\" and fields[1] == \"Device\")):\n continue\n device_type = fields[1]\n target_name = fields[2]\n if device_type != \"disk\":\n continue\n\n # Don't detach the disks that can't be detached\n if target_name.startswith(\"hd\"):\n continue\n\n log.cl_info(\"detaching disk [%s] of VM [%s]\",\n target_name, vm_hostname)\n\n command = (\"virsh detach-disk %s %s --persistent\" %\n (vm_hostname, target_name))\n retval = self.sh_run(log, command)\n if retval.cr_exit_status:\n log.cl_error(\"failed to run command [%s] on host [%s], \"\n \"ret = [%d], stdout = [%s], stderr = [%s]\",\n command,\n self.sh_hostname,\n retval.cr_exit_status,\n retval.cr_stdout,\n retval.cr_stderr)\n return -1\n return 0", "title": "" }, { "docid": "ff44994c02dcef8791aa6b38865dee1e", "score": "0.5050091", "text": "def sDn(cord):\n gear = 10\n yarn = machine(cord, gear)\n return yarn", "title": "" }, { "docid": "8244fd6a2a6bad87d8bb382f2f4953ec", "score": "0.5034576", "text": "def controlstart():\n global dev, endpoint\n # light.red.on()\n\n dev = usb.core.find(idVendor=USB_VENDOR, idProduct=USB_PRODUCT)\n \n endpoint = dev[0][(0, 0)][0]\n\n if dev.is_kernel_driver_active(USB_IF) is True:\n dev.detach_kernel_driver(USB_IF)\n \n usb.util.claim_interface(dev, USB_IF)\n \n #explorerhat.light.red.off()", "title": "" }, { "docid": "727a3d419f399cba5713d383dec79095", "score": "0.50278", "text": "def ListDrives():\n\n table = PrettyTable([\"Drive\", \"Serial\", \"Name\"])\n for objItem in GetLogicalDrivesFromWMI():\n table.add_row([objItem.Name, objItem.VolumeSerialNumber,\n objItem.VolumeName])\n\n print table", "title": "" }, { "docid": "7050c6f618b35532462a447e64042ee7", "score": "0.50068915", "text": "def 
device_chars_command(address, start_handle, end_handle, uuid):\n try:\n # check the mac address\n if is_valid_address(address): \n global devices\n \n # check that the device exists\n if address in devices:\n click.echo()\n chars = devices[address].getCharacteristics(\n startHnd=start_handle, endHnd=end_handle,uuid=uuid)\n if chars:\n for char in chars:\n if char.supportsRead():\n click.echo(' {} - {}'.format(char.getHandle(), char.read()))\n else:\n click.echo(' {} - read not supported!'.format(char.getHandle()))\n else:\n click.secho('The device does not specify GATT characteristics.', fg='yellow')\n else:\n click.secho('Command cannot be run. No such device with address {} found!'.format(address), fg='red')\n else:\n click.secho('Command cannot be run. Device MAC {} not valid!'.format(address), fg='red')\n except btle.BTLEException as e:\n click.secho('A BTLEException has occured!\\n{}'.format(e), fg='red')", "title": "" }, { "docid": "7103aa37fde2cb83e9ad53c1037a5e4a", "score": "0.49962473", "text": "def _online_device(self, node, dev):\n\n body = [\"command=cio_ignore -r %s\" % dev]\n self._xcat_xdsh(node, body)\n\n body = [\"command=chccwdev -e %s\" % dev]\n self._xcat_xdsh(node, body)", "title": "" }, { "docid": "86f03fff7d6d4b7e4a4e6d476340b8e2", "score": "0.49879172", "text": "def DeviceChemistry(self):\n return self.read_block(0x22)", "title": "" }, { "docid": "2e8ca5e6904a608eca8f08fe562efff1", "score": "0.49548072", "text": "def dds9():\n return Dds9Control(live_port)", "title": "" }, { "docid": "e332bd2b74904c439e6eabb73e48040b", "score": "0.4950362", "text": "def start_host(uml_id, config, index=0):\n cmd = []\n #normalize index\n # the index must be 2 digits\n idx = str(hex(index))[2:]\n if len(idx) < 2:\n idx = \"0\" + idx\n\n role = config.get(uml_id, \"role\")\n cow_path = config.get(\"global\", \"session_path\")\n root_image = config.get(\"global\", \"root_image\")\n root_image_name = root_image.split(\"/\")[-1]\n\n cow_file = \"{cow_path}/{root_image_name}-{uml_id}.cow\".format(**locals())\n\n screen_cmd = \"screen -dmS {uml_id} linux.uml umid={uml_id} role={role} index={idx} name={uml_id} \" \\\n \"ubd0={cow_file},{root_image}\".format(**locals())\n cmd .append(screen_cmd)\n\n #count interfaces\n interface_count = 0\n # Arno, 2014-05-16: Keep UML random MAC for tap devices. 
Note\n # this code assumes there is just one tap interface per UML instance\n tapdev = \"tapdev0=\"\n for interface in config.options(uml_id):\n\n if interface.startswith(\"eth\"):\n interface_idx = interface.lstrip(\"eth\")\n network_info = config.get(uml_id, interface).split(',')\n ipv4 = False\n ipv6 = False\n to_switch = \"\"\n\n if len(network_info) == 3:\n (to_switch, ipv4, ipv6) = network_info\n elif len(network_info) == 2:\n (to_switch, ipv4) = network_info\n else:\n to_switch = network_info\n\n #check for tuntap\n if to_switch.startswith(\"tap\"):\n eth = \"{interface}=tuntap,\".format(**locals())\n sw = \"{to_switch},,\".format(**locals())\n tapdev += interface \n else:\n eth = \"{interface}=daemon,,unix,\".format(**locals())\n switch_path = config.get(\"global\", \"session_path\")\n sw = \"{switch_path}/switch-{to_switch}.ctl/ctl\".format(**locals())\n\n cmd.append(eth + sw)\n\n if ipv4:\n iface = \"ip{interface_idx}={ipv4}\".format(**locals())\n cmd.append(iface)\n if ipv6:\n iface = \"ip6{interface_idx}={ipv6}\".format(**locals())\n cmd.append(iface)\n\n interface_count += 1\n\n #custom mem setting per host\n mem = config.get(\"global\", \"mem\")\n if config.has_option(uml_id, \"mem\"):\n mem = config.get(uml_id, \"mem\")\n\n mem = \"mem={mem} interface_count={interface_count}\".format(**locals())\n cmd.append(mem)\n\n if config.has_option(uml_id, \"home\"):\n home = \"home=\" + config.get(uml_id, \"home\")\n cmd.append(home)\n\n #pass prefix options to uml instance\n for option in config.options(uml_id):\n if option.startswith(\"pass_\"):\n passopt = option[5:] + \"=\" + config.get(uml_id, option)\n cmd.append(passopt)\n\n # Disable swap initialization on the instance\n cmd.append(\"noswap\")\n\n # Arno\n cmd.append(tapdev)\n \n cmd = \" \".join(cmd)\n execute(cmd)", "title": "" }, { "docid": "12cddd51868d91ebb144ba93ce45cebb", "score": "0.49345568", "text": "def get_scsi_device_options(self):\n sys_scsi_devices = self.get_sys_scsi_devices()\n scsi_devices_database = self.get_scsi_devices_database()\n return self.get_options_from_db(\n sys_scsi_devices, scsi_devices_database)", "title": "" }, { "docid": "2db8161564beee74a8678fd2bbed48a9", "score": "0.49258024", "text": "def format_disk(disk):\n print \"Formatting devices %s, please be patient...\" % disk\n try:\n output = execute(\"fdisk -E /dev/rdsk/%sp0\" % disk)\n except Retcode, r:\n sys.stderr.write(str(r))\n sys.stderr.write(r.output)\n sys.stderr.write(\"Please review /var/adm/messages for additional \"\n \"information.\\n\")\n sys.exit(1)", "title": "" }, { "docid": "39eec8b7f3bf920d8314e3adf8be62fb", "score": "0.492046", "text": "def enable_direct_access (fpgaid,verbose=0):\n cmd = \"rdwr -b %d %s 0x%X\" % (fpgaid,\"global.g.fpga.ReconfigCtrl.Direct\", 1)\n if (verbose & 2) == 2 : print cmd\n exec_cmd(cmd)\n time.sleep(1.0)", "title": "" }, { "docid": "60518bf232cb4d3870741aac01155e49", "score": "0.49169913", "text": "def disable_hdd():\n devices = [\"sdb\", \"sdc\", \"sdd\"] # Used for deleting devices in EXTs before powering off\n\n # If hardrives already off or mounted, get outta here!\n hdd_status = check_hdd()\n\n if hdd_status[\"HDD 1\"][\"status\"] != 1 and hdd_status[\"HDD 2\"][\"status\"] != 1:\n return \"Hard drives already off, or mounted. 
Drives must be in a 'powered' state to turn off safely.\\n\"\n\n # For EXT, delete the devices ONLY if they're all not solid state devices.\n if \"EXT\" in misc.get_hostname():\n for device in devices:\n # Check if the device is a solid state or HDD\n driveRotation = exec_console_command(\"smartctl -i /dev/{0} | grep 'Rotation Rate:'\".format(device))\n\n if not re.search(\"[0-9]\", driveRotation):\n raise RuntimeError(\n \"External drives are not on correct device label. Use the command line to resolve this.\")\n\n # No exceptions have been raised by this point, so delete drives\n for device in devices:\n exec_console_command(\"echo 1 > /sys/block/{0}/device/delete\".format(device))\n\n time.sleep(1)\n # Then proceed to power off as normal\n\n # Do command\n __import__(\"disable_ext-hd\")\n\n # Sleep if EXT, needs time to remove drives.\n if \"EXT\" in misc.get_hostname():\n time.sleep(22)", "title": "" }, { "docid": "1ff6926a0da55475e3b25ca6aa90252b", "score": "0.49156776", "text": "def ovs_vsctl(action, device, transport):\n\n VSCTL=\"sudo /usr/bin/ovs-vsctl\"\n command = VSCTL + \" \" + action + \" \" + device\n output = exec_cmd(transport, command)\n return output", "title": "" }, { "docid": "1bf26df9c18c7e701be1bf63ca92fd7c", "score": "0.49034184", "text": "def list_devices():\n devices = Adb.get_devices_as_list()\n if not devices:\n click.echo(\"No devices attached\")\n else:\n click.echo(\"Attached devices:\")\n for index, item in enumerate(devices):\n click.echo(\"index -> {}\\tid -> {}\\tdescription -> {}\".format(\n str(index), devices[index][0], devices[index][1]))", "title": "" }, { "docid": "95c56c118355de6b7ded7759a593bcd3", "score": "0.4877932", "text": "def get_devices():\n\n devices = []\n\n cmd = ['lsblk']\n for line in console(cmd).split('\\n'):\n if 'disk' in line:\n devices.append(line.split()[0])\n devices.sort()\n\n return devices", "title": "" }, { "docid": "cef8d38aeda701497edb5be68dd49fdf", "score": "0.4859137", "text": "def find_device(b): \n return item_find(b).device", "title": "" }, { "docid": "2ed3a3034af98aa4f2940253baecef5b", "score": "0.48577824", "text": "def get_guest_discard_disk(session):\n list_disk_cmd = \"ls /dev/[shv]d*|sed 's/[0-9]//p'|uniq -u\"\n disk = session.cmd_output(list_disk_cmd).splitlines()[0]\n return disk", "title": "" }, { "docid": "e87a6faf1880ba58b16f84b15a053787", "score": "0.48412496", "text": "def disable_direct_access (fpgaid,verbose=0):\n cmd = \"rdwr -b %d %s 0x%X\" % (fpgaid,\"global.g.fpga.ReconfigCtrl.Direct\", 0)\n if (verbose & 2) == 2 : print cmd\n exec_cmd(cmd)\n time.sleep(1.0)", "title": "" }, { "docid": "61ab5a07cc53d9d2450723c28c909712", "score": "0.48202106", "text": "def mount_virtio(self):\n self.logger.info(\"Mounting VirtIO device in simulated system\")\n\n self.gem5_shell('mkdir -p /mnt/obb')\n\n mount_command = \"mount -t 9p -o trans=virtio,version=9p2000.L,aname={} gem5 /mnt/obb\".format(self.temp_dir)\n self.gem5_shell(mount_command)", "title": "" }, { "docid": "74ee95359d94018b49f659ccdca918db", "score": "0.48151463", "text": "def device_discover_command(index, timeout):\n try:\n click.echo('Now scanning for btle devices...')\n scanner = btle.Scanner(index)\n entries = scanner.scan(timeout)\n click.echo('Found the following btle devices...')\n for entry in entries:\n click.echo(entry.getScanData())\n except btle.BTLEException as e:\n click.secho('A BTLEException has occurred!\\n{}'.format(e), fg='red')", "title": "" }, { "docid": "bb250035dcafde44d155adfdbf8f9e55", "score": "0.4811218", "text": "def 
mount_USB(self):\n\n logger.info(\"Mounting USB side\")\n\n if not self.simulate:\n subprocess.call(\n f\"sudo modprobe g_mass_storage file={self.binary_file} stall=0 ro=0 removeable=1\",\n shell=True,\n )\n self.is_usb_mounted = True", "title": "" }, { "docid": "c68bbf0c981c889979be72b545620c80", "score": "0.4793498", "text": "def probe_hdd():\n # Do command\n consoleOutput = exec_console_command(\"/root/bin/dfn_setup_data_hdds.sh -p\")\n data = {}\n\n # Parse results\n if \"no such file or directory\" in consoleOutput:\n consoleOutput = exec_console_command(\"/root/bin/dfn_setup_usb_hdds.sh -p\")\n\n if \"no such file or directory\" in consoleOutput:\n raise IOError(constants.scriptNotFound.format(\"dfn_setup_data_hdds.sh\"))\n\n firstLine = consoleOutput.split(\"\\n\")\n consoleOutput = firstLine[0]\n splitOutput = consoleOutput.split(\" \")\n\n for idx, token in enumerate(splitOutput):\n if \"/\" in token:\n data[splitOutput[idx + 1]] = token + \" \" + splitOutput[idx + 1]\n\n return data", "title": "" }, { "docid": "ab094eea73a1f0e4b07f272f65996db0", "score": "0.4767878", "text": "def sshd( network, cmd='/usr/sbin/sshd', opts='-D',\n ip='10.123.123.1/32', routes=None, switch=None ):\n if not switch:\n switch = network[ 's1' ] # switch to use\n if not routes:\n routes = [ '10.0.0.0/24' ]\n connectToRootNS( network, switch, ip, routes )\n for host in network.hosts:\n host.cmd( cmd + ' ' + opts + '&' )\n print \"*** Waiting for ssh daemons to start\"\n for server in network.hosts:\n waitListening( server=server, port=22, timeout=5 )\n\n print\n print \"*** Hosts are running sshd at the following addresses:\"\n print\n for host in network.hosts:\n print host.name, host.IP()\n print\n print \"*** Type 'exit' or control-D to shut down network\"\n setVlanHost(network.hosts[0],'300')\n setVlanHost(network.hosts[1],'300')\n setVlanHost(network.hosts[2],'200')\n setVlanHost(network.hosts[3],'200')\n\n CLI( network )\n for host in network.hosts:\n host.cmd( 'kill %' + cmd )\n network.stop()", "title": "" }, { "docid": "1f53cc0efd070faa93e23c08915daf98", "score": "0.47613013", "text": "def add_disks_different_sd(request, storage):\n self = request.node.cls\n\n for index in range(self.disks_count):\n alias = storage_helpers.create_unique_object_name(\n self.__name__, config.OBJECT_TYPE_DISK\n )\n testflow.step(\"Adding disk %s to VM %s\", alias, self.vm_name)\n assert ll_vms.addDisk(\n True, vm=self.vm_name, provisioned_size=3 * config.GB,\n wait=True, storagedomain=self.storage_domains[1],\n type=config.DISK_TYPE_DATA,\n interface=config.VIRTIO, format=config.COW_DISK,\n sparse='true', alias=alias\n )", "title": "" }, { "docid": "0b895a6c1bf2d99d2df7b06bbc83e1d8", "score": "0.47589952", "text": "def _detach_device(self, node, vdev):\n\n body = [' '.join(['--undedicatedevice', vdev])]\n self._xcat_chvm(node, body)", "title": "" }, { "docid": "38d5bdf33bca16fbaa17fbd116c954a7", "score": "0.47541893", "text": "def device_control_partition_id(self):", "title": "" }, { "docid": "301276503b50abc3fefff9111b3221c6", "score": "0.4745467", "text": "def create_disk(ctx,name,size,sr,readonly,vm,devicename,userdeviceno): \n try:\n session = xen_session()\n \n sr_pool = session.xenapi.pool.get_all()[0]\n default_sr = session.xenapi.pool.get_default_SR(sr_pool)\n default_sr = session.xenapi.SR.get_record(default_sr)\n sr_get=default_sr['uuid']\n sr_uuid = session.xenapi.SR.get_by_uuid(default_sr['uuid'])\n\n opaque = session.xenapi.VM.get_by_name_label(vm) \n\n # Need Byte Conversion\n bytes = str(long(size) * 
1024L * 1024L * 1024L) \n \n vdi={'name_label': name,\n\t 'name_description': name,\n\t 'SR': sr_uuid,\n\t 'virtual_size': str(bytes),\n\t 'type': \"user\",\n 'sharable': False,\n 'read_only': False,\n 'other_config': dict()}\n\n vdi_object=session.xenapi.VDI.create(vdi)\n \n # Now that the VDI is created, I have to create it's VBD and attach to the VM\n vbdconnected={'VDI':vdi_object,\n 'VM':str(opaque[0]),\n 'userdevice': userdeviceno,\n \t 'mode':\"RW\",\n \t 'type':\"Disk\",\n \t 'bootable':True,\n 'empty':False,\n \t 'unpluggable':True,\n\t 'other_config':{},\n 'qos_algorithm_type':'',\n\t 'qos_algorithm_params':{},\n \t 'device':devicename}\n \n session.xenapi.VBD.create(vbdconnected)\n logger.info(\"Device: \" + name + \" successfully created!\")\n except:\n logging.error(\"ERROR: Unexpected Error - \", sys.exc_info()[0])\n raise", "title": "" }, { "docid": "e0f4465fae27bad4ee884e6a1b0787b8", "score": "0.47398376", "text": "def printer_sd(self):\n req = self.get('printer/sd')\n\n if req.status_code == 404:\n raise Exception(\"SD Support Disabled\", \"SD support has been \"\n \"disabled in OctoPrint's config.\")\n\n resp = req.json()\n return resp.get('ready', False)", "title": "" }, { "docid": "10063269c156561d4f3dec050ea1b188", "score": "0.47387418", "text": "def _verify_storage_rule(self):\n azure_disk_path = \"/dev/disk/azure\"\n scsi1_path = azure_disk_path + \"/scsi1\"\n # 1. Check /sda and /sdb soft links\n self.log.info(\"1. Check /sda and /sdb soft links\")\n links = self._get_links(azure_disk_path)\n devices_list = re.findall(r\"\\w+\",\n self.session.cmd_output(\"cd /dev;ls sd*\"))\n for device in devices_list:\n self._check_in_link(device, links)\n # There should be root and resource links\n self._check_in_link('root', links)\n self._check_in_link('resource', links)\n # 2. Attach a new disk, check /dev/disk/azure/scsi1\n self.log.info(\"2. Attach a new disk, check /dev/disk/azure/scsi1\")\n disk1_name = \"{}-disk1-{}\".format(self.vm.vm_name, self.postfix)\n self.vm.unmanaged_disk_attach(disk1_name, 10)\n time.sleep(5)\n links = self._get_links(scsi1_path)\n self._check_in_link(\"sdc\", links)\n # 3. Create partition /dev/sdc1, then check /dev/disk/azure/scsi1\n self.log.info(\n \"3. Create partition /dev/sdc1, then check /dev/disk/azure/scsi1\")\n self._disk_part(disk=\"/dev/sdc\", size=1)\n time.sleep(5)\n links = self._get_links(scsi1_path)\n self._check_in_link(\"sdc1\", links)\n # 4. Remove the partition /dev/sdc1, then check /dev/disk/azure/scsi1\n self.log.info(\"4. Remove the partition /dev/sdc1, then check \\\n/dev/disk/azure/scsi1\")\n self.session.cmd_output(\"parted /dev/sdc rm 1\")\n time.sleep(5)\n links = self._get_links(scsi1_path)\n self._check_in_link(\"sdc\", links)\n self._check_not_in_link(\"sdc1\", links)\n # 5. Add another new disk(disk2). Create a partition and check\n # /dev/disk/azure/scsi1\n self.log.info(\"5. Add another new disk(disk2). Create a partition \\\nand check /dev/disk/azure/scsi1\")\n disk2_name = \"{}-disk2-{}\".format(self.vm.vm_name, self.postfix)\n self.vm.unmanaged_disk_attach(disk2_name, 10)\n time.sleep(5)\n links = self._get_links(scsi1_path)\n self._disk_part(disk=\"/dev/sdd\", size=1)\n time.sleep(5)\n links = self._get_links(scsi1_path)\n self._check_in_link(\"sdd\", links)\n self._check_in_link(\"sdd1\", links)\n disk1_identifier = self.session.cmd_output(\"fdisk -l /dev/sdc | grep 'Disk identifier'\")\n disk2_identifier = self.session.cmd_output(\"fdisk -l /dev/sdd | grep 'Disk identifier'\")\n # 6. Restart the VM. 
Then check /dev/disk/azure/scsi1\n self.log.info(\"6. Restart the VM. Then check /dev/disk/azure/scsi1\")\n self.vm.reboot()\n self.session.connect()\n self.session.cmd_output(\"sudo su -\")\n for device in ['sda', 'sdb', 'sdc', 'sdd']:\n tmp_identifier = self.session.cmd_output(\"fdisk -l /dev/{} | grep 'Disk identifier'\".format(device))\n if disk1_identifier == tmp_identifier:\n disk1 = device\n if disk2_identifier == tmp_identifier:\n disk2 = device\n links = self._get_links(scsi1_path)\n for device in [disk1, disk2, disk2+\"1\"]:\n self._check_in_link(device, links)\n # 7. Detach the disk2, then check /dev/disk/azure/scsi1 again\n self.log.info(\n \"7. Detach the disk2, then check /dev/disk/azure/scsi1 again\")\n self.vm.unmanaged_disk_detach(disk2_name)\n time.sleep(5)\n links = self._get_links(scsi1_path)\n self._check_in_link(disk1, links)\n self._check_not_in_link(disk2, links)\n self._check_not_in_link(disk2+\"1\", links)", "title": "" }, { "docid": "ae121d198648d9cc6eac09670cbdb0d5", "score": "0.47191876", "text": "def do_sdn_controller_show(cc, args):\n\n try:\n controller = cc.sdn_controller.get(args.uuid)\n except exc.HTTPNotFound:\n raise exc.CommandError('Create SDN Controller UUID not found: %s'\n % args.uuid)\n _print_sdn_controller_show(controller)", "title": "" }, { "docid": "39c85ec9bfd52a9d5ce4e3b701b688e1", "score": "0.47181907", "text": "def make_discoverable(self):\n try:\n out = self.get_output(\"discoverable on\")\n except BluetoothctlError as e:\n print(e)\n return None", "title": "" }, { "docid": "34cda3d442ad8e94c02d50c607858248", "score": "0.47020882", "text": "def device():\n\n # security check\n if session['group_id'] != 'admin':\n return _error_permission_denied('Unable to view devices')\n\n # get all firmware\n try:\n items = db.firmware.get_all()\n except CursorError as e:\n return _error_internal(str(e))\n\n # get all the guids we can target\n devices = []\n seen_guid = {}\n for item in items:\n for md in item.mds:\n if md.guids[0] in seen_guid:\n continue\n seen_guid[md.guids[0]] = 1\n devices.append(md.guids[0])\n\n return render_template('devices.html', devices=devices)", "title": "" }, { "docid": "1aa4f8506ebe100e8b14878f83e5dbb2", "score": "0.47014526", "text": "def get_device_list():\n token = get_auth_token() # Get Token\n url = \"https://{}/api/v1/network-device/15/40\".format(DNAC_URL)\n hdr = {'x-auth-token': token, 'content-type' : 'application/json'}\n resp = requests.get(url, headers=hdr, verify=False) # Make the Get Request\n device_list = resp.json()\n print(\"{0:25}{1:25}\".format(\"hostname\", \"id\"))\n for device in device_list['response']:\n print(\"{0:25}{1:25}\".format(device['hostname'], device['id']))", "title": "" }, { "docid": "c85643fad04ed79848b2e00a7727fd1e", "score": "0.4699129", "text": "def drive_a_sienna(self):\t\t\n\t\tprint(self.name.title() + \" is now driving a sienna.\")", "title": "" }, { "docid": "29fe3589c91f048effd0d87fb3e2b555", "score": "0.46976236", "text": "def drives_in_array(array_dev='/dev/md0', good_only=False):\n\n # make sure whatever the input, md0 or /dev/md0 we get the right arg for mdadm\n array_dev='/dev/'+array_dev.split('/')[-1]\n\n output = StringIO.StringIO(sp.Popen(ENV + ['mdadm','--detail',array_dev], stdout=sp.PIPE).communicate()[0])\n devices=[]\n start_caring=False\n for line in output:\n if not start_caring:\n if MDADM_START_MATCHER.match(line):\n start_caring=True\n else:\n devinfo=line.strip().split()[4:]\n if len(devinfo)>=3:\n 
devices.append((devinfo[-1].split('/')[-1],devinfo[0]))\n\n # filer bad drives, if requested\n if (good_only):\n devices=filter(lambda d: d[1] != 'faulty', devices)\n\n if len(devices)==0:\n return None\n else:\n devices.sort(lambda x,y: cmp(x[0],y[0]))\n\n return devices", "title": "" }, { "docid": "00ab1da87cc4614ad190734d36e79b20", "score": "0.46969637", "text": "def hdparm_secure_erase(disk_name, se_option):\n # enhance_se = ARG_LIST.e\n log_file = disk_name.split(\"/\")[-1] + \".log\" # log file for sdx will be sdx.log\n log = open(log_file, \"a+\")\n if se_option:\n hdparm_option = \"--\" + se_option\n else:\n hdparm_option = \"--security-erase\" # Default is security erase\n\n # Hdparm SE Step1: check disk status\n #\n # Secure Erase supported output example\n # Security:\n # Master password revision code = 65534\n # supported\n # not enabled\n # not locked\n # not frozen\n # not expired: security count\n # supported: enhanced erase\n # 2min for SECURITY ERASE UNIT. 2min for ENHANCED SECURITY ERASE UNIT.\n # Checksum: correct\n #\n # except for \"supported\" and \"enabled\", other items should have \"not\" before them\n if hdparm_option == \"--security-erase\":\n pattern_se_support = re.compile(r'[\\s\\S]*(?!not)[\\s]*supported'\n r'[\\s]*[\\s\\S]*enabled[\\s]*not[\\s]'\n r'*locked[\\s]*not[\\s]*frozen[\\s]*not[\\s]*expired[\\s\\S]*')\n else:\n pattern_se_support = re.compile(r'[\\s\\S]*(?!not)[\\s]*supported[\\s]*[\\s\\S]*enabled[\\s]*not'\n r'[\\s]*locked[\\s]*not[\\s]*frozen[\\s]*not[\\s]*expired[\\s\\S]*'\n r'supported: enhanced erase[\\s\\S]*')\n hdparm_check_drive_status(pattern_se_support, disk_name, log)\n\n # TODO: add section to unlocked a disk\n\n # Hdparm SE Step2: set password\n command = [\"hdparm\", \"--verbose\", \"--user-master\", \"u\",\n \"--security-set-pass\", SE_PASSWORD, disk_name]\n assert robust_check_call(command, log)[\"exit_code\"] == 0, \\\n \"Failed to set password for disk \" + disk_name\n\n # Hdparm SE Step3: confirm disk is ready for secure erase\n # both \"supported\" and \"enabled\" should have no \"not\" before them\n # other items should still have \"not\" before them\n pattern_se_enabled = re.compile(r'[\\s\\S]*(?!not)[\\s]*supported[\\s]*(?!not)[\\s]*enabled[\\s]*not'\n r'[\\s]*locked[\\s]*not[\\s]*frozen[\\s]*not[\\s]*expired[\\s\\S]*')\n hdparm_check_drive_status(pattern_se_enabled, disk_name, log)\n log.close()\n\n # Hdparm SE step4: run secure erase command\n command = [\"hdparm\", \"--verbose\", \"--user-master\", \"u\", hdparm_option, SE_PASSWORD, disk_name]\n return secure_erase_base(disk_name, command)", "title": "" }, { "docid": "dba852b392b9b0bc64726f6918db493e", "score": "0.46905965", "text": "def selectDevice(devlist):\n count = 0\n\n if len(devlist):\n print(\"Found {0} BLE devices:\\n\".format(str(len(devlist))))\n # Display a list of devices, sorting them by index number\n for d in devlist.asList():\n \"\"\"@type : Device\"\"\"\n count += 1\n print(\" [{0}] {1} ({2}:{3}:{4}:{5}:{6}:{7}, RSSI = {8})\".format(count, d.name,\n \"%02X\" % d.address[0],\n \"%02X\" % d.address[1],\n \"%02X\" % d.address[2],\n \"%02X\" % d.address[3],\n \"%02X\" % d.address[4],\n \"%02X\" % d.address[5],\n d.RSSI))\n try:\n i = int(input(\"\\nSelect a device to sniff, or '0' to scan again\\n> \"))\n except KeyboardInterrupt:\n raise KeyboardInterrupt\n return None\n except:\n return None\n\n # Select a device or scan again, depending on the input\n if (i > 0) and (i <= count):\n # Select the indicated device\n return devlist.find(i - 1)\n else:\n # This 
will start a new scan\n return None", "title": "" }, { "docid": "d72ae23769f2272e9e6a8174d8cf0065", "score": "0.4690267", "text": "def sshd( network, cmd='/usr/sbin/sshd', opts='-D',\n ip='10.123.123.1/32', routes=None, switch=None ):\n if not switch:\n switch = network[ 's1' ] # switch to use\n if not routes:\n routes = [ '100.0.1.0/24' ]\n connectToRootNS( network, switch, ip, routes )\n for host in network.hosts:\n host.cmd( cmd + ' ' + opts + '&' )\n info( \"*** Waiting for ssh daemons to start\\n\" )\n for server in network.hosts:\n waitListening( server=server, port=22, timeout=5 )\n\n info( \"\\n*** Hosts are running sshd at the following addresses:\\n\" )\n for host in network.hosts:\n info( host.name, host.IP(), '\\n' )\n \n restrictBandwidth( network )\n \n info( \"\\n*** Type 'exit' or control-D to shut down network\\n\" )\n CLI( network )\n for host in network.hosts:\n host.cmd( 'kill %' + cmd )\n \n restoreBandwidth( network )\n \n network.stop()", "title": "" }, { "docid": "5a932660116ee8402526028b2d8a1087", "score": "0.46891415", "text": "def cuda(self, dev_id=0):\n return self.device(Device.kDLCUDA, dev_id)", "title": "" }, { "docid": "7eb06781ab8f743d6053c1efab49e33b", "score": "0.46870857", "text": "def dfurun(self,vid,pid):\n print \"Starting a DFU device as %04X:%04X\" % (vid,pid);\n sys.stdout.flush();\n #Set the VID and PID.\n self.DD[8]=vid&0xFF;\n self.DD[9]=(vid>>8)&0xFF;\n self.DD[10]=pid&0xFF;\n self.DD[11]=(pid>>8)&0xFF;\n \n #Run the service loop.\n while 1:\n self.service_irqs();", "title": "" }, { "docid": "c3689fc39f3cd6a29503430c6b335bef", "score": "0.46848765", "text": "def device_control_send_type(self):", "title": "" }, { "docid": "491f3505fd7be26dfee3244170cd02c6", "score": "0.4684482", "text": "def do_dd(self, arg):\n try:\n address = int(arg.replace(\"`\", \"\"), 16)\n except ValueError:\n print(\"Couldn't resolve error at '{:s}'\".format(arg))\n return\n\n data = self._target.get_bytes(address, 128)\n\n # print hexdump of data in 16-byte lines\n for i in range(0, 128, 16):\n chunk_address = address + i\n chunk = data[i:i+16]\n chunk_dwords = []\n for j in range(0, 16, 4):\n chunk_dwords.append(struct.unpack(\"<I\", chunk[j:j+4])[0])\n chunk_dwords = [\"{:08x}\".format(d) for d in chunk_dwords]\n line = \"{:08x}`{:08x} {:s}\"\n print(line.format(chunk_address >> 32, chunk_address & 0xffffffff,\n \" \".join(chunk_dwords)))", "title": "" }, { "docid": "5892b8085c4fe62199ea2ef7e1b3910a", "score": "0.46815273", "text": "def sshd( network, cmd='/usr/sbin/sshd', opts='-D',\n ip='10.123.123.1/32', routes=None, switch=None ):\n network\n if not switch:\n switch = network[ 's1' ] # switch to use\n if not routes:\n routes = [ '10.0.0.0/24' ]\n connectToRootNS( network, switch, ip, routes )\n for host in network.hosts:\n host.cmd( cmd + ' ' + opts + '&' )\n info( \"*** Waiting for ssh daemons to start\\n\" )\n for server in network.hosts:\n waitListening( server=server, port=22, timeout=5 )\n\n info( \"\\n*** Hosts are running sshd at the following addresses:\\n\" )\n for host in network.hosts:\n info( host.name, host.IP(), '\\n' )\n info( \"\\n*** Type 'exit' or control-D to shut down network\\n\" )\n CLI( network )\n for host in network.hosts:\n host.cmd( 'kill %' + cmd )\n network.stop()", "title": "" }, { "docid": "1947ef8dd6acda4d8bafdf8593db5785", "score": "0.4675885", "text": "def get_blkdev_info(root_helper, devlist):\n \n data = []\n devitr = devlist.replace('/dev/','').split(',')\n for dev in devitr:\n devinfo = {'dev': dev}\n\n # size\n cmd = ['cat', 
'/sys/block/%s/size' % dev]\n (out, _err) = putils.execute(*cmd, \n root_helper=root_helper, \n run_as_root=True)\n if out is not None:\n devinfo['size'] = round(float(out.split()[0]) * 512 / pow(1024, 3), 2)\n\n # type\n cmd = ['cat', '/sys/block/%s/queue/rotational' % dev]\n (out, _err) = putils.execute(*cmd, \n root_helper=root_helper, \n run_as_root=True)\n if out is not None:\n devinfo['type'] = out.split()[0]\n\n # model\n cmd = ['cat', '/sys/block/%s/device/model' % dev]\n (out, _err) = putils.execute(*cmd, \n root_helper=root_helper, \n run_as_root=True)\n if out is not None:\n devinfo['model'] = out.split()[0]\n\n # vendor\n cmd = ['cat', '/sys/block/%s/device/vendor' % dev]\n (out, _err) = putils.execute(*cmd, \n root_helper=root_helper, \n run_as_root=True)\n if out is not None:\n devinfo['vendor'] = out.split()[0]\n\n # aggregate information\n data.append(devinfo)\n\n return data", "title": "" }, { "docid": "86b09f738efe09d4688bde49911c9f95", "score": "0.46736705", "text": "def unmount_hdd():\n outputDict = {'/data1': \"Drive #1\", '/data2': \"Drive #2\", '/data3': \"Drive #3\", }\n drives = ['']\n feedbackOutput = \"\"\n\n hostname = misc.get_hostname()\n\n if 'EXT' in hostname:\n drives = ['/data1', '/data2', '/data3']\n else:\n drives = ['/data1', '/data2']\n\n # Get current status of HDDs\n hdd_status = check_hdd()\n poweredArray = [hdd_status[\"HDD 1\"][\"status\"], hdd_status[\"HDD 2\"][\"status\"], hdd_status[\"HDD 3\"][\"status\"]]\n\n for idx, drive in enumerate(drives):\n # Do command for drive\n try:\n consoleOutput = exec_console_command(\"unmount {0}\".format(drive))\n\n if poweredArray[idx] == 0:\n feedbackOutput[\"HDD {0}\".format(idx+1)] = \"{0} unmount error: {1}\\n\".format(outputDict[drive], \"Hard drives need to be powered.\")\n else:\n feedbackOutput[\"HDD {0}\".format(idx+1)] = \"{0} unmounted successfully.\\n\".format(outputDict[drive])\n\n except CommandError as error:\n feedbackOutput[\"HDD {0}\".format(idx+1)] = \"{0} unmount error: {1}\\n\".format(outputDict[drive], \"Hard drives may have already been unmounted.\")\n\n return feedbackOutput", "title": "" }, { "docid": "bc7bd25fc9512b2e053100574a1d4258", "score": "0.46598363", "text": "def ovs_ofctl(action, device, transport):\n OFCTL=\"sudo /usr/bin/ovs-ofctl\"\n command = OFCTL + \" \" + action + \" \" + device\n output = exec_cmd(transport, command)\n return output", "title": "" }, { "docid": "184943716a242645513f29bec3dbcf7b", "score": "0.46594223", "text": "def search_usb_device_blocks():\n usb_url = USB_DEVICE_BLOCKS.format(HOSTNAME, ORG_KEY)\n return requests.get(usb_url, headers=HEADERS)", "title": "" }, { "docid": "54c51c1e8f695fa462ef0c4467b6ec82", "score": "0.46550888", "text": "def sd():\n wh()", "title": "" }, { "docid": "06aadbac36b9d9581b4a1c27ce4131eb", "score": "0.46532723", "text": "def grub_dev(dev):\n\n letter = {'a': '0', 'b': '1', 'c': '2', 'd': '3', 'e': '4',\n 'f': '5', 'g': '6', 'h': '7', 'i': '8'}\n num = {'1': '0', '2': '1', '3': '2', '4': '3', '5': '4',\n '6': '5', '7': '6', '8': '7', '9': '8'}\n\n ext = dev[7:]\n name = 'hd%s,%s' % (letter[ext[0]], num[ext[1:]])\n return name", "title": "" }, { "docid": "d86108c64ec44d598a76d7cfa8e411d0", "score": "0.46485794", "text": "def check_hdd():\n command = \"mount | grep {0} > /dev/null\"\n data1MountedStatus = exec_console_command(command.format(\"/data1\"))\n data2MountedStatus = exec_console_command(command.format(\"/data2\"))\n data3MountedStatus = exec_console_command(command.format(\"/data3\"))\n\n # Parse output for 
results\n # NB: Status 0 = Unpowered, Status 1 = Powered, but not mounted, Status 2 = Powered + Mounted\n hdd0Status = 0\n hdd1Status = 0\n hdd2Status = 0\n hdd3Status = 0\n hdd0Space = \"N/A\"\n hdd1Space = \"N/A\"\n hdd2Space = \"N/A\"\n hdd3Space = \"N/A\"\n\n # TODO[ash]: Check that this actually works to check success\n if exec_console_command(\"df | grep /data0\"):\n hdd0Status = 2\n\n # Check if HDDs are powered. Depends on system architecture\n # DFNSMALLs\n if \"EXT\" not in misc.get_hostname():\n poweredStatus = exec_console_command(\"lsusb\")\n\n if \"JMicron Technology Corp.\" in poweredStatus:\n hdd1Status = 1\n hdd2Status = 1\n hdd3Status = 0\n\n if data1MountedStatus == \"1\":\n hdd1Status = 2\n\n if data2MountedStatus == \"1\":\n hdd2Status = 2\n\n if data3MountedStatus == \"1\":\n hdd3Status = 2\n # DFNEXTs\n else:\n poweredStatus = exec_console_command(\"lsblk | grep 'sdb1\\|sdc1\\|sdd1'\")\n\n if \"sdb1\" in poweredStatus:\n hdd1Status = 1\n\n if data1MountedStatus == \"1\":\n hdd1Status = 2\n\n if \"sdc1\" in poweredStatus:\n hdd2Status = 1\n\n if data2MountedStatus == \"1\":\n hdd2Status = 2\n\n if \"sdd1\" in poweredStatus:\n hdd3Status = 1\n\n if data3MountedStatus == \"1\":\n hdd3Status = 2\n\n # Finding remaining space in HDDs\n # If mounted, use df\n if hdd1Status == 2 and hdd2Status == 2:\n outText = exec_console_command(\"df -h | egrep 'Filesystem|data'\")\n\n if outText:\n lines = outText.split('\\n')\n # If not mounted, use disk usage file\n else:\n try:\n with open(\"/tmp/dfn_disk_usage\") as f:\n lines = f.readlines()\n except IOError:\n stack = inspect.stack()\n frame = stack[1][0]\n\n if hasattr(frame.f_locals, \"self\"):\n raise IOError(\"Error reading disk usage log file. To see disk space, please power and mount external drives.\")\n\n for line in lines: # For each line in the file\n fixedLine = re.sub(\" +\", \",\", line) # Reduce whitespace down to 1\n\n if line[0] == \"/\": # If the line is the title line, ignore it\n splitLine = re.split(\",\", fixedLine) # Split into terms\n device = splitLine[5] # Get mounted name\n spaceAvail = splitLine[4] # Get space for that mount\n\n # Check if the data applies, if so assign to variable\n if \"/data0\" in device:\n hdd0Space = spaceAvail\n if \"/data1\" in device:\n hdd1Space = spaceAvail\n if \"/data2\" in device:\n hdd2Space = spaceAvail\n if \"/data3\" in device:\n hdd3Space = spaceAvail\n\n hdd_status = []\n\n # TODO[ash]: Ask Scott how this list syntax works\n hdd_status[\"HDD 0\"] = {\n \"status\": hdd0Status,\n \"space\": hdd0Space\n }\n\n hdd_status[\"HDD 1\"] = {\n \"status\": hdd1Status,\n \"space\": hdd1Space\n }\n\n hdd_status[\"HDD 2\"] = {\n \"status\": hdd2Status,\n \"space\": hdd2Space\n }\n\n hdd_status[\"HDD 3\"] = {\n \"status\": hdd3Status,\n \"space\": hdd3Space\n }\n\n return hdd_status", "title": "" }, { "docid": "fa8e95dda0d082c1663998dc5b75308d", "score": "0.46456107", "text": "def get_disks():\n\n import re\n\n # parsing partitions from the procfs\n # attetion with the output format. 
the partitions list is without '/dev/'\n partition_table = open('/proc/partitions').read()\n regex = re.compile('[sh]d[a-g]')\n disks = regex.findall(partition_table)\n disks = set(disks) \n\n return disks", "title": "" }, { "docid": "05ec39e0c0493353c5e9306a84a1f7c6", "score": "0.46417966", "text": "def FDCcheckDeviceCondition(self):\n command = \"D\\r\"\n result = self.__FDCcommandResponse(command, 8)\n if self.verbose:\n # third byte:\n # 30 - Normal\n # 45 - Door open or no disk\n # 32 - write protected\n print \"third byte = 0x%X\" % ord(result[2])\n return result", "title": "" }, { "docid": "a795c202c0c770c370bec74d6b15edd2", "score": "0.46390468", "text": "def disk_usage_d_gestion():\r\n #\r\n # Take the info about the disk D from the function psutil\r\n #\r\n disk_info_d = get_disk_usage('D:')\r\n disk_stats_d = [\r\n \"Total space: \" + str(disk_info_d.total),\r\n \" / Used space: \" + str(disk_info_d.used),\r\n \" / Free space : \" + str(disk_info_d.free),\r\n \" / Percent : \" + str(disk_info_d.percent),\r\n ]\r\n disk_info_d_towrite = \"\".join(disk_stats_d)\r\n #\r\n # Write the info into the info.txt\r\n #\r\n file_text_d = f\"Disk D => {disk_info_d_towrite} %\"\r\n write_into_file(file_text_d + space)\r\n\r\n #\r\n # Insert info about the disk D into the database\r\n #\r\n sqlstatement_disk_d = f\"INSERT INTO disk_usage_d\" \\\r\n f\"(\" \\\r\n f\"total, used, free, percent\" \\\r\n f\") \" \\\r\n f\"VALUES (\" \\\r\n f\"'{disk_info_d.total}',\" \\\r\n f\" '{disk_info_d.used}', \" \\\r\n f\" '{disk_info_d.free}', \" \\\r\n f\" '{round(disk_info_d.percent)}') \"\r\n write_to_db(sqlstatement_disk_d)", "title": "" }, { "docid": "c43fe2aee7aa02690c5fe40dd70b77c2", "score": "0.46306297", "text": "def ssh_command2(device, show_command):\n net_connect = ConnectHandler(**device)\n output = net_connect.send_command(command)\n net_connect.disconnect()\n print(\"\\n\")\n print(\"-\" * 20)\n print(output)\n print(\"-\" * 20)\n print(\"\\n\")\n return", "title": "" }, { "docid": "f5c9513f69dbcedcae5dc70ed7edd7a4", "score": "0.46297058", "text": "async def spell(self) -> None:\n attr_to_read = [4, 0, 1, 5, 7, 0xFFFE]\n basic_cluster = self.endpoints[1].in_clusters[0]\n await basic_cluster.read_attributes(attr_to_read)", "title": "" }, { "docid": "ee0ca1a1a3821b0a6160854dfd9045c7", "score": "0.4620331", "text": "def discover_device(self):\n\n print(\"Trying to find Mach 30 device(s)...\")\n\n # Get a list of the serial devices avaialble so we can query them\n #self.device_locations = self.list_serial_ports()\n self.device_locations = [ 'COM1', 'COM2', 'COM3', 'COM4', '/dev/ttyACM0', '/dev/ttyACM1' ]\n \n # We have to walk through and try ports until we find our device\n for location in self.device_locations:\n print(\"Trying \", location)\n\n # Attempt to connect to a Shepard device via serial\n try:\n # Set up the connection\n self.device = serial.Serial(location, 115200)\n\n # Wait for the serial interface to come up on the device\n time.sleep(2.5)\n\n # If it's a Shepard device it should echo this back\n self.device.write(bytes(\"D\", 'UTF-8'))\n self.device.flush()\n \n # If we got a 'D' back, we have a Shepard device\n if self.device.read(1) == 'D':\n print(\"Device Found on\", location)\n\n break\n except Exception as inst:\n #print \"Failed to connect:\", inst\n pass", "title": "" }, { "docid": "56751dbea12a39bab502c9c3a0db4a23", "score": "0.4617205", "text": "def hackSDMToSED(data):\n\tdata.setMeta(\"utype\", \"sed:SED\")\n\ttable = 
data.getPrimaryTable()\n\ttable.setMeta(\"utype\", \"sed:Segment\")\n\t# copy the table definition to avoid clobbering the real attributes.\n\t# All this sucks. At some point we'll want real SED support\n\ttable.tableDef = table.tableDef.copy(table.tableDef.parent)\n\tfor col in table.tableDef:\n\t\tif col.utype in _SDM_TO_SED_UTYPES:\n\t\t\tcol.utype = _SDM_TO_SED_UTYPES[col.utype]\n\tfor param in table.tableDef.params:\n\t\tif param.utype in _SDM_TO_SED_UTYPES:\n\t\t\tparam.utype = _SDM_TO_SED_UTYPES[param.utype]", "title": "" }, { "docid": "16ff9c27456e0d06505650849ea8ced5", "score": "0.46122506", "text": "def getdrives():\n\t\n\tresults = [StringIO(subprocess.check_output(\"wmic logicaldisk get %s\" % s))\\\n\t\t\t\tfor s in (\"name\", \"volumename\", \"freespace\")]\n\t\n\tfrom itertools import repeat\n\t\n\tformat_template = \"\\t{0}\\t\\t\\t {1}\\t\\t {2}\"\n\t\n\tprint(format_template.format(*(next(f).strip() for f in results)))\n\t\n\tprint(\" {0}\\t\\t{1}\\t\\t{2}\".format(*repeat(\"-\"*15, 3)))\n\t\n\tspace_str = lambda s: str(round(float(s) / 10**9, 1)) + \" GB\"\n\tstrip_strs = lambda three: (s.strip() for s in three)\n\t\n\tfor name, vol_name, space in (strip_strs(t) for t in zip(*results)):\n\t\tif name:\n\t\t\tprint(format_template.format(\n\t\t\tname,\n\t\t\tvol_name or (name in shared_operations.cddrives() and \"(CD-ROM)\" or \"Unavailable\"),\n\t\t\tspace and space_str(space) or \"Unavailable\"))", "title": "" }, { "docid": "6858102e18b4148617c58c93f0d77c73", "score": "0.46105146", "text": "def kit_select(self, dev):\n self.kit_write('e:p:s', struct.pack(\"<B\", dev))\n return self.kit_read(0)", "title": "" }, { "docid": "841228946ad2513ad555187087ab9f69", "score": "0.46057144", "text": "def mamage_diskio(self, value):\n \n term.pos(31, 68)\n term.clearLineFromPos()\n term.write(\"Read: \" + str(round(value[\"read\"], 2)) + \" KB\")\n term.pos(31, 88)\n term.write(\"|\", term.bold)\n term.pos(32, 68)\n term.clearLineFromPos()\n term.write(\"Wrtn: \" + str(round(value[\"write\"], 2)) + \" KB\")\n term.pos(32, 88)\n term.write(\"|\", term.bold)\n term.pos(33, 68)\n term.write(\"Rsec: \" + str(value[\"read_count\"]))\n term.pos(34, 68)\n term.write(\"Wsec: \" + str(value[\"write_count\"]))\n term.pos(35, 69)\n term.write(\"Tps: \" + str(value[\"tps\"]))\n term.pos(32, 68)", "title": "" }, { "docid": "31cc610ed94966a087b7069f41800ece", "score": "0.46017826", "text": "async def test_hddtemp_one_disk(hass: HomeAssistant, telnetmock) -> None:\n assert await async_setup_component(hass, \"sensor\", VALID_CONFIG_ONE_DISK)\n await hass.async_block_till_done()\n\n state = hass.states.get(\"sensor.hd_temperature_dev_sdd1\")\n\n reference = REFERENCE[state.attributes.get(\"device\")]\n\n assert state.state == reference[\"temperature\"]\n assert state.attributes.get(\"device\") == reference[\"device\"]\n assert state.attributes.get(\"model\") == reference[\"model\"]\n assert (\n state.attributes.get(\"unit_of_measurement\") == reference[\"unit_of_measurement\"]\n )\n assert (\n state.attributes.get(\"friendly_name\") == f\"HD Temperature {reference['device']}\"\n )", "title": "" }, { "docid": "c99c69eaafe01b9da17025775887f0dc", "score": "0.45989293", "text": "def find_vga_device():\n cmd = ['lspci', '-knnvmm']\n try:\n lspci_out = subprocess.check_output(cmd)\n except subprocess.CalledProcessError as cpe:\n logger.warning(\"lspci call failed with {}\".format(cpe))\n return None\n\n device_info = {}\n graphics_card = None\n for line in lspci_out.decode().split('\\n'):\n if 
line.strip():\n (tag, value) = line.split(':', 1)\n device_info[tag] = value.strip()\n elif device_info:\n if \"Class\" in device_info:\n if device_info[\"Class\"].endswith(\"[0300]\"):\n graphics_card = dict(device_info)\n break\n device_info = {}\n logging.debug(\"Found graphics card {}\".format(graphics_card))\n return graphics_card", "title": "" }, { "docid": "7f1c46ad419fb80bd56bf286e92fd7c0", "score": "0.45909855", "text": "def launch_lsusb_command(self, string_search_target):\r\n local_command_string = self.get_command_string()\r\n print \"\\nMock command: \" + local_command_string\r\n line = 'Bus 002 Device 003: ID 8086:0ad1 Intel Corp.'\r\n self.find_ids_from_input_line(line, string_search_target)\r\n return None", "title": "" }, { "docid": "46616b04ab3af521f714f93d5e1587d7", "score": "0.45875725", "text": "def descriptor_info(self):\n # TODO strip \"DS\" prefixes from each line?\n return self.comp_command('DS')", "title": "" }, { "docid": "a982f818edec9f1e6923c540a0d68f27", "score": "0.4581409", "text": "def disk_wipe():\n print('Destroying filesystem and partition data')\n sh.wipefs('-a', config.disk_dev)\n sh.sgdisk('--zap-all', config.disk_dev)\n sh.sgdisk('-og', config.disk_dev)\n print('Writing 10GB of zeros to the drive, this may take seconds or minutes depending on disk speed')\n sh.dd('bs=10M', 'count=1024', 'if=/dev/zero', f'of={config.disk_dev}', 'conv=fdatasync', _out=sys.stdout)", "title": "" }, { "docid": "a73dce232f8f2a27e7e179c90f054dce", "score": "0.45777217", "text": "def DeviceName(self):\n return self.read_block(0x21)", "title": "" }, { "docid": "85c565f1fa75b5da92a5127885acc141", "score": "0.4566034", "text": "def _disable_radvd(self, dev):\n if dev == '':\n return\n CsHelper.service(\"radvd\", \"stop\")\n CsHelper.service(\"radvd\", \"disable\")\n logging.info(CsHelper.execute(\"systemctl status radvd\"))", "title": "" }, { "docid": "3e31f7533dd73598648439e86a42d976", "score": "0.4565883", "text": "def hdparm_check_drive_status(pattern, disk_name, log):\n command = [\"hdparm\", \"-I\", disk_name]\n exit_status = robust_check_output(cmd=command, log=log)\n assert exit_status[\"exit_code\"] == 0, \"Can't get drive %s SE or ESE status\" % disk_name\n output = exit_status[\"message\"]\n secure_index = output.find(\"Security\")\n assert secure_index != -1, \\\n \"Can't find security info, probably disk %s doesn't support SE and ESE\" % disk_name\n output_secure_items = output[secure_index:-1]\n assert pattern.match(output_secure_items), \"Disk is not enabled for secure erase\"\n return", "title": "" }, { "docid": "18eabce769427c48f3463ae6b882e28e", "score": "0.4564764", "text": "def device(device_name):\n try:\n localdiskutils.setup_device_lvm(device_name, ctx['vg_name'])\n except subproc.CommandAliasError:\n _LOGGER.error(_ALIAS_ERROR_MESSAGE)", "title": "" }, { "docid": "711b7f79fdfe73da1e16b863541aa404", "score": "0.45632297", "text": "def set_dev_params(self,name,params):\n o=''; e=0\n if 'readahead' in params.keys():\n (e,o)= process_call(['blockdev','--setra',str(params['readahead']),'/dev/'+name])\n if 'iosched' in params.keys():\n try:\n f=open('/sys/block/%s/queue/scheduler' % name,'w')\n f.write(params['iosched'])\n f.close()\n except Exception, err:\n logger.agentlog.error('cant set ioscheduler on device '+name)\n e=1; o=str(err)\n print o\n return (e,o)", "title": "" }, { "docid": "249e103a985c24429e822934d79ff480", "score": "0.4561141", "text": "def set_dc(self):\n self.instr.write(self.channel + ':COUPLING DC')\n return None", "title": "" }, { 
"docid": "6f4d34bfe9a9b67974134b3778adad78", "score": "0.45604607", "text": "def test_from_knx(self):\n payload = APCI.from_knx(bytes([0x03, 0x4D, 0x00, 0x7B]))\n\n assert payload == DeviceDescriptorResponse(descriptor=13, value=123)", "title": "" }, { "docid": "6b8e772a33eb418b00ff6e0d886de355", "score": "0.45524785", "text": "def smart_attributes(dev, attributes=None, values=None):\n\n if not dev.startswith(\"/dev/\"):\n dev = \"/dev/\" + dev\n\n cmd = \"smartctl --attributes {}\".format(dev)\n smart_result = __salt__[\"cmd.run_all\"](cmd, output_loglevel=\"quiet\")\n if smart_result[\"retcode\"] != 0:\n raise CommandExecutionError(smart_result[\"stderr\"])\n\n smart_result = iter(smart_result[\"stdout\"].splitlines())\n\n fields = []\n for line in smart_result:\n if line.startswith(\"ID#\"):\n fields = re.split(r\"\\s+\", line.strip())\n fields = [key.lower() for key in fields[1:]]\n break\n\n if values is not None:\n fields = [field if field in values else \"_\" for field in fields]\n\n smart_attr = {}\n for line in smart_result:\n if not re.match(r\"[\\s]*\\d\", line):\n break\n\n line = re.split(r\"\\s+\", line.strip(), maxsplit=len(fields))\n attr = int(line[0])\n\n if attributes is not None and attr not in attributes:\n continue\n\n data = dict(zip(fields, line[1:]))\n try:\n del data[\"_\"]\n except Exception: # pylint: disable=broad-except\n pass\n\n for field in data:\n val = data[field]\n try:\n val = int(val)\n except Exception: # pylint: disable=broad-except\n try:\n val = [int(value) for value in val.split(\" \")]\n except Exception: # pylint: disable=broad-except\n pass\n data[field] = val\n\n smart_attr[attr] = data\n\n return smart_attr", "title": "" }, { "docid": "096963d1c62cd1dad974b7e9f405e46b", "score": "0.45431703", "text": "def test_descriptor_from_ficon_is_correct(hmc_session: FakedSession):\n cpc = hmc_session.hmc.cpcs.list()[0]\n partition = cpc.partitions.list()[0]\n stgroup = hmc_session.hmc.consoles.list()[0].storage_groups.list(\n {'type': 'fc'})[0]\n dasd_volume = stgroup.storage_volumes.list({'eckd-type': 'base'})[0]\n alias_volume = stgroup.storage_volumes.list({'eckd-type': 'alias'})[0]\n\n dasd_descriptor = describe_storage_volume(dasd_volume, partition)\n alias_descriptor = describe_storage_volume(alias_volume, partition)\n\n # make sure we picked correct volume\n assert isinstance(dasd_descriptor, FiconVolumeDescriptor)\n assert not dasd_descriptor.is_alias\n assert isinstance(alias_descriptor, FiconVolumeDescriptor)\n assert alias_descriptor.is_alias", "title": "" }, { "docid": "80415dc03a1af4921c59e2d1e0f09b4b", "score": "0.4540066", "text": "def list_devices():\n return usb.core.find(idVendor=0x49f, idProduct=0x505a, find_all=True)", "title": "" }, { "docid": "bf0be5447dc0e1cb7122f349b3311176", "score": "0.4535182", "text": "def _set_up_eth_usb(self):\n # use RNDIS mode\n #send_val = b\"\\x01\\x00\\x00\\x01\\x00\\x00\\x00\\x01\\x00\\x04\"\n\n # use CDC/ECM mode\n send_val = b\"\\x01\\x00\\x00\\x01\\x00\\x00\\x00\\x01\\x01\\x04\"\n try:\n ser = serial.Serial(\n \"/dev/ttyACM0\",\n 9600,\n serial.EIGHTBITS,\n serial.PARITY_NONE,\n serial.STOPBITS_ONE,\n )\n ser.write(send_val)\n except serial.serialutil.SerialException:\n try:\n ser = serial.Serial(\n \"/dev/ttyACM1\",\n 9600,\n serial.EIGHTBITS,\n serial.PARITY_NONE,\n serial.STOPBITS_ONE,\n )\n ser.write(send_val)\n except:\n pass", "title": "" }, { "docid": "a1100d6f6a82e772b3f9f728461a9560", "score": "0.45320863", "text": "def smart_test():\n smalldrives = [\"usbjmicron,00\", \"usbjmicron,01\"]\n 
successfuldrives = list(smalldrives)\n output = {}\n feedbackOutput = \"\"\n\n # If hardrives off or not mounted, get outta here!\n hdd_status = check_hdd()\n\n try:\n assert hdd_status[\"HDD 1\"][\"status\"] == 0 and hdd_status[\"HDD 2\"][\"status\"] == 0\n except AssertionError:\n raise AssertionError(\"Smart test failed. Hard drives need to be powered.\")\n\n # Start all smart tests\n for drive in smalldrives:\n consoleOutput = exec_console_command(\"smartctl -d {0} -t short /dev/sdb\".format(drive))\n\n if \"\\n127\" in consoleOutput:\n raise OSError(\"Smart test command not installed. Please contact [email protected].\")\n\n elif \"\\n0\" in consoleOutput:\n output.update({drive: \"\\nSmart test for {0} successfully executed.\\n\".format(drive)})\n\n else:\n output.update({drive: \"\\nSmart test {0} failed execution (try re-powering drives).\\n\".format(drive)})\n successfuldrives.remove(drive)\n\n # Wait for completion\n if successfuldrives:\n # Sleep while smart test performs\n time.sleep(70)\n\n # Evaluate results\n for drive in successfuldrives:\n consoleOutput = exec_console_command(\"smartctl -d {0} -a /dev/sdb\".format(drive))\n\n if \"No Errors Logged\" in consoleOutput:\n output[drive] += \"Smart test for {0} passed.\\n\".format(drive)\n else:\n output[drive] += \"Smart test for {0} failed.\\n\".format(drive)\n\n for drive in smalldrives:\n feedbackOutput += output[drive]\n\n return feedbackOutput", "title": "" }, { "docid": "40eae950c058ab34e9ce3bd09acf6a7b", "score": "0.45314983", "text": "def hddStepper(driveNum, velocity):\n print(\"HDD stepper {} triggered at {} velocity\".format(driveNum, velocity))\n msg = []\n msg.append(0xff)\n msg.append(driveNum)\n msg.append(velocity)\n hddStepperArduino.write(msg)", "title": "" }, { "docid": "3841a00d5455840b6c534169f1afa25b", "score": "0.4523894", "text": "def is_raid(device):\n return device.startswith(\"/dev/md\")", "title": "" }, { "docid": "c00203bc3ea626ebeb846ea9b2f948c5", "score": "0.45216376", "text": "def create_software_raid(root_helper, stspec):\n\n # choose block devices to use.\n ndisk = int(stspec['ndisk'])\n devlist = stspec['phydevs'].split(',')[0:ndisk]\n ndisk = len(devlist)\n\n # if available device is just one, no need to create an array.\n if ndisk == 1:\n return devlist[0]\n\n # decide an array name.\n cmd = ['cat', '/proc/mdstat']\n try:\n (out, _err) = putils.execute(*cmd,\n root_helper=root_helper,\n run_as_root=True)\n #LOG.debug('[MRA] mdstat out: %(out)s' % {'out': out})\n except putils.ProcessExecutionError as err:\n LOG.exception(_LE('Error in checking raid array info.'))\n LOG.error(_LE('Cmd :%s') % err.cmd)\n LOG.error(_LE('StdOut :%s') % err.stdout)\n LOG.error(_LE('StdErr :%s') % err.stderr)\n raise\n\n lines = out.split('\\n')\n idx = 0\n for line in lines:\n arrname = line.split()[0] if len(line.split()) > 0 else None\n #parsed = line.split()\n #if len(parsed) > 0:\n # arrname = parsed[0]\n if arrname is not None and arrname.startswith('md'):\n arridx = int(arrname[2:])\n idx = arridx+1 if arridx >= idx else idx\n\n array_name = '/dev/md' + str(idx)\n\n # making a command\n cmd = ['mdadm', '--create', array_name, '--run', '--assume-clean']\n\n raidconf = stspec['raidconf']\n if raidconf == \"jbod\":\n cmd.extend(['--level', 'linear', '--raid-devices', ndisk])\n elif raidconf == \"raid0\":\n cmd.extend(['--level', 'stripe', '--raid-devices', ndisk])\n elif raidconf == \"raid1\":\n cmd.extend(['--level', 'mirror', '--raid-devices', ndisk])\n elif raidconf == \"raid5\":\n cmd.extend(['--level', 
'5', '--raid-devices', ndisk])\n elif raidconf == \"raid6\":\n cmd.extend(['--level', '6', '--raid-devices', ndisk])\n else:\n LOG.error(\"[MRA] unknown software raid configuration: %s\" % stspec['raidconf'])\n raise\n\n for dev in devlist:\n cmd.extend([dev])\n\n # execute mdadm commands.\n try:\n (out, _err) = putils.execute(*cmd,\n root_helper=root_helper,\n run_as_root=True)\n except putils.ProcessExecutionError as err:\n LOG.exception(_LE('Error creating Software RAID'))\n LOG.error(_LE('Cmd :%s') % err.cmd)\n LOG.error(_LE('StdOut :%s') % err.stdout)\n LOG.error(_LE('StdErr :%s') % err.stderr)\n raise\n\n return array_name", "title": "" } ]
d9e0c850fb5ac39f1ac1485d0d02f903
Delete the given entity and save if specified.
[ { "docid": "77927a1bebad2d96ec15cffe291ba3a0", "score": "0.61194575", "text": "def delete(self, commit=True):\n ufo.db.session.delete(self)\n return commit and ufo.db.session.commit()", "title": "" } ]
[ { "docid": "895f750707f81a5f6c61c536bfe65074", "score": "0.78031045", "text": "def delete(self, entity):\n\n entity.delete()\n # entity has been deleted call _onDelete\n self._onDelete(entity)", "title": "" }, { "docid": "dc5642c37ec78fb25ad36ba7f63fa2a4", "score": "0.7014826", "text": "def _onDelete(self, entity):\n\n if not entity:\n raise NoEntityError", "title": "" }, { "docid": "1c35db168aee907f8fd5cc07cc1729a1", "score": "0.69175076", "text": "def delete_entity_by_id(self, entity, entity_id):\n return self.delete_entity(entity + '/' + entity_id)", "title": "" }, { "docid": "03bffe6b1a94a3b2b4e030827a145931", "score": "0.68949056", "text": "def entity_delete(args):\n\n msg = \"WARNING: this will delete {0} {1} in {2}/{3}\".format(\n args.entity_type, args.entity, args.project, args.workspace)\n\n if not (args.yes or _confirm_prompt(msg)):\n return\n\n json_body=[{\"entityType\": args.entity_type,\n \"entityName\": args.entity}]\n r = fapi.delete_entities(args.project, args.workspace, json_body)\n fapi._check_response_code(r, 204)\n if fcconfig.verbosity:\n print(\"Succesfully deleted \" + args.type + \" \" + args.entity)", "title": "" }, { "docid": "d2d5ddb64cf15fd89cba40a2a2c64d27", "score": "0.6736677", "text": "def obj_delete(self, bundle, **kwargs):\n if not hasattr(bundle.obj, 'delete'):\n try:\n bundle.obj = self.obj_get(bundle=bundle, **kwargs)\n except self.Meta.object_class.DoesNotExist:\n raise NotFound(\n \"A model instance matching the \"\n \"provided arguments could not be found\")\n\n self.authorized_delete_detail(\n self.get_object_list(\n bundle.request), bundle)\n if \"is_deleted\" in bundle.obj.__dict__:\n bundle.obj.__dict__.update({\"is_deleted\": True})\n bundle.obj.save()\n else:\n bundle.obj.delete()", "title": "" }, { "docid": "504503303b138571bd4800e4e8a4838f", "score": "0.66985846", "text": "def delete(self, obj=None):\n if obj:\n self.__session.delete(obj)\n self.save()", "title": "" }, { "docid": "d227f87aa31aea126d1ba55cb526ff9e", "score": "0.6586945", "text": "def delete_entity(self, request, tenant_id, entity_id):\n entities = self._entity_cache_for_tenant(tenant_id).entities_list\n checks = self._entity_cache_for_tenant(tenant_id).checks_list\n alarms = self._entity_cache_for_tenant(tenant_id).alarms_list\n for e in range(len(entities)):\n if entities[e]['id'] == entity_id:\n del entities[e]\n break\n for c in checks:\n if c['entity_id'] == entity_id:\n del checks[checks.index(c)]\n for a in alarms:\n if a['entity_id'] == entity_id:\n del alarms[alarms.index(a)]\n request.setResponseCode(204)\n request.setHeader('content-type', 'text/plain')", "title": "" }, { "docid": "e4ffcbf22d8edc093be5f1dd8d9af00a", "score": "0.6583079", "text": "def delete_entity(self, entity: int, immediate=False) -> None:\n if immediate:\n for component_type in self._entities[entity]:\n self._components[component_type].discard(entity)\n\n if not self._components[component_type]:\n del self._components[component_type]\n\n del self._entities[entity]\n self.clear_cache()\n\n else:\n self._dead_entities.add(entity)", "title": "" }, { "docid": "19e5e4310dc6cbf00c3f84597b0e9a23", "score": "0.6557732", "text": "def delete(self, *args, **kwargs):\n # Create the model and fetch its data\n self.model = self.get_model(kwargs.get('id'))\n result = yield self.model.fetch()\n\n # If model is not found, return 404\n if not result:\n self.not_found()\n return\n\n # Stub to check for delete permissions\n if not self.has_delete_permission():\n self.permission_denied()\n return\n\n # Delete the 
model from its storage backend\n self.model.delete()\n\n # Set the status to request processed, no content returned\n self.set_status(204)\n self.finish()", "title": "" }, { "docid": "0531aa58ff1f348d52cdc24df3161d5f", "score": "0.6530567", "text": "def remove_entity(self, entity):\n raise NotImplementedError()", "title": "" }, { "docid": "933b486f0de1e8bbf8d1b1e70b68dd83", "score": "0.65196794", "text": "def test_deleting_a_persisted_entity(self, test_domain):\n person = test_domain.repository_for(Person)._dao.create(\n id=3, first_name=\"Jim\", last_name=\"Carrey\"\n )\n deleted_person = test_domain.repository_for(Person)._dao.delete(person)\n assert deleted_person is not None\n assert deleted_person.state_.is_destroyed is True\n\n with pytest.raises(ObjectNotFoundError):\n test_domain.repository_for(Person)._dao.get(3)", "title": "" }, { "docid": "9a6e33adeb0072633830855f748773c4", "score": "0.64800304", "text": "def delete(ctx: Configuration, entity):\n ctx.auto_output(\"table\")\n deleted = api.remove_state(ctx, entity)\n\n if deleted:\n ctx.echo(\"State for entity %s deleted.\", entity)\n else:\n ctx.echo(\"Entity %s not found.\", entity)", "title": "" }, { "docid": "9a8c9c80d5fb103238f473f80cd7ff05", "score": "0.6455669", "text": "def delete(self):\n self.session.delete(self)\n self.quick_save()", "title": "" }, { "docid": "2897269cf966e54c4adb06d055b5348f", "score": "0.64279526", "text": "def delete_entity(self, entity):\n for comp_set in self._components.values():\n comp_set.pop(entity, None)\n\n self._entities.discard(entity)", "title": "" }, { "docid": "1fd42129cb68ddc1970b9861d8954b4d", "score": "0.6392887", "text": "def delete(self, model, pk=None, **kwargs):\n # letting self.models[model] raise a KeyError on purpose, see above\n instance = self.models[model].query.get_or_404(pk)\n self.database.session.delete(instance)\n self.database.session.commit()", "title": "" }, { "docid": "9fd1d6724a448c55d9f89c1274380437", "score": "0.6356544", "text": "def delete(self,entityId):\n if self.findById(entityId) is None:\n raise InvalidIdException(\"Invalid id {0}.\".format(entityId))\n del(self._entities[entityId])", "title": "" }, { "docid": "c3e44db4c492d56e6ddcfb4bd4059b35", "score": "0.63444", "text": "def delete_entity(self, entity_id):\r\n\t\tfor comp_type in self.database.iterkeys():\r\n\t\t\ttry:\r\n\t\t\t\tdel self.database[comp_type][entity_id]\r\n\t\t\t\tif self.database[comp_type] == {}:\r\n\t\t\t\t\tdel self.database[comp_type]\r\n\t\t\texcept KeyError:\r\n\t\t\t\tpass", "title": "" }, { "docid": "8cca2e073056ce2125cbccd6022b02b9", "score": "0.62968224", "text": "def delete(self, hass):\n dr = device_registry.async_get(hass)\n er = entity_registry.async_get(hass)\n\n for e in self.entities.values():\n er.async_remove(e.entity_id)\n\n self.entities = {}\n\n device = dr.async_get_device({(DOMAIN, self.browserID)})\n dr.async_remove_device(device.id)", "title": "" }, { "docid": "3e56af6983413d87127bd3410515fa8c", "score": "0.6269139", "text": "def delete(self, record):\n # note: does (and should) not delete or destroy the record\n if self.record:\n self._validate_record(record)\n if self.join_args:\n # Need to find out who has the foreign key\n # If record has it, set to None, then done.\n # If one level up has it, mark the record for destruction\n final_table = self.join_args[0]['table']\n if final_table in associations.associations_for(self.model):\n # +record+ does not have the foreign key\n # Find the record one level up, then mark for destruction\n related_class = 
associations.model_from_name(\n final_table)\n related_record = related_class.find_by(\n **self._related_args(record, related_class))\n # mark the joining record to be destroyed the primary is saved\n self.record._delete_related_records.append(related_record)\n else:\n # We have the foreign key\n # Look up in the foreign key table, bearing in mind that\n # this is a belongs_to, so the entry will be singular,\n # whereas the table name is plural (we need to remove the\n # 's' at the end)\n key = associations.foreign_keys_for(self.model\n )[inflector.singularize(final_table)]\n # Set the foreign key to None to deassociate\n setattr(record, key, None)\n else:\n setattr(record, foreign_key(record, self.record), None)\n # Ensure that the change is persisted on save\n self.record._related_records.append(record)", "title": "" }, { "docid": "7aa5759a7bdff38dd0785933ff2bc223", "score": "0.62540245", "text": "def test_otoroshi_controllers_adminapi_script_api_controller_delete_entity_action(self):\n pass", "title": "" }, { "docid": "eebda17ac8afc37cb19e14234e72ddda", "score": "0.6246779", "text": "def delete(self, model):\n self._isinstance(model)\n DB.session.delete(model)\n DB.session.commit()", "title": "" }, { "docid": "f6e774bd887552100650c5cdfc5e3bfe", "score": "0.6226896", "text": "def delete(self):\n self.world.entities.discard(self)", "title": "" }, { "docid": "e4a20f3817b6c4b23893d63060d78974", "score": "0.62255317", "text": "def delete(self, model):\n self._isinstance(model)\n db.session.delete(model)\n db.session.commit()", "title": "" }, { "docid": "4b3e746b7e9af9f9c206da412fdcb213", "score": "0.618721", "text": "def remove(self, entity):\n self._entities.remove(entity)", "title": "" }, { "docid": "4c3f86549b157f82c242550fc6328337", "score": "0.61868995", "text": "def delete(self, commit = True):\n\n assert 'peer_id' in self\n\n # Remove all related entities\n for obj in \\\n Slices(self.api, self['slice_ids']) + \\\n Keys(self.api, self['key_ids']) + \\\n Persons(self.api, self['person_ids']) + \\\n Nodes(self.api, self['node_ids']) + \\\n Sites(self.api, self['site_ids']):\n assert obj['peer_id'] == self['peer_id']\n obj.delete(commit = False)\n\n # Mark as deleted\n self['deleted'] = True\n self.sync(commit)", "title": "" }, { "docid": "a0cefc8b1df4c158b8fe8ffcc85ddc9c", "score": "0.6183882", "text": "def delete(self, obj):\n return super(StorageObjectManager, self).delete(obj)", "title": "" }, { "docid": "126745662fdbb2cb0b0ee70a1048e9d7", "score": "0.6178013", "text": "def delete_model(self, request, obj):\n obj.delete()", "title": "" }, { "docid": "e2b611a597d4b6d0ad86e7767a9e85d5", "score": "0.6174427", "text": "def delete(self):\n self.deleted = True\n self.save()", "title": "" }, { "docid": "6d2c43a072b137f10ab823f0b17ceefa", "score": "0.6169699", "text": "def delete(cls, namespace_name):\n with common_utils.Namespace(appengine_config.DEFAULT_NAMESPACE_NAME):\n entity = cls.ENTITY.get_by_key_name(cls.key_name(namespace_name))\n if entity is not None:\n entity.delete()", "title": "" }, { "docid": "bc8a1bede12e5cda45023408e4696eea", "score": "0.6168132", "text": "def delete(self, obj=None):\n if (obj is not None):\n self.__session.delete(obj)", "title": "" }, { "docid": "6a5aacc88b70afba4ec6282204d9d4ff", "score": "0.61567605", "text": "def delete(self, obj=None):\n if obj:\n self.__session.delete(obj)", "title": "" }, { "docid": "6a5aacc88b70afba4ec6282204d9d4ff", "score": "0.61567605", "text": "def delete(self, obj=None):\n if obj:\n self.__session.delete(obj)", "title": "" }, { 
"docid": "8b4f9e49d08ef835b4f385bb97c0166b", "score": "0.61399335", "text": "def delete(storage, clazz, item_id):\n factory = clazz.get_factory(storage)\n try:\n instance = factory.load(item_id)\n except sa.orm.exc.NoResultFound:\n raise NotFound()\n storage.delete(instance)", "title": "" }, { "docid": "417e033d41e56970003b35c0cd75b992", "score": "0.6125982", "text": "def delete_obj(obj:Base):\n db_session.delete(obj)\n db_session.commit()", "title": "" }, { "docid": "5c2057a9db3705ff11b676087b2246b3", "score": "0.6122116", "text": "def delete(obj):\n\n g.session = db.session\n g.session.delete(obj)\n g.session.flush()\n g.session.commit()", "title": "" }, { "docid": "0b40621c83fecc6b0a9f7355447b6122", "score": "0.6107423", "text": "def delete_article(article: Article) -> None:\n db.session.delete(article)\n db.session.commit()", "title": "" }, { "docid": "2e75b5c6788ed051d3244ad9467b4ace", "score": "0.61049235", "text": "def delete(self, obj=None):\n if obj is not None:\n self.__session.delete(obj)", "title": "" }, { "docid": "2e75b5c6788ed051d3244ad9467b4ace", "score": "0.61049235", "text": "def delete(self, obj=None):\n if obj is not None:\n self.__session.delete(obj)", "title": "" }, { "docid": "34f0fa075ec5b1e0040cf023c080c5f1", "score": "0.6099365", "text": "def delete(self, obj=None):\n session = self.__session\n if obj is not None:\n session.delete(obj)", "title": "" }, { "docid": "03c6f9682b4952a6fb4978740f9ae964", "score": "0.6079834", "text": "def delete_entity(self, endpoint):\n response = self._make_request(\n 'DELETE',\n self._get_full_endpoint(endpoint)\n )\n\n return response", "title": "" }, { "docid": "ad2d69a6f8a6e7723f60ddb4d62b2698", "score": "0.60714453", "text": "def test_can_delete_entity(area, entity):\n area.add_entity(entity)\n deleted_entity = area.delete_entity(entity)\n assert deleted_entity.curr_area is None\n assert area.entity_dict[(deleted_entity.x, deleted_entity.y)] == []", "title": "" }, { "docid": "82609d876e90516038de4add07869a11", "score": "0.60676605", "text": "def delete(self, obj, model=None):\n if isinstance(obj, int):\n if model in self._dct:\n if obj in self._dct[model]:\n ret = self._dct[model][obj]\n self._dct[model][obj] = None\n return ret\n else:\n cls = obj.__class__\n if cls in self._dct:\n if obj.id in self._dct[cls]:\n ret = self._dct[cls][obj.id]\n self._dct[cls][obj.id] = None\n return ret\n return None", "title": "" }, { "docid": "d45b6c8e223b40fa2ff577d07658e53e", "score": "0.60658056", "text": "def delete_record(self, record):", "title": "" }, { "docid": "e91f9b06661d350c6ea8201126645ed2", "score": "0.6034162", "text": "def delete(self):\n self.session.delete(self.db_object)\n self.session.commit()", "title": "" }, { "docid": "5c66dfa5c6eee1307306e40f65a852ef", "score": "0.60119164", "text": "def delete(self):\n self._check_write_access()\n if self.is_collection:\n # data_fs.rmtree(self.path)\n data_fs.rmtree(self.path_entity)\n else:\n # data_fs.unlink(self.path)\n data_fs.unlink(self.path_entity)\n self.remove_all_properties(recursive=True)\n self.remove_all_locks(recursive=True)", "title": "" }, { "docid": "d35d05d66e792447cf6b328ed48460c5", "score": "0.600439", "text": "def locked_delete(self):\r\n\r\n if self._cache:\r\n self._cache.delete(self._key_name)\r\n\r\n entity = self._model.get_by_key_name(self._key_name)\r\n if entity is not None:\r\n entity.delete()", "title": "" }, { "docid": "ba19d66a963dd18c93bd357960cec87a", "score": "0.5969407", "text": "def delete(self, id: int) -> bool:\n with self.session() as session:\n 
entity = (\n session.query(self.entity_type)\n .filter(self.entity_type.id == id)\n .first()\n )\n session.delete(entity)\n return True", "title": "" }, { "docid": "f009c445d48591e58dde446ed9d14f9d", "score": "0.5963336", "text": "def delete(self, request, *args, **kwargs):\n self.object = self.get_object()\n success_url = self.get_success_url()\n self.object.delete()\n messages.success(self.request,\n \"Artículo eliminado correctamente\",\n extra_tags='msg')\n return HttpResponseRedirect(success_url)", "title": "" }, { "docid": "78733e37625069250b4d90188b590ce9", "score": "0.5959138", "text": "def delete(self, obj=None):\n if obj is not None:\n key = \"{}.{}\".format(type(obj).__name__, obj.id)\n try:\n del self.__objects[key]\n except KeyError:\n pass", "title": "" }, { "docid": "14bbbeb3ed612486f360f61c67e119ca", "score": "0.5939319", "text": "def _delete(self, *criterion):\n count = self._query(*criterion).delete()\n if count == 0:\n raise ModelNotFoundError\n return True", "title": "" }, { "docid": "4071a767be7ab5043b114dd2c078b54f", "score": "0.5936451", "text": "def delete(self):\n self.session.delete(self)\n self.session.commit()", "title": "" }, { "docid": "4b45acd4d1968eeb9a125a840b3c632d", "score": "0.59137404", "text": "def deleteObject(self,obj):\n\t\tself.session.delete(obj)", "title": "" }, { "docid": "054f7face57554d94511ea252ea9df9f", "score": "0.59113884", "text": "def delete(self, req, resp, obj):\n deleted = obj.delete()\n if deleted == 0:\n raise falcon.HTTPConflict('Conflict', 'Resource found but conditions violated')", "title": "" }, { "docid": "048bc7a1b083b164b0582870773f7d38", "score": "0.5908574", "text": "def delete(self, commit=True):\n db.session.delete(self)\n if commit:\n try:\n db.session.commit()\n except Exception:\n db.session.rollback()\n raise", "title": "" }, { "docid": "d619de74ea36df92f66c99982004c020", "score": "0.5906901", "text": "def delete_entity(kind,name):\n\tds = get_client()\n\tID = 'index{}'.format(name)\n\tkey = ds.key(kind,ID)\n\tds.delete(key)\n\tpass", "title": "" }, { "docid": "f9f80f9cce4a83da2cda3e253eb2d8c8", "score": "0.5906033", "text": "def delete(self,trulyDelete=False):\n if not trulyDelete:\n self.sequence = ''\n self.validation = None\n self.save()\n else: super(type(self),self).delete()", "title": "" }, { "docid": "47f2b48e021d6ead011372b751790bc2", "score": "0.59004825", "text": "def delete(self, model, uid):\n obj = UID.objects.get(uid=uid, content_type=self._ct(model))\n with transaction.atomic():\n obj.content_object.delete()\n obj.delete()", "title": "" }, { "docid": "76232b954a0c5d668a3c716671b7107c", "score": "0.58994335", "text": "def delete(self, request, *args, **kwargs):\n self.object = self.get_object()\n success_url = self.get_success_url()\n self.object.status = False\n self.object.save()\n return HttpResponseRedirect(success_url)", "title": "" }, { "docid": "59ca262d8e2981794760e16ce9eb46d8", "score": "0.58938384", "text": "def delete_object(self, obj):\n return self.object_manager.delete(obj)", "title": "" }, { "docid": "941859fbe4ab69177e53b6cfbdf135e4", "score": "0.58882535", "text": "def delete(self, commit=True):\r\n db.session.delete(self)\r\n return commit and db.session.commit()", "title": "" }, { "docid": "7c57af4516aa93658771774a8a9bfa1c", "score": "0.58809537", "text": "def delete(self, request, pk, format=None):\n resource = self.get_object(pk)\n resource.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)", "title": "" }, { "docid": "9562a2be34e672221b0e7a1a52cb4d27", "score": 
"0.5873323", "text": "def delete_entity(project_id, entity_type_id, entity_value):\n import dialogflow_v2 as dialogflow\n entity_types_client = dialogflow.EntityTypesClient()\n \n entity_type_path = entity_types_client.entity_type_path(\n project_id, entity_type_id)\n \n entity_types_client.batch_delete_entities(\n entity_type_path, [entity_value])", "title": "" }, { "docid": "89e7b0a50e1675c3b770937bcedd8ff4", "score": "0.5869494", "text": "def delete(self):\n return self.manager.delete(self.id)", "title": "" }, { "docid": "d6cfd470f9230f15d63b4b4fa366631f", "score": "0.58689046", "text": "def delete(self):\n session.delete(self)\n session.flush()\n session.commit()", "title": "" }, { "docid": "d499379646cfd20972b4965d9aaf41f6", "score": "0.5868597", "text": "def delete(self):\n self.logger.info(\"Deleting %r\", self.name)\n db.session.delete(self)\n try:\n db.session.commit()\n except InvalidRequestError:\n db.session.rollback()", "title": "" }, { "docid": "6eb3584688fef299f1e8f89edbe94301", "score": "0.5868032", "text": "def delete(self, request, *args, **kwargs):\n self.object = self.get_object()\n success_url = self.get_success_url()\n self.object.delete()\n return HttpResponseRedirect(success_url)", "title": "" }, { "docid": "191def5ec5d98a9e78473ece48080770", "score": "0.5845162", "text": "def delete(self):\n db.session.delete(self)\n db.session.commit()", "title": "" }, { "docid": "191def5ec5d98a9e78473ece48080770", "score": "0.5845162", "text": "def delete(self):\n db.session.delete(self)\n db.session.commit()", "title": "" }, { "docid": "191def5ec5d98a9e78473ece48080770", "score": "0.5845162", "text": "def delete(self):\n db.session.delete(self)\n db.session.commit()", "title": "" }, { "docid": "191def5ec5d98a9e78473ece48080770", "score": "0.5845162", "text": "def delete(self):\n db.session.delete(self)\n db.session.commit()", "title": "" }, { "docid": "191def5ec5d98a9e78473ece48080770", "score": "0.5845162", "text": "def delete(self):\n db.session.delete(self)\n db.session.commit()", "title": "" }, { "docid": "191def5ec5d98a9e78473ece48080770", "score": "0.5845162", "text": "def delete(self):\n db.session.delete(self)\n db.session.commit()", "title": "" }, { "docid": "191def5ec5d98a9e78473ece48080770", "score": "0.5845162", "text": "def delete(self):\n db.session.delete(self)\n db.session.commit()", "title": "" }, { "docid": "191def5ec5d98a9e78473ece48080770", "score": "0.5845162", "text": "def delete(self):\n db.session.delete(self)\n db.session.commit()", "title": "" }, { "docid": "191def5ec5d98a9e78473ece48080770", "score": "0.5845162", "text": "def delete(self):\n db.session.delete(self)\n db.session.commit()", "title": "" }, { "docid": "46acf7c31dc16c737a356dc9955eeb1e", "score": "0.5822833", "text": "def delete(self, identifier):\n return self._delete(self.model_class.id == identifier)", "title": "" }, { "docid": "b39ce06852a9b28c620f589263be033b", "score": "0.5819706", "text": "def delete(self):\n models.storage.delete(self)\n models.storage.save()", "title": "" }, { "docid": "baa854ffbd3b1e0700aedfcdd062c18f", "score": "0.5816111", "text": "def delete(self, request, *args, **kwargs):\n try:\n key = kwargs[\"pk\"]\n except KeyError:\n return JsonResponse(\n {\"Error\": \"Not Found\"}, status=status.HTTP_400_BAD_REQUEST\n )\n\n pk = key\n try:\n obj = self.model.objects.get(pk=pk)\n except self.model.DoesNotExist:\n return JsonResponse({}, status=status.HTTP_204_NO_CONTENT)\n\n perm = self.get_perm_module(request, \"DELETE\")\n if perm:\n if not 
request.user.has_perm(f\"{perm}.delete\", obj):\n return JsonResponse(\n {\"error\": \"delete not permitted\"}, status=status.HTTP_403_FORBIDDEN\n )\n\n with transaction.atomic():\n self.pre_delete(obj)\n obj.delete()\n self.post_delete(obj)\n\n return JsonResponse({\"ok\": \"Deleted\"})", "title": "" }, { "docid": "f1a3fe82ae2eb0895c40b5b42e50f44d", "score": "0.58160883", "text": "def delete(self, OBJECT_TYPE):\n\n # Delete if object found and not pending insert\n if self.id and self.type == \"update\":\n # Delete from project data store\n app.updates.delete(self.key)\n self.type = \"delete\"", "title": "" }, { "docid": "971d4706702ee5c8cc58709a88b54937", "score": "0.5807124", "text": "def delete(self, obj=None):\n try:\n del self.__objects[\"{}.{}\".format(type(obj).__name__, obj.id)]\n except (AttributeError, KeyError):\n pass", "title": "" }, { "docid": "84dfa4785f33ec8917a9a7ba41d1364d", "score": "0.57993144", "text": "def delete(self) -> None:\n field_name_in_api_path = self.url_pk_name or f\"{self.__name__.lower()}_id\"\n url = self._client._build_url(\n self.url_detail_name, **{field_name_in_api_path: self.id}\n )\n\n response = self._client._request(\"DELETE\", url)\n if response.status_code != requests.codes.no_content:\n raise NotFoundError(\"Could not delete object\", response=response)\n # reset the id to None to feedback that the object is deleted in KE-chain\n self.id = None\n return None", "title": "" }, { "docid": "548c6ab6ae78fdb39aa3f1107f7a4600", "score": "0.5791855", "text": "def save(self, entity):\n raise NotImplementedError", "title": "" }, { "docid": "19906dc2d39c88aab007acc31b54d595", "score": "0.57857734", "text": "def delete(self, *args, **kwargs):\n pass", "title": "" }, { "docid": "7a85ad9d3f1f5645a77e4fd22d442951", "score": "0.5777411", "text": "def post_delete( sender, instance, *args, **kwargs):\n key = get_model_key(instance)\n \n if key not in registry.keys():\n return None\n \n document = registry[key](instance)\n connection.delete([document,])", "title": "" }, { "docid": "05295236954ff03960232f83496e7b70", "score": "0.57739586", "text": "def process_delete(self, form):\r\n form.instance.delete()", "title": "" }, { "docid": "05295236954ff03960232f83496e7b70", "score": "0.57739586", "text": "def process_delete(self, form):\r\n form.instance.delete()", "title": "" }, { "docid": "3353f88c307644f08c3e062346d3b649", "score": "0.5771151", "text": "def test_model_soft_delete_succeeds(self, create_user, create_vendor):\n\n vendor, _ = create_vendor\n\n vendor.delete()\n assert vendor.deleted is True", "title": "" }, { "docid": "0eae893a5de7fea5ae71b81a1af56759", "score": "0.5758941", "text": "async def remove(self, resource: Resource) -> None:\n Model = self.get_model(resource)\n with Session(self.engine) as session:\n session.exec(delete(Model)) # type: ignore\n session.commit()", "title": "" }, { "docid": "10821203e9b9c19eab377168be3667cb", "score": "0.5754851", "text": "def delete(self, filter) -> bool:\n instance = self.get_object(filter=filter)\n\n if instance:\n self.db.session.delete(instance)\n self.db.session.commit()\n return True\n\n return False", "title": "" }, { "docid": "1adfd616bb25f8036b2914621adc8671", "score": "0.57532763", "text": "def delete_entry(model: typing.Type[SqlAlchemyModel], **kwargs) -> None:\n instance = model.query.filter_by(**kwargs[\"query\"]).first_or_404()\n db.session.delete(instance)\n db.session.commit()", "title": "" }, { "docid": "c161a2600c7884d27970b8dbdce8fe93", "score": "0.5750189", "text": "def delete(self):\n k = 
\"{}.{}\".format(type(self).__name__, self.id)\n del models.storage.__objects[k]", "title": "" }, { "docid": "80eb6f2f669d058366b35a2d890ae6b1", "score": "0.57487905", "text": "def delete( request, repo, org, cid, eid, confirm=False ):\n try:\n entity = Entity.from_json(Entity.entity_path(request,repo,org,cid,eid))\n except:\n raise Http404\n collection = Collection.from_json(entity.parent_path)\n if collection.locked():\n messages.error(request, WEBUI_MESSAGES['VIEWS_COLL_LOCKED'].format(collection.id))\n return HttpResponseRedirect( reverse('webui-entity', args=[repo,org,cid,eid]) )\n if entity.locked():\n messages.error(request, WEBUI_MESSAGES['VIEWS_ENT_LOCKED'])\n return HttpResponseRedirect( reverse('webui-entity', args=[repo,org,cid,eid]) )\n git_name = request.session.get('git_name')\n git_mail = request.session.get('git_mail')\n if not git_name and git_mail:\n messages.error(request, WEBUI_MESSAGES['LOGIN_REQUIRED'])\n #\n if request.method == 'POST':\n form = DeleteEntityForm(request.POST)\n if form.is_valid() and form.cleaned_data['confirmed']:\n collection_delete_entity(request,\n git_name, git_mail,\n collection, entity,\n settings.AGENT)\n return HttpResponseRedirect( reverse('webui-collection', args=[repo,org,cid]) )\n else:\n form = DeleteEntityForm()\n return render_to_response(\n 'webui/entities/delete.html',\n {'repo': entity.repo,\n 'org': entity.org,\n 'cid': entity.cid,\n 'eid': entity.eid,\n 'entity': entity,\n 'form': form,\n },\n context_instance=RequestContext(request, processors=[])\n )", "title": "" }, { "docid": "a460d092de0d612d0ec3fafa9163ad1b", "score": "0.5745028", "text": "def delete(self, *args, **kwargs):\n return", "title": "" }, { "docid": "258263608093db795a68c41caf6e3709", "score": "0.57435393", "text": "def delete(self):\n try:\n document = self.database[self.id]\n except KeyError:\n document = None\n if document:\n document.delete()", "title": "" }, { "docid": "a366cef6eadcb4e0c3674bc1ed255950", "score": "0.5736239", "text": "def delete(self):\n self._pre_action_check('delete')\n self.cypher(\"MATCH (self) WHERE id(self)={self} \"\n \"OPTIONAL MATCH (self)-[r]-()\"\n \" DELETE r, self\")\n delattr(self, 'id')\n self.deleted = True\n return True", "title": "" }, { "docid": "b89a0d7244efa73cc1576c094dfa85a6", "score": "0.57284003", "text": "async def delete(self):\n if self.has_backwards:\n await OnDeleteManager().handle_backwards([self])\n\n await self.objects.internal_query.delete_one(_id=self._id)\n\n # Remove document id from the ODM object\n self._id = None", "title": "" }, { "docid": "944756c06b0b1cc906621aff7d61c056", "score": "0.57283634", "text": "def delete(self, commit=True):\n db.session.delete(self)\n return commit and db.session.commit()", "title": "" }, { "docid": "944756c06b0b1cc906621aff7d61c056", "score": "0.57283634", "text": "def delete(self, commit=True):\n db.session.delete(self)\n return commit and db.session.commit()", "title": "" }, { "docid": "944756c06b0b1cc906621aff7d61c056", "score": "0.57283634", "text": "def delete(self, commit=True):\n db.session.delete(self)\n return commit and db.session.commit()", "title": "" }, { "docid": "944756c06b0b1cc906621aff7d61c056", "score": "0.57283634", "text": "def delete(self, commit=True):\n db.session.delete(self)\n return commit and db.session.commit()", "title": "" } ]
a310024ff88502d4a0f995bd83129ca2
Gets the ac of this ResultUniprotEntries.
[ { "docid": "da54b5d5690d45cfaab31f193e2a7fb2", "score": "0.6681408", "text": "def ac(self) -> str:\n return self._ac", "title": "" } ]
[ { "docid": "51c9ab7c299c1a8bb422cc0ec0b90cc8", "score": "0.61584806", "text": "def getAcr(self):\n pass", "title": "" }, { "docid": "22f9792a7026c3202de799bbe14f5e70", "score": "0.5821461", "text": "def ac(self):\n return np.array(self['ac'], dtype=np.float32) / 1000", "title": "" }, { "docid": "52af7de3094a07adc961e6b4c2868c62", "score": "0.5767277", "text": "def acls(self) -> Sequence['outputs.GetContainerAclResult']:\n return pulumi.get(self, \"acls\")", "title": "" }, { "docid": "bf39c3ca6058a3fb66108b6be9fcea76", "score": "0.5702268", "text": "def getA(self):\n return _AAL.EllipticCurve_getA(self)", "title": "" }, { "docid": "5844a610e9ba151f538835eda52f80d3", "score": "0.565121", "text": "def aucorp(self):\n return self._aucorp", "title": "" }, { "docid": "ab71d51c45f719b148469df6d72f1c7e", "score": "0.56490254", "text": "def getA(self):\n return _AAL.IntegerBinom_getA(self)", "title": "" }, { "docid": "be421fbf13f423d6fcf7417bf038a606", "score": "0.56089306", "text": "def getA(self):\n return _AAL.IntegerBinomEllipticCurve_getA(self)", "title": "" }, { "docid": "41339b8ab9df5ccb9de8fb8342f717da", "score": "0.55437756", "text": "def abund(self):\n return self._abund", "title": "" }, { "docid": "1d36e49ee837bdaca0087d0e5502621d", "score": "0.5480426", "text": "def _get_alphabet(self):\n \n return self._alphabet[:]", "title": "" }, { "docid": "b4a71c6323976762459d4f19bd4e38fb", "score": "0.539866", "text": "def AcName(self):\n if self.force_auto_sync:\n self.get('AcName')\n return self._AcName", "title": "" }, { "docid": "de2ffb5c29f767966443d9ef990de51a", "score": "0.5374227", "text": "def get_a(self): \n return self._a", "title": "" }, { "docid": "3f5bcbd78cd0ec45d5a4334605aeb661", "score": "0.5333704", "text": "def get_a_i(self):\n return self._a_i", "title": "" }, { "docid": "bf2d95d64608950a43a1cb5e31ac904d", "score": "0.53223056", "text": "def get_authority_record(self):\n return self.assertion.authority_record", "title": "" }, { "docid": "5e8a9d005f5af6b390fb10c6d2b17794", "score": "0.52816164", "text": "def getEntry(self):\n return self.entry", "title": "" }, { "docid": "287d8c566c98c4cdb7412e5b7099a4bd", "score": "0.52755684", "text": "def list_accoms(self):\n\n return self.api.list_accoms()", "title": "" }, { "docid": "32d3fc88bbe5728997e33bdb33029c3c", "score": "0.5259739", "text": "def get_authority(self):\n return self.get_authority_record().authority", "title": "" }, { "docid": "c850a4b4721e1bfed55e05416581bc26", "score": "0.5251086", "text": "def getAcquirerID( self ):\n\t\treturn self.acquirerID", "title": "" }, { "docid": "25936169861d2cef2fb63fba587afc37", "score": "0.5232262", "text": "def get_alphabet(self):\r\n\r\n # Sort the dictionary by the keys. 
Keeping the key/value pairs here\r\n # for debugging purposes\r\n sorted_alphabet = [(k,self.dictionary[k]) for k in sorted(\r\n self.dictionary, key=self.dictionary.get, reverse=True)]\r\n\r\n # Generate the final alphabet string\r\n count = 0\r\n final_alphabet = ''\r\n for item in sorted_alphabet:\r\n # Bail out if we have grabbed the N most common items so far\r\n if count >= self.alphabet_size:\r\n return final_alphabet\r\n\r\n # Append item to our final alphabet\r\n final_alphabet += item[0]\r\n count += 1\r\n\r\n return final_alphabet", "title": "" }, { "docid": "3ccffe318652e3c5e4818d5ee051b474", "score": "0.5228689", "text": "def anticonsensus(self):\n return self.counts.anticonsensus", "title": "" }, { "docid": "2e675ba6566d3ae7aba89f4d54543bca", "score": "0.5223276", "text": "def getA(self):\n return self.A", "title": "" }, { "docid": "d6faff0541ae75a000dbcbb0f330ba33", "score": "0.52153236", "text": "def A(self):\n return self._A", "title": "" }, { "docid": "2c6f5b0ad5d9ea6a042be0c192e5c89e", "score": "0.52053285", "text": "def a(self):\n return self._a", "title": "" }, { "docid": "2c6f5b0ad5d9ea6a042be0c192e5c89e", "score": "0.52053285", "text": "def a(self):\n return self._a", "title": "" }, { "docid": "cee1e6911646dec1f274dd87b62906e9", "score": "0.5162884", "text": "def au(self):\n return self._au", "title": "" }, { "docid": "0c8612549d9a2b74a254a2e300ea577f", "score": "0.51561165", "text": "def ba(self):\n\t\treturn self._ba", "title": "" }, { "docid": "4ad471523ba8b561784fcecae4fcda6c", "score": "0.51416945", "text": "def actividad(self):\n return self._actividad", "title": "" }, { "docid": "e5add847132a72da81bd71f16b3d0986", "score": "0.51413274", "text": "def _get_alphabet(self, position):\n \n position = makeNonnegInt(position)\n \n if position in self._alphabets:\n result = self._alphabets[position]\n else:\n result = self._alphabets[self.DEFAULT_KEY]\n #end if\n \n return result", "title": "" }, { "docid": "b0bd2581c20f5f60474c3f24daa3735e", "score": "0.51406336", "text": "def ao(self):\n return self._ao", "title": "" }, { "docid": "7d83fc9bcefe345b4010da3e40b60f0d", "score": "0.5100031", "text": "def getA0(self):\n return _AAL.IntegerBinomModulePolynom_getA0(self)", "title": "" }, { "docid": "10dcc4beae3097069820b92cac54552c", "score": "0.5089664", "text": "def link_acls(self):\n return {0: [1]} # Host 0 'acls_in': [1]", "title": "" }, { "docid": "10dcc4beae3097069820b92cac54552c", "score": "0.5089664", "text": "def link_acls(self):\n return {0: [1]} # Host 0 'acls_in': [1]", "title": "" }, { "docid": "10dcc4beae3097069820b92cac54552c", "score": "0.5089664", "text": "def link_acls(self):\n return {0: [1]} # Host 0 'acls_in': [1]", "title": "" }, { "docid": "10dcc4beae3097069820b92cac54552c", "score": "0.5089664", "text": "def link_acls(self):\n return {0: [1]} # Host 0 'acls_in': [1]", "title": "" }, { "docid": "fe259b0d035b0025289750303b8faf26", "score": "0.5082638", "text": "def get_alpha(self):\n\n return self._alpha", "title": "" }, { "docid": "6e37235d1eb8f1b09021c9c59ced651d", "score": "0.50821316", "text": "def combo_act_kasha(self):\n return self._combo_act_kasha", "title": "" }, { "docid": "4f7e56a7a3c9b5360e1e57ff663fac17", "score": "0.5074189", "text": "def acl(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"acl\")", "title": "" }, { "docid": "b0ca9981badf94e725c0e109e6e76865", "score": "0.50589514", "text": "def Ac(self):\n\n return np.pi * self.d * self.te", "title": "" }, { "docid": "b18a7dd75b0021f91903520b8c204eac", "score": 
"0.5058001", "text": "def a(self):\r\n return self._a", "title": "" }, { "docid": "d27963181e568c408029d16720c4b416", "score": "0.50572765", "text": "def valor_tac(self):\n return self._valor_tac", "title": "" }, { "docid": "9e5e5725dbfe85ee47f865c1aee9e966", "score": "0.5051251", "text": "def acltotpktsdenied(self) :\n\t\ttry :\n\t\t\treturn self._acltotpktsdenied\n\t\texcept Exception as e:\n\t\t\traise e", "title": "" }, { "docid": "678b1078b3f8afc1aac2ec3636f8543d", "score": "0.5050967", "text": "def A(self):\n return self.__A", "title": "" }, { "docid": "678b1078b3f8afc1aac2ec3636f8543d", "score": "0.5050967", "text": "def A(self):\n return self.__A", "title": "" }, { "docid": "678b1078b3f8afc1aac2ec3636f8543d", "score": "0.5050967", "text": "def A(self):\n return self.__A", "title": "" }, { "docid": "678b1078b3f8afc1aac2ec3636f8543d", "score": "0.5050967", "text": "def A(self):\n return self.__A", "title": "" }, { "docid": "678b1078b3f8afc1aac2ec3636f8543d", "score": "0.5050967", "text": "def A(self):\n return self.__A", "title": "" }, { "docid": "b102b58561963b4eb32c95131a55dee8", "score": "0.5029651", "text": "def digito_conta(self):\n return self._digito_conta", "title": "" }, { "docid": "e59e32945ddbbc12effc6342b6b33c06", "score": "0.50225717", "text": "def getA1(self):\n return _AAL.PolynomGF3_mY_9_getA1(self)", "title": "" }, { "docid": "d57e97628de86bc4f75224588054bbf0", "score": "0.50188625", "text": "def authority(self):\n return self.assertion.authority_record.authority", "title": "" }, { "docid": "1c58bdb25f2fc2160b556695a1b7153b", "score": "0.50117743", "text": "def getA(self):\n return _openmoc.Plane_getA(self)", "title": "" }, { "docid": "f16726249ab4da8d19cdc319660be220", "score": "0.5008799", "text": "def acls(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['DashboardPermissionsAclArgs']]]]:\n return pulumi.get(self, \"acls\")", "title": "" }, { "docid": "3ddfddbf7ca7ff6dfa84ce2eaf847431", "score": "0.5005136", "text": "def getA1(self):\n return _AAL.IntegerBinomModulePolynom_getA1(self)", "title": "" }, { "docid": "46eb470e34491c68769d7033d1a0053b", "score": "0.5002979", "text": "def Account(self):\n return self._Account", "title": "" }, { "docid": "9709e8c71f7d513e6a981b79ea1805f4", "score": "0.4990479", "text": "def getA0(self):\n return _AAL.PolynomGF3_mY_9_getA0(self)", "title": "" }, { "docid": "2592ad0067e7180072b8d227854b7198", "score": "0.49846667", "text": "def ach_credit_transfer(self):\n return self._ach_credit_transfer", "title": "" }, { "docid": "88462de763e2edba8b595dc8092422e6", "score": "0.4982841", "text": "def get_CA_or(res:Residue)->Atom:\n atoms:List[Atom] =list( res.get_atoms() )\n alphacarbons = list(filter(lambda atom: True if atom.get_name() == 'CA' else False ,atoms))\n return atoms[0] if len(alphacarbons) == 0 else alphacarbons[0]", "title": "" }, { "docid": "8bb6a7498093b7005d515f7050b18237", "score": "0.49723005", "text": "def acr_along(self):\n return self.acr2trs[:, :, 0:1]", "title": "" }, { "docid": "3c311c23fc3a2787fe76c3e592d64f14", "score": "0.49639937", "text": "def getA1(self):\n return _AAL.PolynomGF3_m_6_getA1(self)", "title": "" }, { "docid": "844e3fcacf1c990b2b9a33063b73f8b2", "score": "0.49562806", "text": "def aic(self):\n return self.info_criteria(\"aic\")", "title": "" }, { "docid": "8d93538abe979965200dc6b2b97a4993", "score": "0.49525833", "text": "def getA0(self):\n return _AAL.PolynomGF3_m_6_getA0(self)", "title": "" }, { "docid": "5146d6fd2587871b46599df8af90ea0a", "score": "0.4944114", "text": "def 
alt_three_letter_amino_acid_residue(self):\n if self.alt_amino_acid_residue == '*':\n return self.alt_amino_acid_residue\n else:\n return Codon.one_to_three_letter_amino_acid_residue(self.alt_amino_acid_residue)", "title": "" }, { "docid": "db8b368733ae1a5b3729cf3c60883192", "score": "0.49295768", "text": "def get_account(self, public_key):\n return self.ahd_to_account[public_key.export_key(format='PEM')]", "title": "" }, { "docid": "ede8d9851132e5f7ab90d73a8629e814", "score": "0.4922281", "text": "def akamai(self) -> Optional[pulumi.Input['AkamaiAccessControlArgs']]:\n return pulumi.get(self, \"akamai\")", "title": "" }, { "docid": "f18e90cd667f1e4b038191de4cd0d1d1", "score": "0.4916349", "text": "def getA3(self):\n return _AAL.SuperSingularEllipticCurve_getA3(self)", "title": "" }, { "docid": "54b1326d1930959a1128c83708602feb", "score": "0.49145445", "text": "def auinitm(self):\n return self._auinitm", "title": "" }, { "docid": "d15f0d9faf40caad030b3618e82c4e16", "score": "0.49122438", "text": "def getacls(self, key, user=None):\n try:\n return self._getacls(key, user)\n except Exception as e:\n self.logging.debug(\"Failed to getacls: {}\".format(e))\n return []", "title": "" }, { "docid": "15a591b856f27f47aa5b3a89df981fbe", "score": "0.49048653", "text": "def getA6(self):\n return _AAL.SuperSingularEllipticCurve_getA6(self)", "title": "" }, { "docid": "3d0dd31efb9c907444be94de2d957039", "score": "0.48998165", "text": "def _get_mac_entry(self):\n return self.__mac_entry", "title": "" }, { "docid": "3d0dd31efb9c907444be94de2d957039", "score": "0.48998165", "text": "def _get_mac_entry(self):\n return self.__mac_entry", "title": "" }, { "docid": "3d0dd31efb9c907444be94de2d957039", "score": "0.48998165", "text": "def _get_mac_entry(self):\n return self.__mac_entry", "title": "" }, { "docid": "3d0dd31efb9c907444be94de2d957039", "score": "0.48998165", "text": "def _get_mac_entry(self):\n return self.__mac_entry", "title": "" }, { "docid": "7e1b19fbb92191ffb386ecbf2f4daa52", "score": "0.48959345", "text": "def handle(self):\n return self._acg", "title": "" }, { "docid": "571cc14a7d01acb142d097a4d09d40a6", "score": "0.48940703", "text": "def alpha(self):\n return self._alpha", "title": "" }, { "docid": "571cc14a7d01acb142d097a4d09d40a6", "score": "0.48940703", "text": "def alpha(self):\n return self._alpha", "title": "" }, { "docid": "49e31fa75a9041f47b806c45be5c3f24", "score": "0.4869025", "text": "def getAccession(self, eusolID):\r\n return self.EusolToAccession[eusolID]", "title": "" }, { "docid": "881a96a6e763b838eb2c2e7c3be54f73", "score": "0.48673755", "text": "def get_atom(self):\n\n return self._atom", "title": "" }, { "docid": "b963b8fee893393a699f746d867feb2f", "score": "0.48591632", "text": "def aic(self):\n return aic(self.llf, self.nobs_effective, self.df_model)", "title": "" }, { "docid": "b06a8b0bbf3d5e306ce1683560eb71de", "score": "0.48583648", "text": "def getA6(self):\n return _AAL.EllipticCurveGF3_getA6(self)", "title": "" }, { "docid": "c52618f9282865920fa5f5963c284040", "score": "0.48534077", "text": "def alignment(self):\n return self._alignment", "title": "" }, { "docid": "c3293172928e8753e87830963d467580", "score": "0.4835088", "text": "def getA6(self):\n return _AAL.NonSuperSingularEllipticCurve_getA6(self)", "title": "" }, { "docid": "61cad2487267ddc186e93cc9749354e6", "score": "0.4831762", "text": "def getEntriesLastAccount(self):\n id = self.getLastEntryId()\n if id:\n return self[id].getAccount()\n return None", "title": "" }, { "docid": 
"c95b7996da670fdee48de5079bf832a1", "score": "0.48298016", "text": "def combo_act_oka(self):\n return self._combo_act_oka", "title": "" }, { "docid": "a6dc83233041054e4d17f1e37f2688a1", "score": "0.4828932", "text": "def codigo_autorizacao(self):\n return self._codigo_autorizacao", "title": "" }, { "docid": "0006c8fcafcc5291083e5e3497695cd8", "score": "0.48114547", "text": "def link_acls():\n return {}", "title": "" }, { "docid": "31c3ecebf07d11e97b7bb5c0bad8f8a5", "score": "0.48076344", "text": "def acquisitions(self):\n return tuple(self._acquisitions)", "title": "" }, { "docid": "d28c39674855171891f8166b09fbd4f6", "score": "0.4804125", "text": "def get_authority(self):\n return # string", "title": "" }, { "docid": "38a05c355270cecd9eae2068b4161a1e", "score": "0.47978193", "text": "def ca(self) -> str:\n return self._ca", "title": "" }, { "docid": "75889cedcdbe059b784532cd43603112", "score": "0.47965685", "text": "def get_entry(self, idx):\n return self._entries[idx]", "title": "" }, { "docid": "576d1b0cb96bc2bba2f8648ceb8a3ee9", "score": "0.47882158", "text": "def auinit(self):\n return self._auinit", "title": "" }, { "docid": "b53d3925be55e4043d21f7d320570abd", "score": "0.47861326", "text": "def get_acres(fc):\n\n # Add ACRES field to analysis area - check if exists\n field_list = [field.name for field in arcpy.ListFields(fc)\n if field.name.upper() == \"ACRES\"]\n\n # If ACRES/Acres/acres exists in table, flag for calculation instead\n if field_list:\n acre_field = field_list[0] # select the 'acres' variant\n else:\n arcpy.AddField_management(fc, \"ACRES\", \"DOUBLE\", 15, 2)\n acre_field = \"ACRES\"\n\n arcpy.CalculateField_management(fc,\n acre_field,\n \"!shape.area@ACRES!\",\n \"PYTHON_9.3\")\n acres = sum(row[0] for row in arcpy.da.SearchCursor(fc, acre_field))\n return acre_field, acres", "title": "" }, { "docid": "114c8a2e071b493e8368cf54b8b997ec", "score": "0.47617564", "text": "def get_account_alias(self):\r\n return self.get_response('ListAccountAliases', {},\r\n list_marker='AccountAliases')", "title": "" }, { "docid": "4b745863467fffb002fedc19af4d960d", "score": "0.4760866", "text": "def get_accom_data(self, accomID):\n return self.api.get_accom_data(accomID)", "title": "" }, { "docid": "5ae84c5704ab31717505c66f5c4c65ec", "score": "0.4760145", "text": "def aicc(self):\n return aicc(self.llf, self.nobs_effective, self.df_model)", "title": "" }, { "docid": "d5ebe480c642127036386ed270da2de6", "score": "0.47513145", "text": "def Enrollment(self):\n return self._Enrollment", "title": "" }, { "docid": "cd85f0d0d60ec89eeb4867e1937facbc", "score": "0.47433048", "text": "def aln_str(self):\n return self._aln_str", "title": "" }, { "docid": "51ff10725e2c8cdcd4699868b65c015a", "score": "0.4743286", "text": "def get_account(self):\n return self._account", "title": "" }, { "docid": "51ff10725e2c8cdcd4699868b65c015a", "score": "0.4743286", "text": "def get_account(self):\n return self._account", "title": "" }, { "docid": "c779b3ab035654671f81a0e978a1d922", "score": "0.47432408", "text": "def get_alpha(self):\n return self.__get__('alpha.csv')", "title": "" }, { "docid": "8d90d89428da5437e49a0308201a9fd1", "score": "0.47348994", "text": "def EnrollmentTerm(self):\n return self._EnrollmentTerm", "title": "" }, { "docid": "c08fa55e08a43f0a6eb832a90886c556", "score": "0.4734518", "text": "def data_autorizacao(self):\n return self._data_autorizacao", "title": "" }, { "docid": "94e351375895b18fe0e1acd48067a858", "score": "0.4732329", "text": "def alpha(self):\n\n return ZZ(1)/ZZ(2) * 
(ZZ(1)/ZZ(2) - ZZ(1)/self._n)", "title": "" } ]
c094f9d2a50f484f869a9927fcb66753
Return a postorder list of all items in this binary search tree.
[ { "docid": "bd0a7e7e008793639ed841a8839d03af", "score": "0.84094524", "text": "def items_post_order(self):\n items = []\n if not self.is_empty():\n # Traverse tree post-order from root, appending each node's item\n self._traverse_post_order_iterative(self.root, items.append)\n # Return post-order list of all items in tree\n return items", "title": "" } ]
[ { "docid": "811485f014d979a705648958971be696", "score": "0.7364006", "text": "def post_order(self):\n output = []\n def walk(node):\n if not node:\n return\n walk(node.left)\n walk(node.right)\n output.append(node.value)\n walk(self.root)\n return output", "title": "" }, { "docid": "9ebe602978d835b8be04f1ff25953046", "score": "0.7257629", "text": "def items_in_order(self):\n items = []\n if not self.is_empty():\n # Traverse tree in-order from root, appending each node's item\n self._traverse_in_order_iterative(self.root, items.append)\n # Return in-order list of all items in tree\n return items", "title": "" }, { "docid": "f7a70c9d1554750f28373edff6a3961b", "score": "0.68784523", "text": "def items(self) -> list:\n if self.is_empty():\n return []\n else:\n return self._left.items() + [self._root] + self._right.items()", "title": "" }, { "docid": "22597515ad9fa3f5d365bf8a1cec4aa7", "score": "0.6866134", "text": "def postorder(self):\n a = self.postorder_aux(self._root)\n return a", "title": "" }, { "docid": "697fdf2953355c49facc94b531f9d0f3", "score": "0.6854198", "text": "def postorder(self):\n if not self.is_empty():\n for p in self._subtree_preorder(self.root()): # start the recursion\n yield p", "title": "" }, { "docid": "4ffe1fa9db6bcb91a882170c727e0458", "score": "0.68374765", "text": "def post_order(self):\n result=[]\n def walk(current):\n if current:\n if current.left:\n walk(current.left)\n if current.right:\n walk(current.right)\n result.append(str(current.value))\n walk(self.root)\n return result", "title": "" }, { "docid": "f39f8ecc5c320c9b7c6841f5fdb933d7", "score": "0.66971284", "text": "def post_order(self, left_child=None, right_child=None):\n values = []\n\n def walk(curr_node):\n if not curr_node:\n return\n \n # Check for a left-child node\n walk(curr_node.left_child)\n # Check for a right-child node\n walk(curr_node.right_child)\n # Deal with the root\n values.append(curr_node.value)\n\n walk(self.root_node)\n\n return values", "title": "" }, { "docid": "96c15879ebe502abe69646c981222053", "score": "0.66852343", "text": "def items_level_order(self):\n items = []\n if not self.is_empty():\n # Traverse tree level-order from root, appending each node's item\n self._traverse_level_order_iterative(self.root, items.append)\n # Return level-order list of all items in tree\n return items", "title": "" }, { "docid": "2c6288d3354209c9ed7ff2e6c2c6f835", "score": "0.6565388", "text": "def items_pre_order(self):\n items = []\n if not self.is_empty():\n # Traverse tree pre-order from root, appending each node's item\n self._traverse_pre_order_iterative(self.root, items.append)\n # Return pre-order list of all items in tree\n return items", "title": "" }, { "docid": "cc331dab6b00a1b6b5bca3c741cca681", "score": "0.64852566", "text": "def post_order(tree: Node, result=None) -> list:\n if result is None:\n result = list()\n if tree is None:\n return result\n\n if tree.left is not None:\n post_order(tree.left, result)\n if tree.right is not None:\n post_order(tree.right, result)\n result.append(tree.data)\n return result", "title": "" }, { "docid": "b78c6e374f5f10108c6443ed3d6341e8", "score": "0.6415109", "text": "def postorderTraversal(root: TreeNode) -> List[int]:\n if not root:\n return [None] # Can use [] as an option to omit None\n return postorderTraversal(root.left) + postorderTraversal(root.right) + [root.val]", "title": "" }, { "docid": "1260a6e3ddc04202a5a8b34e084ca3e1", "score": "0.6327749", "text": "def postorder(self, node):\n if node:\n yield from 
self.postorder(node.left)\n yield from self.postorder(node.right)\n yield node\n else:\n return", "title": "" }, { "docid": "555c75766606f9585213a7375944e3f5", "score": "0.6303942", "text": "def postorderTraversal(self, root: TreeNode) -> List[int]:\n\n if not root:\n return []\n\n stack = [[root, False, False]]\n visited_values = []\n\n while stack:\n curr_node, left_visited, right_visited = (\n stack[-1][0],\n stack[-1][1],\n stack[-1][2],\n )\n\n if left_visited and right_visited:\n visited_values.append(curr_node.val)\n stack.pop()\n continue\n\n if not right_visited:\n stack[-1][2] = True\n if not left_visited:\n stack[-1][1] = True\n if curr_node.right:\n stack.append([curr_node.right, False, False])\n if curr_node.left:\n stack.append([curr_node.left, False, False])\n\n return visited_values", "title": "" }, { "docid": "f93780b264d8a717ca271d4a12a4f6b4", "score": "0.6269361", "text": "def post_order(self) -> Iterator[NL]:\n for child in self.children:\n yield from child.post_order()\n yield self", "title": "" }, { "docid": "0a5a503b91a5aeccaba389cfb6afda46", "score": "0.6266493", "text": "def pre_order_list(self):\n return self.__root.pre_order_list(self.__root)", "title": "" }, { "docid": "6d89eb1c6bd57353d40082bbf521273e", "score": "0.6265425", "text": "def in_order(self):\n output = []\n def walk(node):\n if not node:\n return\n walk(node.left) \n output.append(node.value)\n walk(node.right)\n walk(self.root)\n return output", "title": "" }, { "docid": "cc8616d04b0fd03e611ed66f64be831f", "score": "0.6237585", "text": "def in_order_list(self):\n return self.__root.in_order_list(self.__root)", "title": "" }, { "docid": "eb6de04a216ec028ee9a723273dd97c2", "score": "0.6237037", "text": "def postOrder (self):\n\t\tif type(self.data) == int or type(self.data) == float:\n\t\t\treturn [str(self.data)]\n\t\telse:\n\t\t\treturn self.left.postOrder() + self.right.postOrder() + [self.data]", "title": "" }, { "docid": "c53308bdcca8002df42da7156887a5ab", "score": "0.6211292", "text": "def post_order(self) -> Iterator[\"Leaf\"]:\n yield self", "title": "" }, { "docid": "be4f06ec10e75591949f76180f913f61", "score": "0.6202028", "text": "def get_all_items(self):\n all_items = []\n items = list(self.get_children())\n while items:\n item = items.pop()\n all_items.append(int(item))\n items.extend(self.get_children(item))\n return all_items", "title": "" }, { "docid": "9ff28e98684a41fe14b86ed8c3390b0c", "score": "0.6168023", "text": "def get_all_items(self):\n return self._tree.nodes", "title": "" }, { "docid": "81b4899b0baa7a59fd7656a917d75289", "score": "0.61307526", "text": "def postorder(self, node):\n\n if node is not None:\n sz = self.depth(node.value)\n if sz != -1:\n if node.right is not None:\n for i in self.postorder(node.left):\n yield i\n if node.left is not None:\n for i in self.postorder(node.right):\n yield i\n yield node.value\n else:\n return None", "title": "" }, { "docid": "26cfc730ad40326cb6d29a64aa75ca05", "score": "0.61203134", "text": "def printTreePostOrder(self):\n if self.left:\n self.left.printTreePostOrder()\n if self.right:\n self.right.printTreePostOrder()\n print self.data,", "title": "" }, { "docid": "87e83434bf2a5bf61ffd40199c5e093e", "score": "0.6108899", "text": "def pre_order(self):\n output = []\n def _walk(node):\n if not node:\n return\n output.append(node.value)\n _walk(node.left)\n _walk(node.right)\n\n _walk(self.root)\n return output", "title": "" }, { "docid": "72457e50d5ad726684a164141b73eb2f", "score": "0.60539484", "text": "def dump(self):\n return 
sorted(list(self.tree.keys()))", "title": "" }, { "docid": "c440e3c1589b803e924938ea19d6af32", "score": "0.60116506", "text": "def to_list(self, order='inorder'):\n return self._to_list(self.root, order)", "title": "" }, { "docid": "16751ef5b567193db7448c92635d113f", "score": "0.60090756", "text": "def in_order_trav(self):\n q = []\n self._in_order(self.root, q)\n return q", "title": "" }, { "docid": "257d0463c2e64a2e2e195cbd6e20f724", "score": "0.6007158", "text": "def get_bst_list(self):\n queue = [self.root]\n result = []\n # BFS traversal:\n while queue:\n cur_node = queue.pop(0)\n if cur_node:\n result.append(cur_node.val)\n queue.extend([cur_node.left, cur_node.right])\n\n return result", "title": "" }, { "docid": "3c6d28b454fcc37673ac41c078702646", "score": "0.6000058", "text": "def items(self):\n\n return self._items(self._root)", "title": "" }, { "docid": "7355953f8a3c2893423d70ff9d1e8f7c", "score": "0.5988485", "text": "def items(self):\n\n items = [] # O(1) only one step to do this\n # Start at head node\n current_node = self.head # O(1) only one step to do this\n # Loop until node is None, meaning at the end\n while current_node is not None: # O(n) loop through all of the list\n items.append(current_node.data) # O(1) only one step to do this\n # Now the node is equal to the node.next to move to the next item, also, last step current_node points to None\n current_node = current_node.next # O(1) only one step to do this\n return items # O(1) only one step to do this", "title": "" }, { "docid": "ccfb716c626752ed3e88b9cc346d9d72", "score": "0.59704167", "text": "def postorder_traversal(self, root, result):\n if root:\n self.postorder_traversal(root.left, result)\n self.postorder_traversal(root.right, result)\n result.append(root.value)\n return result", "title": "" }, { "docid": "f01be6eab3cfdd7e777f365c26979fdb", "score": "0.5966465", "text": "def in_order(self):\n result=[]\n def walk(current):\n if current:\n if current.left:\n walk(current.left)\n result.append(str(current.value))\n if current.right:\n walk(current.right)\n walk(self.root)\n return result", "title": "" }, { "docid": "4a6374ab814da12b6cedeabc0acb5ec3", "score": "0.5961586", "text": "def all_nodes(self):\n\t\treturn self.order[:]", "title": "" }, { "docid": "c7113d397b9a697f10767e87c8e2483c", "score": "0.59587115", "text": "def postorder(tree: BinaryTree):\n if tree:\n postorder(tree.get_left_child())\n postorder(tree.get_right_child())\n print(tree.get_root_value())", "title": "" }, { "docid": "714a20d895ff83b64d58fdb42fe330ba", "score": "0.59463227", "text": "def traverse(self):\n\n lister = []\n lister.append(self.value)\n\n for child in self.children:\n lister += child.traverse()\n\n return lister", "title": "" }, { "docid": "91dc087a25c0072fe95b720649d0b33e", "score": "0.5945955", "text": "def to_list(self) -> list:\n if self.is_empty():\n return []\n else:\n return self._left.to_list() + [self._root] + self._right.to_list()", "title": "" }, { "docid": "357129ec1390110952fcaf17c82b0016", "score": "0.5943748", "text": "def sort(self):\n if self.root:\n return self.root.sort([])\n else:\n return []", "title": "" }, { "docid": "bae1ba1d01c3996662f797036447547a", "score": "0.5941819", "text": "def postOrderMine(self):\n my_left = self.left.copy()\n my_right = self.right.copy()\n stack = deque()\n results = []\n stack.append(0)\n while stack:\n node = stack.pop()\n if my_right[node] == -1 and my_left[node] == -1:\n results.append(self.key[node])\n continue\n stack.append(node)\n if my_right[node] != -1:\n 
stack.append(my_right[node])\n my_right[node] = -1 # we now already processed the right arm\n if my_left[node] != -1:\n stack.append(my_left[node])\n my_left[node] = -1\n return results", "title": "" }, { "docid": "ca16e9d5f2b902944d43e2b5fec02f0e", "score": "0.5920721", "text": "def postorder(self, x):\r\n if not self.is_empty():\r\n for c in x.children:\r\n self.postorder(c)\r\n print x.data", "title": "" }, { "docid": "af917195a4875c5acaaa0b7efa216704", "score": "0.5919596", "text": "def postorder_traversal(self):\r\n if self.left:\r\n self.left.postorder_traversal()\r\n if self.right:\r\n self.right.postorder_traversal()\r\n print(self.key)", "title": "" }, { "docid": "976a7b3aee2bbdbdb85c300878e10887", "score": "0.59104574", "text": "def child_tree(self):\n # Because we traverse down, we need to revese to get merge-order\n children = self.recurse_tree('children')\n children.reverse()\n return children", "title": "" }, { "docid": "de41e97aa62f45bbef50f514ba3a17b0", "score": "0.5898327", "text": "def post_order(self):\n start = self.root\n if start is None:\n raise StopIteration\n s = []\n last = None\n while s or start:\n if start:\n s.append(start)\n start = start.left\n else:\n peek = s[-1]\n if peek.right and last is not peek.right:\n start = peek.right\n else:\n yield peek.value\n last = s.pop()", "title": "" }, { "docid": "251c00769c15a3c4b0cd422600fbea1e", "score": "0.58923215", "text": "def get_children(self) -> list:\n return list(self.__children.values())", "title": "" }, { "docid": "69d35ce0c83bd792ee39d13897953534", "score": "0.5888606", "text": "def post_order(self):\r\n if self._root is None:\r\n return '[ ]'\r\n else:\r\n return \"[ \" + self._post_order(self._root)[:-2] + \" ]\"", "title": "" }, { "docid": "5451a5f14187adbb87b55136e17ea24c", "score": "0.5879257", "text": "def post_order(self):\n\t\tif self.left_node:\n\t\t\tself.left_node.post_order()\n\n\t\tif self.right_node:\n\t\t\tself.right_node.post_order()\n\n\t\tprint(self.value)", "title": "" }, { "docid": "e007fa9d4d3ee0f403b33a33099bd8c4", "score": "0.5873231", "text": "def print_postorder(self, node):\n if node is None:\n return\n self.print_postorder(node.left)\n self.print_postorder(node.right)\n print node.data,\n return", "title": "" }, { "docid": "75472d3c44e71d47e8cc2d69101e6006", "score": "0.5869206", "text": "def items(self):\n result = []\n node = self.head\n while node is not None:\n result.append(node.data)\n node = node.next\n return result", "title": "" }, { "docid": "c686b6776ae95f1ca0c81923f7150371", "score": "0.58553815", "text": "def trees(self):\n try:\n return [rec.tree for rec in self]\n except ValueError:\n return []", "title": "" }, { "docid": "dfbca917d6c79e58cd77c35eaf609751", "score": "0.5847798", "text": "def traverse(self):\n traverse_list = []\n if self.left_child:\n traverse_list += self.left_child.traverse()\n\n traverse_list.append(self.root)\n\n if self.right_child:\n traverse_list += self.right_child.traverse()\n return traverse_list", "title": "" }, { "docid": "146df4aba01c7422777eebd3cfb38c48", "score": "0.5820092", "text": "def postorder(t: Tree) -> None:\n if t is None: return\n postorder(t.left)\n postorder(t.right)\n print(t.v)", "title": "" }, { "docid": "d66edacba202c6aaea64bca51d7f8f27", "score": "0.5818365", "text": "def inorder(self):\r\n result = []\r\n def inorder_util(node):\r\n if node is None:\r\n return\r\n inorder_util(node.left)\r\n result.append(node.val)\r\n inorder_util(node.right)\r\n inorder_util(self.root)\r\n print(result)", "title": "" }, { "docid": 
"0528c8583fef1d44a7562ec8916096aa", "score": "0.58133185", "text": "def postorder(root):\n if root:\n postorder(root.left)\n postorder(root.right)\n print(root.key)", "title": "" }, { "docid": "84ec17a247dc5fa3287831102a26e119", "score": "0.5803636", "text": "def items(self):\n items = [] # O(1) time to create empty list\n # Start at head node\n node = self.head # O(1) time to assign new variable\n # Loop until node is None, which is one node too far past tail\n while node is not None: # Always n iterations because no early return\n items.append(node.data) # O(1) time (on average) to append to list\n # Skip to next node to advance forward in linked list\n node = node.next # O(1) time to reassign variable\n # Now list contains items from all nodes\n return items # O(1) time to return list", "title": "" }, { "docid": "0244c4f33ff77b41f93313eb623aa47a", "score": "0.5802047", "text": "def inorder_traverse(self):\n results = []\n if self.left: results.extend(self.left.inorder_traverse())\n results.append(self.value)\n if self.right: results.extend(self.right.inorder_traverse())\n return results", "title": "" }, { "docid": "473352355cc88ac38952faef2aec49e5", "score": "0.57911813", "text": "def in_order_traversal(self):\n if self.root is None:\n return []\n else:\n return self.in_order_traversal_helper(self.root)\n pass", "title": "" }, { "docid": "e97b580ac9f979f156d0fa40ef03998e", "score": "0.57907116", "text": "def inorder(self):\n if not self.is_empty():\n for p in self._subtree_inorder(self.root()):\n yield p", "title": "" }, { "docid": "7d0daf5bc250208837543bb55f5e3530", "score": "0.5786458", "text": "def _all_traverse(self):\r\n result = []\r\n result.append(self)\r\n for child in self.children:\r\n result.extend(child._all_traverse())\r\n return result", "title": "" }, { "docid": "f6da9d34eb22ed604fd884510692dd65", "score": "0.5749426", "text": "def get_all_children(self, item):\n return self._tree.get_all_children(item)", "title": "" }, { "docid": "5805d1e77cd7dfa4c8923a3c7a1e4e2e", "score": "0.57156014", "text": "def items(self) -> list[tuple[str, Node]]:\n return list(self.iteritems())", "title": "" }, { "docid": "ef037d1a78282505e9547760a91d344a", "score": "0.56820756", "text": "def tree(self):\n list = []\n for item in self.data:\n if type(item) is InstanceType:\n try:\n list = list + item.tree()\n except AttributeError:\n list.append(repr(item))\n else:\n list.append(item)\n return [self.name, list]", "title": "" }, { "docid": "74bad725c2106059f781097c9677aead", "score": "0.568005", "text": "def _traverse_post_order_recursive(self, node, visit):\n if node.left:\n self._traverse_post_order_recursive(node.left, visit)\n\n if node.right:\n self._traverse_post_order_recursive(node.right, visit)\n\n visit(node.data)", "title": "" }, { "docid": "15e7c5f5a3dbc7c20f6edea91afab71e", "score": "0.5677276", "text": "def print_tree_postorder(node):\n\n if not node:\n return\n print_tree_postorder(node.left)\n print_tree_postorder(node.right)\n print(node.val)", "title": "" }, { "docid": "0cc8d58210d92b66ccd31e4c7dda5ea6", "score": "0.5653891", "text": "def postorder_traversal(self):\n for v in self.subviews:\n yield from v.postorder_traversal\n yield self", "title": "" }, { "docid": "e5b82a7eb7a841056043a88813924a14", "score": "0.5650253", "text": "def get_root_items(self):\n return self._tree.get_children(None)", "title": "" }, { "docid": "53ff2373976c9e1227dedd88d5e43918", "score": "0.56421864", "text": "def levelorder(self):\n a = []\n \n if self._root is not None:\n node = self._root\n c = 
[]\n c.append(node)\n while len(c) != 0: \n n = [] \n for v in c:\n a.append(v._data)\n if v._left is not None:\n n.append(v._left)\n if v._right is not None:\n n.append(v._right)\n c = n\n \n return a", "title": "" }, { "docid": "be4c3622d2c1fe90a33a2eddc219b2ad", "score": "0.56297636", "text": "def preorder(self):\n stack = []\n keys = []\n stack.append(self.getroot())\n while len(stack) > 0:\n current = stack.pop(-1)\n if current is not None:\n keys.append(current.getkey())\n if current.getright() is not None:\n stack.append(current.getright())\n if current.getleft() is not None:\n stack.append(current.getleft())\n return keys", "title": "" }, { "docid": "0911684c6299004563cddd16b5255ee8", "score": "0.56288093", "text": "def _subtre_postorder(self,p):\n for c in self.children(p): # for each child of c\n for other in self._subtree_preorder(c): # do postorder of c's children\n yield other # yeilding each to our caller\n yield p # visit p before its subtree", "title": "" }, { "docid": "b90408ecd4e613f0c8e4ff49de63a3ef", "score": "0.5625749", "text": "def level_order(self):\n keys = []\n queue = self.deque()\n queue.append(self.root)\n while queue:\n x = queue.popleft()\n if x is None:\n continue\n\n keys.append(x.key)\n queue.append(x.left)\n queue.append(x.right)\n return keys", "title": "" }, { "docid": "b1183e8ad61bb86f8aa7edeab54af1aa", "score": "0.5617291", "text": "def levelorder(self):\n queue = []\n keys = []\n queue.append(self.getroot())\n while len(queue) > 0:\n current = queue.pop(0)\n keys.append(current.getkey())\n if current.getleft() is not None:\n queue.append(current.getleft())\n if current.getright() is not None:\n queue.append(current.getright())\n return keys", "title": "" }, { "docid": "23bef2c06d5c1a2b9c658a7a73ac9b19", "score": "0.56163615", "text": "def show_postorder(self, walker=None):\n if not walker:\n walker = self.tree.root\n\n if walker.left: self.show_postorder(walker.left)\n if walker.right: self.show_postorder(walker.right)\n print walker.data,", "title": "" }, { "docid": "82aff717d284596479fe4c74810f35cc", "score": "0.56144667", "text": "def pre_order(self, left_child=None, right_child=None):\n values = []\n\n def walk(curr_node):\n # Break immediately if node is empty\n if not curr_node:\n return\n \n # Deal with the root\n values.append(curr_node.value)\n # Check for a left-child node\n walk(curr_node.left_child)\n # Check for a right-child node\n walk(curr_node.right_child)\n\n walk(self.root_node)\n\n return values", "title": "" }, { "docid": "2c6b72202b09e29e8fc85806d383f67b", "score": "0.5613129", "text": "def items(self):\n return sorted(self.__items)", "title": "" }, { "docid": "15ca5f715ca1ad34c58bc8ae3fdfd45f", "score": "0.5601525", "text": "def in_order_dft(self, node):\n values = []\n\n def dfs(n):\n values.append(n.value)\n if n.right:\n dfs(n.right)\n if n.left:\n dfs(n.left)\n dfs(node)\n values.sort()\n return values", "title": "" }, { "docid": "4d487b3892fb26d35943dce5310f8866", "score": "0.55979496", "text": "def inorder(self):\n if not self.is_empty():\n for pos in self._subtree_inorder(self.root()):\n yield pos", "title": "" }, { "docid": "3cb445db290a6227cc8c41d75bd5bf76", "score": "0.5575878", "text": "def in_order(self, left_child=None, right_child=None):\n values = []\n\n def walk(curr_node):\n if not curr_node:\n return\n \n # Check for a left-child node\n walk(curr_node.left_child)\n # Deal with the root\n values.append(curr_node.value)\n # Check for a right-child node\n walk(curr_node.right_child)\n\n walk(self.root_node)\n\n 
return values", "title": "" }, { "docid": "bc18b99f88c3334a203588d1eaf282ef", "score": "0.5574316", "text": "def get_sorted_children(self):\n return sorted(list(self.children), key=lambda x: x.out_rel.name)", "title": "" }, { "docid": "fce4a55bf62aff72d34cee51e623b0f9", "score": "0.5556275", "text": "def inorder(self):\n stack = []\n keys = []\n current = self.getroot()\n while len(stack) > 0 or current is not None:\n if current is not None:\n stack.append(current)\n current = current.getleft()\n else:\n current = stack.pop(-1)\n keys.append(current.getkey())\n current = current.getright()\n return keys", "title": "" }, { "docid": "b0e2903cb98a2e8ec030889cb65781c9", "score": "0.5519866", "text": "def nodelist(self) -> List[T]:\n return self._nodelist", "title": "" }, { "docid": "1ecffa0faf310fd5653712050d6e83b8", "score": "0.5509672", "text": "def __LevelOrderlist(self):\n self.orderlist = [node.name for node in PreOrderIter(self.node)]", "title": "" }, { "docid": "2da8acb5f6104a55ff647d0ec458c216", "score": "0.5506769", "text": "def nodes_list(self):\n return self._nodes_list(self.root)", "title": "" }, { "docid": "e7d68daa83a130fa8d579d905a78c51a", "score": "0.5503501", "text": "def get_tree_as_list(self) -> list[LeoTreeData]: # only_expanded=True, sort=None, key=None):\n assert g.callers(1) == '_getValues', g.callers()\n aList = [z for z in self.walk_tree(only_expanded=True)]\n # g.trace('LeoTreeData', len(aList))\n return aList", "title": "" }, { "docid": "094ccf2953fd951e8f4e0d97b178f905", "score": "0.54940337", "text": "def children(self):\n return list(self._children.values())", "title": "" }, { "docid": "2493c528a84f6cc18278374ec4fe8933", "score": "0.54875606", "text": "def traverse_postorder(op):\r\n\r\n nodes_postorder = []\r\n\r\n def recurse(node):\r\n # operations are only nodes which can have multiple input nodes\r\n if isinstance(node, Operation): \r\n for input_node in node.input_nodes:\r\n recurse(input_node)\r\n \r\n nodes_postorder.append(node) #doing this last puts node last - temporally\r\n\r\n recurse(op)\r\n return nodes_postorder", "title": "" }, { "docid": "a30821a0044457d9d796021995def3ba", "score": "0.54870933", "text": "def levels(self):\n current = self.queue\n while current:\n yield current\n next_item = []\n for node in current:\n next_item.extend(node.children)\n current = next_item", "title": "" }, { "docid": "65bf29e38b1fc076a836cabae2305c5b", "score": "0.54720986", "text": "def post_order(self) -> Iterator[NL]:\n raise NotImplementedError", "title": "" }, { "docid": "18655cbd7833cdc778afb8adfee4e5d5", "score": "0.5467351", "text": "def get_child_list(self):\n return self.__child_list", "title": "" }, { "docid": "89cc2c8c66a4a44176240969ed85c1c0", "score": "0.54661053", "text": "def serialize(self, root) -> list:\n\n def rec(root: TreeNode) -> None:\n if not root:\n ans.append(None)\n return\n ans.append(root.val)\n rec(root.left)\n rec(root.right)\n\n ans = list()\n rec(root)\n return ans", "title": "" }, { "docid": "38127e8c3d342d1a8a7e2e095ff776c5", "score": "0.5462107", "text": "def values(self):\n list = []\n node = self.root\n while node:\n list.append(node.value)\n node = node.child\n return list", "title": "" }, { "docid": "56878183bce27e05ce9ebc946aaa7cd7", "score": "0.5448015", "text": "def test_post_order_operation_balance_post_order(balanced_bst):\n order = []\n balanced_bst.post_order(lambda n: order.append(n.val))\n assert order == [3, 8, 7, 12, 20, 16, 10]", "title": "" }, { "docid": "572529f1a776081f0d650be5fb2f3848", "score": "0.5439007", 
"text": "def bfs(self):\n\n\t\tQ = [[self]]\n\t\tbfsTree = []\n\t\twhile Q:\n\t\t\tcurrq = Q.pop(0)\n\t\t\tnextq = []\n\t\t\tlevel = []\n\t\t\twhile currq:\n\t\t\t\tq = currq.pop(0)\n\t\t\t\tif q is None:\n\t\t\t\t\tlevel.append(None)\n\t\t\t\telse:\n\t\t\t\t\tlevel.append(q.val)\n\t\t\t\t\tnextq.append(q.left)\n\t\t\t\t\tnextq.append(q.right)\n\t\t\tif nextq:\n\t\t\t\tQ.append(nextq)\n\t\t\tif level:\n\t\t\t\tbfsTree.append(level)\n\t\treturn bfsTree", "title": "" }, { "docid": "319d9e6465ce7334f114992a4108f9fb", "score": "0.5435621", "text": "def traverse_postorder(operation):\r\n \r\n nodes_postorder = []\r\n \r\n def recurse(node):\r\n if isinstance(node, Operation):\r\n for input_node in node.input_nodes:\r\n recurse(input_node)\r\n nodes_postorder.append(node)\r\n \r\n recurse(operation)\r\n return nodes_postorder", "title": "" }, { "docid": "0d800907f37ef931c2d2b539e449e7dd", "score": "0.54258287", "text": "def inorder(self):\n lyst = list()\n\n def recurse(node):\n if node != None:\n recurse(node.left)\n lyst.append(node.data)\n recurse(node.right)\n\n recurse(self._root)\n return iter(lyst)", "title": "" }, { "docid": "d016148125ecf593cf75bf9d97d8a635", "score": "0.54214156", "text": "def in_order(self):\r\n if self._root is None:\r\n return '[ ]'\r\n else:\r\n return \"[ \" + self._in_order(self._root)[:-2] + \" ]\"", "title": "" }, { "docid": "08dda1c6af0c0b3bf13a619dbe72c3f8", "score": "0.54158545", "text": "def postorder(self, fn):\n if self.is_leaf():\n fn(self)\n return\n for child in self.children:\n child.postorder(fn)\n fn(self)", "title": "" }, { "docid": "cbafef14fc19ed7ff7066a70682f84e6", "score": "0.5414696", "text": "def get_all_children_tree(tree: ttk.Treeview, item: str = \"\") -> List:\n children = tree.get_children(item)\n for child in children:\n children += get_all_children_tree(tree, child)\n return children", "title": "" }, { "docid": "c1ae3f0135a7e66c051cd18f4a366299", "score": "0.5408453", "text": "def post_order_traversal(root):\n if root:\n post_order_traversal(root.left)\n post_order_traversal(root.right)\n print(root.value)", "title": "" }, { "docid": "a3d641c3a44939ac615708fe5805aee6", "score": "0.537836", "text": "def get_leaf_values(self):\n if self.is_leaf(): return [self.value]\n else:\n values = self.right.get_leaf_values()\n values.extend(self.left.get_leaf_values())\n return sorted(values)", "title": "" }, { "docid": "acd10c14d8e7c5a412a97c1c2465662b", "score": "0.5376759", "text": "def get_children(self, item):\n return self._tree.get_children(item)", "title": "" }, { "docid": "ddfb60066de963b8708e0ae50a942e08", "score": "0.5373588", "text": "def get_query_set(self):\r\n return super(TreeManager, self).get_query_set().order_by(\r\n self.tree_id_attr, self.left_attr)", "title": "" }, { "docid": "5692a7301eb703c6805e1fcf09e8bd6e", "score": "0.5369167", "text": "def in_order(self):\n arr = []\n if self.root and self.root.value != None:\n return self.root.in_order(arr)\n else:\n raise EmptyTreeException()", "title": "" } ]
9da9283cd11ad789fa2024aae0cc15dd
verifies that the density sums to number of electrons
[ { "docid": "c040f739c7369e7c4ff7b97cb58f507f", "score": "0.81512326", "text": "def _check_density(self,density, num_electrons):\n\n FLOAT_PRECISION = 0.01\n #integrate the density over the spherical space\n #s = float(np.sum(density))\n #s = 4*np.pi * float(np.sum(density * self.grid.gridvec**2 ))\n s = 4*np.pi * integrate.simps(density * self.grid.gridvec**2 ,self.grid.gridvec)\n print(\"the density sums to \",s)\n assert (abs(s - num_electrons) < FLOAT_PRECISION), \\\n \"density should sum to {0} ! got prob={1} instead\".format(num_electrons, s)", "title": "" } ]
[ { "docid": "a2143906e5d0c2ae91b15a37fbb28e96", "score": "0.6757781", "text": "def _calc_density(self, EigenVecs, num_electrons): \n density = 0\n\n for i in range (0, len(self.occupation_list)):\n #print(\"orbital number - {0} adding occupation: {1}\".format(i, self.occupation_list[i]))\n #density += self.occupation_list[i] * np.power(np.abs(EigenVecs[:, i]), 2)\n density += self.occupation_list[i] * np.abs(EigenVecs[:, i])**2 \n\n self._check_density(density, num_electrons)\n return density", "title": "" }, { "docid": "a5f6a78ca039a6290d3e79347be4296c", "score": "0.6402443", "text": "def compute_density(self):", "title": "" }, { "docid": "361dc1dc57ccef3a6a7f78b1f97d3bd0", "score": "0.6185222", "text": "def test_density(self):\n\n r_max = self.r_max + 0.5*self.diameter\n test_set = util.make_raw_query_nlist_test_set(\n self.box, self.pos, self.pos, \"ball\", r_max, 0, True)\n for nq, neighbors in test_set:\n self.ld.compute(nq, neighbors=neighbors)\n\n # Test access\n self.ld.density\n self.ld.num_neighbors\n self.ld.box\n\n self.assertTrue(self.ld.box == freud.box.Box.cube(10))\n\n npt.assert_array_less(np.fabs(self.ld.density - 10.0), 1.5)\n\n npt.assert_array_less(\n np.fabs(self.ld.num_neighbors - 1130.973355292), 200)", "title": "" }, { "docid": "519e894f2cc5e165bc6eafb13af550f7", "score": "0.61282694", "text": "def density(self) -> float:\r\n return sum([1 for _ in self.truth_table if _[1]]) / len(self.truth_table)", "title": "" }, { "docid": "b137b52a958504c710daab12997abec6", "score": "0.59869313", "text": "def check_size(): \n global npi, npo, nands, nff, nmd\n #print n_pis(),n_pos(),n_ands(),n_latches()\n result = ((npi == n_pis()) and (npo == n_pos()) and (nands == n_ands()) and (nff == n_latches()) )\n return result", "title": "" }, { "docid": "76b05a66687b0da0130e43f5ac679c1d", "score": "0.59661883", "text": "def atomic_areal_density_nm2(counts_edge: float, counts_spectrum: float, partial_cross_section_nm2: float) -> float:\n pass", "title": "" }, { "docid": "baae7ca63d31283d46526a5b130a053c", "score": "0.58755064", "text": "def calc_num_of_notes(emo_density, min_density, max_density):\n\n interval = (max_density - min_density) / 5\n\n for i in range(5):\n if min_density + (i * interval) <= emo_density <= min_density + (\n (i + 1) * interval):\n return int(2 ** i)", "title": "" }, { "docid": "a74b8361bd9b99d58521ca69c4d06186", "score": "0.5874067", "text": "def get_density(self, cell) -> float:\n return len([x for x in cell.context if x.value != 0]) // len(cell.context)", "title": "" }, { "docid": "eb58a64ee9b87def7b081dd9c17ff573", "score": "0.5797099", "text": "def shell_density(self, elements, mole, dr):\n # Usually density_dr is different from stats_dr.\n self.stats(dr)\n # Avogadro constant. 
Modified by coefficient used to\n # convert angstrom^3 to cm^3.\n NA = 6.022 / 10\n nbins = len(self._shell_atoms[elements[0]])\n # Calculate atom count for all species in elements as whole.\n # Convert numpy.Array to mutable list.\n count = [x for x in sum([self._shell_atoms[ele] for ele in elements])]\n # Calculate density.\n for i in range(nbins):\n r_low = i * dr\n r_high = r_low + dr\n # Volume unit is Angstrom^3.\n volume = self.vol_sphere(r_high) - self.vol_sphere(r_low)\n count[i] = count[i] / NA / volume\n return count", "title": "" }, { "docid": "0ac0dcd9e6a3ec22fb34cb66e06ab351", "score": "0.57378155", "text": "def density(self, x):", "title": "" }, { "docid": "0577a7b88ce6eab4889c17f9feb5897e", "score": "0.5707906", "text": "def spectral_density_function():\n pass", "title": "" }, { "docid": "e92ac46fb02cabb6ca7b2f79eed44b4f", "score": "0.56586033", "text": "def compute_valid_data_ratio(elevation):\n return np.count_nonzero(np.isfinite(elevation)) / elevation.size", "title": "" }, { "docid": "7f0e422421b9fd077cf49d1fc68acc46", "score": "0.5658476", "text": "def purity(self, clusters, classes):\n \n A = np.c_[(clusters,classes)]\n n_accurate = 0.\n for j in np.unique(A[:,0]):\n z = A[A[:,0] == j, 1]\n x = np.argmax(np.bincount(z))\n n_accurate += len(z[z == x])\n\t #print ('z', z)\n\t #print ('x', x)\n\t #if z[z!=x]:\n\t\t#print ('z != x', z[z!=x])\n return n_accurate / A.shape[0]", "title": "" }, { "docid": "fcf6e8ace4d3f183f2151b9b9094bff2", "score": "0.56467646", "text": "def electron_density(ratios_low, Z_vals):\n densities = np.empty(len(Z_vals))\n\n for i in np.arange(len(Z_vals)):\n Z = Z_vals[i]\n Z2 = Z*Z\n Z3 = Z*Z*Z\n Z4 = Z*Z*Z*Z\n ratio1 = ratios_low[i]\n # Calculate the denominator of the equation\n FG_Z = 0\n for j in np.arange(num_energies):\n FG_Z += wts_low[j] * (Z4*(Z4*F[j, 0] + Z3*F[j, 1] + Z2*F[j, 2] + Z*F[j, 3] + F[j, 4]) + Z2*G[j, 0] + Z*G[j, 1] + G[j, 2])\n\n # Calculate the density\n densities[i] = ratio1 * (FG_Zw1 / FG_Z)\n\n return densities", "title": "" }, { "docid": "e0cee3ee9ca36fb9b8abf042bea2d95e", "score": "0.56133944", "text": "def density(self) -> float:\n pass", "title": "" }, { "docid": "315415936127fa161d18c292e98c1c19", "score": "0.5611444", "text": "def count_sums(self):\n self.sum = 0\n self.sum_sq = 0\n for i in range(0, self.size):\n self.sum += self.data[i]\n for j in range(0, self.size):\n self.sum_sq += self.data[j]**2", "title": "" }, { "docid": "d815016b2a1b4293146aeba3ca528244", "score": "0.55895257", "text": "def test_derivative_no_self_energy():\n from numpy import array\n\n density = array([1, 0, 1, 10, 15, 0])\n density_plus_one = density.copy()\n density[1] += 1 \n\n expected = 0\n actual = energy(density_plus_one) - energy(density) \n assert_almost_equal(expected, actual)", "title": "" }, { "docid": "85da3b6cd95e6db9ce00282bfb26a747", "score": "0.557849", "text": "def fidelity(rho, sigma):\n [eig, uni] = np.linalg.eig(rho)\n eig = [np.sqrt(max(0, i)) for i in np.real(eig)]\n sqrt_rho = uni.dot(np.diag(eig).dot(uni.T.conj()))\n rho_all = sqrt_rho.dot(sigma.dot(sqrt_rho))\n [eig, uni] = np.linalg.eig(rho_all)\n eig = [np.sqrt(max(0, i)) for i in np.real(eig)]\n sqrt_rho_all = uni.dot(np.diag(eig).dot(uni.T.conj()))\n return float_type(np.real(np.trace(sqrt_rho_all)))", "title": "" }, { "docid": "8240c861d45cd81a3c369ef11bcc841e", "score": "0.5550921", "text": "def test_radical_count(self):\n self.assertEqual(self.molecule[0].get_radical_count(),\n sum([atom.radical_electrons for atom in self.molecule[0].atoms]))\n 
self.assertEqual(self.molecule[1].get_radical_count(),\n sum([atom.radical_electrons for atom in self.molecule[1].atoms]))", "title": "" }, { "docid": "73b8e56c9cfcf9e5108fa2bb402790e6", "score": "0.5532259", "text": "def test_n(self):\n self.assertAlmostEqual(self.surfarr.n.value_si, self.n, 6)", "title": "" }, { "docid": "02d91dd5159684a008c0fddbe4470115", "score": "0.54924786", "text": "def check_density(render, min_fill=2.0):\n\tfilled_pixels = np.count_nonzero(render)\n\tfill_percentage = 100 * filled_pixels/np.size(render)\n\tif fill_percentage > min_fill:\n\t\treturn True\n\t\n\treturn False", "title": "" }, { "docid": "e539f70277c168b292a58fb67a7e2d03", "score": "0.5472513", "text": "def spectral_density(self):\n pass", "title": "" }, { "docid": "14029bb6caf043bb61bf75d9a4959e3a", "score": "0.54656625", "text": "def test_reduced_density_matrix(self, local_device, shots, tol):\n dev = local_device(2)\n\n @qml.qnode(dev)\n def circuit():\n qml.Hadamard(wires=0)\n qml.CNOT(wires=[0, 1])\n qml.CRX(np.pi / 2, wires=[0, 1])\n return qml.density_matrix(wires=[0])\n\n output = circuit()\n expected_dm = np.array([[0.5, 1j / np.sqrt(8)], [-1j / np.sqrt(8), 0.5]])\n assert np.allclose(output, expected_dm, **tol)", "title": "" }, { "docid": "dd8b8ed6293d5301dd2b1bf17c203605", "score": "0.54535985", "text": "def check_counts(self):\n assert (np.alltrue(self.emissions >= 0))\n assert (np.alltrue(self.bi_trans >= 0))\n assert (np.sum(self.emissions) == self.N)\n assert (np.sum(self.y_sums) == self.N)\n # because the first transitions *in* aren't counted.\n assert (np.sum(self.bi_trans) == self.N-1)\n\n for y in range(self.K):\n assert np.sum(self.emissions[y,:]) == self.y_sums[y]\n if self.trigram:\n assert (np.alltrue(self.tri_trans >= 0))\n assert (np.sum(self.tri_trans) == self.N-2)", "title": "" }, { "docid": "e1320e20b9283e1d78f21e0dc621ad31", "score": "0.5433408", "text": "def _check_density(self, rho):\n\n if np.isscalar(rho):\n rho = np.array([rho])\n elif type(rho) != np.ndarray:\n rho = np.array(rho)\n if len(np.shape(rho)) == 2:\n rho = rho[0]\n\n if any(np.isnan(rho)):\n raise ValueError(\"NaN was given as a value of density, rho\")\n elif rho.size == 0:\n raise ValueError(\"No value of density, rho, was given\")\n elif any(rho < 0.0):\n raise ValueError(\"Density values cannot be negative.\")\n\n return rho", "title": "" }, { "docid": "de07ee4b15310a4bceb1af20532de9eb", "score": "0.5431671", "text": "def _sparseness(x):\n\tsqrt_n = np.sqrt(len(x))\n\treturn (sqrt_n - np.linalg.norm(x, 1) / norm(x)) / (sqrt_n - 1)", "title": "" }, { "docid": "8af82a897f4d961c073f0c5538dc7770", "score": "0.5419921", "text": "def get_sum_divisors(n):\n ret = 1\n for x in range(2, int(math.sqrt(n))+1):\n if n % x == 0:\n ret += x\n if n/x != x:\n ret += n/x\n return ret", "title": "" }, { "docid": "689276d392f45d2ae48bd32a3cd77615", "score": "0.5416422", "text": "def observed_class_distribution_is_pure(self):\n count = 0\n for weight in self.stats.values():\n if weight != 0:\n count += 1\n if count == 2: # No need to count beyond this point\n break\n return count < 2", "title": "" }, { "docid": "06877081ade598e9869d67fcea19bc59", "score": "0.54072684", "text": "def get_expected_density(mu, sigma, bin_edges):\r\n # Allow other array-like containers to be used.\r\n bin_edges = np.asarray(bin_edges)\r\n\r\n distr = norm(loc=mu, scale=sigma)\r\n # Given the distribution, compute the expected counts in each bin using\r\n # the cumulative density function.\r\n return np.diff(distr.cdf(bin_edges))", "title": "" 
}, { "docid": "819fc689a101f72ae712c489f7fc5f6d", "score": "0.53888863", "text": "def check_distinct_diversity(self):\n # frequency distribution of values in sensitive attr\n sensitive_dict = self.categorical_freq[self.categorical_freq.keys()[-1]]\n\n count_distinct_values = len(sensitive_dict.keys())\n return count_distinct_values >= self.l_diversity", "title": "" }, { "docid": "ad0767f2576924e467287b671b00dbdf", "score": "0.5385514", "text": "def test_compare_energies(self):\n fluxes = np.array(self.spectrum[1])\n energies = events.assign_energies(1000, self.spectrum)\n energies = [int(energy) for energy in energies]\n\n # Histogram energies to get shape approximation\n gen_energies = ((np.array(energies) - 1) / 1).astype(int)\n\n lc = np.bincount(energies)\n\n # Remove first entry as it contains occurences of '0' element\n lc = lc[1:7]\n\n # Calculate probabilities and compare\n lc_prob = (lc/float(sum(lc)))\n fluxes_prob = fluxes/float(sum(fluxes))\n assert np.all(np.abs(lc_prob - fluxes_prob) < 3 * np.sqrt(fluxes_prob))", "title": "" }, { "docid": "53b2d03241170c84f8f2cbc969d68373", "score": "0.53773457", "text": "def test_normal_ke():\n \n h=0.5\n u=0.0000000001\n assert_array_almost_equal(normal_kde(u,h), 0.598413420602)", "title": "" }, { "docid": "8a6a54b5b1f54e6f983f0d9aaf2aaace", "score": "0.5374017", "text": "def size_divisibility(self) -> int:\n return 0", "title": "" }, { "docid": "4831cbb5df9564fec08e3ef013469a28", "score": "0.5367638", "text": "def test_companion_to_flux_runs(area_ratio):\n assert mass_main(\"HD30501\", 90, 5, area_ratio=area_ratio) is 0", "title": "" }, { "docid": "31f8b25dbc1ee1de3161477aa67eccb9", "score": "0.5355416", "text": "def nelectrons(self, vasp, structure):\n from math import fsum\n # constructs dictionnary of valence charge\n valence = {}\n for key, value in vasp.species.items():\n valence[key] = value.valence\n # sums up charge.\n return fsum( valence[atom.type] for atom in structure )", "title": "" }, { "docid": "cc6a5f74822447b6fd73620ba05389af", "score": "0.5352538", "text": "def compute_density_evolution(self, args):\n L = 2**args.m\n # Get directories based on the argument\n self._get_string_dependent_vars(args)\n # Load variable arrays\n seeds = np.loadtxt(self._dir+'seeds.txt', dtype=int)\n # Allocate\n N = np.zeros((args.nmeasures+1, len(seeds)))\n M = np.zeros((args.nmeasures+1, len(seeds)))\n ph = np.zeros((args.nmeasures+1, len(seeds)))\n etah = np.zeros((args.nmeasures+1, len(seeds)))\n # Load data\n for i, seed in enumerate(seeds):\n suffix = self._suffix.format(seed=seed)\n N[:,i] = np.load(self._ddir+f'pred_population{suffix}.npy')\n M[:,i] = np.load(self._ddir+f'prey_population{suffix}.npy')\n #_ph = np.ma.divide(np.load(self._ddir+f'predators_on_habitat{suffix}.npy'), N[:,i]).filled(0)\n #I = np.load(self._ddir+f'isolated_patches{suffix}.npy').astype(float)\n #I = np.cumsum(I)\n #I /= args.rho * L**2\n #I[:args.nmeasures//2+1] /= (args.rho * L**2)\n #I[args.nmeasures//2+1:] /= (args.rho/5*L**2)\n #etah[:,i] = 1 - I\n # Save\n np.save(self._rdir+f'N{self._save_suffix}', N)\n np.save(self._rdir+f'M{self._save_suffix}', M)\n #np.save(self._rdir+f'ph{self._save_suffix}', ph)\n #np.save(self._rdir+f'etah{self._save_suffix}', etah)\n # Print colsing statements\n print(f'Computed population dynamics for \\n {self._printstr}')", "title": "" }, { "docid": "9d5c2ef4552533b4854edd1bc4978c37", "score": "0.5350062", "text": "def converged_electronic(self):\n final_esteps = self.ionic_steps[-1][\"electronic_steps\"]\n if 'LEPSILON' in 
self.incar and self.incar['LEPSILON']:\n i = 1\n to_check = set(['e_wo_entrp', 'e_fr_energy', 'e_0_energy'])\n while set(final_esteps[i].keys()) == to_check:\n i += 1\n return i + 1 != self.parameters[\"NELM\"]\n return len(final_esteps) < self.parameters[\"NELM\"]", "title": "" }, { "docid": "9ca3bff2e4626092b4e12240018868fa", "score": "0.53432965", "text": "def state_fidelity(rho: np.ndarray, sigma: np.ndarray) -> float:\n # If the input is a state vector, convert it to density operator; otherwise, unchanged\n rho = vec_to_operator(rho)\n sigma = vec_to_operator(sigma)\n\n if rho.shape != sigma.shape:\n raise ArgumentError(\"The dimensions of the two input density matrices mismatch!\")\n\n fid = np.trace(la.sqrtm(la.sqrtm(rho) @ sigma @ la.sqrtm(rho))) ** 2\n return float(np.real(fid))", "title": "" }, { "docid": "b2e3b927e760727c6464d5763a7a29ae", "score": "0.5342407", "text": "def is_abundant(n):\n return n < get_sum_divisors(n)", "title": "" }, { "docid": "44fd45fe6d708cf2faf26f37d1aca4c7", "score": "0.5330233", "text": "def test_solve_sat_density(self, model):\n # test saturated liquid and vapor density solve (critical part of the\n # phase equlibrium calc)\n te = model.te\n data = read_sat_data(self.pdata_sat, self.mw)\n for i, T in enumerate(data[\"T\"]):\n if T > 304.128:\n # if this goes over the critical temperature this makes no\n # sense while we're looking at the two phase region. (not sure\n # how it got in the data)\n pass\n else:\n tol = 1e-2\n # test p, x spec\n rhol = value(\n te.rho_liq(p=data[\"P\"][i]*pyunits.Pa, x=0))\n rhov = value(\n te.rho_vap(p=data[\"P\"][i]*pyunits.Pa, x=1))\n assert rhol == pytest.approx(data[\"rhol\"][i], rel=tol)\n assert rhov == pytest.approx(data[\"rhov\"][i], rel=tol)\n # test t, x spec\n rhol = value(te.rho_liq(T=T*pyunits.K, x=0))\n rhov = value(te.rho_vap(T=T*pyunits.K, x=1))\n assert rhol == pytest.approx(data[\"rhol\"][i], rel=tol)\n assert rhov == pytest.approx(data[\"rhov\"][i], rel=tol)\n\n # Ignore the phase equilibrium and use T,P data to calc densities\n if T > 296:\n tol = 1e-1 # data needs more sig fig\n rhol = value(te.rho_liq(\n p=data[\"P\"][i]*pyunits.Pa, T=T*pyunits.K, x=0))\n rhov = value(te.rho_vap(\n p=data[\"P\"][i]*pyunits.Pa, T=T*pyunits.K, x=1))\n assert rhol == pytest.approx(data[\"rhol\"][i], rel=tol)\n assert rhov == pytest.approx(data[\"rhov\"][i], rel=tol)", "title": "" }, { "docid": "a10bcf426c6006193a8a894bc736ab74", "score": "0.5325989", "text": "def dissimilarity(clusters):\n totDist = 0\n for c in clusters:\n totDist += c.variability()\n return totDist", "title": "" }, { "docid": "7b320d4af573a41a9b259330a416780c", "score": "0.53200376", "text": "def main():\n s = set()\n CAP = 10**14\n SQRT = int(math.sqrt(CAP)) + 1\n RT4 = int(math.sqrt(SQRT)) + 2\n for den in range(1, RT4 + 1):\n print(den, RT4)\n for a in range(den, SQRT + 1, den):\n c = a + den\n num = a * c + a - a * a\n assert num % den == 0\n d = num // den\n assert c * d % a == 0\n b = c * d // a\n n = a * b\n assert n == c * d\n assert a % den == 0\n assert c % den == 0\n assert a + b == c + d + 1\n if n <= CAP:\n s.add(n)\n else:\n break\n\n print(len(s))", "title": "" }, { "docid": "922094a584843d6cd424a1a7c5134404", "score": "0.53151107", "text": "def assertProbs(self, xs, value_density, N):\n hist = Counter(xs)\n\n # do binomial test of proportions for each item\n for i in hist:\n self.assertProp(hist[i] / N, value_density[i], N)", "title": "" }, { "docid": "16d05d5ea15df5d749446b35a3f1e116", "score": "0.53134876", "text": "def 
evaluate_energy(grid):\r\n norm_constant = 16 * 16 * 3\r\n energy = norm_constant\r\n subgrids = get_subgrids(grid)\r\n\r\n # Find num unique elements on all rows\r\n for i in range(16):\r\n row_contents = [x for x in grid[i][:] if x >= 0]\r\n unique_in_row = len(set(row_contents))\r\n energy -= unique_in_row\r\n \r\n # Find num unique elements on all columns\r\n for j in range(16):\r\n col_contents = [y for y in grid[:][j] if y >= 0]\r\n unique_in_col = len(set(col_contents))\r\n energy -= unique_in_col\r\n \r\n # Find num unique elements in subgrid\r\n for k in range(16):\r\n subgrid_contents = [z for z in subgrids[k] if z >= 0]\r\n unique_in_subgrid = len(set(subgrid_contents))\r\n energy -= unique_in_subgrid\r\n \r\n energy /= norm_constant\r\n return energy", "title": "" }, { "docid": "605e9c1836b153d696faf91b0c42d023", "score": "0.53130615", "text": "def check_recursive_diversity(self):\n rl_to_rm = 0\n c, l = self.recursive_cl_diversity\n\n # frequency distribution of values in sensitive attr\n sensitive_dict = self.categorical_freq[self.categorical_freq.keys()[-1]]\n\n sorted_values = sorted(sensitive_dict, key=sensitive_dict.get, reverse=True)\n r1 = sensitive_dict[sorted_values[0]]\n for i in range(l-1, len(sensitive_dict.keys())):\n rl_to_rm += sensitive_dict[sorted_values[i]]\n return r1 < c * rl_to_rm", "title": "" }, { "docid": "17e582f656f81f2a44d00d09447e20ff", "score": "0.531203", "text": "def number_density(parameters, numsamples) :\n\n # Construct the full set of parameters we'll need\n N, gamma, nu, sigma0, sigma1 = parameters\n gammanu = gamma * nu\n kappa = 1.0 / (1 - gamma * gamma)\n nufact = 3.0 * nu / gamma\n fullparams = [N, gamma, nu, sigma0, sigma1, gammanu, kappa, nufact]\n\n # Define the variables of integration and their ranges\n # \"n\" is standard normal, good enough for initialization for everything\n # \"pn\" is positive normal\n jump = 0.5\n jump2 = 0.5\n jump3 = 0.5\n variables = [\n [\"l1\", \"n\", jump], [\"l2\", \"n\", jump], [\"l3\", \"n\", jump],\n [\"a\", \"pn\", jump2], [\"b\", \"pn\", jump2], [\"c\", \"pn\", jump2],\n [\"d\", \"n\", jump3], [\"e\", \"n\", jump3], [\"f\", \"n\", jump3]\n ]\n\n # Perform the Monte Carlo integration\n integrals, errors, acceptance = MCIntegrate(variables, integrand, probability, numsamples, parameters=fullparams)\n\n # Scale everything by the appropriate coefficient\n scaleval = scale(*parameters)\n integrals *= scaleval\n errors *= scaleval\n\n # Return the results\n return integrals, errors, signed_exact(*parameters), acceptance", "title": "" }, { "docid": "eaf383140de8bac90e36dfcfe757a4f1", "score": "0.53072494", "text": "def _perform_measurement(self) -> List[int]:\n bits, _ = sim.measure_density_matrix(\n self.target_tensor,\n self.axes,\n out=self.target_tensor,\n qid_shape=self.qid_shape,\n seed=self.prng,\n )\n return bits", "title": "" }, { "docid": "b4312df0e46df19b1756d2df9b542e83", "score": "0.52975976", "text": "def print_density_summary(rho):\n\n assert len(rho.shape) == 2\n M, N = rho.shape\n assert M == N\n n = int(np.log2(N))\n\n evalues, evectors = np.linalg.eigh(rho)\n\n print_mixture_summary(rho, evalues, evectors)\n\n print(\"Entropy: %.2f\" % entropy(evalues))\n\n warnings.simplefilter(\"ignore\", np.ComplexWarning)\n\n str_basis_vectors = basis_states_str(n)\n\n for bs in basis_density_states(n):\n p = np.trace(rho * bs)\n print(\"P(%s) = %.4f\" % (next(str_basis_vectors), p))", "title": "" }, { "docid": "69fe7eba1c9a6ab3460c170f9bb9bb55", "score": "0.5297284", "text": "def 
check_ionfrac_sum_1(self, mode):\n return np.isclose(np.sum(self['ion_frac'][key][:], axis=-1), 1.0, atol=0.01)", "title": "" }, { "docid": "8c48734da2c2f4cc8cfffc9c57461c58", "score": "0.5283966", "text": "def is_density_known(self):\n return self._is_density_known", "title": "" }, { "docid": "c71567437a7eebfc074382b7a67994b3", "score": "0.5279083", "text": "def densities(self):\n return self._frequencies / self.bin_sizes", "title": "" }, { "docid": "85f1afde75e6c22999b6447d8c7c60ea", "score": "0.52711594", "text": "def potential(systm):\n # diffs = [v for v in np.diff(systm) if v<=1.0]\n # return sum(diffs)", "title": "" }, { "docid": "efa79352b8873721591a24d4c380d829", "score": "0.52653277", "text": "def getDataDensity(npArr):\n\n noEntries = 1\n for length in npArr.shape:\n noEntries = noEntries * length\n return 1. - np.count_nonzero(npArr == 0.)/float(noEntries)", "title": "" }, { "docid": "c68e68f5a798c2f15cf2f638d0ac7be7", "score": "0.52617884", "text": "def _cluster_density_sparseness(MST, labels, cluster):\n indices = np.where(labels == cluster)[0]\n cluster_MST = MST[indices][:, indices]\n cluster_density_sparseness = np.max(cluster_MST)\n return cluster_density_sparseness", "title": "" }, { "docid": "c335fb63a27df8755d77629846c33ccc", "score": "0.5258654", "text": "def check_dmd_computation_simple(exact, total):\n\n num_snapshots = 10\n\n A = np.array([[0.9, 0.1], [0.0, 0.8]])\n X = np.random.randn(2, num_snapshots)\n Y = A.dot(X)\n\n ADMD = Y.dot(np.linalg.pinv(X))\n vals, vecs = np.linalg.eig(ADMD)\n inds = np.argsort(np.abs(vals))[::-1]\n vals = vals[inds]\n vecs = vecs[:, inds]\n\n # DMD class with rank 2\n DMD = dmdtools.DMD(2, exact, total)\n DMD.fit(X, Y)\n dmd_vals, dmd_modes = dmdtools.sort_modes_evals(DMD)\n\n for ii in range(len(vals)):\n assert np.abs(dmd_vals[ii] - vals[ii]) < 1e-10\n check_eigenvectors(vecs[:, ii], dmd_modes[:, ii])", "title": "" }, { "docid": "2aac796a2221516cbcd0d154965e792d", "score": "0.52565163", "text": "def calculate_n(self):\n if self.power is None:\n power = 0.8\n else:\n power = self.power\n\n if self.alpha is None:\n alpha = 0.05\n else:\n alpha = self.alpha\n\n found_solution = False\n for n in range(self._minN, self._maxN):\n count = 0\n for sim in range(self._N_SIMS):\n res = self.dist.rvs(size=n, random_state=self.seed * sim)\n\n _, p_val = self.normal_test(res)\n if p_val < alpha:\n count += 1\n test_power = count / self._N_SIMS\n if test_power > power:\n found_solution = True\n self.power = test_power\n self.n = n\n break\n if not found_solution:\n raise BaseException(\"N is greater than maximum N\")", "title": "" }, { "docid": "119c3150199dca22fb3f9d48b787ff8b", "score": "0.52429026", "text": "def _check_sum(self, weights):\n return np.sum(weights) - 1", "title": "" }, { "docid": "8f42bfd155d693925e908ff769295a7b", "score": "0.52225965", "text": "def abundant(n):\r\n \"*** YOUR CODE HERE ***\"\r\n val = 1\r\n su = 0\r\n while val * val <= n:\r\n if n % val == 0:\r\n print (val, '*', n // val)\r\n su = su + val\r\n if val != 1 and val * val != n:\r\n su = su + (n // val)\r\n val = val + 1\r\n if su > n:\r\n return True\r\n else:\r\n return False", "title": "" }, { "docid": "80310585ae5bb7bcc409a3c15e8c6436", "score": "0.5221602", "text": "def calcNumbOfDomComps(self, i_amountOfTotalVariance):\n\n totalVariance = np.sum(self.m_eigenDecomp.m_eigenValReal)\n sumEigVal = 0\n for i, e in enumerate(self.m_eigenDecomp.m_eigenValReal):\n\n sumEigVal += e\n if sumEigVal / totalVariance >= i_amountOfTotalVariance:\n\n return i + 1", 
"title": "" }, { "docid": "4b94ea1a542de132e33bd797c5daee9b", "score": "0.5220293", "text": "def process(densBoxes, density_length, dsigma, dsigmaCircle, thickness, snapshots, boxesX):\n\n # Makes the density sampling box distribution independent of the number of\n # system snapshots within the subsample and volume of the density box\n denseBoxes = (densBoxes/(snapshots*boxesX*((sigma*density_length)**3)))\n # Obtains the two dimensional initial distribution\n collapsedBoxesInitial = denseBoxes.copy()\n # Obtains one dimensional initial distributions about each axis\n vertDistributionInitial = numpy.sum(collapsedBoxesInitial, axis=0)\n horizDistributionInitial = numpy.sum(collapsedBoxesInitial, axis=1)\n \n # Determines the index dimensions of the collapsed density matrix\n shape = numpy.shape(denseBoxes)\n ys = shape[0]\n zs = shape[1]\n # Determines the number of elements in the collapsed density matrix\n numElements = float(ys*zs)\n \n # Calculates the median modified density of a sampling box\n densityCutoff = numpy.mean(denseBoxes)\n # Values to be used in determining the modified density half way between\n # that of bulk fluid and bulk vapor\n fluidDensity = 0\n fluidBoxes = 0\n vaporDensity = 0\n vaporBoxes = 0\n # For every y index in the sampling box matrix\n for yDim in range(0,ys):\n # For every z value at that y value\n for zDim in range(0,zs):\n # If the sampling box is in a fluid region\n if(denseBoxes[yDim,zDim] > densityCutoff):\n # Increment the number of fluid sampling boxes\n fluidBoxes = fluidBoxes + 1\n # Add the density of the sampling box to the fluid accumulator\n fluidDensity = fluidDensity + denseBoxes[yDim,zDim]\n # Otherwise the sampling box is in a vapor region\n else:\n # Increment the number of vapor sampling boxes\n vaporBoxes = vaporBoxes + 1\n # Add the density of the sampling box to the vapor accumulator\n vaporDensity = vaporDensity + denseBoxes[yDim,zDim]\n # Set the threshold density equal to density half way between bulk fluid\n # and bulk vapor\n densityCutoff = ((vaporDensity/vaporBoxes)+(fluidDensity/fluidBoxes))/2\n\n \n # Set all density sampling boxes with below the cutoff density atoms to zero\n denseBoxes[denseBoxes < densityCutoff] = 0\n\n # Obtains one dimensional filtered distributions about each axis\n vertDistribution = numpy.sum(denseBoxes, axis=0)\n horizDistribution = numpy.sum(denseBoxes, axis=1)\n\n # Creates a list to store all outermost points that will be considered\n points = list()\n # For every Z value in the two dimensional system\n for z in range(zs):\n # Determine the Y box indices of all nonzero boxes at that Z value\n nonzeros = numpy.nonzero(denseBoxes[:,z])[0]\n # If there is at least one nonzero density sampling box at that Z value\n if (numpy.size(nonzeros) > 0):\n # Determines the maximum and minimum Y box index at that Z value\n yMin = numpy.min(nonzeros)\n yMax = numpy.max(nonzeros)\n # If there are no nonzero box values at that Z value\n else:\n # Sets the maximum and minimum box indices to infinite values\n yMax = float('-inf')\n yMin = float('inf')\n # If the current Z index contains at least one box to be considered\n if ((yMax != float('-inf')) and (yMin != float('inf'))):\n # Converts the current Z index to the position of the box center\n Z = float((z*sigma*density_length) + (0.5*sigma*density_length) + minWallZ)\n # Converts the outermost Y indices at that Z value back to position\n yl = float((yMin*sigma*density_length) + (0.5*sigma*density_length) + minWallY)\n yu = float((yMax*sigma*density_length) + 
(0.5*sigma*density_length) + minWallY)\n # Adds the points to a list of all outside fluid points\n points.append([yl, Z])\n points.append([yu, Z])\n \n # Lists to store the four quadrants of final points\n upper_right = list()\n upper_left = list()\n lower_right = list()\n lower_left = list()\n # Lists for all circular regression points\n circle_list = list()\n circlePoints_advancing = list()\n circlePoints_receding = list()\n \n # Adjusts dsigma to obtain an integer number of boxes of height to analyze\n dsigmaBoxes = int((dsigma*(maxWallZ - minWallZ - (2*thickness)))/(density_length*sigma))\n # Determines lower and upper cutoff Z values based on the number of boxes\n lowerCutoff = (minWallZ + thickness + (sigma*dsigmaBoxes*density_length) + (2*sigma))\n upperCutoff = (maxWallZ - thickness - (sigma*dsigmaBoxes*density_length) - (2*sigma))\n # For all boundary sampling box positions with sufficient fluid density\n for point in points:\n # Note the first two sigmas of fluid height is not considered due\n # to unpredictable behavior where the fluid contacts the surface\n # If the point is above the upper cutoff Z value and greater than the\n # center Y value\n if ((float(point[1]) > upperCutoff) and (float(point[1]) < (maxWallZ - thickness - (2*sigma))) and (float(point[0]) > targetCenter)):\n # Add the point to the upper right list\n upper_right.append(point)\n # If the point is above the upper cutoff Z value and less than the\n # center Y value\n elif ((float(point[1]) > upperCutoff) and (float(point[1]) < (maxWallZ - thickness - (2*sigma))) and (float(point[0]) < targetCenter)):\n # Add the point to the upper left list\n upper_left.append(point)\n # If the point is below the lower cutoff Z value and greater than the\n # center Y value\n elif ((float(point[1]) < lowerCutoff) and (float(point[1]) > (minWallZ + thickness + (2*sigma))) and (float(point[0]) > targetCenter)):\n # Add the point to the lower right list\n lower_right.append(point)\n # If the point is below the lower cutoff Z value and less than the\n # center Y value\n elif ((float(point[1]) < lowerCutoff) and (float(point[1]) > (minWallZ + thickness + (2*sigma))) and (float(point[0]) < targetCenter)):\n # Add the point to the lower left list\n lower_left.append(point)\n\n # Center of circular regression height range\n circleRegressionCenter = (minWallZ + ((maxWallZ - minWallZ)/2))\n # Upper and lower height range cutoffs for circular regression analysis\n circleLowerCutoff = (circleRegressionCenter - (0.5*dsigmaCircle*((maxWallZ - minWallZ) - (2*thickness))))\n circleUpperCutoff = (circleRegressionCenter + (0.5*dsigmaCircle*((maxWallZ - minWallZ) - (2*thickness))))\n # For all boundary sampling box positions with sufficient fluid density\n for point in points:\n # Note the first two sigmas of fluid height is not considered due\n # to unpredictable behavior where the fluid contacts the surface\n # If the point is within the circular regression height range and not\n # within two sigmas of either wall and is an advancing point\n if ((float(point[1]) < circleUpperCutoff) and (float(point[1]) > circleLowerCutoff) and (float(point[1]) < (maxWallZ - thickness - (2*sigma))) and (float(point[1]) > (minWallZ + thickness + (2*sigma))) and (float(point[0]) > targetCenter)):\n # Add the point to the list of advancing points\n circlePoints_advancing.append(point)\n # If the point is within the circular regression height range and not\n # within two sigmas of either wall and is a receding point\n if ((float(point[1]) < circleUpperCutoff) and 
(float(point[1]) > circleLowerCutoff) and (float(point[1]) < (maxWallZ - thickness - (2*sigma))) and (float(point[1]) > (minWallZ + thickness + (2*sigma))) and (float(point[0]) < targetCenter)):\n # Add the point to the list of advancing points\n circlePoints_receding.append(point)\n \n # Returns four lists of points that will each be fit with a linear\n # regression in order to determine contact angles for the system\n # in addition to distributions of fluid density\n return circlePoints_advancing, circlePoints_receding, upper_right, lower_right, upper_left, lower_left, densityCutoff, dsigmaBoxes, collapsedBoxesInitial, denseBoxes, horizDistributionInitial, horizDistribution, vertDistributionInitial, vertDistribution", "title": "" }, { "docid": "3dd2aeb94965f867e1e83438153a662a", "score": "0.52149516", "text": "def termCond():\n nbrNeigbTotal = 0\n vtx_array = []\n \n vtx_array = CommonKnowledge.adjMtxGraph.get_Vertices()\n \n for vtx in vtx_array:\n nbrNeigbTotal += CommonKnowledge.adjMtxGraph.get_nbrOfNeigb(vtx)\n \n return int(ceil(float(nbrNeigbTotal) / float(CommonKnowledge.adjMtxGraph.get_nbrOfVertices())))", "title": "" }, { "docid": "420e2c8512c5aada77a8d84ac270a2b7", "score": "0.52074116", "text": "def kde_count(data, grid, **kwargs):\n r = kwargs.get(\"radius\", np.ptp(data) / 10)\n\n # Get the number of data points within the radius r of each grid point\n iter = (np.sum(np.linalg.norm(data - g, axis=1) < r) for g in grid)\n count = np.fromiter(iter, float, count=data.shape[0])\n\n # Get fraction of data within radius\n density = count / data.shape[0]\n\n return density", "title": "" }, { "docid": "306ee27a87c425f69e5fc73430049076", "score": "0.52019835", "text": "def electronic_potential(xs):\n with tf.name_scope('Vee'):\n if len(xs) > 1:\n v = []\n for (i, ri) in enumerate(xs):\n v.extend([\n 1.0 / smooth_norm(ri - rj) for rj in xs[i + 1:]\n ])\n return tf.add_n(v)\n else:\n return 0.0", "title": "" }, { "docid": "de6972e4b79c695aea6b0688ff035c66", "score": "0.51937556", "text": "def compute_arm_predictive_density(self, t):", "title": "" }, { "docid": "4635f8d6ad0e55985482b2bd35f0984e", "score": "0.51934373", "text": "def check_for_rings(self,molecule):\r\n# for atom in self.group.atoms:\r\n# if atom.symbol == 'Q':\r\n# self.found += 1\r\n \r\n \r\n try:\r\n self.expected = EV.expected_ringnumber[self.amino_acid[1]]\r\n except KeyError:\r\n self.expected = 0 \r\n if self.found >= self.expected[0]:\r\n self.confirmed = True\r\n \r\n return None", "title": "" }, { "docid": "78771b229ac0ec79274431cd58c2babf", "score": "0.5192776", "text": "def test_population_density(self):\n expected = 0.1\n actual = self.test_country_1.population_density()\n self.assertEqual(expected, actual)", "title": "" }, { "docid": "d51b0b98c153562c1f8f81259e4a329f", "score": "0.5188492", "text": "def density(self):\n tot_nonsparse = sum([ser.sp_index.npoints\n for _, ser in self.iteritems()])\n tot = len(self.index) * len(self.columns)\n return tot_nonsparse / float(tot)", "title": "" }, { "docid": "943ff86e4fc1b81de7ac8c14d517ea23", "score": "0.5179352", "text": "def test_occupancy_calc3(self):\n results = np.zeros(100)\n upper = np.zeros(100)\n lower = np.zeros(100)\n for i in range(100):\n a = np.random.multinomial(10,self.fragment_dist.nfr_fit.get())\n b = np.random.multinomial(30,self.fragment_dist.nuc_fit.get())\n results[i],lower[i],upper[i] = calculateOccupancy(a+b,np.array([1,1,1]),self.params)\n print np.mean(results)\n self.assertTrue(abs(np.mean(results)-0.75)<0.1)\n self.assertTrue(sum(upper 
< 0.75) < 85)\n self.assertTrue(sum(lower > 0.75) < 85)", "title": "" }, { "docid": "f35de0b5d09156fe05920005c5872ca4", "score": "0.5176777", "text": "def _sparseness(x):\n sqrt_n = np.sqrt(len(x))\n return (sqrt_n - np.linalg.norm(x, 1) / norm(x)) / (sqrt_n - 1)", "title": "" }, { "docid": "f9f41b8a292d2c55a0e3a2c18712e29b", "score": "0.51672924", "text": "def virial_mass(r, mean_dens, delta_halo):\n return 4 * np.pi * r ** 3 * mean_dens * delta_halo / 3", "title": "" }, { "docid": "45e985f189f8d563662c8b31f8e66cb9", "score": "0.516664", "text": "def _calculate_nbf(cls, atom_map, center_data) -> int:\n\n center_count = {}\n for k, center in center_data.items():\n center_count[k] = sum(x.nfunctions() for x in center.electron_shells)\n\n ret = 0\n for center in atom_map:\n ret += center_count[center]\n\n return ret", "title": "" }, { "docid": "67033aebfd1f1554223e63e607c9f3b4", "score": "0.5149769", "text": "def get_material_density_terms(self, p, j):\n if j in self._params.component_list:\n return self.dens_mol_phase[p] * self.mole_frac_phase[p, j]\n else:\n return 0", "title": "" }, { "docid": "d27dd53bc0ad25cd67c9dd06cda33098", "score": "0.5143153", "text": "def test_correctness_of_pressure(self, number_of_iterations):\n\n\t\tprint \"Testing pressure implementation with %d iterations\" %(number_of_iterations)\n\t\tforce=0\n\t\tpressure=np.zeros(number_of_iterations)\n\t\tpressure_expected=((self.N)/float(self.L**3))*self.k*self.T\n\t\tfor l in range(1,number_of_iterations+1):\n\t\t\tself.draw_particles()\n\t\t\tprint \"Iteration number:\", l\n\t\t\tfor k in range(self.timesteps):\n\t\t\t\tself.x=self.x+self.v*self.dt\n\t\t\t\tfor i in range(self.x.shape[1]):\n\t\t\t\t\tparticles_passed_right = self.x[:,i]>= self.L\n\t\t\t\t\tif i == 2:\n\t\t\t\t\t\tif self.debug:\n\t\t\t\t\t\t\tprint np.sum(self.v[particles_passed_right, i])\n\t\t\t\t\t\tforce+=2.0*self.m_molecule*(np.sum(self.v[particles_passed_right, i]))/float(self.dt)\n\t\t\t\t\tself.x[particles_passed_right,i]=self.L\n\t\t\t\t\tself.v[particles_passed_right,i]=-self.v[particles_passed_right,i]\n\t\t\t\t\tparticles_passed_left=self.x[:,i] <= 0\n\t\t\t\t\tself.x[particles_passed_left,i]=0\n\t\t\t\t\tself.v[particles_passed_left,i]=-self.v[particles_passed_left, i]\n\t\t\t\tif k % 100 == 0 and k!=0:\n\t\t\t\t\tprint \"Progres of pressure calculations: \" + str(100.0*k/self.timesteps)+'%'\n\t\t\tpressure_iteration=force/(float(self.L**2)*(float(self.timesteps)))\n\t\t\tpressure[l-1]=pressure_iteration\n\t\t\tpressure_expected=((self.N)/float(self.L**3))*self.k*self.T\n\t\t\tforce=0\n\t\tif self.debug:\n\t\t\tprint pressure\n\t\tmean_pressure=np.mean(pressure)\n\t\tpercentage_error=100*abs((mean_pressure-pressure_expected)/float(pressure_expected))\n\t\tprint \"Mean pressure with %2d iterations is %.5f [Pa]\" %(number_of_iterations, mean_pressure)\n\t\tprint \"Analytic pressure should be: \"+ str(pressure_expected)+\" [Pa]\"\n\t\tprint \"Percentage error [%]:\", np.mean(percentage_error)", "title": "" }, { "docid": "f11a81b40b9526b9f522790daaaba1c7", "score": "0.51403797", "text": "def test_calculate_ssd(freq_obs):\n # Setup\n correct_ssd = [-1, 12.199282041547999]\n freq_theo = np.array([0.30103, 0.17609126, 0.12493874,\n 0.09691001, 0.07918125, 0.06694679,\n 0.05799195, 0.05115252, 0.0457574])\n\n # Exercise\n current_ssd = ben.calculate_ssd(freq_obs, freq_theo)\n\n # Verify\n assert_almost_equal(correct_ssd[int(sum(freq_obs))], current_ssd, 10)\n\n # Cleanup - None", "title": "" }, { "docid": "e5115778a0ed3c55ee30a42a68824a7e", 
"score": "0.5137042", "text": "def test_probability_density():\n global X\n global random_state\n\n gmm = GMM(n_components=2, random_state=random_state)\n gmm.from_samples(X)\n\n x = np.linspace(-100, 100, 201)\n X_grid = np.vstack(map(np.ravel, np.meshgrid(x, x))).T\n p = gmm.to_probability_density(X_grid)\n approx_int = np.sum(p) * ((x[-1] - x[0]) / 201) ** 2\n assert_less(np.abs(1.0 - approx_int), 0.01)", "title": "" }, { "docid": "77ae473e02fd2151419c5819f20fc785", "score": "0.5134909", "text": "def test_dist_one_comp_one_full(self):\n result = dist_one_comp_one_full(self.td.RHDX[1],\n self.td.U[2],\n self.td.mask[1],\n self.td.Q,\n self.td.P)\n correct = np.sqrt(5/3. * 50)\n self.assertAlmostEqual(correct,result,places=6)", "title": "" }, { "docid": "3786928d324ff021e8c1143d257fee29", "score": "0.5129708", "text": "def test_parts_coverage_d1(nsymbols, size):\n bdm = BDM(ndim=1, nsymbols=nsymbols)\n expected = sum(nsymbols**i for i in range(1, size+1))\n total = 0\n for dct in bdm._ctm.values():\n for key, _ in dct.items():\n n = len(set(key))\n mult = factorial(nsymbols) / factorial(nsymbols - n)\n total += mult\n assert total / expected >= .25", "title": "" }, { "docid": "e830eeb39d5f6dbc26965a4df16b39f3", "score": "0.51279104", "text": "def test_solve_vapor_density(self, model):\n te = model.te\n data = read_data(self.pdata, self.mw)\n for i, T in enumerate(data[\"T\"]):\n if (data[\"phase\"][i] == \"vapor\" or\n data[\"phase\"][i] == \"supercritical\"):\n rho = value(te.rho_vap(\n p=data[\"P\"][i]*pyunits.Pa, T=T*pyunits.K, x=1))\n assert rho == pytest.approx(data[\"rho\"][i], rel=1e-2)", "title": "" }, { "docid": "d9d31a86e45461058141f3d6b0fc5920", "score": "0.51218504", "text": "def calculate_knudsen_number(particle_diameter, particle_critical_diameter):\n\n return particle_critical_diameter / particle_diameter", "title": "" }, { "docid": "7d0e3415e2df6e0b8b4f21b86c549329", "score": "0.51190245", "text": "def _compute_det_variance(self):", "title": "" }, { "docid": "1460e23f6ed1481b424baa7b7f6a7f89", "score": "0.51113576", "text": "def num_measurements(self):\n return np.sqrt(self.d_ij.size)", "title": "" }, { "docid": "24c8b49bf8f20ff3ac5aaa8562655ec6", "score": "0.5109928", "text": "def testprincomponents(self):\n principal_components = numpy.linalg.eigvalsh(self.data.polarizabilities[0])\n for c in range(3):\n assert abs(principal_components[c]-self.principal_components[c]) < \\\n self.principal_components_delta", "title": "" }, { "docid": "1785e6b09c7f2f907c5a50449b756527", "score": "0.51044", "text": "def test_get_free_energy(self):\n Tlist = np.array([400, 600, 800, 1000, 1200, 1400, 1600, 1800, 2000])\n for T in Tlist:\n g_exp = self.nasa.get_enthalpy(T) - T * self.nasa.get_entropy(T)\n g_act = self.nasa.get_free_energy(T)\n self.assertAlmostEqual(g_exp / g_act, 1.0, 4, '{0} != {1}'.format(g_exp, g_act))", "title": "" }, { "docid": "e164999cb651e6eafb1592378e2a7ec1", "score": "0.5103606", "text": "def arc_consistency_check(self, var):\n # Problem 3.1d\n # BEGIN_YOUR_CODE (around 17 lines of code expected)\n new_domains = copy.deepcopy(self.domains)\n q = []\n q.append(var)\n while q != []:\n for i in self.csp.binaryPotentials[q[0]]:\n yy = [0] * len(new_domains[i])\n for j in range(len(new_domains[i])):\n y = 0\n for k in range(len(new_domains[q[0]])):\n if self.csp.binaryPotentials[q[0]][i][new_domains[q[0]][k]][new_domains[i][j]] != 0:\n y = 1\n if y != 0:\n yy[j] = 1\n else:\n if not (i in q):\n q.append(i)\n new_domains[i] = [v for yy, v in zip(yy, new_domains[i]) if 
yy]\n if new_domains[i] == []:\n return 0\n q.pop(0)\n return new_domains\n # END_YOUR_CODE", "title": "" }, { "docid": "7479b42b6ba74a94c18674b36a350568", "score": "0.5102741", "text": "def electron(photon_energies, _: float):\n if hasattr(photon_energies, \"__len__\"):\n return np.array([0.0 for _ in photon_energies])\n return 0.0", "title": "" }, { "docid": "8d0bd3e40e3d5b0fad65503db646165b", "score": "0.5101606", "text": "def calculate_distance_electron(electron_density, tau_event):\n # add full_relativity here\n return tau_event / (electron_density * SIGMA_THOMSON)", "title": "" }, { "docid": "75d11be4499f083c0203eb94cab4836b", "score": "0.51013374", "text": "def get_distribution_sparsity(self):\n d=self.get_distribution()\n return 1-np.count_nonzero(d)/float(d.size)", "title": "" }, { "docid": "9dc4d8bea5ce178ccd88a1a932b93609", "score": "0.50967157", "text": "def calculateDustDensity(data):\n rhoDGrid = data[\"r1\"] * data[\"rho\"]\n return rhoDGrid", "title": "" }, { "docid": "c99b6e3c98de5bdb9a938d6c53c84bfd", "score": "0.5095931", "text": "def _expected_counts(self, f):\n p = np.exp(self.zetas - f)\n row_sums = np.sum(p, axis=1)\n p_norm = p / row_sums[:, np.newaxis]\n row_sums = np.sum(self.counts, axis=1)\n expected_counts = p_norm * row_sums[:, np.newaxis]\n\n return expected_counts", "title": "" }, { "docid": "d0e4debcacda7cceff91779c0c3746a2", "score": "0.50904095", "text": "def _fine_map_check(self, max_x, max_y, size):\n self._fine_map_sum_res = self.grid_density_actual(max_x, max_y, size, size)\n return self._fine_map_sum_res > size * size * self.get_average_density()", "title": "" }, { "docid": "023e6f646f2401df9b09feae56d9b4aa", "score": "0.5081896", "text": "def test_density_matrices_entropy(\n self, density_matrix, pure, wires, base, check_state, interface\n ):\n if interface:\n density_matrix = qml.math.asarray(density_matrix, like=interface)\n entropy = qml.math.vn_entropy(density_matrix, wires, base, check_state)\n\n if pure:\n expected_entropy = 0\n else:\n expected_entropy = np.log(2) / np.log(base)\n\n assert qml.math.allclose(entropy, expected_entropy)", "title": "" }, { "docid": "2b94cd59465ed573390417de2f44ccfa", "score": "0.5081749", "text": "def sparsity(D):\n n = len(list(D.keys()))\n return sum([0 if D[key] == 0 else 1 for key in D.keys()]) / n", "title": "" }, { "docid": "d7c1f36a013f2691b4adef0ce9bedb93", "score": "0.50814503", "text": "def test_occupancy_calc4(self):\n results = np.zeros(100)\n upper = np.zeros(100)\n lower = np.zeros(100)\n bias = np.array([3,2,1])\n nfrprob = self.fragment_dist.nfr_fit.get() * bias\n nucprob = self.fragment_dist.nuc_fit.get() * bias\n nucprob = nucprob / np.sum(nucprob)\n nfrprob = nfrprob / np.sum(nfrprob)\n for i in range(100):\n a = np.random.multinomial(10,nfrprob)\n b = np.random.multinomial(30,nucprob)\n results[i],lower[i],upper[i] = calculateOccupancy(a+b,bias,self.params)\n print np.mean(results)\n self.assertTrue(abs(np.mean(results)-0.75)<0.1)\n self.assertTrue(sum(upper < 0.75) < 85)\n self.assertTrue(sum(lower > 0.75) < 85)", "title": "" }, { "docid": "1e0b9352c224a6df63f0469a7e09e507", "score": "0.508029", "text": "def test_Cacciato09Cens4():\n model = Cacciato09Cens(threshold=9.5)\n ncen_exp = model.mean_occupation(prim_haloprop=np.logspace(9, 12, 100))\n assert np.all(np.diff(ncen_exp) >= 0)", "title": "" }, { "docid": "647ab2740f7e79a26687d759da40a9df", "score": "0.50786334", "text": "def test_calculate_case_density():\n cases = _series_with_date_index([0, 0, 20, 60, 120])\n pop = 100\n every_ten = 
10\n smooth = 2\n\n density = top_level_metrics.calculate_case_density(\n cases, pop, smooth=smooth, normalize_by=every_ten\n )\n\n pd.testing.assert_series_equal(\n density, _series_with_date_index([np.nan, 0.0, 1.0, 3.0, 5.0], dtype=\"float\"),\n )", "title": "" }, { "docid": "abc3688e0a10b0b00122ad0ce0d83657", "score": "0.5077319", "text": "def constraints_met(center):\n return (is_truthful_equilibrium(center) and\n not any(is_fixed_report_equilibrium(center, i) for i in range(center.world.n)))", "title": "" }, { "docid": "95b0f7ba81801f83226004522b50d9b6", "score": "0.5067311", "text": "def test_purity_non_density_matrix():\n rho = np.array([[1, 2], [3, 4]])\n\n with np.testing.assert_raises(ValueError):\n purity(rho)", "title": "" }, { "docid": "f2933781e402cb98386b2d769d35a393", "score": "0.50524586", "text": "def compute_energy_spectrum(wave_spectral_density, gravity, density):\n return wave_spectral_density * gravity * density", "title": "" }, { "docid": "743ba843f258b0fd2b81aac421f7e23f", "score": "0.5048303", "text": "def test_multiple(self):\n for arr in [self.biarr, self.triarr]:\n with self.subTest(arr=arr):\n out = density(arr, vmin=0)\n self.assertTrue(isinstance(out, matplotlib.axes.Axes))\n plt.close(\"all\")", "title": "" }, { "docid": "5174d1b46882d8352923bea207c72b27", "score": "0.50364697", "text": "def evaluate_density(self, X):\n return self.kde.score_samples(X)", "title": "" } ]
f66314df9dbb5323c0a87f20ba09e248
Test add as num-array-array for basic function with array limit - Array code b.
[ { "docid": "ca7e711de6ccf497bd1fac675d6d8c8a", "score": "0.0", "text": "def test_add_basic_num_array_array_d3(self):\n\t\tfor testvalx, testdatay in self.groupeddatay:\n\t\t\twith self.subTest(msg='Failed with parameter', testval = (testvalx, testdatay)):\n\n\t\t\t\tdata1 = array.array('b', testdatay)\n\t\t\t\tdataout = array.array('b', [0]*len(data1))\n\n\t\t\t\tlimited = len(data1) // 2\n\n\t\t\t\t# Calculate the expected result.\n\t\t\t\tpydataout = [operator.add(testvalx, y) for y in testdatay]\n\t\t\t\texpected = pydataout[0:limited] + list(dataout)[limited:]\n\n\t\t\t\tarrayfunc.add(testvalx, data1, dataout, maxlen=limited)\n\n\t\t\t\t# The behavour of assertEqual is modified by addTypeEqualityFunc.\n\t\t\t\tself.assertEqual(list(dataout), expected)", "title": "" } ]
[ { "docid": "5d9a0aca89a47d67865d0746fcaa1754", "score": "0.773321", "text": "def test_add_num_array_array_b4(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.add(self.MinLimit, self.zero1array, self.dataout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(OverflowError):\n\t\t\tarrayfunc.add(self.MinLimit, self.minus1array, self.dataout)", "title": "" }, { "docid": "5d9a0aca89a47d67865d0746fcaa1754", "score": "0.773321", "text": "def test_add_num_array_array_b4(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.add(self.MinLimit, self.zero1array, self.dataout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(OverflowError):\n\t\t\tarrayfunc.add(self.MinLimit, self.minus1array, self.dataout)", "title": "" }, { "docid": "5d9a0aca89a47d67865d0746fcaa1754", "score": "0.773321", "text": "def test_add_num_array_array_b4(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.add(self.MinLimit, self.zero1array, self.dataout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(OverflowError):\n\t\t\tarrayfunc.add(self.MinLimit, self.minus1array, self.dataout)", "title": "" }, { "docid": "5d9a0aca89a47d67865d0746fcaa1754", "score": "0.773321", "text": "def test_add_num_array_array_b4(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.add(self.MinLimit, self.zero1array, self.dataout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(OverflowError):\n\t\t\tarrayfunc.add(self.MinLimit, self.minus1array, self.dataout)", "title": "" }, { "docid": "5d9a0aca89a47d67865d0746fcaa1754", "score": "0.77303946", "text": "def test_add_num_array_array_b4(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.add(self.MinLimit, self.zero1array, self.dataout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(OverflowError):\n\t\t\tarrayfunc.add(self.MinLimit, self.minus1array, self.dataout)", "title": "" }, { "docid": "60304f46e02bbb40092045e6a8eee67c", "score": "0.76917565", "text": "def test_add_array_num_array_a2(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.add(self.inparray1amax, 0, self.dataout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(OverflowError):\n\t\t\tarrayfunc.add(self.inparray1bmax, 1, self.dataout)", "title": "" }, { "docid": "60304f46e02bbb40092045e6a8eee67c", "score": "0.7691379", "text": "def test_add_array_num_array_a2(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.add(self.inparray1amax, 0, self.dataout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(OverflowError):\n\t\t\tarrayfunc.add(self.inparray1bmax, 1, self.dataout)", "title": "" }, { "docid": "60304f46e02bbb40092045e6a8eee67c", "score": "0.7691379", "text": "def test_add_array_num_array_a2(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.add(self.inparray1amax, 0, self.dataout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(OverflowError):\n\t\t\tarrayfunc.add(self.inparray1bmax, 1, self.dataout)", "title": "" }, { "docid": "60304f46e02bbb40092045e6a8eee67c", "score": "0.7691379", "text": "def test_add_array_num_array_a2(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.add(self.inparray1amax, 0, self.dataout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(OverflowError):\n\t\t\tarrayfunc.add(self.inparray1bmax, 1, self.dataout)", "title": "" }, { "docid": "60304f46e02bbb40092045e6a8eee67c", "score": "0.7691379", "text": "def test_add_array_num_array_a2(self):\n\t\t# This version is expected to 
pass.\n\t\tarrayfunc.add(self.inparray1amax, 0, self.dataout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(OverflowError):\n\t\t\tarrayfunc.add(self.inparray1bmax, 1, self.dataout)", "title": "" }, { "docid": "60304f46e02bbb40092045e6a8eee67c", "score": "0.7691379", "text": "def test_add_array_num_array_a2(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.add(self.inparray1amax, 0, self.dataout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(OverflowError):\n\t\t\tarrayfunc.add(self.inparray1bmax, 1, self.dataout)", "title": "" }, { "docid": "60304f46e02bbb40092045e6a8eee67c", "score": "0.7691379", "text": "def test_add_array_num_array_a2(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.add(self.inparray1amax, 0, self.dataout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(OverflowError):\n\t\t\tarrayfunc.add(self.inparray1bmax, 1, self.dataout)", "title": "" }, { "docid": "60304f46e02bbb40092045e6a8eee67c", "score": "0.7691379", "text": "def test_add_array_num_array_a2(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.add(self.inparray1amax, 0, self.dataout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(OverflowError):\n\t\t\tarrayfunc.add(self.inparray1bmax, 1, self.dataout)", "title": "" }, { "docid": "60304f46e02bbb40092045e6a8eee67c", "score": "0.7691379", "text": "def test_add_array_num_array_a2(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.add(self.inparray1amax, 0, self.dataout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(OverflowError):\n\t\t\tarrayfunc.add(self.inparray1bmax, 1, self.dataout)", "title": "" }, { "docid": "60304f46e02bbb40092045e6a8eee67c", "score": "0.7691379", "text": "def test_add_array_num_array_a2(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.add(self.inparray1amax, 0, self.dataout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(OverflowError):\n\t\t\tarrayfunc.add(self.inparray1bmax, 1, self.dataout)", "title": "" }, { "docid": "467b02577878b6d708279c31fac7e50b", "score": "0.76807994", "text": "def test_add_array_num_array_b2(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.add(self.inparray1amin, 0, self.dataout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(OverflowError):\n\t\t\tarrayfunc.add(self.inparray1bmin, -1, self.dataout)", "title": "" }, { "docid": "467b02577878b6d708279c31fac7e50b", "score": "0.76806843", "text": "def test_add_array_num_array_b2(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.add(self.inparray1amin, 0, self.dataout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(OverflowError):\n\t\t\tarrayfunc.add(self.inparray1bmin, -1, self.dataout)", "title": "" }, { "docid": "467b02577878b6d708279c31fac7e50b", "score": "0.76806843", "text": "def test_add_array_num_array_b2(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.add(self.inparray1amin, 0, self.dataout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(OverflowError):\n\t\t\tarrayfunc.add(self.inparray1bmin, -1, self.dataout)", "title": "" }, { "docid": "467b02577878b6d708279c31fac7e50b", "score": "0.76806843", "text": "def test_add_array_num_array_b2(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.add(self.inparray1amin, 0, self.dataout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(OverflowError):\n\t\t\tarrayfunc.add(self.inparray1bmin, -1, self.dataout)", "title": "" }, { "docid": 
"467b02577878b6d708279c31fac7e50b", "score": "0.76806843", "text": "def test_add_array_num_array_b2(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.add(self.inparray1amin, 0, self.dataout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(OverflowError):\n\t\t\tarrayfunc.add(self.inparray1bmin, -1, self.dataout)", "title": "" }, { "docid": "d34adaab72bc19d92b73bf0753b1dc90", "score": "0.7628076", "text": "def test_add_num_array_array_b4(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.add(self.MinLimit, self.zero1array, self.dataout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(ArithmeticError):\n\t\t\tarrayfunc.add(self.MinLimit, self.minus1array, self.dataout)", "title": "" }, { "docid": "d34adaab72bc19d92b73bf0753b1dc90", "score": "0.76268077", "text": "def test_add_num_array_array_b4(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.add(self.MinLimit, self.zero1array, self.dataout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(ArithmeticError):\n\t\t\tarrayfunc.add(self.MinLimit, self.minus1array, self.dataout)", "title": "" }, { "docid": "ee3cd3e508a92783adcedf64e28b4cf2", "score": "0.76062405", "text": "def test_add_array_num_array_a2(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.add(self.inparray1amax, 0.0, self.dataout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(ArithmeticError):\n\t\t\tarrayfunc.add(self.inparray1bmax, 1.0e300, self.dataout)", "title": "" }, { "docid": "d21c34fec8a4db4228b888f4d3ecb221", "score": "0.7601205", "text": "def test_add_array_num_array_a2(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.add(self.inparray1amax, 0.0, self.dataout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(ArithmeticError):\n\t\t\tarrayfunc.add(self.inparray1bmax, 1.0e37, self.dataout)", "title": "" }, { "docid": "4d7b1b3b508d6995ca9fe1c3ba5fd6e5", "score": "0.75946796", "text": "def test_add_array_num_array_b2(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.add(self.inparray1amin, 0.0, self.dataout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(ArithmeticError):\n\t\t\tarrayfunc.add(self.inparray1bmin, -1.0e300, self.dataout)", "title": "" }, { "docid": "9a38afcf66d05442ab42b46ed9697e70", "score": "0.7588149", "text": "def test_add_array_num_array_b2(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.add(self.inparray1amin, 0.0, self.dataout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(ArithmeticError):\n\t\t\tarrayfunc.add(self.inparray1bmin, -1.0e37, self.dataout)", "title": "" }, { "docid": "3954b05481d21485ca8939c1edd27f85", "score": "0.754814", "text": "def test_add_num_array_array_a4(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.add(self.MaxLimit, self.zero1array, self.dataout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(OverflowError):\n\t\t\tarrayfunc.add(self.MaxLimit, self.plus1array, self.dataout)", "title": "" }, { "docid": "3954b05481d21485ca8939c1edd27f85", "score": "0.754814", "text": "def test_add_num_array_array_a4(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.add(self.MaxLimit, self.zero1array, self.dataout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(OverflowError):\n\t\t\tarrayfunc.add(self.MaxLimit, self.plus1array, self.dataout)", "title": "" }, { "docid": "3954b05481d21485ca8939c1edd27f85", "score": "0.754814", "text": "def test_add_num_array_array_a4(self):\n\t\t# This version is 
expected to pass.\n\t\tarrayfunc.add(self.MaxLimit, self.zero1array, self.dataout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(OverflowError):\n\t\t\tarrayfunc.add(self.MaxLimit, self.plus1array, self.dataout)", "title": "" }, { "docid": "3954b05481d21485ca8939c1edd27f85", "score": "0.754814", "text": "def test_add_num_array_array_a4(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.add(self.MaxLimit, self.zero1array, self.dataout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(OverflowError):\n\t\t\tarrayfunc.add(self.MaxLimit, self.plus1array, self.dataout)", "title": "" }, { "docid": "3954b05481d21485ca8939c1edd27f85", "score": "0.754814", "text": "def test_add_num_array_array_a4(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.add(self.MaxLimit, self.zero1array, self.dataout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(OverflowError):\n\t\t\tarrayfunc.add(self.MaxLimit, self.plus1array, self.dataout)", "title": "" }, { "docid": "3954b05481d21485ca8939c1edd27f85", "score": "0.754814", "text": "def test_add_num_array_array_a4(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.add(self.MaxLimit, self.zero1array, self.dataout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(OverflowError):\n\t\t\tarrayfunc.add(self.MaxLimit, self.plus1array, self.dataout)", "title": "" }, { "docid": "3954b05481d21485ca8939c1edd27f85", "score": "0.754814", "text": "def test_add_num_array_array_a4(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.add(self.MaxLimit, self.zero1array, self.dataout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(OverflowError):\n\t\t\tarrayfunc.add(self.MaxLimit, self.plus1array, self.dataout)", "title": "" }, { "docid": "3954b05481d21485ca8939c1edd27f85", "score": "0.754814", "text": "def test_add_num_array_array_a4(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.add(self.MaxLimit, self.zero1array, self.dataout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(OverflowError):\n\t\t\tarrayfunc.add(self.MaxLimit, self.plus1array, self.dataout)", "title": "" }, { "docid": "3954b05481d21485ca8939c1edd27f85", "score": "0.754814", "text": "def test_add_num_array_array_a4(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.add(self.MaxLimit, self.zero1array, self.dataout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(OverflowError):\n\t\t\tarrayfunc.add(self.MaxLimit, self.plus1array, self.dataout)", "title": "" }, { "docid": "3954b05481d21485ca8939c1edd27f85", "score": "0.75476485", "text": "def test_add_num_array_array_a4(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.add(self.MaxLimit, self.zero1array, self.dataout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(OverflowError):\n\t\t\tarrayfunc.add(self.MaxLimit, self.plus1array, self.dataout)", "title": "" }, { "docid": "2f958dde8324957f7af9576a63164446", "score": "0.74695367", "text": "def test_add_array_num_array_b2(self):\n\t\t# Copy the array so we don't change the original data.\n\t\tinpvalue = self.inparray2a[0]\n\n\t\t# This version is expected to pass.\n\t\tarrayfunc.add(self.inparray1a, inpvalue, self.dataout, maxlen=self.testmaxlen)\n\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.add(self.inparray1b, inpvalue, self.dataout, maxlen='a')", "title": "" }, { "docid": "2f958dde8324957f7af9576a63164446", "score": "0.74695367", "text": "def 
test_add_array_num_array_b2(self):\n\t\t# Copy the array so we don't change the original data.\n\t\tinpvalue = self.inparray2a[0]\n\n\t\t# This version is expected to pass.\n\t\tarrayfunc.add(self.inparray1a, inpvalue, self.dataout, maxlen=self.testmaxlen)\n\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.add(self.inparray1b, inpvalue, self.dataout, maxlen='a')", "title": "" }, { "docid": "2f958dde8324957f7af9576a63164446", "score": "0.7468987", "text": "def test_add_array_num_array_b2(self):\n\t\t# Copy the array so we don't change the original data.\n\t\tinpvalue = self.inparray2a[0]\n\n\t\t# This version is expected to pass.\n\t\tarrayfunc.add(self.inparray1a, inpvalue, self.dataout, maxlen=self.testmaxlen)\n\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.add(self.inparray1b, inpvalue, self.dataout, maxlen='a')", "title": "" }, { "docid": "2f958dde8324957f7af9576a63164446", "score": "0.7468987", "text": "def test_add_array_num_array_b2(self):\n\t\t# Copy the array so we don't change the original data.\n\t\tinpvalue = self.inparray2a[0]\n\n\t\t# This version is expected to pass.\n\t\tarrayfunc.add(self.inparray1a, inpvalue, self.dataout, maxlen=self.testmaxlen)\n\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.add(self.inparray1b, inpvalue, self.dataout, maxlen='a')", "title": "" }, { "docid": "2f958dde8324957f7af9576a63164446", "score": "0.7468987", "text": "def test_add_array_num_array_b2(self):\n\t\t# Copy the array so we don't change the original data.\n\t\tinpvalue = self.inparray2a[0]\n\n\t\t# This version is expected to pass.\n\t\tarrayfunc.add(self.inparray1a, inpvalue, self.dataout, maxlen=self.testmaxlen)\n\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.add(self.inparray1b, inpvalue, self.dataout, maxlen='a')", "title": "" }, { "docid": "2f958dde8324957f7af9576a63164446", "score": "0.7468987", "text": "def test_add_array_num_array_b2(self):\n\t\t# Copy the array so we don't change the original data.\n\t\tinpvalue = self.inparray2a[0]\n\n\t\t# This version is expected to pass.\n\t\tarrayfunc.add(self.inparray1a, inpvalue, self.dataout, maxlen=self.testmaxlen)\n\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.add(self.inparray1b, inpvalue, self.dataout, maxlen='a')", "title": "" }, { "docid": "2f958dde8324957f7af9576a63164446", "score": "0.7468987", "text": "def test_add_array_num_array_b2(self):\n\t\t# Copy the array so we don't change the original data.\n\t\tinpvalue = self.inparray2a[0]\n\n\t\t# This version is expected to pass.\n\t\tarrayfunc.add(self.inparray1a, inpvalue, self.dataout, maxlen=self.testmaxlen)\n\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.add(self.inparray1b, inpvalue, self.dataout, maxlen='a')", "title": "" }, { "docid": "2f958dde8324957f7af9576a63164446", "score": "0.7468987", "text": "def test_add_array_num_array_b2(self):\n\t\t# Copy the array so we don't change the original data.\n\t\tinpvalue = self.inparray2a[0]\n\n\t\t# This version is expected to pass.\n\t\tarrayfunc.add(self.inparray1a, inpvalue, self.dataout, maxlen=self.testmaxlen)\n\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.add(self.inparray1b, inpvalue, self.dataout, maxlen='a')", "title": "" }, { "docid": "2f958dde8324957f7af9576a63164446", "score": "0.7468987", "text": "def 
test_add_array_num_array_b2(self):\n\t\t# Copy the array so we don't change the original data.\n\t\tinpvalue = self.inparray2a[0]\n\n\t\t# This version is expected to pass.\n\t\tarrayfunc.add(self.inparray1a, inpvalue, self.dataout, maxlen=self.testmaxlen)\n\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.add(self.inparray1b, inpvalue, self.dataout, maxlen='a')", "title": "" }, { "docid": "2f958dde8324957f7af9576a63164446", "score": "0.7468987", "text": "def test_add_array_num_array_b2(self):\n\t\t# Copy the array so we don't change the original data.\n\t\tinpvalue = self.inparray2a[0]\n\n\t\t# This version is expected to pass.\n\t\tarrayfunc.add(self.inparray1a, inpvalue, self.dataout, maxlen=self.testmaxlen)\n\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.add(self.inparray1b, inpvalue, self.dataout, maxlen='a')", "title": "" }, { "docid": "2f958dde8324957f7af9576a63164446", "score": "0.7468987", "text": "def test_add_array_num_array_b2(self):\n\t\t# Copy the array so we don't change the original data.\n\t\tinpvalue = self.inparray2a[0]\n\n\t\t# This version is expected to pass.\n\t\tarrayfunc.add(self.inparray1a, inpvalue, self.dataout, maxlen=self.testmaxlen)\n\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.add(self.inparray1b, inpvalue, self.dataout, maxlen='a')", "title": "" }, { "docid": "2f958dde8324957f7af9576a63164446", "score": "0.7468987", "text": "def test_add_array_num_array_b2(self):\n\t\t# Copy the array so we don't change the original data.\n\t\tinpvalue = self.inparray2a[0]\n\n\t\t# This version is expected to pass.\n\t\tarrayfunc.add(self.inparray1a, inpvalue, self.dataout, maxlen=self.testmaxlen)\n\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.add(self.inparray1b, inpvalue, self.dataout, maxlen='a')", "title": "" }, { "docid": "0431571ec5aab4db34ae674521c3fcdb", "score": "0.7429379", "text": "def test_add_num_array_array_a4(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.add(self.MaxLimit, self.zero1array, self.dataout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(ArithmeticError):\n\t\t\tarrayfunc.add(self.MaxLimit, self.plus1array, self.dataout)", "title": "" }, { "docid": "0431571ec5aab4db34ae674521c3fcdb", "score": "0.7428248", "text": "def test_add_num_array_array_a4(self):\n\t\t# This version is expected to pass.\n\t\tarrayfunc.add(self.MaxLimit, self.zero1array, self.dataout)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(ArithmeticError):\n\t\t\tarrayfunc.add(self.MaxLimit, self.plus1array, self.dataout)", "title": "" }, { "docid": "1145e601a81e943db2e5ab96cf52e1c2", "score": "0.7391989", "text": "def test_add_array_num_array_b1(self):\n\t\tinpvalue = self.inparray2a[0]\n\n\t\t# This version is expected to pass.\n\t\tarrayfunc.add(self.inparray1a, inpvalue, self.dataout, nosimd=True)\n\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.add(self.inparray1b, inpvalue, self.dataout, nosimd='a')", "title": "" }, { "docid": "1145e601a81e943db2e5ab96cf52e1c2", "score": "0.7391989", "text": "def test_add_array_num_array_b1(self):\n\t\tinpvalue = self.inparray2a[0]\n\n\t\t# This version is expected to pass.\n\t\tarrayfunc.add(self.inparray1a, inpvalue, self.dataout, nosimd=True)\n\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.add(self.inparray1b, inpvalue, 
self.dataout, nosimd='a')", "title": "" }, { "docid": "1145e601a81e943db2e5ab96cf52e1c2", "score": "0.73910296", "text": "def test_add_array_num_array_b1(self):\n\t\tinpvalue = self.inparray2a[0]\n\n\t\t# This version is expected to pass.\n\t\tarrayfunc.add(self.inparray1a, inpvalue, self.dataout, nosimd=True)\n\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.add(self.inparray1b, inpvalue, self.dataout, nosimd='a')", "title": "" }, { "docid": "1145e601a81e943db2e5ab96cf52e1c2", "score": "0.73910296", "text": "def test_add_array_num_array_b1(self):\n\t\tinpvalue = self.inparray2a[0]\n\n\t\t# This version is expected to pass.\n\t\tarrayfunc.add(self.inparray1a, inpvalue, self.dataout, nosimd=True)\n\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.add(self.inparray1b, inpvalue, self.dataout, nosimd='a')", "title": "" }, { "docid": "1145e601a81e943db2e5ab96cf52e1c2", "score": "0.73910296", "text": "def test_add_array_num_array_b1(self):\n\t\tinpvalue = self.inparray2a[0]\n\n\t\t# This version is expected to pass.\n\t\tarrayfunc.add(self.inparray1a, inpvalue, self.dataout, nosimd=True)\n\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.add(self.inparray1b, inpvalue, self.dataout, nosimd='a')", "title": "" }, { "docid": "1145e601a81e943db2e5ab96cf52e1c2", "score": "0.73910296", "text": "def test_add_array_num_array_b1(self):\n\t\tinpvalue = self.inparray2a[0]\n\n\t\t# This version is expected to pass.\n\t\tarrayfunc.add(self.inparray1a, inpvalue, self.dataout, nosimd=True)\n\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.add(self.inparray1b, inpvalue, self.dataout, nosimd='a')", "title": "" }, { "docid": "1145e601a81e943db2e5ab96cf52e1c2", "score": "0.73910296", "text": "def test_add_array_num_array_b1(self):\n\t\tinpvalue = self.inparray2a[0]\n\n\t\t# This version is expected to pass.\n\t\tarrayfunc.add(self.inparray1a, inpvalue, self.dataout, nosimd=True)\n\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.add(self.inparray1b, inpvalue, self.dataout, nosimd='a')", "title": "" }, { "docid": "1145e601a81e943db2e5ab96cf52e1c2", "score": "0.73910296", "text": "def test_add_array_num_array_b1(self):\n\t\tinpvalue = self.inparray2a[0]\n\n\t\t# This version is expected to pass.\n\t\tarrayfunc.add(self.inparray1a, inpvalue, self.dataout, nosimd=True)\n\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.add(self.inparray1b, inpvalue, self.dataout, nosimd='a')", "title": "" }, { "docid": "1145e601a81e943db2e5ab96cf52e1c2", "score": "0.73910296", "text": "def test_add_array_num_array_b1(self):\n\t\tinpvalue = self.inparray2a[0]\n\n\t\t# This version is expected to pass.\n\t\tarrayfunc.add(self.inparray1a, inpvalue, self.dataout, nosimd=True)\n\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.add(self.inparray1b, inpvalue, self.dataout, nosimd='a')", "title": "" }, { "docid": "1145e601a81e943db2e5ab96cf52e1c2", "score": "0.73910296", "text": "def test_add_array_num_array_b1(self):\n\t\tinpvalue = self.inparray2a[0]\n\n\t\t# This version is expected to pass.\n\t\tarrayfunc.add(self.inparray1a, inpvalue, self.dataout, nosimd=True)\n\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.add(self.inparray1b, inpvalue, self.dataout, nosimd='a')", "title": "" }, { "docid": 
"1145e601a81e943db2e5ab96cf52e1c2", "score": "0.73910296", "text": "def test_add_array_num_array_b1(self):\n\t\tinpvalue = self.inparray2a[0]\n\n\t\t# This version is expected to pass.\n\t\tarrayfunc.add(self.inparray1a, inpvalue, self.dataout, nosimd=True)\n\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.add(self.inparray1b, inpvalue, self.dataout, nosimd='a')", "title": "" }, { "docid": "1145e601a81e943db2e5ab96cf52e1c2", "score": "0.73910296", "text": "def test_add_array_num_array_b1(self):\n\t\tinpvalue = self.inparray2a[0]\n\n\t\t# This version is expected to pass.\n\t\tarrayfunc.add(self.inparray1a, inpvalue, self.dataout, nosimd=True)\n\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.add(self.inparray1b, inpvalue, self.dataout, nosimd='a')", "title": "" }, { "docid": "30eb8312847ddbc6df9ef2508a80cf8a", "score": "0.7365009", "text": "def test_add_ninf_array_num_array_b1(self):\n\t\tfor testval in [-25.0,-1.0,0.0,1.0,25.0]:\n\t\t\twith self.subTest(msg='Failed with parameter', testval = testval):\n\n\t\t\t\t# Copy the array so we don't change the original data.\n\t\t\t\tdataok1 = copy.copy(self.dataok1)\n\t\t\t\terrordata = copy.copy(self.errordata)\n\n\t\t\t\t# This version is expected to pass.\n\t\t\t\tarrayfunc.add(dataok1, testval, self.dataout)\n\n\t\t\t\t# This is the actual test.\n\t\t\t\twith self.assertRaises(ArithmeticError):\n\t\t\t\t\tarrayfunc.add(errordata, testval, self.dataout)", "title": "" }, { "docid": "30eb8312847ddbc6df9ef2508a80cf8a", "score": "0.7364065", "text": "def test_add_ninf_array_num_array_b1(self):\n\t\tfor testval in [-25.0,-1.0,0.0,1.0,25.0]:\n\t\t\twith self.subTest(msg='Failed with parameter', testval = testval):\n\n\t\t\t\t# Copy the array so we don't change the original data.\n\t\t\t\tdataok1 = copy.copy(self.dataok1)\n\t\t\t\terrordata = copy.copy(self.errordata)\n\n\t\t\t\t# This version is expected to pass.\n\t\t\t\tarrayfunc.add(dataok1, testval, self.dataout)\n\n\t\t\t\t# This is the actual test.\n\t\t\t\twith self.assertRaises(ArithmeticError):\n\t\t\t\t\tarrayfunc.add(errordata, testval, self.dataout)", "title": "" }, { "docid": "9f5ca001823b6929fbc3c71cb047d1c4", "score": "0.7344737", "text": "def test_add_basic_array_num_array_b4(self):\n\t\tfor testdatax, testvaly in self.groupeddatax:\n\t\t\twith self.subTest(msg='Failed with parameter', testval = (testdatax, testvaly)):\n\n\t\t\t\tdata1 = array.array('f', testdatax)\n\t\t\t\tdataout = array.array('f', [0]*len(data1))\n\n\t\t\t\tlimited = len(data1) // 2\n\n\t\t\t\t# Calculate the expected result.\n\t\t\t\tpydataout = [operator.add(x, testvaly) for x in testdatax]\n\t\t\t\texpected = pydataout[0:limited] + list(dataout)[limited:]\n\n\t\t\t\tarrayfunc.add(data1, testvaly, dataout, matherrors=True, maxlen=limited)\n\n\t\t\t\t# The behavour of assertEqual is modified by addTypeEqualityFunc.\n\t\t\t\tself.assertEqual(list(dataout), expected)", "title": "" }, { "docid": "9f5ca001823b6929fbc3c71cb047d1c4", "score": "0.7344737", "text": "def test_add_basic_array_num_array_b4(self):\n\t\tfor testdatax, testvaly in self.groupeddatax:\n\t\t\twith self.subTest(msg='Failed with parameter', testval = (testdatax, testvaly)):\n\n\t\t\t\tdata1 = array.array('f', testdatax)\n\t\t\t\tdataout = array.array('f', [0]*len(data1))\n\n\t\t\t\tlimited = len(data1) // 2\n\n\t\t\t\t# Calculate the expected result.\n\t\t\t\tpydataout = [operator.add(x, testvaly) for x in testdatax]\n\t\t\t\texpected = pydataout[0:limited] + 
list(dataout)[limited:]\n\n\t\t\t\tarrayfunc.add(data1, testvaly, dataout, matherrors=True, maxlen=limited)\n\n\t\t\t\t# The behavour of assertEqual is modified by addTypeEqualityFunc.\n\t\t\t\tself.assertEqual(list(dataout), expected)", "title": "" }, { "docid": "9f5ca001823b6929fbc3c71cb047d1c4", "score": "0.7344737", "text": "def test_add_basic_array_num_array_b4(self):\n\t\tfor testdatax, testvaly in self.groupeddatax:\n\t\t\twith self.subTest(msg='Failed with parameter', testval = (testdatax, testvaly)):\n\n\t\t\t\tdata1 = array.array('f', testdatax)\n\t\t\t\tdataout = array.array('f', [0]*len(data1))\n\n\t\t\t\tlimited = len(data1) // 2\n\n\t\t\t\t# Calculate the expected result.\n\t\t\t\tpydataout = [operator.add(x, testvaly) for x in testdatax]\n\t\t\t\texpected = pydataout[0:limited] + list(dataout)[limited:]\n\n\t\t\t\tarrayfunc.add(data1, testvaly, dataout, matherrors=True, maxlen=limited)\n\n\t\t\t\t# The behavour of assertEqual is modified by addTypeEqualityFunc.\n\t\t\t\tself.assertEqual(list(dataout), expected)", "title": "" }, { "docid": "9f5ca001823b6929fbc3c71cb047d1c4", "score": "0.7344737", "text": "def test_add_basic_array_num_array_b4(self):\n\t\tfor testdatax, testvaly in self.groupeddatax:\n\t\t\twith self.subTest(msg='Failed with parameter', testval = (testdatax, testvaly)):\n\n\t\t\t\tdata1 = array.array('f', testdatax)\n\t\t\t\tdataout = array.array('f', [0]*len(data1))\n\n\t\t\t\tlimited = len(data1) // 2\n\n\t\t\t\t# Calculate the expected result.\n\t\t\t\tpydataout = [operator.add(x, testvaly) for x in testdatax]\n\t\t\t\texpected = pydataout[0:limited] + list(dataout)[limited:]\n\n\t\t\t\tarrayfunc.add(data1, testvaly, dataout, matherrors=True, maxlen=limited)\n\n\t\t\t\t# The behavour of assertEqual is modified by addTypeEqualityFunc.\n\t\t\t\tself.assertEqual(list(dataout), expected)", "title": "" }, { "docid": "89f682b7aa069696a3a04af0bd14067a", "score": "0.7344037", "text": "def test_add_basic_array_num_array_b4(self):\n\t\tfor testdatax, testvaly in self.groupeddatax:\n\t\t\twith self.subTest(msg='Failed with parameter', testval = (testdatax, testvaly)):\n\n\t\t\t\tdata1 = array.array('q', testdatax)\n\t\t\t\tdataout = array.array('q', [0]*len(data1))\n\n\t\t\t\tlimited = len(data1) // 2\n\n\t\t\t\t# Calculate the expected result.\n\t\t\t\tpydataout = [operator.add(x, testvaly) for x in testdatax]\n\t\t\t\texpected = pydataout[0:limited] + list(dataout)[limited:]\n\n\t\t\t\tarrayfunc.add(data1, testvaly, dataout, matherrors=True, maxlen=limited)\n\n\t\t\t\t# The behavour of assertEqual is modified by addTypeEqualityFunc.\n\t\t\t\tself.assertEqual(list(dataout), expected)", "title": "" }, { "docid": "89f682b7aa069696a3a04af0bd14067a", "score": "0.7344037", "text": "def test_add_basic_array_num_array_b4(self):\n\t\tfor testdatax, testvaly in self.groupeddatax:\n\t\t\twith self.subTest(msg='Failed with parameter', testval = (testdatax, testvaly)):\n\n\t\t\t\tdata1 = array.array('q', testdatax)\n\t\t\t\tdataout = array.array('q', [0]*len(data1))\n\n\t\t\t\tlimited = len(data1) // 2\n\n\t\t\t\t# Calculate the expected result.\n\t\t\t\tpydataout = [operator.add(x, testvaly) for x in testdatax]\n\t\t\t\texpected = pydataout[0:limited] + list(dataout)[limited:]\n\n\t\t\t\tarrayfunc.add(data1, testvaly, dataout, matherrors=True, maxlen=limited)\n\n\t\t\t\t# The behavour of assertEqual is modified by addTypeEqualityFunc.\n\t\t\t\tself.assertEqual(list(dataout), expected)", "title": "" }, { "docid": "89f682b7aa069696a3a04af0bd14067a", "score": "0.7344037", "text": 
"def test_add_basic_array_num_array_b4(self):\n\t\tfor testdatax, testvaly in self.groupeddatax:\n\t\t\twith self.subTest(msg='Failed with parameter', testval = (testdatax, testvaly)):\n\n\t\t\t\tdata1 = array.array('q', testdatax)\n\t\t\t\tdataout = array.array('q', [0]*len(data1))\n\n\t\t\t\tlimited = len(data1) // 2\n\n\t\t\t\t# Calculate the expected result.\n\t\t\t\tpydataout = [operator.add(x, testvaly) for x in testdatax]\n\t\t\t\texpected = pydataout[0:limited] + list(dataout)[limited:]\n\n\t\t\t\tarrayfunc.add(data1, testvaly, dataout, matherrors=True, maxlen=limited)\n\n\t\t\t\t# The behavour of assertEqual is modified by addTypeEqualityFunc.\n\t\t\t\tself.assertEqual(list(dataout), expected)", "title": "" }, { "docid": "89f682b7aa069696a3a04af0bd14067a", "score": "0.7344037", "text": "def test_add_basic_array_num_array_b4(self):\n\t\tfor testdatax, testvaly in self.groupeddatax:\n\t\t\twith self.subTest(msg='Failed with parameter', testval = (testdatax, testvaly)):\n\n\t\t\t\tdata1 = array.array('q', testdatax)\n\t\t\t\tdataout = array.array('q', [0]*len(data1))\n\n\t\t\t\tlimited = len(data1) // 2\n\n\t\t\t\t# Calculate the expected result.\n\t\t\t\tpydataout = [operator.add(x, testvaly) for x in testdatax]\n\t\t\t\texpected = pydataout[0:limited] + list(dataout)[limited:]\n\n\t\t\t\tarrayfunc.add(data1, testvaly, dataout, matherrors=True, maxlen=limited)\n\n\t\t\t\t# The behavour of assertEqual is modified by addTypeEqualityFunc.\n\t\t\t\tself.assertEqual(list(dataout), expected)", "title": "" }, { "docid": "c649580817281f8da18ec1645ca1dff9", "score": "0.73368293", "text": "def test_add_basic_array_num_array_b2(self):\n\t\tfor testval in self.datayparam:\n\t\t\twith self.subTest(msg='Failed with parameter', testval = testval):\n\n\t\t\t\tdata1 = array.array('q', self.datax)\n\t\t\t\tdataout = array.array('q', [0]*len(data1))\n\n\t\t\t\texpected = [x + testval for x in data1] \n\n\t\t\t\tarrayfunc.add(data1, testval, dataout, matherrors=True)\n\n\t\t\t\t# The behavour of assertEqual is modified by addTypeEqualityFunc.\n\t\t\t\tself.assertEqual(list(dataout), expected)", "title": "" }, { "docid": "c649580817281f8da18ec1645ca1dff9", "score": "0.73368293", "text": "def test_add_basic_array_num_array_b2(self):\n\t\tfor testval in self.datayparam:\n\t\t\twith self.subTest(msg='Failed with parameter', testval = testval):\n\n\t\t\t\tdata1 = array.array('q', self.datax)\n\t\t\t\tdataout = array.array('q', [0]*len(data1))\n\n\t\t\t\texpected = [x + testval for x in data1] \n\n\t\t\t\tarrayfunc.add(data1, testval, dataout, matherrors=True)\n\n\t\t\t\t# The behavour of assertEqual is modified by addTypeEqualityFunc.\n\t\t\t\tself.assertEqual(list(dataout), expected)", "title": "" }, { "docid": "4cf688f884a38ba3d0a29f0f33d275cb", "score": "0.7332749", "text": "def test_add_basic_array_num_array_b2(self):\n\t\tfor testval in self.datayparam:\n\t\t\twith self.subTest(msg='Failed with parameter', testval = testval):\n\n\t\t\t\tdata1 = array.array('d', self.datax)\n\t\t\t\tdataout = array.array('d', [0]*len(data1))\n\n\t\t\t\texpected = [x + testval for x in data1] \n\n\t\t\t\tarrayfunc.add(data1, testval, dataout, matherrors=True)\n\n\t\t\t\t# The behavour of assertEqual is modified by addTypeEqualityFunc.\n\t\t\t\tself.assertEqual(list(dataout), expected)", "title": "" }, { "docid": "4cf688f884a38ba3d0a29f0f33d275cb", "score": "0.7332749", "text": "def test_add_basic_array_num_array_b2(self):\n\t\tfor testval in self.datayparam:\n\t\t\twith self.subTest(msg='Failed with parameter', testval = 
testval):\n\n\t\t\t\tdata1 = array.array('d', self.datax)\n\t\t\t\tdataout = array.array('d', [0]*len(data1))\n\n\t\t\t\texpected = [x + testval for x in data1] \n\n\t\t\t\tarrayfunc.add(data1, testval, dataout, matherrors=True)\n\n\t\t\t\t# The behavour of assertEqual is modified by addTypeEqualityFunc.\n\t\t\t\tself.assertEqual(list(dataout), expected)", "title": "" }, { "docid": "a343d0c2c0b36842d89c3c01438d6793", "score": "0.73276275", "text": "def test_add_basic_array_num_array_b2(self):\n\t\tfor testval in self.datayparam:\n\t\t\twith self.subTest(msg='Failed with parameter', testval = testval):\n\n\t\t\t\tdata1 = array.array('Q', self.datax)\n\t\t\t\tdataout = array.array('Q', [0]*len(data1))\n\n\t\t\t\texpected = [x + testval for x in data1] \n\n\t\t\t\tarrayfunc.add(data1, testval, dataout, matherrors=True)\n\n\t\t\t\t# The behavour of assertEqual is modified by addTypeEqualityFunc.\n\t\t\t\tself.assertEqual(list(dataout), expected)", "title": "" }, { "docid": "a343d0c2c0b36842d89c3c01438d6793", "score": "0.73276275", "text": "def test_add_basic_array_num_array_b2(self):\n\t\tfor testval in self.datayparam:\n\t\t\twith self.subTest(msg='Failed with parameter', testval = testval):\n\n\t\t\t\tdata1 = array.array('Q', self.datax)\n\t\t\t\tdataout = array.array('Q', [0]*len(data1))\n\n\t\t\t\texpected = [x + testval for x in data1] \n\n\t\t\t\tarrayfunc.add(data1, testval, dataout, matherrors=True)\n\n\t\t\t\t# The behavour of assertEqual is modified by addTypeEqualityFunc.\n\t\t\t\tself.assertEqual(list(dataout), expected)", "title": "" }, { "docid": "844b77c1cb439f833400133423dd3356", "score": "0.73268056", "text": "def test_add_basic_array_num_array_b4(self):\n\t\tfor testdatax, testvaly in self.groupeddatax:\n\t\t\twith self.subTest(msg='Failed with parameter', testval = (testdatax, testvaly)):\n\n\t\t\t\tdata1 = array.array('L', testdatax)\n\t\t\t\tdataout = array.array('L', [0]*len(data1))\n\n\t\t\t\tlimited = len(data1) // 2\n\n\t\t\t\t# Calculate the expected result.\n\t\t\t\tpydataout = [operator.add(x, testvaly) for x in testdatax]\n\t\t\t\texpected = pydataout[0:limited] + list(dataout)[limited:]\n\n\t\t\t\tarrayfunc.add(data1, testvaly, dataout, matherrors=True, maxlen=limited)\n\n\t\t\t\t# The behavour of assertEqual is modified by addTypeEqualityFunc.\n\t\t\t\tself.assertEqual(list(dataout), expected)", "title": "" }, { "docid": "844b77c1cb439f833400133423dd3356", "score": "0.73268056", "text": "def test_add_basic_array_num_array_b4(self):\n\t\tfor testdatax, testvaly in self.groupeddatax:\n\t\t\twith self.subTest(msg='Failed with parameter', testval = (testdatax, testvaly)):\n\n\t\t\t\tdata1 = array.array('L', testdatax)\n\t\t\t\tdataout = array.array('L', [0]*len(data1))\n\n\t\t\t\tlimited = len(data1) // 2\n\n\t\t\t\t# Calculate the expected result.\n\t\t\t\tpydataout = [operator.add(x, testvaly) for x in testdatax]\n\t\t\t\texpected = pydataout[0:limited] + list(dataout)[limited:]\n\n\t\t\t\tarrayfunc.add(data1, testvaly, dataout, matherrors=True, maxlen=limited)\n\n\t\t\t\t# The behavour of assertEqual is modified by addTypeEqualityFunc.\n\t\t\t\tself.assertEqual(list(dataout), expected)", "title": "" }, { "docid": "844b77c1cb439f833400133423dd3356", "score": "0.73268056", "text": "def test_add_basic_array_num_array_b4(self):\n\t\tfor testdatax, testvaly in self.groupeddatax:\n\t\t\twith self.subTest(msg='Failed with parameter', testval = (testdatax, testvaly)):\n\n\t\t\t\tdata1 = array.array('L', testdatax)\n\t\t\t\tdataout = array.array('L', 
[0]*len(data1))\n\n\t\t\t\tlimited = len(data1) // 2\n\n\t\t\t\t# Calculate the expected result.\n\t\t\t\tpydataout = [operator.add(x, testvaly) for x in testdatax]\n\t\t\t\texpected = pydataout[0:limited] + list(dataout)[limited:]\n\n\t\t\t\tarrayfunc.add(data1, testvaly, dataout, matherrors=True, maxlen=limited)\n\n\t\t\t\t# The behavour of assertEqual is modified by addTypeEqualityFunc.\n\t\t\t\tself.assertEqual(list(dataout), expected)", "title": "" }, { "docid": "844b77c1cb439f833400133423dd3356", "score": "0.73268056", "text": "def test_add_basic_array_num_array_b4(self):\n\t\tfor testdatax, testvaly in self.groupeddatax:\n\t\t\twith self.subTest(msg='Failed with parameter', testval = (testdatax, testvaly)):\n\n\t\t\t\tdata1 = array.array('L', testdatax)\n\t\t\t\tdataout = array.array('L', [0]*len(data1))\n\n\t\t\t\tlimited = len(data1) // 2\n\n\t\t\t\t# Calculate the expected result.\n\t\t\t\tpydataout = [operator.add(x, testvaly) for x in testdatax]\n\t\t\t\texpected = pydataout[0:limited] + list(dataout)[limited:]\n\n\t\t\t\tarrayfunc.add(data1, testvaly, dataout, matherrors=True, maxlen=limited)\n\n\t\t\t\t# The behavour of assertEqual is modified by addTypeEqualityFunc.\n\t\t\t\tself.assertEqual(list(dataout), expected)", "title": "" }, { "docid": "e40bd9751dd14d2c1e353b1c34ec3554", "score": "0.7324465", "text": "def test_add_basic_array_num_array_b4(self):\n\t\tfor testdatax, testvaly in self.groupeddatax:\n\t\t\twith self.subTest(msg='Failed with parameter', testval = (testdatax, testvaly)):\n\n\t\t\t\tdata1 = array.array('i', testdatax)\n\t\t\t\tdataout = array.array('i', [0]*len(data1))\n\n\t\t\t\tlimited = len(data1) // 2\n\n\t\t\t\t# Calculate the expected result.\n\t\t\t\tpydataout = [operator.add(x, testvaly) for x in testdatax]\n\t\t\t\texpected = pydataout[0:limited] + list(dataout)[limited:]\n\n\t\t\t\tarrayfunc.add(data1, testvaly, dataout, matherrors=True, maxlen=limited)\n\n\t\t\t\t# The behavour of assertEqual is modified by addTypeEqualityFunc.\n\t\t\t\tself.assertEqual(list(dataout), expected)", "title": "" }, { "docid": "e40bd9751dd14d2c1e353b1c34ec3554", "score": "0.7324465", "text": "def test_add_basic_array_num_array_b4(self):\n\t\tfor testdatax, testvaly in self.groupeddatax:\n\t\t\twith self.subTest(msg='Failed with parameter', testval = (testdatax, testvaly)):\n\n\t\t\t\tdata1 = array.array('i', testdatax)\n\t\t\t\tdataout = array.array('i', [0]*len(data1))\n\n\t\t\t\tlimited = len(data1) // 2\n\n\t\t\t\t# Calculate the expected result.\n\t\t\t\tpydataout = [operator.add(x, testvaly) for x in testdatax]\n\t\t\t\texpected = pydataout[0:limited] + list(dataout)[limited:]\n\n\t\t\t\tarrayfunc.add(data1, testvaly, dataout, matherrors=True, maxlen=limited)\n\n\t\t\t\t# The behavour of assertEqual is modified by addTypeEqualityFunc.\n\t\t\t\tself.assertEqual(list(dataout), expected)", "title": "" }, { "docid": "e40bd9751dd14d2c1e353b1c34ec3554", "score": "0.7324465", "text": "def test_add_basic_array_num_array_b4(self):\n\t\tfor testdatax, testvaly in self.groupeddatax:\n\t\t\twith self.subTest(msg='Failed with parameter', testval = (testdatax, testvaly)):\n\n\t\t\t\tdata1 = array.array('i', testdatax)\n\t\t\t\tdataout = array.array('i', [0]*len(data1))\n\n\t\t\t\tlimited = len(data1) // 2\n\n\t\t\t\t# Calculate the expected result.\n\t\t\t\tpydataout = [operator.add(x, testvaly) for x in testdatax]\n\t\t\t\texpected = pydataout[0:limited] + list(dataout)[limited:]\n\n\t\t\t\tarrayfunc.add(data1, testvaly, dataout, matherrors=True, maxlen=limited)\n\n\t\t\t\t# 
The behavour of assertEqual is modified by addTypeEqualityFunc.\n\t\t\t\tself.assertEqual(list(dataout), expected)", "title": "" }, { "docid": "e40bd9751dd14d2c1e353b1c34ec3554", "score": "0.7324465", "text": "def test_add_basic_array_num_array_b4(self):\n\t\tfor testdatax, testvaly in self.groupeddatax:\n\t\t\twith self.subTest(msg='Failed with parameter', testval = (testdatax, testvaly)):\n\n\t\t\t\tdata1 = array.array('i', testdatax)\n\t\t\t\tdataout = array.array('i', [0]*len(data1))\n\n\t\t\t\tlimited = len(data1) // 2\n\n\t\t\t\t# Calculate the expected result.\n\t\t\t\tpydataout = [operator.add(x, testvaly) for x in testdatax]\n\t\t\t\texpected = pydataout[0:limited] + list(dataout)[limited:]\n\n\t\t\t\tarrayfunc.add(data1, testvaly, dataout, matherrors=True, maxlen=limited)\n\n\t\t\t\t# The behavour of assertEqual is modified by addTypeEqualityFunc.\n\t\t\t\tself.assertEqual(list(dataout), expected)", "title": "" }, { "docid": "da7e027dcb95815eb2872e810b03d58c", "score": "0.73238504", "text": "def test_add_basic_array_num_array_b4(self):\n\t\tfor testdatax, testvaly in self.groupeddatax:\n\t\t\twith self.subTest(msg='Failed with parameter', testval = (testdatax, testvaly)):\n\n\t\t\t\tdata1 = array.array('Q', testdatax)\n\t\t\t\tdataout = array.array('Q', [0]*len(data1))\n\n\t\t\t\tlimited = len(data1) // 2\n\n\t\t\t\t# Calculate the expected result.\n\t\t\t\tpydataout = [operator.add(x, testvaly) for x in testdatax]\n\t\t\t\texpected = pydataout[0:limited] + list(dataout)[limited:]\n\n\t\t\t\tarrayfunc.add(data1, testvaly, dataout, matherrors=True, maxlen=limited)\n\n\t\t\t\t# The behavour of assertEqual is modified by addTypeEqualityFunc.\n\t\t\t\tself.assertEqual(list(dataout), expected)", "title": "" }, { "docid": "da7e027dcb95815eb2872e810b03d58c", "score": "0.73238504", "text": "def test_add_basic_array_num_array_b4(self):\n\t\tfor testdatax, testvaly in self.groupeddatax:\n\t\t\twith self.subTest(msg='Failed with parameter', testval = (testdatax, testvaly)):\n\n\t\t\t\tdata1 = array.array('Q', testdatax)\n\t\t\t\tdataout = array.array('Q', [0]*len(data1))\n\n\t\t\t\tlimited = len(data1) // 2\n\n\t\t\t\t# Calculate the expected result.\n\t\t\t\tpydataout = [operator.add(x, testvaly) for x in testdatax]\n\t\t\t\texpected = pydataout[0:limited] + list(dataout)[limited:]\n\n\t\t\t\tarrayfunc.add(data1, testvaly, dataout, matherrors=True, maxlen=limited)\n\n\t\t\t\t# The behavour of assertEqual is modified by addTypeEqualityFunc.\n\t\t\t\tself.assertEqual(list(dataout), expected)", "title": "" }, { "docid": "da7e027dcb95815eb2872e810b03d58c", "score": "0.73238504", "text": "def test_add_basic_array_num_array_b4(self):\n\t\tfor testdatax, testvaly in self.groupeddatax:\n\t\t\twith self.subTest(msg='Failed with parameter', testval = (testdatax, testvaly)):\n\n\t\t\t\tdata1 = array.array('Q', testdatax)\n\t\t\t\tdataout = array.array('Q', [0]*len(data1))\n\n\t\t\t\tlimited = len(data1) // 2\n\n\t\t\t\t# Calculate the expected result.\n\t\t\t\tpydataout = [operator.add(x, testvaly) for x in testdatax]\n\t\t\t\texpected = pydataout[0:limited] + list(dataout)[limited:]\n\n\t\t\t\tarrayfunc.add(data1, testvaly, dataout, matherrors=True, maxlen=limited)\n\n\t\t\t\t# The behavour of assertEqual is modified by addTypeEqualityFunc.\n\t\t\t\tself.assertEqual(list(dataout), expected)", "title": "" }, { "docid": "da7e027dcb95815eb2872e810b03d58c", "score": "0.73238504", "text": "def test_add_basic_array_num_array_b4(self):\n\t\tfor testdatax, testvaly in self.groupeddatax:\n\t\t\twith 
self.subTest(msg='Failed with parameter', testval = (testdatax, testvaly)):\n\n\t\t\t\tdata1 = array.array('Q', testdatax)\n\t\t\t\tdataout = array.array('Q', [0]*len(data1))\n\n\t\t\t\tlimited = len(data1) // 2\n\n\t\t\t\t# Calculate the expected result.\n\t\t\t\tpydataout = [operator.add(x, testvaly) for x in testdatax]\n\t\t\t\texpected = pydataout[0:limited] + list(dataout)[limited:]\n\n\t\t\t\tarrayfunc.add(data1, testvaly, dataout, matherrors=True, maxlen=limited)\n\n\t\t\t\t# The behavour of assertEqual is modified by addTypeEqualityFunc.\n\t\t\t\tself.assertEqual(list(dataout), expected)", "title": "" }, { "docid": "c7baf87ec7bbe635829808f8e3eba18f", "score": "0.7320441", "text": "def test_add_basic_array_num_array_b4(self):\n\t\tfor testdatax, testvaly in self.groupeddatax:\n\t\t\twith self.subTest(msg='Failed with parameter', testval = (testdatax, testvaly)):\n\n\t\t\t\tdata1 = array.array('d', testdatax)\n\t\t\t\tdataout = array.array('d', [0]*len(data1))\n\n\t\t\t\tlimited = len(data1) // 2\n\n\t\t\t\t# Calculate the expected result.\n\t\t\t\tpydataout = [operator.add(x, testvaly) for x in testdatax]\n\t\t\t\texpected = pydataout[0:limited] + list(dataout)[limited:]\n\n\t\t\t\tarrayfunc.add(data1, testvaly, dataout, matherrors=True, maxlen=limited)\n\n\t\t\t\t# The behavour of assertEqual is modified by addTypeEqualityFunc.\n\t\t\t\tself.assertEqual(list(dataout), expected)", "title": "" }, { "docid": "c7baf87ec7bbe635829808f8e3eba18f", "score": "0.7320441", "text": "def test_add_basic_array_num_array_b4(self):\n\t\tfor testdatax, testvaly in self.groupeddatax:\n\t\t\twith self.subTest(msg='Failed with parameter', testval = (testdatax, testvaly)):\n\n\t\t\t\tdata1 = array.array('d', testdatax)\n\t\t\t\tdataout = array.array('d', [0]*len(data1))\n\n\t\t\t\tlimited = len(data1) // 2\n\n\t\t\t\t# Calculate the expected result.\n\t\t\t\tpydataout = [operator.add(x, testvaly) for x in testdatax]\n\t\t\t\texpected = pydataout[0:limited] + list(dataout)[limited:]\n\n\t\t\t\tarrayfunc.add(data1, testvaly, dataout, matherrors=True, maxlen=limited)\n\n\t\t\t\t# The behavour of assertEqual is modified by addTypeEqualityFunc.\n\t\t\t\tself.assertEqual(list(dataout), expected)", "title": "" }, { "docid": "c7baf87ec7bbe635829808f8e3eba18f", "score": "0.7320441", "text": "def test_add_basic_array_num_array_b4(self):\n\t\tfor testdatax, testvaly in self.groupeddatax:\n\t\t\twith self.subTest(msg='Failed with parameter', testval = (testdatax, testvaly)):\n\n\t\t\t\tdata1 = array.array('d', testdatax)\n\t\t\t\tdataout = array.array('d', [0]*len(data1))\n\n\t\t\t\tlimited = len(data1) // 2\n\n\t\t\t\t# Calculate the expected result.\n\t\t\t\tpydataout = [operator.add(x, testvaly) for x in testdatax]\n\t\t\t\texpected = pydataout[0:limited] + list(dataout)[limited:]\n\n\t\t\t\tarrayfunc.add(data1, testvaly, dataout, matherrors=True, maxlen=limited)\n\n\t\t\t\t# The behavour of assertEqual is modified by addTypeEqualityFunc.\n\t\t\t\tself.assertEqual(list(dataout), expected)", "title": "" }, { "docid": "c7baf87ec7bbe635829808f8e3eba18f", "score": "0.7320441", "text": "def test_add_basic_array_num_array_b4(self):\n\t\tfor testdatax, testvaly in self.groupeddatax:\n\t\t\twith self.subTest(msg='Failed with parameter', testval = (testdatax, testvaly)):\n\n\t\t\t\tdata1 = array.array('d', testdatax)\n\t\t\t\tdataout = array.array('d', [0]*len(data1))\n\n\t\t\t\tlimited = len(data1) // 2\n\n\t\t\t\t# Calculate the expected result.\n\t\t\t\tpydataout = [operator.add(x, testvaly) for x in 
testdatax]\n\t\t\t\texpected = pydataout[0:limited] + list(dataout)[limited:]\n\n\t\t\t\tarrayfunc.add(data1, testvaly, dataout, matherrors=True, maxlen=limited)\n\n\t\t\t\t# The behavour of assertEqual is modified by addTypeEqualityFunc.\n\t\t\t\tself.assertEqual(list(dataout), expected)", "title": "" }, { "docid": "78053588e3e165fc074373e58987431c", "score": "0.7319053", "text": "def test_add_basic_array_num_array_b4(self):\n\t\tfor testdatax, testvaly in self.groupeddatax:\n\t\t\twith self.subTest(msg='Failed with parameter', testval = (testdatax, testvaly)):\n\n\t\t\t\tdata1 = array.array('l', testdatax)\n\t\t\t\tdataout = array.array('l', [0]*len(data1))\n\n\t\t\t\tlimited = len(data1) // 2\n\n\t\t\t\t# Calculate the expected result.\n\t\t\t\tpydataout = [operator.add(x, testvaly) for x in testdatax]\n\t\t\t\texpected = pydataout[0:limited] + list(dataout)[limited:]\n\n\t\t\t\tarrayfunc.add(data1, testvaly, dataout, matherrors=True, maxlen=limited)\n\n\t\t\t\t# The behavour of assertEqual is modified by addTypeEqualityFunc.\n\t\t\t\tself.assertEqual(list(dataout), expected)", "title": "" }, { "docid": "78053588e3e165fc074373e58987431c", "score": "0.7319053", "text": "def test_add_basic_array_num_array_b4(self):\n\t\tfor testdatax, testvaly in self.groupeddatax:\n\t\t\twith self.subTest(msg='Failed with parameter', testval = (testdatax, testvaly)):\n\n\t\t\t\tdata1 = array.array('l', testdatax)\n\t\t\t\tdataout = array.array('l', [0]*len(data1))\n\n\t\t\t\tlimited = len(data1) // 2\n\n\t\t\t\t# Calculate the expected result.\n\t\t\t\tpydataout = [operator.add(x, testvaly) for x in testdatax]\n\t\t\t\texpected = pydataout[0:limited] + list(dataout)[limited:]\n\n\t\t\t\tarrayfunc.add(data1, testvaly, dataout, matherrors=True, maxlen=limited)\n\n\t\t\t\t# The behavour of assertEqual is modified by addTypeEqualityFunc.\n\t\t\t\tself.assertEqual(list(dataout), expected)", "title": "" }, { "docid": "78053588e3e165fc074373e58987431c", "score": "0.7319053", "text": "def test_add_basic_array_num_array_b4(self):\n\t\tfor testdatax, testvaly in self.groupeddatax:\n\t\t\twith self.subTest(msg='Failed with parameter', testval = (testdatax, testvaly)):\n\n\t\t\t\tdata1 = array.array('l', testdatax)\n\t\t\t\tdataout = array.array('l', [0]*len(data1))\n\n\t\t\t\tlimited = len(data1) // 2\n\n\t\t\t\t# Calculate the expected result.\n\t\t\t\tpydataout = [operator.add(x, testvaly) for x in testdatax]\n\t\t\t\texpected = pydataout[0:limited] + list(dataout)[limited:]\n\n\t\t\t\tarrayfunc.add(data1, testvaly, dataout, matherrors=True, maxlen=limited)\n\n\t\t\t\t# The behavour of assertEqual is modified by addTypeEqualityFunc.\n\t\t\t\tself.assertEqual(list(dataout), expected)", "title": "" }, { "docid": "78053588e3e165fc074373e58987431c", "score": "0.7319053", "text": "def test_add_basic_array_num_array_b4(self):\n\t\tfor testdatax, testvaly in self.groupeddatax:\n\t\t\twith self.subTest(msg='Failed with parameter', testval = (testdatax, testvaly)):\n\n\t\t\t\tdata1 = array.array('l', testdatax)\n\t\t\t\tdataout = array.array('l', [0]*len(data1))\n\n\t\t\t\tlimited = len(data1) // 2\n\n\t\t\t\t# Calculate the expected result.\n\t\t\t\tpydataout = [operator.add(x, testvaly) for x in testdatax]\n\t\t\t\texpected = pydataout[0:limited] + list(dataout)[limited:]\n\n\t\t\t\tarrayfunc.add(data1, testvaly, dataout, matherrors=True, maxlen=limited)\n\n\t\t\t\t# The behavour of assertEqual is modified by addTypeEqualityFunc.\n\t\t\t\tself.assertEqual(list(dataout), expected)", "title": "" }, { "docid": 
"f1f884d7868c6cb8b9770de036ea5dc0", "score": "0.7316666", "text": "def test_add_num_array_array_d2(self):\n\t\tinpvalue = self.inparray1a[0]\n\n\t\t# This version is expected to pass.\n\t\tarrayfunc.add(inpvalue, self.inparray2a, self.dataout, maxlen=self.testmaxlen)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.add(inpvalue, self.inparray2b, self.dataout, maxlen='a')", "title": "" }, { "docid": "f1f884d7868c6cb8b9770de036ea5dc0", "score": "0.7316666", "text": "def test_add_num_array_array_d2(self):\n\t\tinpvalue = self.inparray1a[0]\n\n\t\t# This version is expected to pass.\n\t\tarrayfunc.add(inpvalue, self.inparray2a, self.dataout, maxlen=self.testmaxlen)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.add(inpvalue, self.inparray2b, self.dataout, maxlen='a')", "title": "" }, { "docid": "f1f884d7868c6cb8b9770de036ea5dc0", "score": "0.7316666", "text": "def test_add_num_array_array_d2(self):\n\t\tinpvalue = self.inparray1a[0]\n\n\t\t# This version is expected to pass.\n\t\tarrayfunc.add(inpvalue, self.inparray2a, self.dataout, maxlen=self.testmaxlen)\n\n\t\t# This is the actual test.\n\t\twith self.assertRaises(TypeError):\n\t\t\tarrayfunc.add(inpvalue, self.inparray2b, self.dataout, maxlen='a')", "title": "" } ]
4c37e6bc30849391acf8f7382aa80bde
Test precedence of rc over env and whitelisting.
[ { "docid": "3f163da996d456259d8bcfa00adc4413", "score": "0.0", "text": "def test_set_settings(self):\n in_toto.user_settings.set_settings()\n\n # From envvar IN_TOTO_ARTIFACT_BASE_PATH\n self.assertEquals(in_toto.settings.ARTIFACT_BASE_PATH, \"e/n/v\")\n\n # From RCfile setting (has precedence over envvar setting)\n self.assertListEqual(in_toto.settings.ARTIFACT_EXCLUDE_PATTERNS,\n [\"r\", \"c\", \"file\"])\n\n # Not whitelisted rcfile settings are ignored by `set_settings`\n self.assertTrue(\"new_rc_setting\" in in_toto.user_settings.get_rc())\n self.assertRaises(AttributeError, getattr, in_toto.settings,\n \"NEW_RC_SETTING\")\n\n # Not whitelisted envvars are ignored by `set_settings`\n self.assertTrue(\"NOT_WHITELISTED\" in in_toto.user_settings.get_env())\n self.assertRaises(AttributeError, getattr, in_toto.settings,\n \"NOT_WHITELISTED\")", "title": "" } ]
[ { "docid": "fb408c71fae228d4c62e72491b1bfd25", "score": "0.6072304", "text": "def check_environment_presets():\n presets = [x for x in os.environ.copy().keys()\n if x.startswith('IRONIC') or x.startswith('OS_')]\n if presets:\n print(\"_\" * 80)\n print(\"*WARNING* Found existing environment variables that may \"\n \"cause conflicts:\")\n for preset in presets:\n print(\" - %s\" % preset)\n print(\"_\" * 80)", "title": "" }, { "docid": "79f3759074613a879d94e8222214ff4b", "score": "0.5698117", "text": "def check_environment_presets():\n presets = [x for x in os.environ.copy().keys() if x.startswith('NOVA_') or\n x.startswith('OS_')]\n if len(presets) < 1:\n return True\n else:\n click.echo(\"_\" * 80)\n click.echo(\"*WARNING* Found existing environment variables that may \"\n \"cause conflicts:\")\n for preset in presets:\n click.echo(\" - %s\" % preset)\n click.echo(\"_\" * 80)\n return False", "title": "" }, { "docid": "89e803481fc62bfc3cb50eed5b2bc67e", "score": "0.5644679", "text": "def IsExpandingEnvVars(self):", "title": "" }, { "docid": "11c7cf4efc128b3a197084a79b771a1f", "score": "0.5558786", "text": "def allowed(self):\n if self.allowed_envs in [AllEnvs, DebugEnvs]:\n func = self.allowed_envs.func\n elif callable(self.allowed_envs):\n func = self.allowed_envs\n elif isinstance(self.allowed_envs, six.string_types):\n def func(s): return s.ENV == self.allowed_envs\n elif isinstance(self.allowed_envs, Iterable): # pragma: no branch\n def func(s): return s.ENV in self.allowed_envs\n\n if func(settings):\n return True\n else:\n return False", "title": "" }, { "docid": "cc68042110ef1f6b4b1884e06d5f79db", "score": "0.54577655", "text": "def allowcustom():\n return any(x in ['--custom', '--all'] for x in sys.argv)", "title": "" }, { "docid": "33b615d86f27d9c88bac492844c1fc12", "score": "0.53944695", "text": "def conda_env_matches_expected(conda_prefix):\n return conda_env_is_activated() and os.environ[\"CONDA_PREFIX\"] == conda_prefix", "title": "" }, { "docid": "1cedbb49041629ec61b84cc133665724", "score": "0.5372644", "text": "def test_env_checks(monkeypatch, run_line, cmd):\n monkeypatch.setitem(os.environ, \"GLOBUS_CLI_INTERACTIVE\", \"Whoops\")\n result = run_line(f\"globus {cmd}\", assert_exit_code=1)\n assert \"GLOBUS_CLI_INTERACTIVE\" in result.stderr", "title": "" }, { "docid": "d4d77760fb2cafd2c136c6cb1b148265", "score": "0.53593224", "text": "def test_get_env(self):\n env_dict = in_toto.user_settings.get_env()\n\n # Parsed and used by `set_settings` to monkeypatch settings\n self.assertEquals(env_dict[\"ARTIFACT_BASE_PATH\"], \"e/n/v\")\n\n # Parsed (and split) but overriden by rcfile setting in `set_settings`\n self.assertListEqual(env_dict[\"ARTIFACT_EXCLUDE_PATTERNS\"],\n [\"e\", \"n\", \"v\"])\n\n # Parsed but ignored in `set_settings` (not in case sensitive whitelist)\n self.assertEquals(env_dict[\"NOT_WHITELISTED\"], \"parsed\")\n\n # Not parsed because of missing prefix\n self.assertFalse(\"NOT_PARSED\" in env_dict)", "title": "" }, { "docid": "32fa94df12078ad0a565d9a4effb29b2", "score": "0.5286826", "text": "def test_main__setup_environ(self):\n self.ec2rl.options.global_args[\"perfimpact\"] = \"true\"\n self.ec2rl._setup_environ()\n self.assertEqual(os.environ[\"EC2RL_PERFIMPACT\"], \"True\")\n del self.ec2rl.options.global_args[\"perfimpact\"]\n\n self.ec2rl._setup_environ()\n self.assertEqual(os.environ[\"EC2RL_PERFIMPACT\"], \"False\")\n self.assertEqual(os.environ[\"EC2RL_WORKDIR\"], \"/var/tmp/ec2rl\")\n 
self.assertTrue(os.environ[\"EC2RL_RUNDIR\"].startswith(\"/var/tmp/ec2rl/\"))\n self.assertTrue(os.environ[\"EC2RL_LOGDIR\"].startswith(\"/var/tmp/ec2rl/\"))\n self.assertTrue(os.environ[\"EC2RL_LOGDIR\"].endswith(\"/mod_out\"))\n self.assertTrue(os.environ[\"EC2RL_GATHEREDDIR\"].startswith(\"/var/tmp/ec2rl/\"))\n self.assertTrue(os.environ[\"EC2RL_GATHEREDDIR\"].endswith(\"/gathered_out\"))\n self.assertTrue(os.environ[\"EC2RL_CALLPATH\"].endswith(\"test/modules\"))\n self.assertTrue(re.match(r\"^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}_[0-9]{2}_[0-9]{2}.[0-9]{6}$\",\n os.environ[\"EC2RL_SPECDIR\"]))", "title": "" }, { "docid": "e82670c16c3d530ec9d809370f7454cb", "score": "0.52466226", "text": "def test_valid_environment(saltworker_client):\n saltworker_client.ent_env = 'dev'\n saltworker_client.valid_envs = [None, 'dev', 'test', 'prod']\n assert saltworker_client.before_install() is None", "title": "" }, { "docid": "75e81c655c6858b956b87d1882974871", "score": "0.52355224", "text": "def _config_dependent_side_effect(match_config, exc):\r\n keywords = match_config.split()\r\n\r\n def _side_effect_function(target, config):\r\n if all(word in config for word in keywords):\r\n raise exc\r\n return _side_effect_function", "title": "" }, { "docid": "75e81c655c6858b956b87d1882974871", "score": "0.52355224", "text": "def _config_dependent_side_effect(match_config, exc):\r\n keywords = match_config.split()\r\n\r\n def _side_effect_function(target, config):\r\n if all(word in config for word in keywords):\r\n raise exc\r\n return _side_effect_function", "title": "" }, { "docid": "c1f019adcd6a51e10bb68a80dd6192d5", "score": "0.52354455", "text": "def test_get_rc(self):\n rc_dict = in_toto.user_settings.get_rc()\n\n # Parsed (and split) and used by `set_settings` to monkeypatch settings\n self.assertListEqual(rc_dict[\"ARTIFACT_EXCLUDE_PATTERNS\"], [\"r\", \"c\", \"file\"])\n\n # Parsed but ignored in `set_settings` (not in case sensitive whitelist)\n self.assertEquals(rc_dict[\"artifact_base_path\"], \"r/c/file\")\n self.assertEquals(rc_dict[\"new_rc_setting\"], \"new rc setting\")", "title": "" }, { "docid": "7389471266bfdbc4645cbda8c98e3db6", "score": "0.5209031", "text": "def only_on_env(env_list):\n\n def decorator(func):\n @wraps(func)\n def _wrapped_view(*args, **kwargs):\n if get_env_str() in env_list:\n return func(*args, **kwargs)\n\n return _wrapped_view\n\n return decorator", "title": "" }, { "docid": "94189000fe71bdfbbc4b748320dfde73", "score": "0.5131936", "text": "def _env_is_exposed(env):\n return salt.utils.stringutils.check_whitelist_blacklist(\n env,\n whitelist=__opts__[\"hgfs_saltenv_whitelist\"],\n blacklist=__opts__[\"hgfs_saltenv_blacklist\"],\n )", "title": "" }, { "docid": "caa376c2591ac6c41cca3a96b7720298", "score": "0.5120863", "text": "def check_env(self, env):\n if env == 'base':\n return True\n if os.sep in env:\n return os.path.isdir(env)\n envs = self.list_envs()\n return any(e == env for e in envs)", "title": "" }, { "docid": "225f1c25738ee7734efa8e08228a443d", "score": "0.50867134", "text": "def env_vars_check(app_configs, **kwargs):\n errors = []\n for watched in settings.ENVVAR_WATCHED:\n if getattr(settings, watched) == settings.ENVVAR_SENTINAL:\n msg = 'Env var \"{}\" must be set in production.'.format(watched)\n errors.append(Error(msg))\n return errors", "title": "" }, { "docid": "d84329d9a2734d958798d03bd2817c51", "score": "0.50714177", "text": "def env(line, cell):\n if line.startswith('--config-only') or line.startswith('-c'):\n return\n\n if 
line.startswith('--local-only'):\n line = line.replace('--local-only', '').strip()\n if line.startswith('-l'):\n line = line.replace('-l', '').strip()\n\n if line:\n set_env(line)\n\n for line in cell_lines(cell):\n set_env(line)", "title": "" }, { "docid": "9ae9b4ecfaa6a9a97aa3202a0e3c46d6", "score": "0.50446254", "text": "def test_pytest_uses_posargs(self, testenv_commands):\n assert any([re.search(r'py\\.test [^\\n]*\\{posargs', command) for command in testenv_commands])", "title": "" }, { "docid": "9fb39c35214e848525d4bed57c32ab76", "score": "0.50394356", "text": "def test_validate_preprocesing_args(self):\n args = self.get_common_data_args_namespace()\n options.validate_preprocessing_args(args)", "title": "" }, { "docid": "d6c21ff76cbc9d9f2ac5aa077049e0fe", "score": "0.50276154", "text": "def validateEnv(env: Env) -> None:\n def warn(msg: str) -> None:\n print(f\"\\033[93m{msg}\\033[m\")\n\n # Check if SELinux will block socket access\n if env.capabilities.get(\"selinux\"):\n for cap in (\"x11\", \"tun\", \"pulseaudio\"):\n if env.capabilities.get(cap):\n warn(\n f\"SELinux is disabled because capability '{cap}' need \"\n \"extra type enforcement that are not currently supported.\")\n selinuxCap(False, env.ctx, env)\n env.capabilities[\"selinux\"] = False\n\n # Check for uid permissions\n if not env.capabilities.get(\"root\") and not env.capabilities.get(\"uidmap\"):\n for cap in (\"x11\", \"pulseaudio\", \"ssh\", \"gpg\"):\n if env.capabilities.get(cap):\n warn(\n f\"UIDMap is required because '{cap}' need \"\n \"DAC access to the host file\")\n uidmapCap(True, env.ctx, env)\n break\n\n # Check for system capabilities\n if env.capabilities.get(\"tun\") and \"NET_ADMIN\" not in env.ctx.syscaps:\n warn(f\"NET_ADMIN capability is needed by the tun device\")\n env.ctx.syscaps.append(\"NET_ADMIN\")\n\n # Check mount points labels\n if env.capabilities.get(\"selinux\") and HAS_SELINUX:\n label = \"container_file_t\"\n for hostPath in env.ctx.mounts.values():\n hostPath = hostPath.expanduser().resolve()\n if hostPath.exists() and \\\n selinux.getfilecon(str(hostPath))[1].split(':')[2] != label:\n warn(f\"SELinux is disabled because {hostPath} doesn't have \"\n f\"the {label} label. 
To set the label run: \"\n f\"chcon -Rt {label} {hostPath}\")\n selinuxCap(False, env.ctx, env)\n\n # Check mount points permissions\n for hostPath in env.ctx.mounts.values():\n hostPath = hostPath.expanduser().resolve()\n if hostPath.exists() and not os.access(str(hostPath), os.R_OK):\n warn(f\"{hostPath} is not readable by the current user.\")\n\n # Check for home mount point\n if env.overlays and not env.ctx.mounts.get(env.ctx.home):\n warn(f\"overlay needs a home mount point, \"\n \"mountRun capability is enabled.\")\n mountRunCap(True, env.ctx, env)\n\n if env.capabilities.get(\"mount-home\") and not env.capabilities.get(\n \"uidmap\"):\n warn(\"UIDMap is required for mount-home\")\n uidmapCap(True, env.ctx, env)\n\n # Check for image management\n if not env.manageImage:\n if env.packages:\n warn(\"manage-image capability is required for packages\")\n manageImageCap(True, env.ctx, env)\n if env.imageCustomizations or env.imageTasks:\n warn(\"manage-image capability is required for image tasks\")\n manageImageCap(True, env.ctx, env)\n if env.branchImage:\n warn(\"branch-image capability is incompatible with manage-image\")", "title": "" }, { "docid": "3253f57ffbae316449c752051e961b0d", "score": "0.499687", "text": "def _addl_env_args(addl_env):\n\n # NOTE (twilson) If using rootwrap, an EnvFilter should be set up for the\n # command instead of a CommandFilter.\n if addl_env is None:\n return []\n return ['env'] + ['%s=%s' % pair for pair in addl_env.items()]", "title": "" }, { "docid": "4652288d2ba6bcb1a92e33982dabb28e", "score": "0.49935052", "text": "def get_allowed_args(self):\n\n # Arguments that apply to all C/C++ rules.\n args = {\n 'arch_compiler_flags',\n 'arch_preprocessor_flags',\n 'auto_headers',\n 'compiler_flags',\n 'compiler_specific_flags',\n 'deps',\n 'external_deps',\n 'global_symbols',\n 'header_namespace',\n 'headers',\n 'known_warnings',\n 'lex_args',\n 'linker_flags',\n 'modules',\n 'name',\n 'nodefaultlibs',\n 'nvcc_flags',\n 'precompiled_header',\n 'preprocessor_flags',\n 'py3_sensitive_deps',\n 'shared_system_deps',\n 'srcs',\n 'supports_coverage',\n 'system_include_paths',\n 'visibility',\n 'yacc_args',\n 'additional_coverage_targets',\n 'autodeps_keep',\n 'tags',\n }\n\n # Set rule-type-specific args.\n rtype = self.get_fbconfig_rule_type()\n\n if rtype in ('cpp_benchmark', 'cpp_unittest'):\n args.update([\n 'args',\n 'emails',\n 'env',\n 'owner',\n 'runtime_files',\n 'tags',\n ])\n\n if rtype == 'cpp_unittest':\n args.update([\n 'type',\n 'use_default_test_main',\n ])\n\n if rtype == 'cpp_binary':\n args.update([\n 'dlopen_enabled',\n 'dont_link_prerequisites',\n 'enable_lto',\n 'hs_profile',\n 'split_symbols',\n 'os_deps',\n 'os_linker_flags',\n ])\n\n if rtype in ('cpp_benchmark', 'cpp_binary', 'cpp_unittest'):\n args.update([\n 'allocator',\n 'dlls',\n 'versions',\n ])\n\n if rtype == 'cpp_library':\n args.update([\n 'lib_name',\n 'link_whole',\n 'modular_headers',\n 'os_deps',\n 'os_linker_flags',\n 'preferred_linkage',\n 'propagated_pp_flags',\n 'undefined_symbols',\n ])\n\n if rtype == 'cpp_precompiled_header':\n args.update([\n 'src',\n ])\n\n if rtype == 'cpp_python_extension':\n args.update([\n 'base_module',\n # Intentionally not visible to users!\n #'module_name',\n ])\n\n if rtype == 'cpp_lua_extension':\n args.update([\n 'base_module',\n ])\n\n if rtype == 'cpp_java_extension':\n args.update([\n 'lib_name',\n ])\n\n if rtype == 'cpp_lua_main_module':\n args.update([\n 'embed_deps',\n ])\n\n return args", "title": "" }, { "docid": 
"c80401f0e287472685e7024c0957707c", "score": "0.49700686", "text": "def test_active_run_does_not_inherit_env_addition(self):\n envs = self.F.EnvironmentFactory.create_full_set(\n {\"OS\": [\"OS X\", \"Linux\"]})\n pv = self.F.ProductVersionFactory.create(environments=envs[1:])\n run = self.F.RunFactory.create(productversion=pv, status=\"active\")\n\n pv.add_envs(envs[0])\n\n self.assertEqual(set(run.environments.all()), set(envs[1:]))", "title": "" }, { "docid": "fb522753d893331c071d164f36cd1dc5", "score": "0.49408793", "text": "def Matches(self, runtime, env):\n return self._RuntimeMatches(runtime) and self._EnvMatches(env)", "title": "" }, { "docid": "22989e297449a7829f9635279cefcf15", "score": "0.49237117", "text": "def test_prefer_current_user(self, user_preferred_setting):\n result = distutils.msvc9compiler.find_vcvarsall(9.0)\n expected = os.path.join(user_preferred_setting, 'vcvarsall.bat')\n assert expected == result", "title": "" }, { "docid": "ce7718cbd9ed08de22492a61988581cb", "score": "0.49186996", "text": "def test_local_machine_recognized(self, local_machine_setting):\n result = distutils.msvc9compiler.find_vcvarsall(9.0)\n expected = os.path.join(local_machine_setting, 'vcvarsall.bat')\n assert expected == result", "title": "" }, { "docid": "31a4464775d5593b7ed60802c33f3666", "score": "0.4909785", "text": "def test_preserve_var_case(self):\n config_manager = apputils.ConfigManager\n c_file = os.path.join(fixture_dir, 'sample_config_1.conf')\n conf = config_manager.get_scheduled_task_config_from_file(c_file)._sections # NOQA\n assert \"ARG_1\" in conf[\"env_literal\"]", "title": "" }, { "docid": "a7c194773d59933a6d7068c8fe1ccc7c", "score": "0.48935175", "text": "def test_cli_profile_set_redundant(capsys: pytest.CaptureFixture):\n for i in range(2):\n assert main([\"profile\", \"set\", \"hello\"]) == False\n captured = capsys.readouterr()\n assert not captured.out\n assert not captured.err", "title": "" }, { "docid": "012027dbca84f8983802661fc1e80fe4", "score": "0.48927313", "text": "def test_prevalidate_arguments_local_vs_remote(self, mock_platform):\n\n mock_platform.system.return_value = \"Linux\"\n with self.assertRaises(ValueError):\n self.register.prevalidate_arguments(self._build_args(\n infrastructure_class=\"on-premises\",\n hostname=None, target=None, local=False))\n with self.assertRaises(ValueError):\n self.register.prevalidate_arguments(self._build_args(\n infrastructure_class=\"on-premises\",\n hostname=None, target=\"HOSTNAME\", local=True))\n self.register.prevalidate_arguments(self._build_args(\n infrastructure_class=\"on-premises\",\n hostname=None, target=\"HOSTNAME\", local=False))\n self.register.prevalidate_arguments(self._build_args(\n infrastructure_class=\"on-premises\",\n hostname=None, target=None, local=True))\n\n with self.assertRaises(ValueError):\n self.register.prevalidate_arguments(self._build_args(\n infrastructure_class=\"ec2\",\n hostname=None, target=None, local=False))\n with self.assertRaises(ValueError):\n self.register.prevalidate_arguments(self._build_args(\n infrastructure_class=\"ec2\",\n hostname=None, target=\"HOSTNAME\", local=True))\n self.register.prevalidate_arguments(self._build_args(\n infrastructure_class=\"ec2\",\n hostname=None, target=\"HOSTNAME\", local=False))\n self.register.prevalidate_arguments(self._build_args(\n infrastructure_class=\"ec2\",\n hostname=None, target=None, local=True))", "title": "" }, { "docid": "2bb43e5acf8c8c0c5f3ea765d9e7b23b", "score": "0.4883903", "text": "def test_valid_environment(self):\r\n 
resp, error = self.execute([fix, 'list_nodes', '--env', 'staging'])\r\n self.assertEquals(error, \"\", error)\r\n self.assertTrue(\"Environment: staging\" in resp, resp)", "title": "" }, { "docid": "c7d6334fef9b066d0588467aff942d1c", "score": "0.48748755", "text": "def check_critiacal_argv(self):\n if any(\n map(\n lambda x: self._argv_rules[x]['critical'] and x not in self._argv_p,\n self._argv_rules.keys()\n )\n ):\n self.FATAL = True\n self.errmsg += [\"Parent: parse-argv: not all critical params were passed\"]\n return self", "title": "" }, { "docid": "279882fa12205b4d438fc351d4d1ed4e", "score": "0.4873874", "text": "def check_env_conda(cls, name: str) -> bool:\n args = [\"conda\", \"env\", \"list\"]\n ret = utils.cmdline(args)\n env_found = False\n for line in ret.stdout.splitlines():\n if line.startswith(name):\n env_found = True\n break\n return env_found", "title": "" }, { "docid": "f4a16fb0c364f387c5b35722b26437fe", "score": "0.48725855", "text": "def check_env_and_conf(env_var, conf_var, configuration):\n return os.getenv(env_var, '0') == '1' or configuration.get(conf_var, '0') == '1'", "title": "" }, { "docid": "5810e43ec80c1bf4c6427b038a86b02c", "score": "0.48626626", "text": "def test_show_env_with_pattern():\n pytest_enable_socket()\n\n\n dir_env_var_name = \"$ggd_hg19_gaps_ucsc_v1_dir\"\n file_env_var_name = \"$ggd_hg19_gaps_ucsc_v1_file\"\n parser = ()\n\n ## Good pattern should have \"ggd_hg19_gaps\" in the results\n args = Namespace(command='show-env', pattern=\"gaps\")\n temp_stdout = StringIO()\n with redirect_stdout(temp_stdout):\n show_env.show_env(parser,args)\n output = temp_stdout.getvalue().strip() \n assert (dir_env_var_name in output)\n assert (file_env_var_name in output)\n\n ## Bad pattern should return \"No matching recipe variables found for this environment\"\n args = Namespace(command='show-env', pattern=\"NONE\")\n temp_stdout = StringIO()\n with redirect_stdout(temp_stdout):\n show_env.show_env(parser,args)\n output = temp_stdout.getvalue().strip() \n assert (dir_env_var_name not in output)\n assert (file_env_var_name not in output)\n assert (\"No matching recipe variables found for this environment\" in output)\n\n\n ## invalid pattern should exit\n args = Namespace(command='show-env', pattern=\")()(\")\n with pytest.raises(SystemExit) as pytest_wrapped_e:\n show_env.show_env(parser,args) \n assert \"SystemExit\" in str(pytest_wrapped_e.exconly()) ## test that SystemExit was raised by sys.exit() \n assert pytest_wrapped_e.match(\"1\") ## Check that the exit code is 1", "title": "" }, { "docid": "09add8dc55a9904e87c391cb80361a22", "score": "0.48585206", "text": "def test_activate_environment_variables():\n pytest_enable_socket()\n\n dir_env_var_name = \"$ggd_hg19_gaps_ucsc_v1_dir\"\n file_env_var_name = \"$ggd_hg19_gaps_ucsc_v1_file\"\n temp_stdout = StringIO()\n with redirect_stdout(temp_stdout):\n show_env.activate_environment_variables()\n output = temp_stdout.getvalue().strip() \n newout = \"\"\n active = False\n for line in output.strip().split(\"\\n\"):\n if \"Active environment variables:\" in line:\n active = True\n if \"Inactive or out-of-date environment variables:\" in line:\n active = False\n if active:\n newout += line\n assert (dir_env_var_name in output)\n assert (file_env_var_name in output)", "title": "" }, { "docid": "f40b1579fb77073476a806536c2050b2", "score": "0.4856092", "text": "def test_preprocessing_valid(self):\n precomp_args = _internal.process_precomp_groups(\n precomp_groups=tuple(), groups=tuple(), 
custom_class_=MFETestClass)\n\n assert len(precomp_args) > 0", "title": "" }, { "docid": "87badfacfe1284c513b9b844e821e887", "score": "0.4850207", "text": "def test_config_flag():\n output = runner.invoke(proselint, \"--demo\")\n assert \"uncomparables.misc\" in output.stdout\n\n output = runner.invoke(\n proselint, \"--demo --config tests/test_config_flag_proselintrc.json\")\n assert \"uncomparables.misc\" not in output.stdout\n\n output = runner.invoke(proselint, \"--demo --config non_existent_file\")\n assert output.exit_code == 1\n assert \"FileNotFoundError\" == output.exc_info[0].__name__\n\n output = runner.invoke(proselint, \"non_existent_file\")\n assert output.exit_code == 2", "title": "" }, { "docid": "48b1a315443f93e11250f96f617d4714", "score": "0.48458233", "text": "def check_envs():\n required_envs = ['JAVA_HOME']\n for env in required_envs:\n if os.getenv(env) is None:\n raise ValueError('%s is not set' % env)", "title": "" }, { "docid": "7c00ef435b92c0b578471d6e865c15a6", "score": "0.4844988", "text": "def config_allowed():", "title": "" }, { "docid": "bc5ecdef32c18ed43746f858d3b90cab", "score": "0.48308322", "text": "def _check_prereqs_prevars(self):\r\n\r\n self._check_ugid()\r\n self._check_exclusive()\r\n\r\n #Chimera.confirm_psycopg2_version()\r\n Chimera.confirm_fs()", "title": "" }, { "docid": "2a6dffa1b1b5e43fe842be56ebcc48bc", "score": "0.47972545", "text": "def _detect(env, kwargs):\n return _multiGet(\"PROTOC\", \"\", env, kwargs) or env.Detect(protocs)", "title": "" }, { "docid": "32f123ac4186027fd3fd15cea966f183", "score": "0.47925025", "text": "def test_no_valid_value(self):\r\n resp, error = self.execute([fix, 'list_nodes', '--env'])\r\n self.assertEquals(resp, \"\")\r\n self.assertTrue(\r\n \"error: argument -e/--env: expected one argument\" in error, error)\r\n\r\n resp, error = self.execute([fix, '--env', 'list_nodes'])\r\n self.assertEquals(resp, \"\")\r\n self.assertTrue(\"error: No value given for --env\" in error, error)\r\n\r\n cmd = [fix, '--env', 'nodes_with_role:base', 'role:base']\r\n resp, error = self.execute(cmd)\r\n self.assertEquals(resp, \"\")\r\n self.assertTrue(\"error: No value given for --env\" in error, error)", "title": "" }, { "docid": "12c8c18cb1614bcea739d61ac4627150", "score": "0.4791004", "text": "def test_draft_run_inherits_env_addition(self):\n envs = self.F.EnvironmentFactory.create_full_set(\n {\"OS\": [\"OS X\", \"Linux\"]})\n pv = self.F.ProductVersionFactory.create(environments=envs[1:])\n run = self.F.RunFactory.create(productversion=pv, status=\"draft\")\n\n pv.add_envs(envs[0])\n\n self.assertEqual(set(run.environments.all()), set(envs))", "title": "" }, { "docid": "c380cfab3ebd52e9785ab5d338a46540", "score": "0.4789243", "text": "def _is_relevant(prf, cfg):\n return True", "title": "" }, { "docid": "ac86593f8166b70aa8a005bc1d1dc676", "score": "0.4785867", "text": "def _check_for_testing_env(self):\n env_var = 'JUXTAPY_ENV'\n if not env_var in os.environ:\n raise Exception(\"Could not find juxtapy environment in OS environment\")\n\n casetrackr_api_env = os.environ[env_var]\n if casetrackr_api_env != 'testing':\n raise Exception(\"Did not find testing environment - instead, found [%s]\" % casetrackr_api_env)", "title": "" }, { "docid": "47da45886d883f8c98bdee8f6411486f", "score": "0.47837362", "text": "def print_valid_envs(valid_envs):\n print(\"[%s] Your valid environments are:\" %\n (colors.gwrap('Found environments')))\n print(\"%r\" % valid_envs)", "title": "" }, { "docid": "3605f68058c51d4a3dc3cd8600e14e95", 
"score": "0.47791952", "text": "def test_show_env_goodrun():\n pytest_enable_socket()\n\n try:\n uninstall_hg19_gaps_ucsc_v1()\n except:\n pass\n\n try:\n install_hg19_gaps_ucsc_v1()\n except:\n pass\n\n parser = ()\n args = Namespace(command='show-env', pattern=None)\n dir_env_var_name = \"$ggd_hg19_gaps_ucsc_v1_dir\"\n file_env_var_name = \"$ggd_hg19_gaps_ucsc_v1_file\"\n\n ## Test a normal run\n temp_stdout = StringIO()\n with redirect_stdout(temp_stdout):\n show_env.show_env(parser,args)\n output = temp_stdout.getvalue().strip() \n assert (dir_env_var_name in output)\n assert (file_env_var_name in output)\n\n ## Test active environment variables\n sp.check_call([\"activate\", \"base\"])\n temp_stdout = StringIO()\n with redirect_stdout(temp_stdout):\n show_env.show_env(parser,args)\n output = temp_stdout.getvalue().strip() \n newout = \"\"\n active = False\n for line in output.strip().split(\"\\n\"):\n if \"Active environment variables:\" in line:\n active = True\n if \"Inactive or out-of-date environment variables:\" in line:\n active = False\n if active:\n newout += line\n assert (dir_env_var_name in output)\n assert (file_env_var_name in output)", "title": "" }, { "docid": "2c92cfb23c123701949c8c06e06da39f", "score": "0.47713473", "text": "def config_check_setup(opp, config):\n if config.get(\"comp_a\", {}).get(\"valid\", False):\n return True\n raise Exception(f\"Config not passed in: {config}\")", "title": "" }, { "docid": "4aee7f37652348a3eb574c0cca6be2dc", "score": "0.47602817", "text": "def _prepare_environment( # noqa: C901\n self, environment: Environment, log: Logger\n ) -> bool:\n # TODO: Reduce this function's complexity and remove the disabled warning.\n\n is_success: bool = True\n\n if environment.runbook.nodes_requirement:\n is_success = False\n nodes_requirement = environment.runbook.nodes_requirement\n node_count = len(nodes_requirement)\n # fills predefined locations here.\n predefined_caps: List[Any] = [None] * node_count\n # make sure all vms are in same location.\n existing_location: str = \"\"\n predefined_cost: int = 0\n\n assert self._eligible_capabilities\n\n # check locations\n for req in nodes_requirement:\n # apply azure specified values\n # they will pass into arm template\n node_runbook: AzureNodeSchema = req.get_extended_runbook(\n AzureNodeSchema, AZURE\n )\n if node_runbook.location:\n if existing_location:\n # if any one has different location, calculate again\n if existing_location != node_runbook.location:\n raise LisaException(\n f\"predefined node must be in same location, \"\n f\"previous: {existing_location}, \"\n f\"found: {node_runbook.location}\"\n )\n else:\n existing_location = node_runbook.location\n\n if existing_location:\n locations = [existing_location]\n else:\n locations = LOCATIONS\n\n # check eligible locations\n found_or_skipped = False\n for location_name in locations:\n predefined_cost = 0\n predefined_caps = [None] * node_count\n for req_index, req in enumerate(nodes_requirement):\n found_or_skipped = False\n node_runbook = req.get_extended_runbook(AzureNodeSchema, AZURE)\n if not node_runbook.vm_size:\n # not to check, if no vm_size set\n found_or_skipped = True\n continue\n\n # find predefined vm size on all avaiable's.\n location_info: AzureLocation = self._get_location_info(\n location_name, log\n )\n for azure_cap in location_info.capabilities:\n if azure_cap.vm_size == node_runbook.vm_size:\n predefined_cost += azure_cap.estimated_cost\n\n min_cap: schema.NodeSpace = req.generate_min_capability(\n azure_cap.capability\n 
)\n # apply azure specified values\n # they will pass into arm template\n min_runbook = min_cap.get_extended_runbook(\n AzureNodeSchema, AZURE\n )\n # the location may not be set\n min_runbook.location = location_name\n min_runbook.vm_size = azure_cap.vm_size\n assert isinstance(min_cap.nic_count, int)\n min_runbook.nic_count = min_cap.nic_count\n if not existing_location:\n existing_location = location_name\n predefined_caps[req_index] = min_cap\n found_or_skipped = True\n break\n if not found_or_skipped:\n # if not found any, skip and try next location\n break\n if found_or_skipped:\n # if found all, skip other locations\n break\n if not found_or_skipped:\n # no location meet requirement\n raise LisaException(\n f\"cannot find predefined vm size [{node_runbook.vm_size}] \"\n f\"in location [{locations}]\"\n )\n for location_name, location_caps in self._eligible_capabilities.items():\n # in each location, all node must be found\n # fill them as None and check after meeted capability\n found_capabilities: List[Any] = list(predefined_caps)\n\n # skip unmatched location\n if existing_location and existing_location != location_name:\n continue\n\n estimated_cost: int = 0\n for req_index, req in enumerate(nodes_requirement):\n for azure_cap in location_caps:\n if found_capabilities[req_index]:\n # found, so skipped\n continue\n\n check_result = req.check(azure_cap.capability)\n if check_result.result:\n min_cap = req.generate_min_capability(azure_cap.capability)\n\n # apply azure specified values\n # they will pass into arm template\n node_runbook = min_cap.get_extended_runbook(\n AzureNodeSchema, AZURE\n )\n if node_runbook.location:\n assert node_runbook.location == azure_cap.location, (\n f\"predefined location [{node_runbook.location}] \"\n f\"must be same as \"\n f\"cap location [{azure_cap.location}]\"\n )\n\n # will pass into arm template\n node_runbook.location = azure_cap.location\n if not node_runbook.vm_size:\n node_runbook.vm_size = azure_cap.vm_size\n assert isinstance(\n min_cap.nic_count, int\n ), f\"actual: {min_cap.nic_count}\"\n node_runbook.nic_count = min_cap.nic_count\n\n estimated_cost += azure_cap.estimated_cost\n\n found_capabilities[req_index] = min_cap\n if all(x for x in found_capabilities):\n break\n\n if all(x for x in found_capabilities):\n # all found and replace current requirement\n environment.runbook.nodes_requirement = found_capabilities\n environment.cost = estimated_cost + predefined_cost\n is_success = True\n log.debug(\n f\"requirement meet, \"\n f\"cost: {environment.cost}, \"\n f\"cap: {environment.runbook.nodes_requirement}\"\n )\n break\n return is_success", "title": "" }, { "docid": "f5bd9a52c14856264fee13e8f2c47101", "score": "0.4753132", "text": "def is_valid_environment(env, nova_creds):\n if env in nova_creds.keys():\n return env\n else:\n return False", "title": "" }, { "docid": "ce64797012da43e293f42a59a2ed5ca4", "score": "0.47337657", "text": "def test_default_valid_environments(saltworker_client):\n # test that the defaults work\n assert saltworker_client.before_install() is None", "title": "" }, { "docid": "8b92b546ecc9bc341269235e0e87e849", "score": "0.47199365", "text": "def getenv():\n ...", "title": "" }, { "docid": "c1c7ee7f9494715071117e0315c53f48", "score": "0.4708881", "text": "def test_right_cluster_and_wrong_env(self):\n cmd = BASECMD + ['large:wrong_env']\n pipes = Popen(cmd, stdout=PIPE, stderr=PIPE)\n out, err = pipes.communicate()\n self.assertRegex(err, b'System \"large:wrong_env\" not recognized')", "title": "" }, { "docid": 
"67d96dd5429fa6d9f0dde564f09dea13", "score": "0.47074768", "text": "def conda_env_is_activated():\n return \"CONDA_PREFIX\" in os.environ", "title": "" }, { "docid": "167bf1e5a04079d76b2c79ac0562bb8a", "score": "0.47054705", "text": "def test_init_app_config_env_overlapping_prefix(self) -> None:\n with pytest.raises(\n ValueError, match=\"Env prefix FLASK_APP is overlapping an earlier prefix\"\n ):\n init_app(self.app, env_prefix=['FLASK', 'FLASK_APP'])\n with pytest.raises(\n ValueError, match=\"Env prefix FLASK is overlapping an earlier prefix\"\n ):\n init_app(self.app, env_prefix=['FLASK_APP', 'FLASK'])\n # No error because no overlap when an underscore is suffixed\n init_app(self.app, env_prefix=['FLASK', 'FLASKAPP'])", "title": "" }, { "docid": "560c12afab00251026d12e81e4acd32f", "score": "0.47042742", "text": "def pytest_cmdline_preparse(config, args):\n\n # Build up argv which will contain only the \"pytest acceptable\" args\n argv = []\n for arg in args:\n \n if not '=' in arg:\n # This is not one of our args. All of our args are of the form KEY=VALUE\n argv.append(arg)\n continue\n \n # Update the args which pytest will continue to parse\n args[:] = argv", "title": "" }, { "docid": "61977d3a1086b15a9f4cce2029dd87d7", "score": "0.4703985", "text": "def check(expr: List[Expr], true_commands: List[str], args: List[str] = []):\n combiner = ArgCombiner(args)\n pred_commands = []\n for new_expr, new_env in combiner.execute(expr, {}):\n pred_commands.append(combiner.to_command(new_expr))\n assert true_commands == pred_commands", "title": "" }, { "docid": "02bc3dbc32611699dbb976a87af99599", "score": "0.46991748", "text": "def test_no_environments_in_common(self):\n self.pv8.environments.add(*self.envs)\n\n tc = self.F.CaseFactory.create(product=self.p)\n tcv1 = self.F.CaseVersionFactory.create(\n case=tc, productversion=self.pv8, status=\"active\")\n tcv1.remove_envs(*self.envs[:2])\n\n ts = self.F.SuiteFactory.create(product=self.p, status=\"active\")\n self.F.SuiteCaseFactory.create(suite=ts, case=tc)\n\n r = self.F.RunFactory.create(productversion=self.pv8)\n r.remove_envs(*self.envs[2:])\n self.F.RunSuiteFactory.create(suite=ts, run=r)\n\n r.activate()\n\n self.assertCaseVersions(r, [])", "title": "" }, { "docid": "5af165b36f4a0603be8def52b0192c2e", "score": "0.46985197", "text": "def test_ssh_env(self):\n conn = connection.SshConnection(self.settings)\n\n result = conn.run('printenv', env={'MYSPECIALVAR': 20})\n self.assertTrue(result.stdout.find('MYSPECIALVAR=20'))", "title": "" }, { "docid": "41f313141036620367713282033cea2a", "score": "0.46959773", "text": "def _getFromEnviron(self, name: str) -> bool:\n if (os.environ[name.upper()]):\n self.config[name.lower()] = os.environ[name.upper()]\n return True\n\n return False", "title": "" }, { "docid": "2b75bde13fcb9f4a0edac2d827f602e5", "score": "0.46926093", "text": "def test_contains(self):\n self.assertIn(\"app\", self.config)\n self.assertIn(\"window\", self.config.app)", "title": "" }, { "docid": "d4e73a01cb7149ef5c9c016044966d6e", "score": "0.46879378", "text": "def test_apply_cli_subset_none():\n test_config = ApplicationConfiguration(\n application_name=\"test_application\",\n post_processor=None,\n subcommands=[\n SubCommand(name=\"list\", description=\"list\"),\n SubCommand(name=\"run\", description=\"run\"),\n ],\n entries=[\n Entry(\n name=\"subcommand\",\n short_description=\"Subcommands\",\n subcommand_value=True,\n value=EntryValue(default=\"run\"),\n ),\n Entry(\n name=\"z\",\n apply_to_subsequent_cli=C.NONE,\n 
cli_parameters=CliParameters(short=\"-z\"),\n short_description=\"the z paramter\",\n value=EntryValue(),\n ),\n ],\n )\n configurator = Configurator(\n params=[\"list\", \"-z\", \"zebra\"], application_configuration=test_config, initial=True\n )\n _messages, exit_messages = configurator.configure()\n assert exit_messages == []\n\n assert isinstance(test_config.initial, ApplicationConfiguration)\n\n expected = [\n (\"subcommand\", \"list\"),\n (\"z\", \"zebra\"),\n ]\n for expect in expected:\n assert test_config.entry(expect[0]).value.current == expect[1]\n assert test_config.entry(expect[0]).value.source is C.USER_CLI\n\n configurator = Configurator(\n params=[\"run\"], application_configuration=test_config, apply_previous_cli_entries=C.ALL\n )\n _messages, exit_messages = configurator.configure()\n assert exit_messages == []\n\n expected = [\n (\"subcommand\", \"run\", C.USER_CLI),\n (\"z\", C.NOT_SET, C.NOT_SET),\n ]\n for expect in expected:\n assert test_config.entry(expect[0]).value.current == expect[1]\n assert test_config.entry(expect[0]).value.source is expect[2]", "title": "" }, { "docid": "8f9fe0b99109338a1b33c0c8cb3ce169", "score": "0.46866134", "text": "def test_additional_settings_from_file(self) -> None:\n env = 'FLASK_ENV'\n environ[env] = \"gibberish\"\n assert _additional_config.get(environ[env]) is None\n for k, v in _additional_config.items():\n environ[env] = k\n assert _additional_config.get(environ[env]) == v", "title": "" }, { "docid": "91f12b71d58148996ab9762b6615d599", "score": "0.4682654", "text": "def IsValidEnvironment():\r\n\r\n # By default, the environment is suitable\r\n return None", "title": "" }, { "docid": "31b2d8d49f3d7fe841ad74fa0a1a02bb", "score": "0.46793932", "text": "def _check_dependency_expression(self, env, valueToTest):\n \n # if there is no test to do, return true\n if(not valueToTest):\n return True\n \n stringToChange = valueToTest\n \n ### Clean expression\n # elements to ignore\n lib_var = [r'\\b(or)\\b', r'\\b(not)\\b', r'\\b(and)\\b',r'\\b(if)\\b']\n \n stringToChange = re.sub(r'(\\(|\\))',' ',stringToChange)\n for element in lib_var :\n stringToChange = re.sub(element,'',stringToChange) \n \n stringToChange = re.sub(' +',' ', stringToChange)\n \n # split the command names\n if re.search(' ', stringToChange):\n elementsToChange = stringToChange.split()\n else :\n elementsToChange = [stringToChange]\n \n # add the call to the function that verifies if the program exist\n elementsSet = set([])\n for element in elementsToChange:\n elementsSet.add(element) \n \n \n stringToChange = self._add_command_calls(valueToTest.replace('\\\\',''),\n elementsSet)\n \n # Evaluate if all the programs exist\n returnValue = eval(stringToChange)\n return returnValue", "title": "" }, { "docid": "ab053b1751ab858a4a2705bbd8e7ad66", "score": "0.46777916", "text": "def test_prevalidate_arguments_ssh_override(self):\n\n self.register.prevalidate_arguments(self._build_args(\n ssh=\"telnet\", infrastructure_class=\"ec2\", target=\"i-12345678\"\n ))\n self.register.prevalidate_arguments(\n self._build_args(\n username=\"root\", private_key=\"id_rsa\",\n infrastructure_class=\"ec2\", target=\"1.2.3.4\"))\n with self.assertRaises(ValueError):\n self.register.prevalidate_arguments(\n self._build_args(\n ssh=\"telnet\", username=\"root\", infrastructure_class=\"ec2\",\n target=\"1.2.3.4\"))\n with self.assertRaises(ValueError):\n self.register.prevalidate_arguments(\n self._build_args(\n ssh=\"telnet\", private_key=\"id_rsa\",\n infrastructure_class=\"ec2\", 
target=\"1.2.3.4\"))\n with self.assertRaises(ValueError):\n self.register.prevalidate_arguments(\n self._build_args(\n ssh=\"telnet\", username=\"root\", private_key=\"id_rsa\",\n infrastructure_class=\"ec2\", target=\"1.2.3.4\"))", "title": "" }, { "docid": "359c824ca3c3e8466f3f0d9e39b6ac2f", "score": "0.4665635", "text": "def env_valid(env):\n if env not in EFConfig.ENV_LIST:\n raise ValueError(\"unknown env: {}; env must be one of: \".format(env) + \", \".join(EFConfig.ENV_LIST))\n return True", "title": "" }, { "docid": "e40b9198e5f0f28e8b36c2f92d6ec70a", "score": "0.46634254", "text": "def test_prefix_in_conda():\n pytest_enable_socket()\n\n ## Test a bad env (environments not in base environment path)\n try:\n utils.prefix_in_conda(os.getcwd())\n except utils.CondaEnvironmentNotFound as e:\n assert \"The prefix supplied is not a conda environment: {}\".format(os.getcwd()) in str(e) \n except Exception as e:\n assert False\n\n try:\n utils.prefix_in_conda(\"/Not/A/Real/Location\")\n except utils.CondaEnvironmentNotFound as e:\n assert \"The prefix supplied is not a conda environment: {}\".format(\"/Not/A/Real/Location\") in str(e) \n except Exception as e:\n assert False\n\n try:\n utils.prefix_in_conda(\"current\")\n except utils.CondaEnvironmentNotFound as e:\n assert \"The prefix supplied is not a conda environment: {}\".format(\"current\") in str(e) \n except Exception as e:\n assert False\n\n ## Test that the prefix is or is not in the environmnets \n ### List of enviroments\n environments = [os.path.join(x+\"/\") for x in utils.check_output([\"conda\", \"info\", \"--env\"]).strip().replace(\"*\",\"\").replace(\"\\n\",\" \").split(\" \") if os.path.isdir(x)]\n base_env = min(environments)\n env_name = \"temp_env\"\n temp_env = os.path.join(utils.conda_root(), \"envs\", env_name)\n\n try:\n utils.prefix_in_conda(temp_env)\n except utils.CondaEnvironmentNotFound as e:\n assert \"The prefix supplied is not a conda environment: {}\".format(temp_env) in str(e) \n except Exception as e:\n assert False\n\n ## Test the prefix passes all checks, is in the base environment, is in the list of environments, and it is a directoyr\n #os.mkdir(temp_env) \n sp.check_output([\"conda\", \"create\", \"--name\", env_name])\n\n assert utils.prefix_in_conda(utils.conda_root()) ## conda_root, Test environment path\n assert utils.prefix_in_conda(os.path.basename(utils.conda_root())) ## conda_root, Test environment name\n\n assert utils.prefix_in_conda(temp_env) ## temp_env, Test environment path\n assert utils.prefix_in_conda(env_name) ## temp_env, test environment name\n\n environments = [os.path.join(x+\"/\") for x in utils.check_output([\"conda\", \"info\", \"--env\"]).strip().replace(\"*\",\"\").replace(\"\\n\",\" \").split(\" \") if os.path.isdir(x)]\n\n for env in environments:\n assert utils.prefix_in_conda(env) ## test environment path\n assert utils.prefix_in_conda(os.path.basename(env.rstrip(\"/\"))) ## Test environment name (basename does not work if it is a directory. 
Must strip the trailing \"/\" if it exists\n\n ### Remove temp env\n sp.check_output([\"conda\", \"env\", \"remove\", \"--name\", env_name])\n try:\n shutil.rmtree(temp_env)\n except Exception:\n pass\n assert os.path.exists(temp_env) == False", "title": "" }, { "docid": "ace39fc44fc6050ea508605853c87f51", "score": "0.46630153", "text": "def _PreprocessCond(\n cnxn, cond, project_ids, services, harmonized_config, is_member):\n # All the fields in a cond share the same name because they are parsed\n # from a user query term, and the term syntax allows just one field name.\n field_name = cond.field_defs[0].field_name\n assert all(fd.field_name == field_name for fd in cond.field_defs)\n\n # Case 1: The user is searching custom fields.\n if any(fd.field_id for fd in cond.field_defs):\n # There can't be a mix of custom and built-in fields because built-in\n # field names are reserved and take priority over any conflicting ones.\n assert all(fd.field_id for fd in cond.field_defs)\n return _PreprocessCustomCond(cnxn, cond, services, is_member)\n\n # Case 2: The user is searching a built-in field.\n preproc = _PREPROCESSORS.get(field_name)\n if preproc:\n # We have a preprocessor for that built-in field.\n return preproc(\n cnxn, cond, project_ids, services, harmonized_config, is_member)\n else:\n # We don't have a preprocessor for it.\n return cond", "title": "" }, { "docid": "d297e165f866657e96802cb43346076e", "score": "0.46611613", "text": "def test_check_convention_globals(self):\n #check for pass\n dataset = self.get_pair(static_files['rutgers'])\n result = self.cf.check_convention_globals(dataset)\n for each in result:\n self.assertTrue(each.value)\n #check if it doesn't exist that we pass\n dataset = self.get_pair(static_files['bad_data_type'])\n result = self.cf.check_convention_globals(dataset)\n for each in result:\n self.assertTrue(each.value)", "title": "" }, { "docid": "8bab66a02b35d28d4de40e4ec109cd4d", "score": "0.46593398", "text": "def test_expand_environment_variables(self):\n include_prefixes = ['-I']\n db = CCppProperties(include_prefixes)\n environ['TEST_VARIABLE_TO_EXPAND'] = '/lib_include_dir'\n\n expected = [Flag('-I', path.normpath('/lib_include_dir')),\n Flag('', '-Dlib_EXPORTS')]\n path_to_db = path.join(path.dirname(__file__),\n 'c_cpp_properties_files',\n 'environment')\n scope = SearchScope(from_folder=path_to_db)\n print(scope)\n self.assertEqual(expected, db.get_flags(search_scope=scope))", "title": "" }, { "docid": "737ca03636f05988fb61d1403ff7c65a", "score": "0.4657743", "text": "def test_popen_env_replaces():\n out = check_output([\"env\"], env=dict(SOMEVAR=\"/tmp/foo\"))\n assert out.decode().strip() == \"SOMEVAR=/tmp/foo\"", "title": "" }, { "docid": "814c4a732ab01201f598aa5def04bdd6", "score": "0.46574524", "text": "def checkClientMode(): # XXX WTF!???! 
This also exists in config.py.\n try:\n c = Config()\n return c.checkClientMode()\n except EnvError:\n return False", "title": "" }, { "docid": "d26560ecc02d6d7bea6ebdf5add5e9a8", "score": "0.46559483", "text": "def with_env(lst):\n ret = os.environ.copy()\n for l in lst:\n (n,v) = l\n ret[n] = v\n return ret", "title": "" }, { "docid": "4abb38303b0633078931aa1e6292af2b", "score": "0.46489725", "text": "def test_command_env(self):\n env = {'FOOBAR': 'foobar', 'BIN_PATH': 'foobar'}\n cmd = BuildCommand('echo', environment=env)\n for key in list(env.keys()):\n self.assertEqual(cmd.environment[key], env[key])", "title": "" }, { "docid": "1f918483d69a79bc112bbdc6e16f6419", "score": "0.46415547", "text": "def standard_env(config=environ):\n expected = ('local', 'standard')\n if isinstance(config, dict) or config is environ:\n gae_env = config.get('GAE_ENV', None)\n gae_instance = config.get('GAE_INSTANCE', None)\n else:\n gae_env = getattr(config, 'GAE_ENV', None)\n gae_instance = getattr(config, 'GAE_INSTANCE', None)\n if config is not environ:\n gae_env = gae_env or environ.get('GAE_ENV', None)\n gae_instance = gae_instance or environ.get('GAE_INSTANCE', None)\n code_environment = 'local' if not gae_instance else gae_env\n if code_environment in expected:\n return True\n return False", "title": "" }, { "docid": "bf59d646285b53cf4843bcf185c2d602", "score": "0.46400386", "text": "def GetOptionsRc( self, rcfile, rcbase='', env={} ) :\n \n # call parent:\n lines = UtopyaJobScriptBatch.GetOptionsRc( self, rcfile, 'batch.test', rcbase=rcbase, env=env )\n \n # ok\n return lines", "title": "" }, { "docid": "3bc43a8c49b8bf060658b25515cdebbb", "score": "0.4633241", "text": "def tox_runtest_pre(venv):", "title": "" }, { "docid": "909adf8a151e6e60da7e384e857dca8a", "score": "0.4631524", "text": "def test_apply_previous_cli_mixed():\n\n params = \"doc shell --ee False --eei test_image:latest --forks 15\"\n application_configuration = deepcopy(NavigatorConfiguration)\n\n configurator = Configurator(\n application_configuration=application_configuration,\n params=params.split(),\n initial=True,\n )\n with mock.patch.dict(os.environ, {\"ANSIBLE_NAVIGATOR_PASS_ENVIRONMENT_VARIABLES\": \"ENV1,ENV2\"}):\n _messages, exit_messages = configurator.configure()\n assert exit_messages == []\n\n assert isinstance(application_configuration.initial, ApplicationConfiguration)\n\n expected = [\n (\"app\", \"doc\", C.USER_CLI),\n (\"cmdline\", [\"--forks\", \"15\"], C.USER_CLI),\n (\"execution_environment\", False, C.USER_CLI),\n (\"execution_environment_image\", \"test_image:latest\", C.USER_CLI),\n (\"pass_environment_variable\", [\"ENV1\", \"ENV2\"], C.ENVIRONMENT_VARIABLE),\n (\"plugin_name\", \"shell\", C.USER_CLI),\n ]\n for expect in expected:\n assert application_configuration.entry(expect[0]).value.current == expect[1]\n assert application_configuration.entry(expect[0]).value.source is expect[2]\n\n params = \"doc shell --eei different_image:latest\"\n configurator = Configurator(\n application_configuration=application_configuration,\n params=params.split(),\n apply_previous_cli_entries=C.ALL,\n )\n with mock.patch.dict(os.environ, {\"ANSIBLE_NAVIGATOR_SET_ENVIRONMENT_VARIABLES\": \"ENV1=VAL1\"}):\n _messages, exit_messages = configurator.configure()\n assert exit_messages == []\n\n expected = [\n (\"app\", \"doc\", C.USER_CLI),\n (\"cmdline\", [\"--forks\", \"15\"], C.PREVIOUS_CLI),\n (\"execution_environment\", False, C.PREVIOUS_CLI),\n (\"execution_environment_image\", \"different_image:latest\", 
C.USER_CLI),\n (\"pass_environment_variable\", C.NOT_SET, C.NOT_SET),\n (\"plugin_name\", \"shell\", C.USER_CLI),\n (\"set_environment_variable\", {\"ENV1\": \"VAL1\"}, C.ENVIRONMENT_VARIABLE),\n ]\n for expect in expected:\n assert application_configuration.entry(expect[0]).value.current == expect[1]\n assert application_configuration.entry(expect[0]).value.source is expect[2]", "title": "" }, { "docid": "70e9001c571d92864fae29ed1c140062", "score": "0.4629221", "text": "def validate(runtime_env_dict: dict) -> None:\n pass", "title": "" }, { "docid": "06bd33e68183350ec2b6d0d5b441d185", "score": "0.462694", "text": "def test_shell_exec_flag(test_resources, modify_root_config, cli_args, monkeypatch):\n with monkeypatch.context() as m:\n m.setattr(flags, 'get_flag', lambda *a: False)\n root_config_file = test_resources['umapi_root_config']\n\n args = cli_args({'config_filename': root_config_file})\n modify_root_config(['directory_users', 'connectors', 'ldap'], \"$(some command)\")\n with pytest.raises(AssertionException):\n UMAPIConfigLoader(args)", "title": "" }, { "docid": "61e9717dcda16ed765b287cb6bd5f52b", "score": "0.46254238", "text": "def should_wrap(self, env, arg, kwargs):\n return True", "title": "" }, { "docid": "06e13bd415a8c287c6fb926a98da9e2b", "score": "0.46218637", "text": "def _is_venv(self):\n return (hasattr(sys, 'real_prefix') or\n (hasattr(sys, 'base_prefix') and sys.base_prefix != sys.prefix))", "title": "" }, { "docid": "4026df2e99892eb8686de286fa138c17", "score": "0.46179384", "text": "def __call__(self, target, creds, enforcer):\n\n return self.match.lower() in [x.lower() for x in creds['roles']]", "title": "" }, { "docid": "b05c3c7c90cb296694386e89a3940d9c", "score": "0.46172443", "text": "def apply_env_changes(config, prefix='KQUEEN_'):\n\n for name, value in os.environ.items():\n if name.startswith(prefix):\n config_key_name = name[len(prefix):]\n if re.search('(?i)true|(?i)false', value):\n value = util.strtobool(value)\n setattr(config, config_key_name, value)", "title": "" }, { "docid": "809eae672cd1ff0de7240e72994e6c5f", "score": "0.4610705", "text": "def test_integration_basic_env(self):\n env = PathEnv()\n run_env(env)", "title": "" }, { "docid": "5444f647a11b85721eb9a7060cb9359b", "score": "0.4610022", "text": "def test_prevalidate_arguments_local_linux_only(self, mock_platform):\n\n mock_platform.system.return_value = \"Linux\"\n self.register.prevalidate_arguments(self._build_args(\n infrastructure_class=\"on-premises\", target=None, local=True))\n with self.assertRaises(ValueError):\n mock_platform.system.return_value = \"Windows\"\n self.register.prevalidate_arguments(self._build_args(\n infrastructure_class=\"on-premises\", target=None, local=True))", "title": "" }, { "docid": "efd6b04b15dc05006d60b2da30a2354a", "score": "0.4609045", "text": "def test_testenv_uses_pytest(self, testenv_commands):\n assert any([command.startswith('py.test') for command in testenv_commands])", "title": "" }, { "docid": "8257f2d8327d547d9ec6f30fa89abed7", "score": "0.4608534", "text": "def is_valid_environment(env):\n valid_envs = config.nova_creds.sections()\n return env in valid_envs", "title": "" }, { "docid": "78bd2f27b8adbbbc1a0bc54ad79cb297", "score": "0.45972687", "text": "def test_conditions(self):\n pass", "title": "" }, { "docid": "b3b01789d8fb52f74ec1564bd99bd69c", "score": "0.45946056", "text": "def check_settings_prerequisites(self, global_settings) -> bool:\n pass", "title": "" }, { "docid": "8fae25af734701004625a06a82cb5ecb", "score": "0.45915082", "text": 
"def test_env_actions(self):\n actions = self.source.env_actions(self.device)\n assert len(actions) == len(self.env)\n assert isinstance(actions[0], list)", "title": "" }, { "docid": "7b34a9a9195087cbcf8cbee360a99225", "score": "0.45909333", "text": "def _check_prereqs_postvars(self):\r\n\r\n self.validate_scan_location() # Implemented in derived class.\r\n self._validate_output_files_paths()\r\n self._validate_checkpoint()", "title": "" }, { "docid": "572a3686f818e0cede7fe8d13efaf826", "score": "0.45883512", "text": "def PredefinedPresetsFirst(self) -> bool:", "title": "" }, { "docid": "0440625bdea2bde8e52ed463c93fb025", "score": "0.4587772", "text": "def test_break_vpc_configuration_usual_case():", "title": "" }, { "docid": "3666f356ec74f3ee3160c9e57a589f8a", "score": "0.45832643", "text": "def test_apply_previous_cli_cmdline_not_applied():\n params = \"run /tmp/site.yml --ee False --forks 15\"\n application_configuration = deepcopy(NavigatorConfiguration)\n configurator = Configurator(\n application_configuration=application_configuration,\n params=params.split(),\n initial=True,\n )\n\n _messages, exit_messages = configurator.configure()\n assert exit_messages == []\n\n assert isinstance(application_configuration.initial, ApplicationConfiguration)\n\n expected = [\n (\"app\", \"run\"),\n (\"cmdline\", [\"--forks\", \"15\"]),\n (\"execution_environment\", False),\n (\"playbook\", \"/tmp/site.yml\"),\n ]\n\n for expect in expected:\n assert application_configuration.entry(expect[0]).value.current == expect[1]\n assert application_configuration.entry(expect[0]).value.source is C.USER_CLI\n\n params = \"doc shell\"\n configurator = Configurator(\n application_configuration=application_configuration,\n params=params.split(),\n apply_previous_cli_entries=C.ALL,\n )\n _messages, exit_messages = configurator.configure()\n assert exit_messages == []\n\n expected = [\n (\"app\", \"doc\", C.USER_CLI),\n (\"cmdline\", C.NOT_SET, C.NOT_SET),\n (\"execution_environment\", False, C.PREVIOUS_CLI),\n (\"playbook\", \"/tmp/site.yml\", C.PREVIOUS_CLI),\n (\"plugin_name\", \"shell\", C.USER_CLI),\n ]\n\n for expect in expected:\n assert application_configuration.entry(expect[0]).value.current == expect[1]\n assert application_configuration.entry(expect[0]).value.source is expect[2]", "title": "" }, { "docid": "e662df68db0bfc958b227fbe7d09df89", "score": "0.45793095", "text": "def test_noenv(self):\n if not os.path.exists('/proc/self/environ'):\n raise SkipTest(\"'/proc/self/environ' not available\")\n\n env = os.environ.copy()\n env['SPT_TESTENV'] = 'testenv'\n rv = self.run_script(\"\"\"\n import os\n os.environ['SPT_NOENV'] = \"1\"\n\n cmdline_len = len(open('/proc/self/cmdline').read())\n print cmdline_len\n print 'SPT_TESTENV=testenv' in open('/proc/self/environ').read()\n\n import setproctitle\n setproctitle.setproctitle('X' * cmdline_len * 10)\n\n title = open('/proc/self/cmdline').read().rstrip()\n print title\n print len(title)\n\n print 'SPT_TESTENV=testenv' in open('/proc/self/environ').read()\n \"\"\", env=env)\n lines = rv.splitlines()\n cmdline_len = int(lines[0])\n self.assertEqual(lines[1], 'True', \"can't verify testenv\")\n title = lines[2]\n self.assert_('XXX' in self._clean_up_title(title),\n \"title not set as expected\")\n title_len = int(lines[3])\n self.assertEqual(lines[4], 'True', \"env has been clobbered\")\n self.assert_(title_len <= cmdline_len,\n \"title (len %s) not limited to argv (len %s)\"\n % (title_len, cmdline_len))", "title": "" }, { "docid": 
"ec766c6b1dc49b250a1a6304dfd66387", "score": "0.45789647", "text": "def config_from_env_and_argv(\n request: SubRequest, module_tmp_path: Path\n) -> Generator[bool, None, None]:\n argv = request.param + [str(module_tmp_path / \"dummy.py\")]\n cache_key = (\n tuple(request.param),\n os.getenv(\"NO_COLOR\"),\n os.getenv(\"FORCE_COLOR\"),\n os.getenv(\"PY_COLORS\"),\n (module_tmp_path / \"pyproject.toml\").read_bytes(),\n )\n if cache_key not in config_cache:\n _, config, _ = parse_command_line(argv)\n config_cache[cache_key] = config[\"color\"]\n yield config_cache[cache_key]", "title": "" }, { "docid": "8aeed39941ebb39eb14e5975101d86cd", "score": "0.4578479", "text": "def test_apply_previous_cli_specified():\n params = \"doc shell --ee False --eei test_image:latest --forks 15\"\n application_configuration = deepcopy(NavigatorConfiguration)\n configurator = Configurator(\n application_configuration=application_configuration,\n params=params.split(),\n initial=True,\n )\n\n _messages, exit_messages = configurator.configure()\n assert exit_messages == []\n assert isinstance(application_configuration.initial, ApplicationConfiguration)\n\n expected = [\n (\"app\", \"doc\"),\n (\"cmdline\", [\"--forks\", \"15\"]),\n (\"execution_environment\", False),\n (\"execution_environment_image\", \"test_image:latest\"),\n ]\n for expect in expected:\n assert application_configuration.entry(expect[0]).value.current == expect[1]\n assert application_configuration.entry(expect[0]).value.source is C.USER_CLI\n\n params = \"doc shell\"\n configurator = Configurator(\n application_configuration=application_configuration,\n params=params.split(),\n apply_previous_cli_entries=[\"execution_environment\", \"execution_environment_image\"],\n )\n _messages, exit_messages = configurator.configure()\n assert exit_messages == []\n\n expected = [\n (\"app\", \"doc\", C.USER_CLI),\n (\"cmdline\", C.NOT_SET, C.NOT_SET),\n (\"execution_environment\", False, C.PREVIOUS_CLI),\n (\"execution_environment_image\", \"test_image:latest\", C.PREVIOUS_CLI),\n (\"plugin_name\", \"shell\", C.USER_CLI),\n ]\n for expect in expected:\n assert application_configuration.entry(expect[0]).value.current == expect[1]\n assert application_configuration.entry(expect[0]).value.source is expect[2]", "title": "" }, { "docid": "941bad702bf4f2f0b33628e73e0d6be4", "score": "0.45750207", "text": "def parse_environment_paths(paths):\n ...", "title": "" } ]
52e98db146334c52dcf6cc896b73dc5f
Identify stream handlers writing to the given streams(s).
[ { "docid": "08b2c4b0c538d95191f54db5270cadb1", "score": "0.6958262", "text": "def match_stream_handler(handler, streams=[]):\n return (isinstance(handler, logging.StreamHandler) and\n getattr(handler, 'stream') in (streams or (sys.stdout, sys.stderr)))", "title": "" } ]
[ { "docid": "d3ad778637d048076f6d24a6fcd020c0", "score": "0.59115714", "text": "def _file_descriptors_work(*streams):\n # test whether we can get fds for out and error\n try:\n for stream in streams:\n stream.fileno()\n return True\n except BaseException:\n return False", "title": "" }, { "docid": "f714c7cb393fff2810052b2d8599a3d9", "score": "0.57894886", "text": "def _redirect_logging_stream_handlers(self, old_stream, new_stream):\r\n for handler in _current_handlers():\r\n points_to_stream = (isinstance(handler, logging.StreamHandler) and\r\n hasattr(handler.stream, 'fileno') and\r\n handler.stream.fileno() == old_stream.fileno())\r\n if points_to_stream:\r\n logger.removeHandler(handler)\r\n handler.close() # doesn't close the stream, just the handler\r\n\r\n new_handler = logging.StreamHandler(new_stream)\r\n new_handler.setLevel(handler.level)\r\n new_handler.setFormatter(handler.formatter)\r\n for log_filter in handler.filters:\r\n new_handler.addFilter(log_filter)\r\n logger.addHandler(new_handler)", "title": "" }, { "docid": "a3b2d210fe1e6b703850d206d1d2efc3", "score": "0.56561226", "text": "def getLogFileHandles(self,logger):\n handles = []\n for handler in logger.handlers:\n handles.append(handler.stream.fileno())\n if logger.parent:\n handles += self.getLogFileHandles(logger.parent)\n return handles", "title": "" }, { "docid": "92865e0479a15939f86224f0dd886892", "score": "0.5494242", "text": "def _get_handlers(self) -> Union[Iterable[logging.Handler], logging.Handler]:\n handler = logging.StreamHandler(sys.stdout)\n handler.setLevel(self.level)\n handler.setFormatter(self._get_logging_formatter())\n\n return handler", "title": "" }, { "docid": "0f641449d03b677c9cde7432b56113a5", "score": "0.54203", "text": "def getSinkStreamIndices(self):\n streams = os.popen('pacmd list-sink-inputs| grep index:').readlines()\n sinks = os.popen('pacmd list-sink-inputs| grep sink:').readlines()\n \n active_streams = []\n active_sinks = []\n\n for i in range(len(streams)):\n stream = streams[i]\n index = int(stream.split(': ')[1].split('\\n')[0])\n active_streams.append(index)\n \n sink = sinks[1]\n name = sink.split('<')[1].split('>')[0]\n active_sinks.append(name)\n\n return active_streams, active_sinks", "title": "" }, { "docid": "0d881d234f0ad070a16cbd85fafa17d0", "score": "0.539732", "text": "def run(self):\n try:\n while 1:\n readers = []\n writers = []\n for fileno, elem in self.fileno_map.items():\n if elem.register_as_reader():\n readers.append(fileno)\n if elem.register_as_writer():\n writers.append(fileno)\n\n readers_ready, writers_ready, _ = select.select(\n readers, writers, [])\n for reader in readers_ready:\n self.fileno_map[reader].read()\n for writer in writers_ready:\n self.fileno_map[writer].write()\n except:\n for handler in self.fileno_map.values():\n handler.close()\n raise", "title": "" }, { "docid": "28a97c93d4055aae016a7cc829e04482", "score": "0.5356919", "text": "def forward_streams(self):\n self.replaced_streams = [FdReplacer(\"stdout\"), FdReplacer(\"stderr\")]", "title": "" }, { "docid": "80c04e33f1c5ffc32b489210008fa7dd", "score": "0.53082997", "text": "def emit(self):\n if not self.handlers or \\\n not isinstance(self.handlers, list):\n logging.error(\"invalid handlers: must be a list\")\n return False\n\n for handler in self.handlers:\n for source in self.sources:\n handler.handle(source)", "title": "" }, { "docid": "9ffb5b3f106508c23afb44def87fd0d7", "score": "0.5294008", "text": "def connect_streams(self) -> None:\n identity = f'{self.input_file}#{self.index}'\n 
for stream in self.streams:\n stream.connect_input(identity)", "title": "" }, { "docid": "0dc0323136b1006ea66e89e484554d8d", "score": "0.52279246", "text": "def register(self, handler, types=None):\n if not isinstance(types, (list, tuple)):\n types = [types]\n for type in types:\n self[type] = handler\n if handler.isOutput:\n self.all_output_handlers.append(handler)", "title": "" }, { "docid": "d34f012fbbf41d62e0d1475c10921991", "score": "0.51887727", "text": "def _restore_stream_handlers(self):\r\n self._redirect_logging_stream_handlers(self._fd_copy_stream,\r\n self._stream)\r\n self._fd_copy_stream.close()", "title": "" }, { "docid": "552335ea13f189ac7e9d3c1c034efd2d", "score": "0.5184194", "text": "def writes(*schemes):\n def decorator(f):\n _writers.update((scheme, f) for scheme in schemes)\n return f\n return decorator", "title": "" }, { "docid": "23eb01a35ba1594e31854ae1eef8dc9d", "score": "0.51554763", "text": "def _handle_alternate_sinks(self, interval, sinks, source):\n self._add_derived_event(interval, source, LadConfigAll._wad_table_name(interval), 'Central')\n for name in sinks:\n sink = self._sink_configs.get_sink_by_name(name)\n if sink is None:\n self._logger_log(\"Ignoring sink '{0}' for which no definition was found\".format(name))\n elif sink['type'] == 'EventHub':\n if 'sasURL' in sink:\n self._add_streaming_annotation(source, sink['sasURL'])\n else:\n self._logger_error(\"Ignoring EventHub sink '{0}': no 'sasURL' was supplied\".format(name))\n elif sink['type'] == 'JsonBlob':\n self._add_derived_event(interval, source, name, 'JsonBlob')\n else:\n self._logger_log(\"Ignoring sink '{0}': unknown type '{1}'\".format(name, sink['type']))", "title": "" }, { "docid": "8e4fe324ca672521817fc61abda06e18", "score": "0.51226056", "text": "def register (self, fileno, events):\n\n if events & self.EVENT_ERROR:\n self._error_filenos.add(fileno)\n\n if events & self.EVENT_READ:\n self._read_filenos.add(fileno)\n\n if events & self.EVENT_WRITE:\n self._write_filenos.add(fileno)", "title": "" }, { "docid": "324afa3b9b661c4640895028b0af2be5", "score": "0.50651944", "text": "def _default_handlers(stream, logging_level):\n # Create the filter.\n def should_log(record):\n \"\"\"Return whether a logging.LogRecord should be logged.\"\"\"\n if record.name.startswith(\"webkitpy.thirdparty\"):\n return False\n return True\n\n logging_filter = logging.Filter()\n logging_filter.filter = should_log\n\n # Create the handler.\n handler = logging.StreamHandler(stream)\n if logging_level == logging.DEBUG:\n formatter = logging.Formatter(\"%(name)s: [%(levelname)s] %(message)s\")\n else:\n formatter = logging.Formatter(\"%(message)s\")\n\n handler.setFormatter(formatter)\n handler.addFilter(logging_filter)\n\n return [handler]", "title": "" }, { "docid": "a44f1a37cf7c72c5e068747b62b08efa", "score": "0.5030907", "text": "def streams(self, streams):\n # type: (list) -> None\n\n if streams is not None:\n if not isinstance(streams, list):\n raise TypeError(\"Invalid type for `streams`, type has to be `list[MuxingStream]`\")\n\n self._streams = streams", "title": "" }, { "docid": "c2f8409b2f17cea04b5599b47db19f30", "score": "0.5030769", "text": "def retrieve_console_handlers(*, debug=False):\n stdout_handler = logging.StreamHandler(sys.stdout)\n stdout_handler.setFormatter(CONSOLE_FORMATTER)\n stdout_handler.addFilter(lambda record: record.levelno <= logging.INFO)\n\n if debug:\n stdout_handler.setLevel(logging.DEBUG)\n else:\n stdout_handler.setLevel(logging.INFO)\n\n stderr_handler = 
logging.StreamHandler(sys.stderr)\n stderr_handler.setLevel(logging.WARNING)\n stderr_handler.setFormatter(CONSOLE_FORMATTER)\n\n return stdout_handler, stderr_handler", "title": "" }, { "docid": "81c81f2bb8d9d898f9938271394f4f91", "score": "0.49870098", "text": "def _check_handler_exists(find_handler, logger):\n for existing_handler in list(logger.handlers):\n handler_streamname = getattr(find_handler.stream, 'name', str(find_handler.stream))\n existing_handler_streamname = getattr(existing_handler.stream, 'name', str(existing_handler.stream))\n\n if handler_streamname != existing_handler_streamname:\n # Did not match, move on\n continue\n # We have a handler with this name, return the state\n return True\n return False", "title": "" }, { "docid": "dcfc9033992978ccf531f74fc0b6f31b", "score": "0.4970243", "text": "def mock_streams(*which):\n def mocked_streams_decorator(func):\n @wraps(func)\n def inner_wrapper(*args, **kwargs):\n if 'stdout' in which:\n my_stdout, sys.stdout = sys.stdout, StringIO()\n if 'stderr' in which:\n my_stderr, sys.stderr = sys.stderr, StringIO()\n result = func(*args, **kwargs)\n if 'stderr' in which:\n sys.stderr = my_stderr\n if 'stdout' in which:\n sys.stdout = my_stdout\n return result\n return inner_wrapper\n return mocked_streams_decorator", "title": "" }, { "docid": "ec929dff2aa9bce09527e3b7cb77e124", "score": "0.49621823", "text": "def add_handlers(self, logger, handlers):\r\n for h in handlers:\r\n try:\r\n logger.addHandler(self.config['handlers'][h])\r\n except StandardError as e:\r\n raise ValueError('Unable to add handler %r: %s' % (h, e))", "title": "" }, { "docid": "606482221ab513fd2d563c4bb85175ee", "score": "0.49463058", "text": "def _point_stream_handlers_to_copy(self):\r\n fd_copy = os.dup(self._fd)\r\n self._fd_copy_stream = os.fdopen(fd_copy, 'w')\r\n self._redirect_logging_stream_handlers(self._stream,\r\n self._fd_copy_stream)", "title": "" }, { "docid": "4333e71a5efaec77f7c81b609aa00cfc", "score": "0.49388576", "text": "def allStreams(self):\n raise NotImplementedError()", "title": "" }, { "docid": "d5ad6301a5baeddede6390b40012e49d", "score": "0.49009672", "text": "def create_redirection_streams(self):\n self.stdout_file = create_output_stream(True, False, common=False)\n self.stderr_file = create_output_stream(True, False, common=False)", "title": "" }, { "docid": "d44eac2042714a66ef7bb8b2163d82c6", "score": "0.48975885", "text": "def apply_logger_handlers() -> None:\n # create handlers\n c_handler = logging.StreamHandler()\n f_handler = logging.FileHandler(f'/tmp/{cfg.app_name}.log')\n\n c_handler.setLevel(logging.INFO)\n f_handler.setLevel(logging.INFO)\n\n # create formatters and add it to handlers\n formatter = logging.Formatter('%(asctime)s - %(name)s - '\n '%(levelname)s - %(message)s')\n\n c_handler.setFormatter(formatter)\n f_handler.setFormatter(formatter)\n\n # Add handlers to the logger\n logger.addHandler(c_handler)\n logger.addHandler(f_handler)", "title": "" }, { "docid": "7bab18a24b52565316d7ba9bc501d5a0", "score": "0.48591053", "text": "def handles_write(self, fd, callback):\n return callback in self.handlers[fd]['write']", "title": "" }, { "docid": "09f29cba5b919c7f34d3f6ec1ddcd318", "score": "0.482538", "text": "def getLogHandlers(self):\n\t\treturn self.__logHandlers", "title": "" }, { "docid": "09f29cba5b919c7f34d3f6ec1ddcd318", "score": "0.482538", "text": "def getLogHandlers(self):\n\t\treturn self.__logHandlers", "title": "" }, { "docid": "98eb399c828abdd4d4d479d266bc96ee", "score": "0.47970876", "text": "def 
mapStreamsByType(someStreams, fileIndex, argsArray):\n for stream in someStreams:\n argsArray.append('-map')\n argsArray.append(fileIndex + str(stream['index']))", "title": "" }, { "docid": "214486373c23fad24d9d66c02f8da461", "score": "0.476771", "text": "def poll_streams():\n if _win: # pragma: no cover\n # select.select is not supported on windows\n result.read_stdout(src=stdout, log=logger, verbose=verbose)\n result.read_stderr(src=stderr, log=logger, verbose=verbose)\n else: # pragma: no cover\n rlist, _, _ = select.select(\n [item for item in (stdout, stderr) if item is not None],\n [],\n [])\n if rlist:\n if stdout in rlist:\n result.read_stdout(\n src=stdout,\n log=logger,\n verbose=verbose\n )\n if stderr in rlist:\n result.read_stderr(\n src=stderr,\n log=logger,\n verbose=verbose\n )", "title": "" }, { "docid": "800eff17c0a83993dba604e4d8716649", "score": "0.4761252", "text": "def handle_stream(self, stream, address):\r\n raise NotImplementedError()", "title": "" }, { "docid": "ed4cdb7bb56f727789af0696d126ea50", "score": "0.4756942", "text": "def __update_handler(self, handler, for_loggers, levelno, file_pat=None):\n for_names = []\n for logger in for_loggers:\n for_names.append(logger.name)\n\n orig_level = handler.level\n name = 'Filter({0}@{1})'.format(for_names, levelno)\n filter = _LoggingConfigFilter(for_names, levelno, orig_level, name,\n file_pat=file_pat)\n handler.addFilter(filter)\n handler.setLevel(logging.NOTSET)", "title": "" }, { "docid": "5dbd474b3a6c4f5503a46b2ae047a2c9", "score": "0.47542244", "text": "def _batch_send_msg(self, streams, msg):\n count = 0\n if streams:\n packed_msg = self._pack_msg(msg)\n for stream in streams:\n if stream is not None and not stream.closed():\n stream.write(packed_msg, lambda: None)\n count += 1\n return count", "title": "" }, { "docid": "8a6bd3b764857a12f3f409ef5549444a", "score": "0.47516376", "text": "def verify_log_streams_data(self, logs_state, expected_stream_index, observed_streams):\n for stream in observed_streams:\n self._verify_log_stream_data(logs_state, expected_stream_index, stream)", "title": "" }, { "docid": "2d97b19051222a12754f3848f2c77a18", "score": "0.47494656", "text": "def AddHandlers (self, **Entries):\n\tfor Entry in Entries:\n\t if Entry in self._io_offset:\n\t self._AddIo (self._io_offset [Entry], Entries [Entry])\n\t elif Entry in self._connect_offset:\n\t self._AddConnect (self._connect_offset [Entry], Entries [Entry])\n\t else:\n\t print \"Resmgr.Add - No IO or Connect entry found for %s\" % Entry", "title": "" }, { "docid": "9b3c2a43956adbb9ef4739142e4e774d", "score": "0.474863", "text": "def setup(self):\n if self.has_been_setup:\n warn(\"Repeated call to setup detected.\"\n \"This might lead to unexpected behavior\")\n status = 1\n else:\n status = 0\n\n for route in self.routes:\n source = route[0] # type: MeasurementStreamHandler\n consumer = route[1] # type: ModelHandler\n\n if not isinstance(consumer, ModelHandler):\n t = str(type(consumer))\n raise TypeError(\"Expected a ModelHandler type, \"\n \"got {}\".format(t))\n source.add_consumer(consumer)\n\n if not isinstance(source, MeasurementStreamHandler):\n t = str(type(source))\n raise TypeError(\"Expected a MeasurementStreamHandler type, \"\n \"got {} instead\".format(t))\n consumer.add_source(source)\n self.has_been_setup = True\n return status", "title": "" }, { "docid": "23e6aba770f0fd890ee09524c0e40bae", "score": "0.47324", "text": "def manage_stream(self, stream, level, stream_setter):\r\n if self._started:\r\n raise RuntimeError('You 
must call this before start_logging()')\r\n self._streams.append(self.STREAM_MANAGER_CLASS(stream, level,\r\n stream_setter))", "title": "" }, { "docid": "a0bc8709fa6b6e6a2cf2d4ccc62a15b7", "score": "0.47181237", "text": "def ison(self, anames):\n\n logger.info('PyroHandler.ison(%s) called' % anames)\n retlist = []\n for aname in anames:\n if aname.upper() in OUTPUTS.keys():\n with self.lock:\n retlist.append(OUTPUTS[aname.upper()].ison())\n else:\n retlist.append(None)\n return retlist", "title": "" }, { "docid": "c2b679bdbd9ef416e7041035a90f04c7", "score": "0.47085863", "text": "def read_loggingTargets(self):\n # PROTECTED REGION ID(SKABaseDevice.loggingTargets_read) ENABLED START #\n return [str(handler.name) for handler in self.logger.handlers]\n # PROTECTED REGION END # // SKABaseDevice.loggingTargets_read", "title": "" }, { "docid": "5315179d3781662827c3f6e5d667ee1c", "score": "0.47026244", "text": "def RegisterSockets(self):\n\t\tevents = select.POLLIN | select.POLLPRI | select.POLLERR\n\n\t\t# register auth sockets\n\t\tfor sock in self.authfds:\n\t\t\tself.fdmap[sock.fileno()] = (sock, SOCKTYPE_AUTH)\n\t\t\tself.pollobj.register(sock, events)\n\n\t\t# register accounting sockets\n\t\tfor sock in self.acctfds:\n\t\t\tself.fdmap[sock.fileno()] = (sock, SOCKTYPE_ACCT)\n\t\t\tself.pollobj.register(sock, events)", "title": "" }, { "docid": "c6f3ac5ade3d4d72d54f88ff0dedbd25", "score": "0.47000453", "text": "def reopen_files(self):\r\n for log in (self.error_log, self.access_log):\r\n for h in log.handlers:\r\n if isinstance(h, logging.FileHandler):\r\n h.acquire()\r\n h.stream.close()\r\n h.stream = open(h.baseFilename, h.mode)\r\n h.release()", "title": "" }, { "docid": "4adf54a5c16a11a2a0b647a32a515543", "score": "0.4689143", "text": "def ProcessStreams(opts, order, callername, extraArgs=[], inifiles=[]):\n # Validate input options, and update if necessary. The opts object\n # will be modified in-place\n validateOptions(opts)\n\n # Generate a final dummy stream - a \"catch-all\" whose sole function is\n # to track any records not captured by any other stream. This stream\n # produces no output and is not recorded in stats, other than to show\n # the output counts\n dummyArgs = 'unstreamed,AA,sam,nout'\n dummytokens = Tokenize(dummyArgs)\n dummyStream = SAMStream('n/a', dummyArgs)\n dummyStream.parseOperatorTokens(dummytokens)\n opts.orderedStreams.append(dummyStream)\n\n # Preprocess streams (or a subset of them). 
This function may return\n # immediately if certain operators are not set\n heartbeat = RecordCounter(opts.inputfile)\n heartbeat.header()\n preProcessStreams(opts, heartbeat)\n\n # Set up controller and writer processes if multi-processing enabled\n if opts.nproc == 1:\n heartbeat.message(\"Processing on a single core\", True)\n else:\n if opts.QLOW > (opts.QHIGH * 0.7):\n opts.QLOW = int(round(opts.QHIGH * 0.7))\n\n Controller = MPController(heartbeat, numProc=opts.nproc,\n chunkSize=opts.chunksize)\n Controller.start()\n # Use a simple or ordered writer, depending on option\n if opts.mp_unsorted:\n Writer = Concatenator(Controller.results, Controller.writerConn[1],\n opts.nproc, Controller.Counter, Controller.getpids())\n else:\n Writer = OrderedWriter(Controller.results, Controller.writerConn[1],\n opts.nproc, Controller.Counter)\n Writer.defibrillate(heartbeat)\n Writer.start()\n pids = Controller.getpids()\n pids.append(Writer.pid)\n write_kill_script(pids)\n\n #print \"PROCESS ID Controller (self): \", os.getpid()\n #print \"PROCESS ID processors:\", Controller.getpids()\n #print \"PROCESS ID Writer:\", Writer.pid\n #print \"NUM PROCESSES:\", opts.nproc\n #print \"QUEUE HIGH WATERMARK:\", opts.QHIGH\n #print \"QUEUE LOW WATERMARK:\", opts.QLOW\n\n # Check and set limit value\n if opts.limit is not None:\n full_limit = opts.limit\n else:\n full_limit = 0\n\n # Set up InfoFile run parameters\n opts.infofile.openELN(sys.argv[1:] + extraArgs)\n for file in inifiles:\n opts.infofile.extraFiles.append(FileAndIdentifier(file))\n infofilecopy = opts.infofile.copy()\n\n # Set up output directories. Output directory names will be stored as\n # an attribute 'outputDirectories' in opts\n makeDirectories(opts, callername)\n\n # Set up initial counter and update heartbeat\n total = 0\n heartbeat.update(force=True)\n\n # In Multi-processor mode, begin iterating through the input file\n # and converting pairs of lines into StreamProcessor callable task\n # objects before adding to the process queue\n # Iterate through records in input file.\n if opts.nproc > 1:\n Controller.Send( { 'orderedStreams' : opts.orderedStreams,\n })\n paircount = 0\n chunkID = 0\n dataBloc = []\n pairBloc = []\n # Attempt open as a gzipped file, otherwise fall back\n try:\n fp = gzip.open(opts.inputfile, 'rb')\n fp._read_gzip_header()\t# Throws an exception if not gzipped\n fp.rewind()\t\t\t# Rewind on success\n fpIterator = fp.readlines\t# Store file line iterator\n except IOError:\n fp.close()\n fp = open(opts.inputfile, 'r')\n fpIterator = fp.xreadlines\t# Store file line iterator\n\n for line in fpIterator():\n if line.startswith('@'):\n continue\n pairBloc.append(line)\n paircount += 1\n if paircount % 2 == 0:\n dataBloc.append(pairBloc)\n pairBloc = []\n total += 1\n if len(dataBloc) == opts.chunksize:\n Controller.add(StreamProcessor(chunkID, dataBloc, opts),\n opts.QHIGH, opts.QLOW)\n dataBloc = []\n chunkID += 1\n # Break out of processing loop if --limit records processed\n if full_limit > 0 and full_limit == total:\n break\n fp.close()\n\n # Any remaining pairs go into a final task construct\n if len(dataBloc) > 0:\n Controller.add(StreamProcessor(chunkID, dataBloc, opts),\n opts.QHIGH, opts.QLOW)\n dataBloc = []\n chunkID += 1\n total += len(dataBloc)\n\n Controller.finishQueue()\n Controller.wait()\n dataDict = Controller.Recv()\n Controller.finishProcesses()\n opts.orderedStreams = dataDict['orderedStreams']\n\n # In single-processor mode use HTSeq to iterate through the input file\n # returning record pairs 
for processing. A tally is kept of record pairs\n # which do not get filtered out by any stream\n else:\n for pair in ReadSAMFile(opts.inputfile):\n total += 1\n for stream in opts.orderedStreams:\n pair = stream.next(pair, opts)\n if pair is None:\n break\n heartbeat += 1\n heartbeat.update()\n\n # Break out of processing loop if --limit records processed\n if full_limit == heartbeat.count:\n break\n\n # Now write the full run output\n WriteOutputFiles(opts, heartbeat, total, finalOutput=True)\n\n # Clean up empty output directories\n cleanDirectories(opts)\n\n if opts.nproc > 1:\n remove_kill_script()", "title": "" }, { "docid": "4d423e13702eaffd027c97aa8caf2491", "score": "0.46888673", "text": "async def start_stream_handlers(self):\n Spawn(self.run_stream_handlers())", "title": "" }, { "docid": "286946c5b43c6bf55f0c23d3cb3e3d57", "score": "0.4684802", "text": "def streamlogger(logname,recordfields=[],outputstream=None, level = INFO):\n handler = StreamHandler(outputstream)\n return logger(logname,handler,recordfields,level)", "title": "" }, { "docid": "12f2789e2e47eb90609feb1747928162", "score": "0.4672032", "text": "def __link_streams_to_input(self) -> None:\n video_streams = 0\n audio_streams = 0\n if self.streams is None:\n raise RuntimeError(\"Streams not initialized\")\n\n for stream in self.streams:\n if stream.kind == VIDEO:\n meta: Optional[VideoMeta] = getattr(stream, 'meta', None)\n if self.hardware and self.device and meta:\n meta.device = Device(hardware=self.hardware,\n name=self.device)\n stream.index = video_streams\n video_streams += 1\n elif stream.kind == AUDIO:\n stream.index = audio_streams\n audio_streams += 1\n else:\n raise ValueError(stream.kind)\n stream.source = self", "title": "" }, { "docid": "8d8092e9674dd271409f0eed647c4e2e", "score": "0.46658078", "text": "def _write(self, stream) -> CommandExecutionResult:\n raise Exception('_write() not implemented for handler')", "title": "" }, { "docid": "431d4f1b7b45c25fa6b681e93f30c566", "score": "0.46655047", "text": "def create_redirection_streams(self):\n self.stdout_file = create_output_stream(self.experiment.redirect_stdout, False, common=False)\n self.stderr_file = create_output_stream(self.experiment.redirect_stderr, False, common=False)", "title": "" }, { "docid": "6c11f71e1f1bb6ce7e101a2989cc17ed", "score": "0.46561316", "text": "def handlers(self):\n handlers = []\n handlers.append(\n CommandHandler(self._send_fn, self._send_cmd)\n )\n handlers.append(\n CommandHandler(self._respond_fn, self._respond_cmd)\n )\n return handlers", "title": "" }, { "docid": "83a7abceb74d228352b3339ab209eed5", "score": "0.46502754", "text": "def __init__(self, *writers):\n self.writers = []\n self.orig_stream = None # The original stream terminal/file\n for writer in writers:\n self.add_writer(writer)", "title": "" }, { "docid": "e6d6ad7c2d25634e84f05a7359795697", "score": "0.4647336", "text": "def checkFromStreams(func):\n @functools.wraps(func)\n def decorated():\n argList = []\n for i in range(4):\n if os.path.isfile(sys.argv[i + 1]):\n argList.append(open(sys.argv[i + 1], \"r\"))\n else:\n argList.append(None)\n try:\n print(*func(*argList), sep = \"\\n\")\n finally:\n for fileStream in argList:\n if fileStream is not None:\n fileStream.close()\n return decorated", "title": "" }, { "docid": "c169c3d3a0572f33d005942cd7c81565", "score": "0.4625779", "text": "def aggregate_logging_outputs(logging_outputs):\n raise NotImplementedError", "title": "" }, { "docid": "e234bbbb939a5c6b10b0dc68dca752e9", "score": "0.46221262", 
"text": "def verify_log_streams_exist(self, logs_state, expected_stream_index, observed_streams):\n observed_stream_names = [stream.get(\"logStreamName\") for stream in observed_streams]\n assert_that(observed_stream_names).contains_only(*expected_stream_index)", "title": "" }, { "docid": "4f4b0353a7f7cd0ad75b3e3b550c61fd", "score": "0.4620509", "text": "async def run_stream_handlers(self):\n await Parallel(\n self._stream_writer(),\n self._stream_reader())\n\n # Remove any proxies that use this connection\n self.remove_client()", "title": "" }, { "docid": "0f6e900f4e8b729da838676adc22040b", "score": "0.46125326", "text": "def write_asdf(filename, streams, event, label=None):\n workspace = StreamWorkspace(filename)\n workspace.addStreams(event, streams, label=label, gmprocess_version=VERSION)\n workspace.close()", "title": "" }, { "docid": "25aa8f73dadd8a7349c65c9b032e5304", "score": "0.46107706", "text": "def setOutputStream(self, stream):", "title": "" }, { "docid": "15788c9b76022953ba84a506dcaf6141", "score": "0.4608287", "text": "def register_handlers():\n NumpyArrayHandler.handles(np.ndarray)\n PandasTimeSeriesHandler.handles(pd.TimeSeries)\n PandasDataFrameHandler.handles(pd.DataFrame)", "title": "" }, { "docid": "f144ca3cc69306757cde27376985917b", "score": "0.4601788", "text": "def add_handlers(self, logger, handler_list: list):\n existing_handler_names = []\n for existing_handler in logger.handlers:\n existing_handler_names.append(existing_handler.name)\n\n for new_handler in handler_list:\n if new_handler.name not in existing_handler_names:\n logger.addHandler(new_handler)", "title": "" }, { "docid": "d6819f8fd63892b2c9851752b3fd27c6", "score": "0.4597897", "text": "def patch_python_logging_handlers():\n logging.StreamHandler = StreamHandler\n logging.FileHandler = FileHandler\n logging.handlers.SysLogHandler = SysLogHandler\n logging.handlers.WatchedFileHandler = WatchedFileHandler\n logging.handlers.RotatingFileHandler = RotatingFileHandler\n if sys.version_info >= (3, 2):\n logging.handlers.QueueHandler = QueueHandler", "title": "" }, { "docid": "fdf33d3851c142afefb3bd199733de67", "score": "0.45971072", "text": "def _close_streams(self, fos, writer):\n _method_name = '_close_streams'\n\n if writer is not None:\n writer.close()\n elif fos is not None:\n try:\n fos.close()\n except JIOException, ioe:\n self._logger.fine('WLSDPLY-18016', ioe, ioe.getLocalizedMessage(),\n class_name=self._class_name, method_name=_method_name)", "title": "" }, { "docid": "2f743f2650920e4fadca89bcc8b9e458", "score": "0.45964956", "text": "def verify_log_streams_exist(self, logs_state, expected_stream_index, observed_streams):\n observed_stream_names = [stream.get(\"logStreamName\") for stream in observed_streams]\n assert_that(observed_stream_names).contains(*expected_stream_index)", "title": "" }, { "docid": "abb5212d5af53515bc8107bb3e416635", "score": "0.45959422", "text": "def subscribe(self):\r\n for sig, func in self.handlers.items():\r\n try:\r\n self.set_handler(sig, func)\r\n except ValueError:\r\n pass", "title": "" }, { "docid": "96e61c759ed31700b10cd00431d718ad", "score": "0.45947152", "text": "def get_streams(orig):\n streams = {}\n cmd = \"ffprobe '{0}'\".format(orig)\n cache = tools.Cache()\n try:\n output = subprocess.check_output(\n cmd, shell=True,\n stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as error:\n logger.error(error)\n logger.error(tools.to_unicode(error.output))\n return None\n output = tools.to_unicode(output)\n\n for line in output.split(\"\\n\"):\n if 
cache(re.search(r\".*Stream #0.(\\d+).*: (.*?): (.+?)[, ].*\", line)):\n stream, stype, encoding = cache.output.groups()\n if stype != \"Audio\":\n continue\n if encoding not in ENCODINGS:\n logger.error(\"Unknown encoding: {0}\".format(encoding))\n else:\n streams[int(stream)] = encoding\n return streams", "title": "" }, { "docid": "82a22eebdfa4b21db942283bfbbf9193", "score": "0.45919845", "text": "def create_redirection_streams(self):\n self.stdout_file = create_output_stream(self.research.redirect_stdout, False, common=True)\n self.stderr_file = create_output_stream(self.research.redirect_stderr, False, common=True)", "title": "" }, { "docid": "e4a4ab38ae5b6c7d9aef8b8e8ff100b8", "score": "0.45856246", "text": "def produce_io_stream_callables():\n if utils.in_aws_lambda():\n try:\n globs = dict(import_string=import_string, input=None, output=None)\n exec(\n \"\"\"\n{% set callables = ['mapper', 'filter', 'partition_key'] %}\n{% if meta_input %}\ninput = {\n {% for k, v in meta_input.items() %}\n {% if k in callables %}\n '{{k}}': '{{v or ''}}' and import_string('{{v}}'),\n {% else %}\n '{{k}}': '{{v}}',\n {% endif %}\n {% endfor %}\n}\n{% endif %}\n{% if meta_output %}\noutput = [\n {% for o in meta_output %}\n {\n {% for k, v in o.items() %}\n {% if k in callables %}\n '{{k}}': '{{v or ''}}' and import_string('{{v}}'),\n {% else %}\n '{{k}}': '{{v or ''}}',\n {% endif %}\n {% endfor %}\n },\n {% endfor %}\n]\n{% endif %}\n \"\"\", globs)\n return globs[\"input\"], globs[\"output\"]\n except:\n logger.error(\"Unable to produce I/O callables\")\n dsn = utils.get_secret(\"sentry.dsn\",\n environment=\"{{_env.name}}\",\n stage=\"{{_env.stage}}\")\n if not dsn:\n logger.error(\"Unable to retrieve Sentry DSN\")\n else:\n client = raven.Client(dsn)\n client.captureException()\n # This is a critical error: must re-raise\n raise", "title": "" }, { "docid": "03e5b6a26be735f16f1e77f6a4ac49f0", "score": "0.45803142", "text": "def get_streams_matching(**kwargs) -> List[StreamInlet]:\n infos = get_streaminfos_matching(**kwargs)\n if infos is None:\n return None\n else:\n streams = []\n for info in infos:\n streams.append(pylsl.StreamInlet(info))\n return streams", "title": "" }, { "docid": "6a1779bb2ec18e52fc4e59f98d937368", "score": "0.45703843", "text": "def _get_expected_log_stream_index(self, logs_state):\n expected_stream_index = {}\n for instance in logs_state:\n for _log_path, log_dict in instance.get(\"logs\").items():\n if not log_dict.get(\"exists\") or log_dict.get(\"is_empty\"):\n continue # Log streams aren't created until events are logged to the file\n expected_stream_name = self._get_expected_log_stream_name(\n instance.get(\"hostname\"), instance.get(\"instance_id\"), log_dict.get(\"log_stream_name\")\n )\n expected_stream_index[expected_stream_name] = log_dict\n LOGGER.info(\"Expected stream index:\\n{0}\".format(_dump_json(expected_stream_index)))\n return expected_stream_index", "title": "" }, { "docid": "55d47445954d2cb016713d7f2212bb98", "score": "0.4569798", "text": "def _PrepareSockets(self):\n for fd in self.authfds + self.acctfds + self.coafds:\n self._fdmap[fd.fileno()] = fd\n self._poll.register(fd.fileno(), select.POLLIN | select.POLLPRI | select.POLLERR)\n if self.auth_enabled:\n self._realauthfds = list(map(lambda x: x.fileno(), self.authfds))\n if self.acct_enabled:\n self._realacctfds = list(map(lambda x: x.fileno(), self.acctfds))\n if self.coa_enabled:\n self._realcoafds = list(map(lambda x: x.fileno(), self.coafds))", "title": "" }, { "docid": 
"40b0dc0aad3a56c3b322647b560c80bd", "score": "0.45661017", "text": "def write(self, stream: Iterable, name, overwrite=False):", "title": "" }, { "docid": "eda195301ed1644525ab970a540750fd", "score": "0.4556253", "text": "def _register_handler(handler, file_formats):\n if not isinstance(handler, BaseFileHandler):\n raise TypeError(\n \"handler must be a child of BaseFileHandler, not {}\".format(type(handler))\n )\n if isinstance(file_formats, str):\n file_formats = [file_formats]\n if not is_list_of(file_formats, str):\n raise TypeError(\"file_formats must be a str or a list of str\")\n for ext in file_formats:\n file_handlers[ext] = handler", "title": "" }, { "docid": "81f1c278ee86d736fd81da7ab2f0ca22", "score": "0.45560277", "text": "def create_redirection_streams(self):\n self.stdout_file = create_output_stream(\n self.experiment.redirect_stdout, True, 'stdout.txt', path=self.full_path, common=False\n )\n self.stderr_file = create_output_stream(\n self.experiment.redirect_stderr, True, 'stderr.txt', path=self.full_path, common=False\n )", "title": "" }, { "docid": "c0ca9f3114bb8cb0a0903bda61bf6b99", "score": "0.45516068", "text": "def dispatch(self, *args, **kwargs):\n called_handlers = set()\n for handler_list in self.handlers.values():\n for handler, argspec in handler_list:\n accept_args, accept_kwargs = argspec.accepts\n if handler in called_handlers and False:\n continue\n else:\n if args_match(accept_args, accept_kwargs, self.default, *args, **kwargs):\n called_handlers.add(handler)\n handler(*args, **kwargs)\n \n return called_handlers", "title": "" }, { "docid": "3a0a7ecdaee22a5aa53074946a2c4e08", "score": "0.45511758", "text": "def event_handlers(self):\n return []", "title": "" }, { "docid": "8118593ba0fa439f426fb77c6154341d", "score": "0.45340574", "text": "def json_streams(self, item, when):\n if not self.want_streams:\n return {}\n return {\n key: val\n for when_, key, val in item._report_sections\n if when_ == when and key in ['stdout', 'stderr']\n }", "title": "" }, { "docid": "5fc4f65b02b07a682165ce78c322766c", "score": "0.4534023", "text": "def request_stream(self, *sensor_ids):\n length = len(sensor_ids)\n print(sensor_ids)\n return self.send_cmd(create.OP_STREAM, length, *sensor_ids)", "title": "" }, { "docid": "7eac2916098bb5e73fe3bed15a83f860", "score": "0.45290253", "text": "def update_handler(self, fileno, events):\n self.events = events", "title": "" }, { "docid": "390ff1b786e67d2733f23829123f0c80", "score": "0.45263335", "text": "def stream_events(self):\n print(\"3. 
Stream event\")", "title": "" }, { "docid": "b7bd8c25d73197c9a6ef108f1caecd1a", "score": "0.45258546", "text": "def modify (self, fileno, events):\n\n if events & self.EVENT_ERROR:\n self._error_filenos.add(fileno)\n\n else:\n self._error_filenos.discard(fileno)\n\n if events & self.EVENT_READ:\n self._read_filenos.add(fileno)\n\n else:\n self._read_filenos.discard(fileno)\n\n if events & self.EVENT_WRITE:\n self._write_filenos.add(fileno)\n\n else:\n self._write_filenos.discard(fileno)", "title": "" }, { "docid": "30fb2cd5016ccdcadb2ec5dd4d383369", "score": "0.45210996", "text": "def get_multi_consumer(self, hashes, event_handler, consumer_type = 'http'):\n return StreamConsumer.factory(self, consumer_type, hashes, event_handler)", "title": "" }, { "docid": "b51f8a1bd8cfa2065e6174e28c410e41", "score": "0.45139518", "text": "def _attach_socket_callbacks(self):\n self.socket.on(self.DBG_STOP, self.callback_stop)\n self.socket.on(self.DBG_STEP_INTO, self.callback_step_into)\n self.socket.on(self.DBG_STEP_OUT, self.callback_step_out)\n self.socket.on(self.DBG_STEP_OVER, self.callback_step_over)\n self.socket.on(self.DBG_CONTINUE, self.callback_continue)\n self.socket.on(self.DBG_LOAD_SYMBOL, self.callback_load_symbol)\n self.socket.on(self.DBG_GET_NAMESPACE, self.callback_get_namespace_shells)", "title": "" }, { "docid": "e7458e4e4f98f44078457649a07c858a", "score": "0.45118016", "text": "def create_redirection_streams(self):\n self.stdout_file = create_output_stream(\n self.research.redirect_stdout, True, 'stdout.txt', self.research.name, common=True\n )\n self.stderr_file = create_output_stream(\n self.research.redirect_stderr, True, 'stderr.txt', self.research.name, common=True\n )", "title": "" }, { "docid": "d5af14374615d32fcf98de47f55b3dcb", "score": "0.4507081", "text": "def add_sockets(self, sockets):\r\n if self.io_loop is None:\r\n self.io_loop = IOLoop.current()\r\n\r\n for sock in sockets:\r\n self._sockets[sock.fileno()] = sock\r\n add_accept_handler(sock, self._handle_connection,\r\n io_loop=self.io_loop)", "title": "" }, { "docid": "4499afdae1c769cab3e4e3c5280bbe99", "score": "0.45025295", "text": "def ready_to_write(self):\n\n return [h for h in self._handler_pool.values() if h.ready_to_write()]", "title": "" }, { "docid": "d1f86043b5170fd224e4328689f06404", "score": "0.4496977", "text": "def setup(self):\n super().setup()\n\n # Create the stream and add the message handler\n self.frontend_stream, _ = self.stream(zmq.ROUTER, self.bind_addr, bind=True, identity=self.identity)\n\n # Create the stream and add the message handler\n self.backend_stream, _ = self.stream(zmq.ROUTER, self.backend_addr, bind=True, identity=self.identity)\n\n # Create the stream and add the message handler\n self.subscribe_stream, _ = self.stream(zmq.SUB, self.subs_addr, bind=True, identity=self.identity, subscribe=b\"topic\")\n\n # Attach handlers to the streams\n brokerHandler = BrokerHandler(self.frontend_stream, self.backend_stream, \n self.stop)\n\n sendHandler = SendHandler(sender='Backend')\n\n self.frontend_stream.on_recv(brokerHandler)\n\n # Attach handlers to the streams\n self.backend_stream.on_recv(brokerHandler)\n self.backend_stream.on_send(sendHandler.logger)\n\n # Attach handlers to the streams\n # Consumes data in the form of ['topic', 'msg_type', 'identity, 'payloads'....]\n self.subscribe_stream.on_recv(brokerHandler)\n\n return", "title": "" }, { "docid": "19a97dfe850aaf710d585038a84270a4", "score": "0.44856873", "text": "def _map_logger(func):\n for logger in LOGGERS.itervalues():\n 
func(logger)", "title": "" }, { "docid": "66e84055b850911b76e2728fe71fa218", "score": "0.44827485", "text": "def streams(self):\n # type: () -> list[MuxingStream]\n return self._streams", "title": "" }, { "docid": "1e4349e792883b98759a6f99cc583209", "score": "0.44698796", "text": "def start_logging(self):\r\n for stream_manager in self._streams:\r\n stream_manager.start_logging()\r\n self._started = True", "title": "" }, { "docid": "098876a610b6674aa5ea9df0e6844c5e", "score": "0.44604483", "text": "def _find_streams(text):\n re_stream = re.compile(r\"<< /Length \\d+ >>\\n(stream.*?endstream)\", re.DOTALL)\n streams = []\n for m in re_stream.finditer(text):\n streams.append(text[m.start(1):m.end(1)])\n return streams", "title": "" }, { "docid": "30446cfd99f077036ac6afec0136b3a5", "score": "0.44568974", "text": "def register_all(self):\r\n for hook, handler in self.connections:\r\n hook.register_handler(handler)\r\n self.registered = True", "title": "" }, { "docid": "a396afba4623ce5bf94880a8494e3583", "score": "0.44496498", "text": "def write_fileno(self):\r\n raise NotImplementedError()", "title": "" }, { "docid": "97506bd2039db643ddd64817cf59913d", "score": "0.4435073", "text": "def get_metrics_stats_over_streamlines(streamlines, metrics_files,\n density_weighting=True):\n\n # Compute weighting matrix taking the possible compression into account\n anat_dim = metrics_files[0].header.get_data_shape()\n weights = compute_tract_counts_map(streamlines, anat_dim)\n\n if not density_weighting:\n weights = weights > 0\n\n return map(lambda metric_file:\n weighted_mean_stddev(\n weights,\n metric_file.get_data().astype(np.float64)),\n metrics_files)", "title": "" }, { "docid": "1d69aa8739634f4e4a2a4f7ebecec746", "score": "0.44267422", "text": "def _get_handler(self, request_handlers):\n\n for handler in request_handlers:\n if handler.can_handle(self._handler_input):\n return handler", "title": "" }, { "docid": "5958e9a16fef11897ba8978c5b462073", "score": "0.44110668", "text": "def handlers(self):\n handlers = []\n for cls in type(self).__mro__:\n if not hasattr(cls, \"_handlers\"):\n continue\n\n # This class is using it's own internal interface here\n # pylint: disable=protected-access\n new_handlers = cls._handlers[cls]\n logging.debug(f\"Including {new_handlers} from {cls} for {self}\")\n handlers.extend(new_handlers)\n\n return handlers", "title": "" }, { "docid": "5cdbd377c99388de6be62809701ef5a9", "score": "0.4410977", "text": "def process_streams(traces):\n trace_file=[]\n for files in os.listdir(traces):\n trace_file.append(os.path.join(os.getcwd(),traces,files))\n return trace_file", "title": "" }, { "docid": "ed9f0b1ee20cefbf28101a692934c439", "score": "0.44097677", "text": "def getAllStream(self , **kargs):", "title": "" }, { "docid": "41ebcbcba3efef7642d5530d8d63769f", "score": "0.4408345", "text": "def _categorize_handlers(self, event_handlers):\n\n train_begin = []\n epoch_begin = []\n batch_begin = []\n batch_end = []\n epoch_end = []\n train_end = []\n for handler in event_handlers:\n if isinstance(handler, TrainBegin):\n train_begin.append(handler)\n if isinstance(handler, EpochBegin):\n epoch_begin.append(handler)\n if isinstance(handler, BatchBegin):\n batch_begin.append(handler)\n if isinstance(handler, BatchEnd):\n batch_end.append(handler)\n if isinstance(handler, EpochEnd):\n epoch_end.append(handler)\n if isinstance(handler, TrainEnd):\n train_end.append(handler)\n return train_begin, epoch_begin, batch_begin, batch_end, epoch_end, train_end", "title": "" }, { "docid": 
"b24813f7f1fe30c7055b32b26f53ef69", "score": "0.44073662", "text": "def init_simple_stream_logging(level: int = logging.INFO,\n streams: Optional[Sequence[TextIO]] = None,\n format: Optional[str] = None,\n date_format: Optional[str] = None):\n if not streams:\n streams = [sys.stdout]\n\n if not format:\n format = '%(asctime)s %(message)s'\n\n if not date_format:\n date_format = '%H:%M:%S'\n\n logging.basicConfig(level=level)\n handlers = []\n\n formatter = WrappingLogFormatter(format=format, date_format=date_format)\n for stream in streams:\n log_handler = logging.StreamHandler(stream)\n log_handler.setLevel(level)\n log_handler.setFormatter(formatter)\n\n handlers += [log_handler]\n\n logging.getLogger('').handlers = handlers", "title": "" }, { "docid": "5701e7876cb0c271ea5d75691994c6a6", "score": "0.4407288", "text": "def register(self, fd):\n self.handlers[fd] = { key: [] for key in self.EVENT_MAPPING }", "title": "" }, { "docid": "b9805caf2a1ff0c1f2f0e7016f020796", "score": "0.44066778", "text": "def get_handlers(self):\n return []", "title": "" }, { "docid": "6f029593938bb511f724d2310e8cce06", "score": "0.44059324", "text": "def seen_handlers(self) -> Iterable[BaseHandler]:\n return self._handlers.values()", "title": "" }, { "docid": "fdb32c34c517972ba9a16749526272bb", "score": "0.43997818", "text": "def _add_log_handlers(self, add_handlers_fn):\r\n self._flush_all_streams()\r\n context = self._get_context()\r\n\r\n add_handlers_fn(context)\r\n\r\n self._push_context(context)", "title": "" }, { "docid": "232e3d763d79b9368df97eead000c452", "score": "0.43996677", "text": "def testFilters(self):\n # Prepare a handler\n handler, _ = self._register_handler(None, '(answer=42)')\n\n # Assert the handler is empty\n self.assertEqual(handler.pop_event(), None)\n\n # Send event, with matching properties\n for topic in ('/titi/toto', '/toto/', '/titi/42', '/titi/toto/tata'):\n for value in (42, '42', [1, 2, 42, 20], {42, 10},\n (10, 21, 42)):\n evt_props = {'answer': value}\n self.eventadmin.send(topic, evt_props)\n\n # Check properties\n self.assertDictContainsSubset(evt_props, handler.last_props)\n self.assertIsNot(handler.last_props, evt_props)\n\n # Check topic\n self.assertEqual(handler.pop_event(), topic)\n\n # Send events, with a non-matching properties\n for value in (' 42 ', 21, [1, 2, 3], (4, 5, 6), {7, 8, 9}):\n self.eventadmin.send(topic, {'answer': value})\n self.assertEqual(handler.pop_event(), None)", "title": "" } ]
9b484ac9123e68d269b5f623c2c64969
Surface2Right { get; } > SurfaceSurfaceIntersectorConfiguration
[ { "docid": "d277b3aca70753fa1664609bf69d1cf1", "score": "0.9119296", "text": "def Surface2Right(self) -> SurfaceSurfaceIntersectorConfiguration:", "title": "" } ]
[ { "docid": "a0061321502479ed8b10ba1ceaec5cb3", "score": "0.9002965", "text": "def Surface1Right(self) -> SurfaceSurfaceIntersectorConfiguration:", "title": "" }, { "docid": "fa008281c2a24443fcc5dd185b9a55d1", "score": "0.7710068", "text": "def Surface2Left(self) -> SurfaceSurfaceIntersectorConfiguration:", "title": "" }, { "docid": "b3d6737d269468fd1df15e13486d06a0", "score": "0.7018399", "text": "def Surface1Left(self) -> SurfaceSurfaceIntersectorConfiguration:", "title": "" }, { "docid": "e465087882b282420b11a9d0ce5eb4a1", "score": "0.5861007", "text": "def Surface2(self) -> Surface:", "title": "" }, { "docid": "dac2d5229ec80e29d51e0a20e608ef05", "score": "0.5727361", "text": "def IntersectionType(self) -> SurfaceSurfaceIntersectorType:", "title": "" }, { "docid": "b130b470fd649021fd14b780974f7c50", "score": "0.558582", "text": "def _turn2right(self):\n buffer = self.right.topleft.colour\n self.right.topleft.colour = self.right.bottomright.colour\n self.right.bottomright.colour = buffer\n\n buffer = self.right.top.colour\n self.right.top.colour = self.right.bottom.colour\n self.right.bottom.colour = buffer\n\n buffer = self.right.topright.colour\n self.right.topright.colour = self.right.bottomleft.colour\n self.right.bottomleft.colour = buffer\n\n buffer = self.right.right.colour\n self.right.right.colour = self.right.left.colour\n self.right.left.colour = buffer\n\n buffer = self.up.topright.colour\n self.up.topright.colour = self.down.bottomright.colour\n self.down.bottomright.colour = buffer\n\n buffer = self.up.right.colour\n self.up.right.colour = self.down.right.colour\n self.down.right.colour = buffer\n\n buffer = self.up.bottomright.colour\n self.up.bottomright.colour = self.down.topright.colour\n self.down.topright.colour = buffer\n\n buffer = self.front.topright.colour\n self.front.topright.colour = self.back.bottomright.colour\n self.back.bottomright.colour = buffer\n\n buffer = self.front.right.colour\n self.front.right.colour = self.back.right.colour\n self.back.right.colour = buffer\n\n buffer = self.front.bottomright.colour\n self.front.bottomright.colour = self.back.topright.colour\n self.back.topright.colour = buffer", "title": "" }, { "docid": "4b9f7ee6c326bfad0c368ed0f4935abe", "score": "0.53312206", "text": "def _turnright(self):\n buffer = self.right.topleft.colour\n self.right.topleft.colour = self.right.bottomleft.colour\n self.right.bottomleft.colour = self.right.bottomright.colour\n self.right.bottomright.colour = self.right.topright.colour\n self.right.topright.colour = buffer\n\n buffer = self.right.top.colour\n self.right.top.colour = self.right.left.colour\n self.right.left.colour = self.right.bottom.colour\n self.right.bottom.colour = self.right.right.colour\n self.right.right.colour = buffer\n\n buffer = self.up.bottomright.colour\n self.up.bottomright.colour = self.front.bottomright.colour\n self.front.bottomright.colour = self.down.topright.colour\n self.down.topright.colour = self.back.topright.colour\n self.back.topright.colour = buffer\n\n buffer = self.up.right.colour\n self.up.right.colour = self.front.right.colour\n self.front.right.colour = self.down.right.colour\n self.down.right.colour = self.back.right.colour\n self.back.right.colour = buffer\n\n buffer = self.up.topright.colour\n self.up.topright.colour = self.front.topright.colour\n self.front.topright.colour = self.down.bottomright.colour\n self.down.bottomright.colour = self.back.bottomright.colour\n self.back.bottomright.colour = buffer", "title": "" }, { "docid": 
"21fa7b7c7bdb5c6a7ac911a367c78f84", "score": "0.52520955", "text": "def Direction2(self) -> Vector2d:", "title": "" }, { "docid": "d57808e68b6e47049d5967a34387c0fe", "score": "0.5249383", "text": "def r2(self) -> Mapping[str, str]:\n return pulumi.get(self, \"r2\")", "title": "" }, { "docid": "d353ea610ae93a0a44675381e3736bf8", "score": "0.5219448", "text": "def r2(self):\r\n return self._r2_raw", "title": "" }, { "docid": "a9f1a2c88468b16d09eee6332525e00d", "score": "0.51929814", "text": "def _roller_surface_params(self, inner):\n\n if self.roller_angle == 0:\n # Rollers are cylindrical, the maths gets a lot simpler\n radius_delta = (self.roller_diam / 2) + self.tolerance\n gradient = 0\n if inner:\n radius = self.rolling_radius - radius_delta\n else:\n radius = self.rolling_radius + radius_delta\n else:\n # Conical rollers\n focal_length = tan(radians(90 - self.roller_angle)) * self.rolling_radius\n cone_angle = atan2( # 1/2 angle made by cone's point (unit: radians)\n (self.roller_diam / 2),\n sqrt(focal_length ** 2 + self.rolling_radius ** 2)\n )\n\n multiplier = 1\n multiplier *= -1 if inner else 1\n multiplier *= 1 if (self.roller_angle > 0) else -1\n gradient = -tan(radians(self.roller_angle) + (cone_angle * multiplier))\n\n radius = gradient * -focal_length\n radius = radius + (-self.tolerance if inner else self.tolerance)\n\n return {\n 'roller_surface_radius': radius, # radius of cone at XY plane\n 'roller_surface_gradient': gradient, # rate of change of radius along z axis.\n }", "title": "" }, { "docid": "c1f397b54d033ceeef100b74f0a39bfb", "score": "0.51747066", "text": "def Surface1(self) -> Surface:", "title": "" }, { "docid": "81a9a9f2d256e96106927bd31c94aaa7", "score": "0.5172453", "text": "def OrthoBoundBlock(self) -> BoundBlock2d:", "title": "" }, { "docid": "bf00e0e776eee418f6c95f5c22c4c4b3", "score": "0.5125267", "text": "def ptsRight(self):\n return self._pts1", "title": "" }, { "docid": "15013b71c5567d484e19c5f5b18709a8", "score": "0.5119256", "text": "def closed_solid(self):\n return CloseSurface([self.lower_surface, self.upper_surface])", "title": "" }, { "docid": "2fc3a55de7222e7eebd1591014defae3", "score": "0.5060321", "text": "def Surface(self) -> Surface:", "title": "" }, { "docid": "552465a5fc6ea35f2c7c940e0eed8311", "score": "0.50546205", "text": "def right_edge(self):\n return self.well_center + (self.well_width / 2)", "title": "" }, { "docid": "19fe55bf39fc0e5555c62f268e52941f", "score": "0.50386864", "text": "def l2fw_settings(self):\n if \"l2fw_settings\" in self.data:\n return Layer2Settings(self)\n raise UnsupportedEngineFeature(\n \"Layer2FW settings are only supported on layer 3 engines using \"\n \"engine and SMC version >= 6.3\"\n )", "title": "" }, { "docid": "50d2451828ae12f805598f941f2b229a", "score": "0.5021015", "text": "def getRight(self):\n return self.right #ADD MORE ATTRIBUTES", "title": "" }, { "docid": "fcc3640d875b61d224da7dd18fa3c65f", "score": "0.50139534", "text": "def r2_adj(self):\r\n return self._r2_adj_raw", "title": "" }, { "docid": "58ce5e79873b54a96ae8c0c357555de2", "score": "0.5013371", "text": "def getsurface(self,r):\n\n a = pygame.Surface((40,20))\n a.fill((0,0,0))\n if(r == 1): # ORANGE - weakest one\n pygame.draw.rect(a,(255,215,0),pygame.Rect(a.get_rect().top+1,a.get_rect().left+1,38,18))\n elif(r == 2): # WOOD\n pygame.draw.rect(a,(184,134,11),pygame.Rect(a.get_rect().top+1,a.get_rect().left+1,38,18))\n elif(r == 3): # METAL\n pygame.draw.rect(a,(192,192,192),pygame.Rect(a.get_rect().top+1,a.get_rect().left+1,38,18))\n 
return a", "title": "" }, { "docid": "f52fcc0de67812655281b3736badabf6", "score": "0.5008466", "text": "def surface(self):\n return self[\"surface\"]", "title": "" }, { "docid": "8b5ba0e99bf561a5c5e4787c10e7b851", "score": "0.500284", "text": "def stereoRectify(K1, D1, K2, D2, imageSize, R, tvec, flags, R1=None, R2=None, P1=None, P2=None, Q=None, newImageSize=None, balance=None, fov_scale=None): # real signature unknown; restored from __doc__\r\n pass", "title": "" }, { "docid": "3313cde80cdac0a1aa192ff07b58ce8b", "score": "0.49697912", "text": "def ExternalSurfaceDefinition(self) -> _n_1_t_8:", "title": "" }, { "docid": "58f5d5cdc855543b1304b26d6477c9ac", "score": "0.49572212", "text": "def _stereo_layer(ich):\n ptt = (version_pattern() +\n SLASH + _formula_sublayer_pattern() +\n app.maybe(SLASH + _main_layer_pattern()) +\n app.maybe(SLASH + _charge_layer_pattern()) +\n SLASH + app.capturing(_stereo_layer_pattern()))\n lyr = apf.first_capture(ptt, ich)\n return lyr", "title": "" }, { "docid": "6bc4230888bf7a49c4df8c3347b3f5bb", "score": "0.49489367", "text": "def get_surface(self):\r\n return 3.14 * self.rayon **2", "title": "" }, { "docid": "6484afa9ab51d3f7d35cb1d1d482f1aa", "score": "0.492675", "text": "def _turnrightinv(self):\n buffer = self.right.topleft.colour\n self.right.topleft.colour = self.right.topright.colour\n self.right.topright.colour = self.right.bottomright.colour\n self.right.bottomright.colour = self.right.bottomleft.colour\n self.right.bottomleft.colour = buffer\n\n buffer = self.right.top.colour\n self.right.top.colour = self.right.right.colour\n self.right.right.colour = self.right.bottom.colour\n self.right.bottom.colour = self.right.left.colour\n self.right.left.colour = buffer\n\n buffer = self.up.bottomright.colour\n self.up.bottomright.colour = self.back.topright.colour\n self.back.topright.colour = self.down.topright.colour\n self.down.topright.colour = self.front.bottomright.colour\n self.front.bottomright.colour = buffer\n\n buffer = self.up.right.colour\n self.up.right.colour = self.back.right.colour\n self.back.right.colour = self.down.right.colour\n self.down.right.colour = self.front.right.colour\n self.front.right.colour = buffer\n\n buffer = self.up.topright.colour\n self.up.topright.colour = self.back.bottomright.colour\n self.back.bottomright.colour = self.down.bottomright.colour\n self.down.bottomright.colour = self.front.topright.colour\n self.front.topright.colour = buffer", "title": "" }, { "docid": "80bebc62156814828235639cf8214e7e", "score": "0.49166632", "text": "def Direction2(self) -> Vector3d:", "title": "" }, { "docid": "23cd34b4160098dfaf03ccb37a71c117", "score": "0.48990288", "text": "def surface(self):\r\n return self.rupture.surface", "title": "" }, { "docid": "e1235e61bd4e8b706e45c9d66ce63107", "score": "0.4897508", "text": "def __init__(self, face_right=True, filter_landmarks=False):\n self.NOSE = 0\n self.LEFT_EYE_INNER = 1\n self.LEFT_EYE = 2\n self.LEFT_EYE_OUTER = 3\n self.RIGHT_EYE_INNER = 4\n self.RIGHT_EYE = 5\n self.RIGHT_EYE_OUTER = 6\n self.LEFT_EAR = 7\n self.RIGHT_EAR = 8\n self.MOUTH_LEFT = 9\n self.MOUTH_RIGHT = 10\n self.LEFT_SHOULDER = 11\n self.RIGHT_SHOULDER = 12\n self.LEFT_ELBOW = 13\n self.RIGHT_ELBOW = 14\n self.LEFT_WRIST = 15\n self.RIGHT_WRIST = 16\n self.LEFT_PINKY = 17\n self.RIGHT_PINKY = 18\n self.LEFT_INDEX = 19\n self.RIGHT_INDEX = 20\n self.LEFT_THUMB = 21\n self.RIGHT_THUMB = 22\n self.LEFT_HIP = 23\n self.RIGHT_HIP = 24\n self.LEFT_KNEE = 25\n self.RIGHT_KNEE = 26\n self.LEFT_ANKLE = 27\n 
self.RIGHT_ANKLE = 28\n self.LEFT_HEEL = 29\n self.RIGHT_HEEL = 30\n self.LEFT_FOOT_INDEX = 31\n self.RIGHT_FOOT_INDEX = 32\n self.POSE_CONNECTIONS = None\n self.LANDMARKS = None\n\n if filter_landmarks:\n if face_right:\n self.POSE_CONNECTIONS = [\n # Custom join\n (self.RIGHT_SHOULDER, self.RIGHT_EAR),\n (self.RIGHT_SHOULDER, self.NOSE),\n (self.RIGHT_SHOULDER, self.RIGHT_ELBOW),\n (self.RIGHT_ELBOW, self.RIGHT_WRIST),\n (self.RIGHT_WRIST, self.RIGHT_INDEX),\n (self.RIGHT_SHOULDER, self.RIGHT_HIP),\n (self.RIGHT_HIP, self.RIGHT_KNEE),\n (self.RIGHT_KNEE, self.RIGHT_ANKLE),\n (self.RIGHT_ANKLE, self.RIGHT_HEEL),\n (self.RIGHT_HEEL, self.RIGHT_FOOT_INDEX),\n (self.RIGHT_ANKLE, self.RIGHT_FOOT_INDEX),\n ]\n\n self.LANDMARKS = [\n self.NOSE,\n self.RIGHT_EAR,\n self.RIGHT_SHOULDER,\n self.RIGHT_ELBOW,\n # Might not need wrist and index\n self.RIGHT_WRIST,\n self.RIGHT_INDEX,\n self.RIGHT_HIP,\n self.RIGHT_KNEE,\n self.RIGHT_ANKLE,\n self.RIGHT_HEEL,\n self.RIGHT_FOOT_INDEX\n ]\n\n self.HIP_ANGLE_CONNECTIONS = [\n self.RIGHT_SHOULDER,\n self.RIGHT_HIP,\n self.RIGHT_KNEE\n ]\n\n self.KNEE_ANGLE_CONNECTIONS = [\n self.RIGHT_HIP,\n self.RIGHT_KNEE,\n self.RIGHT_ANKLE\n # self.RIGHT_HEEL\n ]\n else:\n self.POSE_CONNECTIONS = [\n # Custom join\n (self.LEFT_SHOULDER, self.LEFT_EAR),\n (self.LEFT_SHOULDER, self.NOSE),\n (self.LEFT_SHOULDER, self.LEFT_ELBOW),\n (self.LEFT_ELBOW, self.LEFT_WRIST),\n (self.LEFT_WRIST, self.LEFT_INDEX),\n (self.LEFT_SHOULDER, self.LEFT_HIP),\n (self.LEFT_HIP, self.LEFT_KNEE),\n (self.LEFT_KNEE, self.LEFT_ANKLE),\n (self.LEFT_ANKLE, self.LEFT_HEEL),\n (self.LEFT_HEEL, self.LEFT_FOOT_INDEX),\n (self.LEFT_ANKLE, self.LEFT_FOOT_INDEX),\n ]\n\n self.LANDMARKS = [\n self.NOSE,\n self.LEFT_EAR,\n self.LEFT_SHOULDER,\n self.LEFT_ELBOW,\n # Might not need wrist and index\n self.LEFT_WRIST,\n self.LEFT_INDEX,\n self.LEFT_HIP,\n self.LEFT_KNEE,\n self.LEFT_ANKLE,\n self.LEFT_HEEL,\n self.LEFT_FOOT_INDEX\n ]\n\n self.HIP_ANGLE_CONNECTIONS = [\n self.LEFT_SHOULDER,\n self.LEFT_HIP,\n self.LEFT_KNEE\n ]\n\n self.KNEE_ANGLE_CONNECTIONS = [\n self.LEFT_HIP,\n self.LEFT_KNEE,\n self.LEFT_ANKLE\n # self.LEFT_HEEL\n ]", "title": "" }, { "docid": "f4712dbdd9873c56ff27e440c538044a", "score": "0.48874354", "text": "def __init__(self):\n # State bounds.\n self.bounds = np.array([\n [-2, 2], # axis_0 = state, axis_1=bounds.\n [-2, 10]\n ])\n self.low = self.bounds[:, 0]\n self.high = self.bounds[:, 1]\n\n # Time step parameter.\n self.time_step = 0.05\n\n # Dubins car parameters.\n self.upward_speed = 2.0\n\n # Control parameters.\n self.horizontal_rate = 1\n self.discrete_controls = np.array([\n -self.horizontal_rate, 0, self.horizontal_rate\n ])\n\n # Constraint set parameters.\n # X,Y position and Side Length.\n self.box1_x_y_length = np.array([1.25, 2, 1.5]) # Bottom right.\n self.corners1 = np.array([\n (self.box1_x_y_length[0] - self.box1_x_y_length[2] / 2.0),\n (self.box1_x_y_length[1] - self.box1_x_y_length[2] / 2.0),\n (self.box1_x_y_length[0] + self.box1_x_y_length[2] / 2.0),\n (self.box1_x_y_length[1] + self.box1_x_y_length[2] / 2.0)\n ])\n self.box2_x_y_length = np.array([-1.25, 2, 1.5]) # Bottom left.\n self.corners2 = np.array([\n (self.box2_x_y_length[0] - self.box2_x_y_length[2] / 2.0),\n (self.box2_x_y_length[1] - self.box2_x_y_length[2] / 2.0),\n (self.box2_x_y_length[0] + self.box2_x_y_length[2] / 2.0),\n (self.box2_x_y_length[1] + self.box2_x_y_length[2] / 2.0)\n ])\n self.box3_x_y_length = np.array([0, 6, 1.5]) # Top middle.\n self.corners3 = np.array([\n 
(self.box3_x_y_length[0] - self.box3_x_y_length[2] / 2.0),\n (self.box3_x_y_length[1] - self.box3_x_y_length[2] / 2.0),\n (self.box3_x_y_length[0] + self.box3_x_y_length[2] / 2.0),\n (self.box3_x_y_length[1] + self.box3_x_y_length[2] / 2.0)\n ])\n\n # Target set parameters.\n self.box4_x_y_length = np.array([0, 9.25, 1.5]) # Top.\n\n # Gym variables.\n self.action_space = gym.spaces.Discrete(3) # horizontal_rate={-1,0,1}\n self.midpoint = (self.low + self.high) / 2.0\n self.interval = self.high - self.low\n self.observation_space = gym.spaces.Box(\n np.float32(self.midpoint - self.interval / 2),\n np.float32(self.midpoint + self.interval / 2)\n )\n self.viewer = None\n\n # Discretization.\n self.grid_cells = None\n\n # Internal state.\n self.state = np.zeros(3)\n\n self.seed_val = 0\n\n # Visualization params\n self.vis_init_flag = True\n (\n self.x_box1_pos, self.x_box2_pos, self.x_box3_pos, self.y_box1_pos,\n self.y_box2_pos, self.y_box3_pos\n ) = self.constraint_set_boundary()\n (self.x_box4_pos, self.y_box4_pos) = self.target_set_boundary()\n self.visual_initial_states = [\n np.array([0, 0]),\n np.array([-1, -2]),\n np.array([1, -2]),\n np.array([-1, 4]),\n np.array([1, 4])\n ]\n self.scaling = 1.\n\n # Set random seed.\n np.random.seed(self.seed_val)", "title": "" }, { "docid": "a54d8af85b05bfe5ab9be587cdca4bd9", "score": "0.4856279", "text": "def set_surface(self,surface):\n if self.manualCanopy==False:\n self.surface,self.canopy_height,self.leafAreaIndex,self.dragCoeff,self.surfaceRoughess = getSurfaceProperties(surface,self.surfaceDataFile)\n else:\n self.surface,self.dumpCanopy,self.leafAreaIndex,self.dragCoeff,self.surfaceRoughess = getSurfaceProperties(surface,self.surfaceDataFile)", "title": "" }, { "docid": "09dd072c7bf17a68f4fa250650ac4937", "score": "0.48540297", "text": "def createSurf40(self):\n # SurfaceD = rectangle to erase readout\n\n # Readout is the 5th position in 'self.displayTextsList'\n readoutList = self.displayTextsList[4]\n textRect = readoutList[3][1]\n textRect.topleft = (self.position[0]+readoutList[2][0],\n self.position[1]+readoutList[2][1])\n self.surfaceD = readoutList[3][0]\n \n # Add surface to 'layerManager'\n self.addSurftoLM(self.surfaceD, textRect.topleft, 40, self.ordinal)\n\n # Explicit variables to speed up readout print\n self.readoutScale = readoutList[1]\n self.readoutRectTL = (self.position[0] + readoutList[2][0],\n self.position[1] + readoutList[2][1])", "title": "" }, { "docid": "24b0cbfb83ddc9cf54d6162a06951115", "score": "0.48417872", "text": "def rectangle_field(self):\n\n rectangle_field = self.side_a * self.side_b\n return rectangle_field", "title": "" }, { "docid": "db6965be77b69774e367e47c046e1acb", "score": "0.48243245", "text": "def GetRightHandOrientation(self):\n ...", "title": "" }, { "docid": "3b310b79468daa829f50f38278a6f2aa", "score": "0.47981092", "text": "def r2(self):\n return self._read2", "title": "" }, { "docid": "3b310b79468daa829f50f38278a6f2aa", "score": "0.47981092", "text": "def r2(self):\n return self._read2", "title": "" }, { "docid": "bf6f7cd55ca7449b63314d76f3be462a", "score": "0.47912267", "text": "def rot_R2(self):\n\n self.cubes['FRD'][0], self.cubes['RBU'][1] = \\\n self.cubes['RBU'][1], self.cubes['FRD'][0]\n\n self.cubes['FRD'][1], self.cubes['RBU'][0] = \\\n self.cubes['RBU'][0], self.cubes['FRD'][1]\n\n self.cubes['FRD'][2], self.cubes['RBU'][2] = \\\n self.cubes['RBU'][2], self.cubes['FRD'][2]\n\n self.cubes['RBD'][0], self.cubes['FRU'][1] = \\\n self.cubes['FRU'][1], self.cubes['RBD'][0]\n\n 
self.cubes['RBD'][1], self.cubes['FRU'][0] = \\\n self.cubes['FRU'][0], self.cubes['RBD'][1]\n\n self.cubes['RBD'][2], self.cubes['FRU'][2] = \\\n self.cubes['FRU'][2], self.cubes['RBD'][2]\n\n self.cubes['RD'], self.cubes['RU'] = self.cubes['RU'], self.cubes['RD']\n\n self.cubes['FR'], self.cubes['BR'] = self.cubes['BR'], self.cubes['FR']", "title": "" }, { "docid": "c97199c14f01205c790bc9086d63138b", "score": "0.47847623", "text": "def OriginalSurface(self) -> Surface:", "title": "" }, { "docid": "33c362cfcd08df8ab836d487a5081b0d", "score": "0.47805697", "text": "def get_mode_two_ramping_capability(t2, min_loading, current_mode_time, effective_ramp_rate):\n\n # Amount of time remaining in T2\n t2_time_remaining = t2 - current_mode_time\n\n # Time unit is above min loading level over the dispatch interval\n min_loading_time = max([0, 5 - t2_time_remaining])\n\n # If T2=0 then unit immediately operates at min loading after synchronisation complete\n if t2 == 0:\n t2_ramp_capability = min_loading\n\n # Else unit must follow fixed startup trajectory\n else:\n t2_ramp_capability = (min_loading / t2) * t2_time_remaining\n\n # Ramping capability for T3 and beyond\n t3_ramp_capability = (effective_ramp_rate / 60) * min_loading_time\n\n # Total ramp up capability\n ramp_up_capability = t2_ramp_capability + t3_ramp_capability\n\n return ramp_up_capability", "title": "" }, { "docid": "8ddb6176aea2c00548d1763ebc5cd6a2", "score": "0.47710675", "text": "def Orientation2d(self) -> bool:", "title": "" }, { "docid": "c03bb6f233360f1b7d5a8eaadc20806c", "score": "0.4770688", "text": "def get_configuration_template(self):\n\t\treturn {'ALLOWED_ROOMS': ['room_1', 'room_2']}", "title": "" }, { "docid": "f95281edb55be9e496250e487b4b8ebd", "score": "0.47612482", "text": "def IsisTrillPseudoIfaceAttPoint2Config(self):\n from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.isistrillpseudoifaceattpoint2config import IsisTrillPseudoIfaceAttPoint2Config\n return IsisTrillPseudoIfaceAttPoint2Config(self)", "title": "" }, { "docid": "27f713659dd56be27f436967a7501720", "score": "0.4724865", "text": "def ExternalSurfaceDefiniton(self) -> _n_1_t_8:", "title": "" }, { "docid": "799c85467c6f50c85206209a2489c33b", "score": "0.47135726", "text": "def BoundBlock(self) -> BoundBlock2d:", "title": "" }, { "docid": "1c436a6c362846995daa454d214e19ac", "score": "0.47085255", "text": "def LightingMode(self) -> RapidRTLightingMode:", "title": "" }, { "docid": "715f1c8f32c6441e15a15b7d078e9ce7", "score": "0.4696753", "text": "def co2_equivalent(self):\n return self.indoor_air_quality[0]", "title": "" }, { "docid": "45b9499f9c1c293a70911186d41a52f6", "score": "0.4685617", "text": "def surface(self):\n return self._surface", "title": "" }, { "docid": "cf1166555a996b37199520b64b77134b", "score": "0.46837267", "text": "def ConstructionSurface(self) -> Surface:", "title": "" }, { "docid": "16a8d948fbbdd49cc04b77c5699f1b38", "score": "0.46778372", "text": "def mp2_two_rdm(self):\n mp2 = self._pyscf_data.get('mp2', None)\n if mp2 is None:\n return None\n\n mf = self._pyscf_data.get('scf', None)\n if isinstance(mf, scf.uhf.UHF):\n raise ValueError('Spin trace for UMP2 density matrix.')\n\n rdm2 = mp2.make_rdm2()\n if isinstance(mf, scf.rohf.ROHF):\n aa, ab, bb = rdm2\n rdm2 = aa + bb + ab + ab.transpose(2, 3, 0, 1)\n return rdm2.transpose(0, 2, 3, 1)", "title": "" }, { "docid": "b4dd1d897c9e6add97521e06b9e8bdfa", "score": "0.46777868", "text": "def getDirectionVectorTwo(self):\n return self.__directionVectorTwo", "title": "" }, { 
"docid": "1b4d448b4f5ce4cd0520889bf46890be", "score": "0.46682587", "text": "def config_differential(self):\r\n\t\tif self.channel == 0:\r\n\t\t\tCONFIG_REG = [ADS1115_REG_CONFIG_OS_SINGLE | ADS1115_REG_CONFIG_MUX_DIFF_0_1 | ADS1115_REG_CONFIG_PGA_4_096V | ADS1115_REG_CONFIG_MODE_CONTIN, ADS1115_REG_CONFIG_DR_128SPS | ADS1115_REG_CONFIG_CQUE_NONE]\r\n\t\telif self.channel == 1:\r\n\t\t\tCONFIG_REG = [ADS1115_REG_CONFIG_OS_SINGLE | ADS1115_REG_CONFIG_MUX_DIFF_0_3 | ADS1115_REG_CONFIG_PGA_4_096V | ADS1115_REG_CONFIG_MODE_CONTIN, ADS1115_REG_CONFIG_DR_128SPS | ADS1115_REG_CONFIG_CQUE_NONE]\r\n\t\telif self.channel == 2:\r\n\t\t\tCONFIG_REG = [ADS1115_REG_CONFIG_OS_SINGLE | ADS1115_REG_CONFIG_MUX_DIFF_1_3 | ADS1115_REG_CONFIG_PGA_4_096V | ADS1115_REG_CONFIG_MODE_CONTIN, ADS1115_REG_CONFIG_DR_128SPS | ADS1115_REG_CONFIG_CQUE_NONE]\r\n\t\telif self.channel == 3:\r\n\t\t\tCONFIG_REG = [ADS1115_REG_CONFIG_OS_SINGLE | ADS1115_REG_CONFIG_MUX_DIFF_2_3 | ADS1115_REG_CONFIG_PGA_4_096V | ADS1115_REG_CONFIG_MODE_CONTIN, ADS1115_REG_CONFIG_DR_128SPS | ADS1115_REG_CONFIG_CQUE_NONE]\r\n\t\t\r\n\t\tbus.write_i2c_block_data(ADS1115_DEFAULT_ADDRESS, ADS1115_REG_POINTER_CONFIG, CONFIG_REG)", "title": "" }, { "docid": "fd19820aa09309b343bf70dd077fa776", "score": "0.46665156", "text": "def Cn2_r0(self,Cn2=1e-14,deltz=50*LP.m):\r\n \r\n wvn = 2*np.pi/self.wvl\r\n r0 = (1.46*wvn**2*Cn2*deltz)**(-3/5)*2.1\r\n return r0", "title": "" }, { "docid": "1424fa02b8432bf19bcabf30b712ef6c", "score": "0.46600118", "text": "def cb_rect_right(self, msg):\n self.headers['rect_right'] = copy.deepcopy(msg.header)\n if self.use_cv_types:\n try:\n self.rect_right = self.bridge.imgmsg_to_cv2(msg, self.cv_type)\n except:\n rospy.logdebug('Failed to process rect_right image')\n self.rect_right = None\n else:\n self.rect_right = msg", "title": "" }, { "docid": "1c03a33a433a296efe5859871ee7db0a", "score": "0.46579942", "text": "def fci_two_rdm(self):\n if self._fci_two_rdm is None:\n fci = self._pyscf_data.get('fci', None)\n if fci is None:\n return None\n\n mf = self._pyscf_data.get('scf', None)\n if isinstance(mf, scf.uhf.UHF):\n raise ValueError('Spin trace for UHF-FCI density matrix.')\n\n norb = self.canonical_orbitals.shape[1]\n nelec = self.n_electrons\n fci_rdm2 = fci.make_rdm2(fci.ci, norb, nelec)\n self._fci_two_rdm = fci_rdm2.transpose(0, 2, 3, 1)\n return self._fci_two_rdm", "title": "" }, { "docid": "ef8dab1d11fc20211cb71470f5ab84ab", "score": "0.46499088", "text": "def DiagnosticBSPMode(self) -> DiagnosticBSPMode:", "title": "" }, { "docid": "c8a18af8b1b891da947ab1c871ac2562", "score": "0.46436408", "text": "def GetRightHandPosition(self):\n ...", "title": "" }, { "docid": "9af4477e24244849d08dc3149a9e06ff", "score": "0.46385527", "text": "def logical_z2(self):\n max_x, max_y = self.code.bounds\n self.site('Z', *((0, y) for y in range(max_y + 1)))\n return self", "title": "" }, { "docid": "af8fcf399ee973f0b22c9d5b29f70ab7", "score": "0.4631357", "text": "def right(self):\n\n return self.__opts.get(\"right\")", "title": "" }, { "docid": "23794ed5ef0a9246d5dad7e52f74a910", "score": "0.46277118", "text": "def CoordinateSystem2d(self) -> CoordinateSystem2d:", "title": "" }, { "docid": "a6326d9d3e8234c4d19b02c418a1af74", "score": "0.4621761", "text": "def right(self):\n x, y = self.position\n if x == self.map.width - 1:\n return None\n return self.map.grid[x+1][y]", "title": "" }, { "docid": "048d4e2d69270cc8d9833d5fcebc00aa", "score": "0.46088052", "text": "def getRight(self):\n return self.right", "title": "" }, { "docid": 
"e976fb32ef2e3a21a7fcc24bb691ad0f", "score": "0.4606444", "text": "def _turn2y(self):\n self._turn2up()\n self._turn2down()\n\n buffer = self.front.left.colour\n self.front.left.colour = self.back.right.colour\n self.back.right.colour = buffer\n\n buffer = self.front.middle.colour\n self.front.middle.colour = self.back.middle.colour\n self.back.middle.colour = buffer\n\n buffer = self.front.right.colour\n self.front.right.colour = self.back.left.colour\n self.back.left.colour = buffer\n\n buffer = self.right.left.colour\n self.right.left.colour = self.left.left.colour\n self.left.left.colour = buffer\n\n buffer = self.right.middle.colour\n self.right.middle.colour = self.left.middle.colour\n self.left.middle.colour = buffer\n\n buffer = self.right.right.colour\n self.right.right.colour = self.left.right.colour\n self.left.right.colour = buffer", "title": "" }, { "docid": "dc20729228677032076fb4ea83e7afaf", "score": "0.45995158", "text": "def nativeSurface(self) -> Surface:", "title": "" }, { "docid": "818aa001923e107270c70fb301ab15c8", "score": "0.45837763", "text": "def curvatureRight(self):\n return self._curvature(self._coefs1)", "title": "" }, { "docid": "18175b8462f5a459289264ef125d6b13", "score": "0.45835435", "text": "def right(self):\n return self.angle == 90", "title": "" }, { "docid": "82f71ea6c4863f713ace4f1597872bc6", "score": "0.45814893", "text": "def s2_kernel_shapes(self):\n p = self.params\n kshape = p.s1_num_orientations, p.s2_kwidth, p.s2_kwidth\n return (kshape,)", "title": "" }, { "docid": "76d6e80fe1503961d61489f3525730e7", "score": "0.458108", "text": "def __warp_sides_surface(self, grading_percentage = .5, precision = 25):\n\n # first of all checking whether you have to do anything at all\n if not(self.warp_type == 0):\n local_srf = self.warped_srf\n u_extra_precision = int(math.ceil(25 / grading_percentage)) - precision\n half_pi = math.pi / 2.0\n half_pi_over_precision = half_pi / (precision - 1)\n\n # setting up the base grading t_vals\n ini_t_vals = []\n total_t_vals = u_extra_precision\n for i in range(precision):\n alfa = half_pi_over_precision * i\n local_t_val = math.sin(alfa)\n ini_t_vals.append(local_t_val)\n total_t_vals += local_t_val\n\n [ini_t_vals.append(1) for i in range(u_extra_precision)]\n\n # setting up the grading list for the amount of sides\n local_t_val = 0\n if (self.warp_type == 1):\n # only on the left side\n t_vals = []\n for t_val in ini_t_vals:\n local_t_val += t_val\n t_vals.append(local_t_val)\n elif (self.warp_type == 2):\n # only on the right side\n t_vals = []\n ini_t_vals.reverse()\n local_ini_t_vals = [0]\n local_ini_t_vals.extend(ini_t_vals[:-1])\n for t_val in local_ini_t_vals:\n local_t_val += t_val\n t_vals.append(local_t_val)\n elif (self.warp_type == 3):\n # on both sides\n t_vals = []\n local_ini_t_vals = ini_t_vals[:]\n ini_t_vals.reverse()\n local_ini_t_vals.extend(ini_t_vals[:-1])\n for t_val in local_ini_t_vals:\n local_t_val += t_val\n t_vals.append(local_t_val)\n\n # getting the v isocurves\n val_0, val_1 = t_vals[0], t_vals[-1]\n local_srf.SetDomain(1, rg.Interval(0, precision - 1))\n temp_srf_iscrv_set = [local_srf.IsoCurve(0, v_val) for v_val in range(precision)]\n pt_list = [[] for i in range(len(t_vals))]\n for isocrv in temp_srf_iscrv_set:\n t_start, t_end = isocrv.Domain[0], isocrv.Domain[1]\n t_delta = t_end - t_start\n t_differential = t_delta / val_1\n [pt_list[i].append(isocrv.PointAt(t_start + t_val * t_differential)) for i, t_val in enumerate(t_vals)]\n\n # constructing new isocurves\n loft_curves = 
[rg.NurbsCurve.Create(False, 3, pt_set) for pt_set in pt_list]\n loft_type = rg.LoftType.Tight\n local_srf = rg.Brep.CreateFromLoftRebuild(loft_curves, rg.Point3d.Unset, rg.Point3d.Unset, loft_type, False, 50)[0]\n # getting the loft as a nurbssurface out of the resulting brep\n new_srf = local_srf.Faces.Item[0].ToNurbsSurface()\n\n domain = rg.Interval(0, 1)\n new_srf.SetDomain(0, domain)\n new_srf.SetDomain(1, domain)\n self.warped_srf = local_srf\n else:\n # in case you don't have to do anything at all you do nothing at all !?\n pass", "title": "" }, { "docid": "08cecbeda7b1fd860635a37e5ebb71f8", "score": "0.45809785", "text": "def Direction(self) -> Vector2d:", "title": "" }, { "docid": "aa0d7cd880ba5153d950a18c918fcabf", "score": "0.45729887", "text": "def right(self):\n return self.rect.right", "title": "" }, { "docid": "d43df59255847c6fbf861e18cb20c72e", "score": "0.45715597", "text": "def IsisSpbPseudoIfaceAttPoint2Config(self):\n from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.isisspbpseudoifaceattpoint2config import IsisSpbPseudoIfaceAttPoint2Config\n return IsisSpbPseudoIfaceAttPoint2Config(self)", "title": "" }, { "docid": "8af9eafce170910716ba082644e5ad4e", "score": "0.45705968", "text": "def Curve2(self) -> Curve2d:", "title": "" }, { "docid": "6a7c5d3550e80e99efb9dcb1b889f453", "score": "0.45702863", "text": "def region_wta(self, config):\n if config is None:\n config = ConfigStereoDisparity()\n\n if config.type == DisparityAlgorithms.FIVE_RECT:\n alg_type = gateway.jvm.boofcv.factory.feature.disparity.DisparityAlgorithms.RECT_FIVE\n elif config.type == DisparityAlgorithms.RECT:\n alg_type = gateway.jvm.boofcv.factory.feature.disparity.DisparityAlgorithms.RECT\n else:\n raise RuntimeError(\"Unknown algorithm type\")\n\n if config.subPixel:\n java_obj = gateway.jvm.boofcv.factory.feature.disparity.FactoryStereoDisparity. \\\n regionSubpixelWta(alg_type, int(config.minDisparity), int(config.maxDisparity),\n int(config.regionRadiusX), int(config.regionRadiusY),float(config.maxPerPixelError),\n int(config.validateRtoL), float(config.texture), self.boof_image_type)\n else:\n java_obj = gateway.jvm.boofcv.factory.feature.disparity.FactoryStereoDisparity. 
\\\n regionWta(alg_type, int(config.minDisparity), int(config.maxDisparity),\n int(config.regionRadiusX), int(config.regionRadiusY), float(config.maxPerPixelError),\n int(config.validateRtoL), float(config.texture), self.boof_image_type)\n\n return StereoDisparity(java_obj)", "title": "" }, { "docid": "179f2c10fc9c5ebf3f9dda22acce210b", "score": "0.45688042", "text": "def BaseSurface(self) -> Surface:", "title": "" }, { "docid": "59891ab9b4d7b130e76c3180400fde2a", "score": "0.4564586", "text": "def IsisL3PseudoIfaceAttPoint2Config(self):\n from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.isisl3pseudoifaceattpoint2config import IsisL3PseudoIfaceAttPoint2Config\n return IsisL3PseudoIfaceAttPoint2Config(self)", "title": "" }, { "docid": "98782f2ce0d5e330a1ade7dd35806219", "score": "0.45584077", "text": "def Yaxis(self) -> Vector2d:", "title": "" }, { "docid": "46f7da132993e0e623368410e3a56b69", "score": "0.45562455", "text": "def double_side_polished(self, Reflection_front=-1, Reflection_rear=-1):\n\n self.width_from_xlegnth()\n self.theta = 0 # This is for a polished sample\n\n if Reflection_front == -1:\n Reflection_front = self.Reflection_front\n if Reflection_rear == -1:\n Reflection_rear = self.Reflection_rear\n\n xd_rear = self.optics.abs_cof_bb * \\\n ((np.ones([self.x.shape[0], self.optics.abs_cof_bb.shape[0]]).T)\n * (self.Width - self.x)).T\n\n b_rear = Reflection_front * \\\n np.exp(-2 * self.optics.abs_cof_bb * self.Width)\n\n xd_frot = self.optics.abs_cof_bb * \\\n (np.ones([self.x.shape[0], self.optics.abs_cof_bb.shape[0]]).T *\n self.x).T\n b_frot = Reflection_rear * \\\n np.exp(-2 * self.optics.abs_cof_bb * self.Width)\n\n self.Escape_rear = (\n np.exp(-xd_rear) + b_rear * np.exp(xd_rear)\n ) / (1 - b_rear * Reflection_rear)\n\n self.Escape_front = (\n np.exp(-xd_frot) + b_frot * np.exp(xd_frot)\n ) / (1 - b_frot * Reflection_front)\n\n # self.optics.abs_cof_bb*=cos(self.theta*np.pi/180)", "title": "" }, { "docid": "33a707b74129fc22fae05963db6e0427", "score": "0.45526984", "text": "def closed_surface(self):\n return self._closed_surface", "title": "" }, { "docid": "fd5c18d94176f205f8a4d5a51851b90d", "score": "0.4549044", "text": "def Ooe2_wavelength(self):\n lower_wl = c_uint16(0)\n upper_wl = c_uint16(0)\n self.dll(\n 'readLED_HeadOoe2',\n self.channel, byref(lower_wl), byref(upper_wl))\n return (lower_wl.value, upper_wl.value)", "title": "" }, { "docid": "f1d5655621d0fc3dac7526957a0a4423", "score": "0.4546518", "text": "def get_potential_tiles_player2(self):\n return self.potential_tiles_player2", "title": "" }, { "docid": "65a670b1b3ee9a71be63b9bee6ff8054", "score": "0.45412236", "text": "def get_halfspaces(self):\n return self.A, self.b", "title": "" }, { "docid": "e1031135e1967241f67062cb0fe5f41d", "score": "0.4540414", "text": "def make_right_handed(self):\r\n if not self.right_handed:\r\n self.grid_coordinates.flip_z()\r\n self.flow_solution.flip_z()\r\n self.right_handed = True", "title": "" }, { "docid": "45031f043b41b6f3c6c9283d3a91cfb6", "score": "0.45270047", "text": "def right_curvature(self):\n return self._right_curverad", "title": "" }, { "docid": "77cedf798794c3836fb7c72c19aa41ca", "score": "0.45236808", "text": "def state_tiltRight(car):\n\tcmds.xform(car, t=(-0.1,0,0), r=True, os=True, p=True)", "title": "" }, { "docid": "cb28b145d33448a6e5c4e1aa29f5c444", "score": "0.45222035", "text": "def GetRight(self): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "8a2e8a961fc888b56160db07595fdcc9", "score": 
"0.45125878", "text": "def test_two_mode_rect_overparameterised(self):\n N = 2\n wires = range(N)\n dev = qml.device('default.gaussian', wires=N)\n\n theta = [0.321]\n phi = [0.234]\n varphi = [0.42342, 0.543]\n\n def circuit(varphi):\n qml.template.Interferometer(theta, phi, varphi, wires=wires)\n return [qml.expval.MeanPhoton(w) for w in wires]\n\n qnode = qml.QNode(circuit, dev)\n self.assertAllAlmostEqual(qnode(varphi), [0, 0], delta=self.tol)\n\n queue = qnode.queue\n self.assertEqual(len(queue), 3)\n\n self.assertTrue(isinstance(qnode.queue[0], qml.Beamsplitter))\n self.assertAllEqual(qnode.queue[0].parameters, theta+phi)\n\n self.assertTrue(isinstance(qnode.queue[1], qml.Rotation))\n self.assertAllEqual(qnode.queue[1].parameters, [varphi[0]])\n\n self.assertTrue(isinstance(qnode.queue[2], qml.Rotation))\n self.assertAllEqual(qnode.queue[2].parameters, [varphi[1]])", "title": "" }, { "docid": "d984cb9c66f1576aec644ea1bbb8173b", "score": "0.4505084", "text": "def stereo_sublayers(ich):\n return _sublayers(_stereo_layer(ich))", "title": "" }, { "docid": "d4556183d19d86f06c42726b374af7c0", "score": "0.45032918", "text": "def __call__(self, *args) -> \"double const &\":\n return _itkSymmetricSecondRankTensorPython.itkSymmetricSecondRankTensorD2___call__(self, *args)", "title": "" }, { "docid": "8e90f9b757e22d8128f32e050aea3824", "score": "0.45009676", "text": "def adjusted_r2(self):\n r2 = self.r2()\n return 1 - (1-r2)*(self.train_length - 1)/(self.train_length - self.coeff_size - 1)", "title": "" }, { "docid": "8f1c17faa92e6b71dbab37a20407e110", "score": "0.44990766", "text": "def test_two_mode_rect(self):\n N = 2\n wires = range(N)\n dev = qml.device('default.gaussian', wires=N)\n\n theta = [0.321]\n phi = [0.234]\n varphi = [0.42342]\n\n def circuit(varphi):\n qml.template.Interferometer(theta, phi, varphi, wires=wires)\n return [qml.expval.MeanPhoton(w) for w in wires]\n\n qnode = qml.QNode(circuit, dev)\n self.assertAllAlmostEqual(qnode(varphi), [0, 0], delta=self.tol)\n\n queue = qnode.queue\n self.assertEqual(len(queue), 2)\n\n self.assertTrue(isinstance(qnode.queue[0], qml.Beamsplitter))\n self.assertAllEqual(qnode.queue[0].parameters, theta+phi)\n\n self.assertTrue(isinstance(qnode.queue[1], qml.Rotation))\n self.assertAllEqual(qnode.queue[1].parameters, varphi)", "title": "" }, { "docid": "06b2a67dc63cb23077e13408cc984f32", "score": "0.44962606", "text": "def forward_right(self, speed=None):\n return self.api.manual_drive(8, speed)", "title": "" }, { "docid": "78f91050dbfd58fa75e5e7a80ef21da6", "score": "0.44926167", "text": "def right(self):\n return self.extent[1] if self.orthodox[1] else self.offset[1]", "title": "" }, { "docid": "8828710e62b715a913afda5a98eba2ba", "score": "0.44867212", "text": "def _Surface(self, debug=False):\n logger.log(9, \"start\")\n if debug: print( 'Begin _Surface()' )\n ## Calculate some quantities\n self._Calc_qp1by2om2()\n\n ## Saddle point, i.e. the Roche-lobe radius at L1 (on the near side)\n xl1 = self._Saddle(0.5)\n self.L1 = xl1\n if debug: print( 'Saddle %f' %xl1 )\n ## Potential at the saddle point, L1\n psil1 = self._Potential(xl1, 0., 0.)[-1]\n if debug: print( 'Potential psil1 %f' %psil1 )\n\n ## rc_l1 is the stellar radius on the near side, i.e. 
the nose of the star\n self.rc_l1 = self.filling*xl1\n if debug: print( 'rc_l1 %f' %self.rc_l1 )\n ## Potential at rc_l1, the nose of the star\n trc, trx, dpsi, dpsidx, dpsidy, dpsidz, psi0 = self._Potential(self.rc_l1, 0., 0.)\n self.psi0 = psi0\n if debug: print( 'Potential psi0\\n trc: %f, trx %f, dpsi %f, dpsidx %f, dpsidy %f, dpsidz %f, psi0 %f' % (trc, trx, dpsi, dpsidx, dpsidy, dpsidz, self.psi0) )\n\n ## rc_pole is stellar radius at 90 degrees, i.e. at the pole, which is perpendicular to the line separating the two stars and the orbital plane\n if debug: print( 'psi0,r '+str(self.psi0)+' '+str(r) )\n self.rc_pole = self._Radius(0.,0.,1.,self.psi0,self.rc_l1)\n trc, trx, dpsi, dpsidx, dpsidy, dpsidz, psi = self._Potential(0.,0.,self.rc_pole)\n ## log surface gravity at the pole of the star\n self.logg_pole = np.log10(np.sqrt(dpsidx**2+dpsidy**2+dpsidz**2))\n\n ## rc_eq is stellar radius at 90 degrees in the orbital plane, i.e. at the equator, but not in the direction of the companion\n self.rc_eq = self._Radius(0.,1.,0.,self.psi0,self.rc_l1)\n trc, trx, dpsi, dpsidx, dpsidy, dpsidz, psi = self._Potential(0.,self.rc_eq,0.)\n ## log surface gravity at the pole of the star\n self.logg_eq = np.log10(np.sqrt(dpsidx**2+dpsidy**2+dpsidz**2))\n\n ## r_vertices are the radii of the vertices. shape = n_vertices\n self.r_vertices = self._Radius(self.vertices[:,0], self.vertices[:,1], self.vertices[:,2], self.psi0, self.rc_l1)\n\n ### Calculate useful quantities for all surface elements\n ## rc corresponds to r1 from Tjemkes et al., the distance from the center of mass of the pulsar companion. shape = n_faces\n self.rc = self._Radius(self.cosx, self.cosy, self.cosz, self.psi0, self.rc_l1)\n ## rx corresponds to r2 from Tjemkes et al., the distance from the center of mass of the pulsar. shape = n_faces\n trc, self.rx, dpsi, dpsidx, dpsidy, dpsidz, psi = self._Potential(self.rc*self.cosx,self.rc*self.cosy,self.rc*self.cosz)\n ## log surface gravity. shape = n_faces\n geff = self._Geff(dpsidx, dpsidy, dpsidz)\n self.logg = np.log10(geff)\n ## gradient of the gravitational potential in x,y,z. shape = n_faces\n self.gradx = -dpsidx/geff\n self.grady = -dpsidy/geff\n self.gradz = -dpsidz/geff\n if self.oldchi:\n ## coschi is the cosine angle between the rx and the surface element. shape = n_faces\n ## A value of 1 means that the companion's surface element is directly facing the pulsar, 0 is at the limb and -1 on the back.\n ## The following is the old way, which is derived from the spherical approximation, i.e. that the normal to the surface is approximately the same as the radial position\n #self.coschi = -(self.rc-self.cosx)/self.rx\n\n ## The better calculation should use the gradient as the normal vector, and the direction to the pulsar as positive x.\n ## This implies that the angle coschi is simply the x component of the gradient.\n self.coschi = self.gradx.copy()\n else:\n ## coschi = (N * rx) / (abs(N) abs(rx))\n ## N: vector normal to the surface, which is the grad of the potential\n ## rx: vector from the secondary (e.g. neutron star) to the primary (e.g. companion)\n ## note that N is normalised already\n self.coschi = -self.rc*((self.cosx-1/self.rc)*self.gradx + self.cosy*self.grady + self.cosz*self.gradz) / np.abs(self.rx)\n ## surface area. 
shape = n_faces\n self.area = self.rc**2 * self.pre_area\n logger.log(9, \"end\")\n return", "title": "" }, { "docid": "46e46a0c587c78f337f2f785908032f8", "score": "0.44864747", "text": "def c2(self):\n return self[2]", "title": "" }, { "docid": "a060a26e08624391ee9d98da8f673ec9", "score": "0.44806764", "text": "def get_sides(self):\n return self.__sides", "title": "" }, { "docid": "7b55e0a5342d64044e77964e3a55b17d", "score": "0.44806585", "text": "def rot_D2(self):\n\n self.cubes['FRD'], self.cubes['BLD'] = \\\n self.cubes['BLD'], self.cubes['FRD']\n\n self.cubes['RBD'], self.cubes['LFD'] = \\\n self.cubes['LFD'], self.cubes['RBD']\n\n self.cubes['FD'], self.cubes['BD'] = self.cubes['BD'], self.cubes['FD']\n\n self.cubes['LD'], self.cubes['RD'] = self.cubes['RD'], self.cubes['LD']", "title": "" }, { "docid": "63bf6625a2a4ac394c28fe8d3095d016", "score": "0.44786063", "text": "def get_planes():\n return \"XY\"", "title": "" }, { "docid": "8f494ebdda5b348130654234f99d07dd", "score": "0.44744873", "text": "def right(angle):", "title": "" }, { "docid": "3b8202bb5dd20f0f6558238f154ddc0b", "score": "0.44713655", "text": "def boundary(self, direction='x_min'):\n return {\n 'x_min': self.stageArea[0][0],\n 'x_max': self.stageArea[0][1],\n 'y_min': self.stageArea[1][0],\n 'y_max': self.stageArea[1][1],\n 'z_min': self.stageArea[2][0],\n 'z_max': self.stageArea[2][1]\n }[direction]", "title": "" }, { "docid": "a2b0e8a7eb3ed26536a424425372a700", "score": "0.44695082", "text": "def _get_f2rx(self, C, r_x, r_1, r_2):\n drx = (r_x - r_1) / (r_2 - r_1)\n return self.CONSTS[\"h4\"] + (C[\"h5\"] * drx) + (C[\"h6\"] * (drx ** 2.))", "title": "" } ]
45c5f13c77ebdc051ef9f5aef0a88630
takes the initiate_mining_package dictionary and the qMachines of each machine to calculate emissions
[ { "docid": "9dc3685d84a3954bced6dda399dcfe18", "score": "0.6304393", "text": "def package_emissions(self):\n\n longwallShearer_emissions = longwallSheerer.qMachine(self.longwallShearer.power,\n self.required_output,\n self.longwallShearer.production_output,\n self.usage_factor,\n self.units) * self.initiate_mining_package[\n 'longwall shearer']\n # face_emissions = longwallShearer_emissions + stageLoader\n borerMiner_emissions = borerMiner.qMachine(self.borerMiner.power,\n self.usage_factor,\n self.units) * self.initiate_mining_package['borer miner']\n shuttle_car_emissions = shuttleCar.qMachine(self.shuttle_car.power,\n self.shuttle_load,\n self.shuttle_car.nameplate_rating,\n self.usage_factor,\n self.units) * self.initiate_mining_package['shuttle car']\n # TODO: check the validity of this\n afc_emissions = miscSupportEqmt.afc_att.qMachine(0, self.afc_drive.drive_power,\n self.usage_factor,\n self.units) * self.initiate_mining_package['afc drive']\n\n stageLoader_emissions = stageLoader.qMachine(self.stageLoader.power,\n self.required_output,\n self.stageLoader.max_output,\n self.usage_factor,\n self.units) * self.initiate_mining_package[\"stage loader\"]\n\n emissions_df = pd.DataFrame(data={\"miner_emissions\": [longwallShearer_emissions],\n \"support_emissions\": [borerMiner_emissions],\n 'transportation_emissions': [stageLoader_emissions +\n afc_emissions +\n shuttle_car_emissions]},\n index=['longwall method'])\n emissions_df['sum'] = emissions_df.sum(axis=1)\n total_emissions_df = emissions_df.mul(self.mining_packages)\n # print(total_emissions_df)\n return emissions_df, total_emissions_df", "title": "" } ]
[ { "docid": "115568d12c9f6c0dc12ac73118cf65c6", "score": "0.5337584", "text": "def post_solve(m, outdir=None):\n\n if outdir is None:\n outdir = m.options.outputs_dir\n\n zone_fuel_cost = get_zone_fuel_cost(m)\n has_subsidies = hasattr(m, 'gen_investment_subsidy_fraction')\n\n gen_data = OrderedDict()\n gen_period_data = OrderedDict()\n gen_vintage_period_data = OrderedDict()\n for g, p in sorted(m.GEN_PERIODS):\n # helper function to calculate annual sums\n def ann(expr):\n try:\n return sum(\n expr(g, t) * m.tp_weight_in_year[t]\n for t in m.TPS_IN_PERIOD[p]\n )\n except AttributeError:\n # expression uses a component that doesn't exist\n return None\n\n # is this a storage gen?\n is_storage = hasattr(m, 'STORAGE_GENS') and g in m.STORAGE_GENS\n\n BuildGen = m.BuildGen[g, p] if (g, p) in m.GEN_BLD_YRS else 0.0\n # BuildStorageEnergy = (\n # m.BuildStorageEnergy[g, p]\n # if is_storage and (g, p) in m.GEN_BLD_YRS\n # else 0.0\n # )\n\n gen_data[g] = OrderedDict(\n gen_tech=m.gen_tech[g],\n gen_load_zone=m.gen_load_zone[g],\n gen_energy_source=m.gen_energy_source[g],\n gen_is_intermittent=int(m.gen_is_variable[g])\n )\n\n # temporary storage of per-generator data to be allocated per-vintage\n # below\n gen_period_data = OrderedDict(\n total_output=0.0 if is_storage else ann(\n lambda g, t: m.DispatchGen[g, t]\n ),\n renewable_output=0.0 if is_storage else ann(\n lambda g, t: renewable_mw(m, g, t)\n ),\n non_renewable_output=0.0 if is_storage else ann(\n lambda g, t: m.DispatchGen[g, t]-renewable_mw(m, g, t)\n ),\n storage_load=(\n ann(lambda g, t: m.ChargeStorage[g, t] - m.DispatchGen[g, t])\n if is_storage else 0.0\n ),\n fixed_om=m.GenFixedOMCosts[g, p],\n variable_om=ann(\n lambda g, t: m.DispatchGen[g, t] * m.gen_variable_om[g]\n ),\n startup_om=ann(\n lambda g, t:\n m.gen_startup_om[g]\n * m.StartupGenCapacity[g, t] / m.tp_duration_hrs[t]\n ),\n fuel_cost=ann(\n lambda g, t: sum(\n 0.0 # avoid nan fuel prices for unused fuels\n if (\n abs(value(m.GenFuelUseRate[g, t, f])) < 1e-10\n and math.isnan(zone_fuel_cost[m.gen_load_zone[g], f, m.tp_period[t]])\n ) else (\n m.GenFuelUseRate[g, t, f]\n * zone_fuel_cost[m.gen_load_zone[g], f, m.tp_period[t]]\n )\n for f in m.FUELS_FOR_GEN[g]\n ) if g in m.FUEL_BASED_GENS else 0.0\n )\n )\n\n for v in m.BLD_YRS_FOR_GEN_PERIOD[g, p]:\n # fill in data for each vintage of generator that is active now\n gen_vintage_period_data[g, v, p] = OrderedDict(\n capacity_in_place=m.BuildGen[g, v],\n capacity_added=m.BuildGen[g, p] if p == v else 0.0,\n capital_outlay=(\n m.BuildGen[g, p] * (\n m.gen_overnight_cost[g, p] +\n m.gen_connect_cost_per_mw[g]\n ) * (\n (1.0 - m.gen_investment_subsidy_fraction[g, p])\n if has_subsidies else 1.0\n ) + (\n (\n m.BuildStorageEnergy[g, p]\n * m.gen_storage_energy_overnight_cost[g, p]\n ) if is_storage else 0.0\n )\n ) if p == v else 0.0,\n amortized_cost=\n m.BuildGen[g, v] * m.gen_capital_cost_annual[g, v]\n + ((\n m.BuildStorageEnergy[g, v]\n * m.gen_storage_energy_overnight_cost[g, v]\n * crf(m.interest_rate, m.gen_max_age[g])\n ) if is_storage else 0.0)\n - ((\n m.gen_investment_subsidy_fraction[g, v]\n * m.BuildGen[g, v]\n * m.gen_capital_cost_annual[g, v]\n ) if has_subsidies else 0.0),\n )\n # allocate per-project values among the vintages based on amount\n # of capacity currently online (may not be physically meaningful if\n # gens have discrete commitment, but we assume the gens are run\n # roughly this way)\n vintage_share = ratio(m.BuildGen[g, v], m.GenCapacity[g, p])\n for var, val in gen_period_data.items():\n 
gen_vintage_period_data[g, v, p][var] = vintage_share * val\n\n # record capacity retirements\n # (this could be done earlier if we included the variable name\n # in the dictionary key tuple instead of having a data dict for\n # each key)\n for g, v in m.GEN_BLD_YRS:\n retire_year = v + m.gen_max_age[g]\n # find the period when this retires\n for p in m.PERIODS:\n if p >= retire_year:\n gen_vintage_period_data \\\n .setdefault((g, v, p), OrderedDict())['capacity_retired'] \\\n = m.BuildGen[g, v]\n break\n\n # convert dicts to data frames\n generator_df = (\n pd.DataFrame(evaluate(gen_vintage_period_data))\n .unstack()\n .to_frame(name='value')\n )\n generator_df.index.names = [\n 'generation_project', 'gen_vintage', 'period', 'variable'\n ]\n for g, d in gen_data.items():\n for k, v in d.items():\n # assign generator general data to all rows with generator==g\n generator_df.loc[g, k] = v\n # convert from float\n generator_df['gen_is_intermittent'] = generator_df['gen_is_intermittent'].astype(int)\n generator_df = generator_df.reset_index().set_index([\n 'generation_project', 'gen_vintage', 'gen_tech', 'gen_load_zone',\n 'gen_energy_source', 'gen_is_intermittent',\n 'variable'\n ]).sort_index()\n generator_df.to_csv(\n os.path.join(outdir, 'generation_project_details.csv'), index=True\n )\n\n # dict should be var, gen, period\n # but gens have all-years values too (technology, fuel, etc.)\n # and there are per-year non-gen values\n\n # report other costs on an undiscounted, annualized basis\n # (custom modules, transmission, etc.)\n\n # List of comparisons to make later; dict value shows which model\n # components should match which variables in generator_df\n itemized_cost_comparisons = {\n 'gen_fixed_cost': (\n [\n 'TotalGenFixedCosts', 'StorageEnergyFixedCost',\n 'TotalGenCapitalCostsSubsidy'\n ],\n ['amortized_cost', 'fixed_om']\n ),\n 'fuel_cost': (\n ['FuelCostsPerPeriod', 'RFM_Fixed_Costs_Annual'],\n ['fuel_cost']\n ),\n 'variable_om': (\n ['GenVariableOMCostsInTP', 'Total_StartupGenCapacity_OM_Costs'],\n ['startup_om', 'variable_om']\n )\n }\n\n ##### most detailed level of data:\n # owner, tech, generator, fuel (if relevant, otherwise 'all' or specific fuel or 'multiple'?)\n # then aggregate up\n \"\"\"\n In generic summarize_results.py:\n - lists of summary expressions; each creates a new variable per indexing set\n then those get added to summary tables, which then get aggregated\n gen_fuel_period_exprs\n gen_period_exprs (can incl. owner, added to top of list from outside)\n gen_exprs -> get pushed down into gen_period table? or only when creating by-period summaries?\n period_exprs (get added as quasi-gens)\n fuel_period_exprs\n\n these create tables like 'summary_per_gen_fuel_period' (including quasi gen\n data from period_exprs and fuel_period_exprs).\n Those get pivoted\n to make 'summary_per_gen_fuel_by_period', with data from 'summary_per_gen_fuel'\n added to the same rows. Maybe there should be a list of summary groups too. 
ugh.\n \"\"\"\n\n\n\n\n # list of costs that should have already been accounted for\n itemized_gen_costs = set(\n component\n for model_costs, df_costs in itemized_cost_comparisons.values()\n for component in model_costs\n )\n\n non_gen_costs = OrderedDict()\n for p in m.PERIODS:\n non_gen_costs[p] = {\n cost: getattr(m, cost)[p]\n for cost in m.Cost_Components_Per_Period\n if cost not in itemized_gen_costs\n }\n for cost in m.Cost_Components_Per_TP:\n if cost not in itemized_gen_costs:\n non_gen_costs[p][cost] = sum(\n getattr(m, cost)[t] * m.tp_weight_in_year[t]\n for t in m.TPS_IN_PERIOD[p]\n )\n non_gen_costs[p]['co2_emissions'] = m.AnnualEmissions[p]\n non_gen_costs[p]['gross_load'] = ann(\n lambda g, t: sum(m.zone_demand_mw[z, t] for z in m.LOAD_ZONES)\n )\n non_gen_costs[p]['ev_load'] = 0.0\n if hasattr(m, 'ChargeEVs'):\n non_gen_costs[p]['ev_load'] += ann(\n lambda g, t: sum(m.ChargeEVs[z, t] for z in m.LOAD_ZONES)\n )\n if hasattr(m, 'ev_charge_min') and hasattr(m, 'ChargeEVs_min'):\n m.logger.error(\n 'ERROR: Need to update {} to handle combined loads from '\n 'ev_simple and ev_advanced modules'.format(__name__)\n )\n if hasattr(m, 'StorePumpedHydro'):\n non_gen_costs[p]['Pumped_Hydro_Net_Load'] = ann(\n lambda g, t: sum(\n m.StorePumpedHydro[z, t] - m.GeneratePumpedHydro[z, t]\n for z in m.LOAD_ZONES\n )\n )\n\n non_gen_df = pd.DataFrame(evaluate(non_gen_costs)).unstack().to_frame(name='value')\n non_gen_df.index.names=['period', 'variable']\n non_gen_df.to_csv(os.path.join(outdir, 'non_generation_costs_by_period.csv'))\n\n # check whether reported generator costs match values used in the model\n gen_df_totals = generator_df.groupby(['variable', 'period'])['value'].sum()\n gen_total_costs = defaultdict(float)\n for label, (model_costs, df_costs) in itemized_cost_comparisons.items():\n for p in m.PERIODS:\n for cost in model_costs:\n if cost in m.Cost_Components_Per_Period:\n cost_val = value(getattr(m, cost)[p])\n elif cost in m.Cost_Components_Per_TP:\n # aggregate to period\n cost_val = value(sum(\n getattr(m, cost)[t] * m.tp_weight_in_year[t]\n for t in m.TPS_IN_PERIOD[p]\n ))\n else:\n cost_val = 0.0\n gen_total_costs[label, p, 'model'] += cost_val\n gen_total_costs[label, p, 'reported'] = (\n gen_df_totals.loc[df_costs, p].sum()\n )\n mc = gen_total_costs[label, p, 'model']\n rc = gen_total_costs[label, p, 'reported']\n if different(mc, rc):\n m.logger.warning(\n \"WARNING: model values ({}) don't match reported values ({}) for {} in \"\n \"{}: {:,.0f} != {:,.0f}; NPV of difference: {:,.0f}.\"\n .format(\n '+'.join(model_costs), '+'.join(df_costs),\n label, p, mc, rc,\n m.bring_annual_costs_to_base_year[p]*(mc-rc)\n )\n )\n breakpoint()\n # else:\n # m.logger.info(\n # \"INFO: model and reported values match for {} in \"\n # \"{}: {} == {}.\".format(label, p, mc, rc)\n # )\n\n # check costs on an aggregated basis too (should be OK if the gen costs are)\n cost_vars = [\n var\n for model_costs, df_costs in itemized_cost_comparisons.values()\n for var in df_costs\n ]\n total_costs = (\n generator_df.loc[pd.IndexSlice[:, :, :, :, cost_vars], :]\n .groupby('period')['value'].sum()\n ) + non_gen_df.unstack(0).drop(\n ['co2_emissions', 'gross_load', 'Pumped_Hydro_Net_Load']\n ).sum()\n npv_cost = value(sum(\n m.bring_annual_costs_to_base_year[p] * v\n for ((_, p), v) in total_costs.iteritems()\n ))\n system_cost = value(m.SystemCost)\n if different(npv_cost, system_cost):\n m.logger.warning(\n \"WARNING: NPV of all costs in model doesn't match reported total: \"\n \"{:,.0f} != 
{:,.0f}; difference: {:,.0f}.\"\n .format(npv_cost, system_cost, npv_cost - system_cost)\n )\n\n\n print()\n print(\"TODO: *** check for missing MWh terms in {}.\".format(__name__))\n print()\n\n print(\"Creating RIST summary; may take several minutes.\")\n summarize_for_rist(m, outdir)\n\n # data for HECO info request 2/14/20\n print(\"Saving hourly reserve data.\")\n report_hourly_reserves(m)\n if hasattr(m, 'Smooth_Free_Variables'):\n # using the smooth_dispatch module; re-report dispatch data\n print(\"Re-saving dispatch data after smoothing.\")\n import switch_model.generators.core.dispatch as dispatch\n dispatch.post_solve(m, m.options.outputs_dir)\n else:\n print(\n \"WARNING: the smooth_dispatch module is not being used. Hourly \"\n \"dispatch may be rough and hourly contingency reserve targets may \"\n \"inflated.\"\n )\n\n print(\"Comparing Switch to EIA production data.\")\n if True:\n compare_switch_to_eia_production(m)\n else:\n print(\"(skipped, takes several minutes)\")\n\n # value(m.SystemCost) ==\n # import code\n # code.interact(local=dict(list(globals().items()) + list(locals().items())))", "title": "" }, { "docid": "58924aa200c43dc4f6dabf5e9d39d8b1", "score": "0.5330536", "text": "def calculate(**kwargs):\n from afmMiner import selfCompare\n \n x=[xvector1.get(),xvector2.get(),xvector3.get(),xvector4.get()]\n if xvector1.get() == '':\n print('Need to input x data!')\n if yvector.get() == '':\n print('Need to input y data!')\n \n x_in=[]\n for entry in x:\n if entry == '' :\n pass\n else:\n x_in.append(entry)\n \n selfCompare(x_input=x_in, y_input=yvector.get(), n_trees=trees.get(), n_depth=depths.get(),\\\n n_feature_vector=feature_vector.get())", "title": "" }, { "docid": "7fd107936b6829e90af19b2f5d47085c", "score": "0.5324006", "text": "def __initial_setup(inline_terms, global_variables):\n \n #Construct prepare_variables dictionary using_inline terms and global_variables\n prepare_variables = term_extractor(inline_terms, global_variables, keywords())\n \n #Initialize calculation_variables\n calculation_variables = DM()\n \n #Save terms that must be singular-valued to calculation_variables \n for keyword, default in singular_keywords().iteritems():\n calculation_variables[keyword] = atomman_input.get_value(prepare_variables, keyword, default)\n \n #Fill in mandatory blank values\n for keyword in unused_keywords():\n calculation_variables[keyword] = ''\n \n #Issue a warning if the keyword is defined in global_variables\n if keyword in global_variables:\n print 'Warning: high-throughput of', __calc_type__, 'ignores term', keyword\n \n #Convert 'copy_files' to boolean flag\n if calculation_variables['copy_files'].lower() == 'true':\n calculation_variables['copy_files'] = True\n elif calculation_variables['copy_files'].lower() == 'false':\n calculation_variables['copy_files'] = False\n else:\n raise ValueError('copy_files must be either True or False!')\n \n \n #Set default values for iterated variables\n if len(prepare_variables.aslist('size_mults')) == 0: prepare_variables['size_mults'] = '3 3 3'\n \n #Check lengths of the iterated variables\n assert len(prepare_variables.aslist('potential_file')) == len(prepare_variables.aslist('potential_dir')), 'potential_file and potential_dir must be of the same length'\n assert len(prepare_variables.aslist('load')) == len(prepare_variables.aslist('load_options')), 'load and load_options must be of the same length'\n assert len(prepare_variables.aslist('load')) == len(prepare_variables.aslist('load_elements')), 'load and 
load_elements must be of the same length'\n assert len(prepare_variables.aslist('load')) == len(prepare_variables.aslist('box_parameters')), 'load and box_parameters must be of the same length'\n assert len(prepare_variables.aslist('bain_a_scale')) == len(prepare_variables.aslist('bain_c_scale')), 'bain_a_scale and bain_c_scale must be of the same length'\n assert len(prepare_variables.aslist('bain_a_scale')) > 0, 'no bain_a_scale found'\n\n return prepare_variables, calculation_variables", "title": "" }, { "docid": "79ad2a3c0578b6d5464b23ca5cf6a090", "score": "0.5321633", "text": "def update_MUSEInput(self):\n \n for objRegion in self.lsRegion:\n for objCountry in objRegion.lsCountry:\n \n # carbon price\n io_import_regionNcountry.get_CountryCarbonPrice(objCountry, self.iAllYearSteps_YS)\n \n # fuel price\n io_import_regionNcountry.get_CountryFuelPrice(self, objCountry)\n \n for objMarket in self.lsMarket:\n for objZone in objMarket.lsZone:\n \n # power demand and heat demand\n io_import_market.update_ZonePowerHeatDemand(objZone, self.lsTimeSlice, self.iAllYearSteps_YS)\n\n # import/export\n io_import_market.update_ZonePowerImport(objZone, self.lsTimeSlice, self.iAllYearSteps_YS)\n \n return", "title": "" }, { "docid": "3a91714e598dc9773c22c70e60b9a463", "score": "0.5283198", "text": "def dict_prod_sol(input_file, sol_for_csv, model, universal):\n biomass = get_biomass_equation(model)\n set_constraint_production(input_file, model) # set max uptake as indicated by the user \n #full_out = eval_sol(input_file, model, universal1)\n #processed_out = remove_duplicated_sol(full_out)\n to_produce = get_production_objectives(input_file)\n print('\\nThe metabs to produce are: ', to_produce)\n logging.debug(type(model.solver))\n dict_sol_prod = {}\n for i in sol_for_csv:\n dict_for_models = {}\n if sol_for_csv[i][1][1] > 0:\n logging.debug(sol_for_csv[i][1][0])\n if sol_for_csv[i][1][0]!= None:\n for reaction in sol_for_csv[i][1][0]:\n to_add = universal.reactions.get_by_id(reaction)\n model.add_reaction(to_add)\n logging.debug('passed through here')\n for reaction in sol_for_csv[i][1][0]:\n added = model.reactions.get_by_id(reaction)\n logging.debug(added)\n #add_sol_cons(sol_for_csv, i, model, universal)\n model.solver = 'glpk'\n #logging.debug(type(model.solver))\n model.optimize()\n \n #if biomass.flux < 0.426:\n # biomass.lower_bound = biomass.flux\n #elif biomass.flux > 0.426:\n # biomass.lower_bound = 0.426\n \n print(\"\\n---\"+str(sol_for_csv[i][0]['model'][-1:])+\"---\")\n\n sol_gf_production = production_analysis(input_file, model, universal)\n \n if type(sol_gf_production) == str:\n for target in to_produce:\n info_fba_optimization(input_file, model, target, sol_for_csv[i][1][0])\n else:\n dict_for_models[sol_for_csv[i][0]['model'][-1:]]=sol_gf_production\n if sol_for_csv[i][1][0]!= None:\n list_reactions_cons = sol_for_csv[i][1][0]\n logging.debug(list_reactions_cons)\n remove_rlist(list_reactions_cons, model)\n dict_sol_prod[sol_for_csv[i][0]['model'][-1:]] = dict_for_models\n else:\n pass\n \n return dict_sol_prod", "title": "" }, { "docid": "87791956f1942c7db759938452d73fba", "score": "0.5282127", "text": "def makeStartingCalculations():\n v = {}\n r = {}\n connections = {}\n input_file = open(argvMap[\"--iF\"])\n lineNumber = 1\n for line in input_file:\n if lineNumber == 1:\n argvMap[\"name\"] = line.strip().split()[1]\n argvMap[\"T\"] = line.strip().split()[-1]\n else:\n line = line.strip().split()\n line = map(float,line)\n v[line[0]] = [line[4],line[5],line[6]]\n r[line[0]] = 
[line[1],line[2],line[3]]\n if len(line) > 7:\n connections[line[0]] = line[7:]\n lineNumber+=1\n distMatrix = getDistMatrix(r)\n b0s = calculateBondLengths(connections,distMatrix)\n r0s = calculateNonBondLengths(r,distMatrix)\n startingCalcs = [b0s,r0s,v,r]\n input_file.close()\n return startingCalcs", "title": "" }, { "docid": "0db31c4bcdd76991ef8389f012d8e8db", "score": "0.5235383", "text": "def minimal_test_esM():\n\n numberOfTimeSteps = 4\n hoursPerTimeStep = 2190\n\n # Create an energy system model instance \n esM = fn.EnergySystemModel(locations={'ElectrolyzerLocation', 'IndustryLocation'}, \n commodities={'electricity', 'hydrogen'}, \n numberOfTimeSteps=numberOfTimeSteps,\n commodityUnitsDict={'electricity': r'kW$_{el}$', 'hydrogen': r'kW$_{H_{2},LHV}$'},\n hoursPerTimeStep=hoursPerTimeStep, costUnit='1 Euro', \n lengthUnit='km', \n verboseLogLevel=1)\n\n # time step length [h]\n timeStepLength = numberOfTimeSteps * hoursPerTimeStep\n\n\n ### Buy electricity at the electricity market\n costs = pd.DataFrame([np.array([ 0.05, 0., 0.1, 0.051,]),np.array([0., 0., 0., 0.,])],\n index = ['ElectrolyzerLocation', 'IndustryLocation']).T\n revenues = pd.DataFrame([np.array([ 0., 0.01, 0., 0.,]),np.array([0., 0., 0., 0.,])],\n index = ['ElectrolyzerLocation', 'IndustryLocation']).T\n maxpurchase = pd.DataFrame([np.array([1e6, 1e6, 1e6, 1e6,]),np.array([0., 0., 0., 0.,])],\n index = ['ElectrolyzerLocation', 'IndustryLocation']).T * hoursPerTimeStep\n esM.add(fn.Source(esM=esM, name='Electricity market', commodity='electricity', \n hasCapacityVariable=False, operationRateMax = maxpurchase,\n commodityCostTimeSeries = costs, \n commodityRevenueTimeSeries = revenues, \n )) # eur/kWh\n\n ### Electrolyzers\n esM.add(fn.Conversion(esM=esM, name='Electrolyzers', physicalUnit=r'kW$_{el}$',\n commodityConversionFactors={'electricity':-1, 'hydrogen':0.7},\n hasCapacityVariable=True, \n investPerCapacity=500, # euro/kW\n opexPerCapacity=500*0.025, \n interestRate=0.08,\n economicLifetime=10))\n\n ### Hydrogen filled somewhere\n esM.add(fn.Storage(esM=esM, name='Pressure tank', commodity='hydrogen',\n hasCapacityVariable=True, capacityVariableDomain='continuous',\n stateOfChargeMin=0.33, \n investPerCapacity=0.5, # eur/kWh\n interestRate=0.08,\n economicLifetime=30))\n\n ### Hydrogen pipelines\n esM.add(fn.Transmission(esM=esM, name='Pipelines', commodity='hydrogen',\n hasCapacityVariable=True,\n investPerCapacity=0.177, \n interestRate=0.08, \n economicLifetime=40))\n\n ### Industry site\n demand = pd.DataFrame([np.array([0., 0., 0., 0.,]), np.array([6e3, 6e3, 6e3, 6e3,]),],\n index = ['ElectrolyzerLocation', 'IndustryLocation']).T * hoursPerTimeStep\n esM.add(fn.Sink(esM=esM, name='Industry site', commodity='hydrogen', hasCapacityVariable=False,\n operationRateFix = demand,\n ))\n\n return esM", "title": "" }, { "docid": "53d012479c84d0a395b147d7a3678080", "score": "0.52283365", "text": "def main(comp=\"La0.5Sr0.5MnO3\", energy=-43.3610, ostart=\"\", oend=\"\", ostep=\"\"): \n #a = MPRester(\"<YOUR_MPREST_API_KEY_HERE>\")\n a = MPRester(\"wfmUu5VSsDCvIrhz\")\n \n mycomp=Composition(comp)\n print \"Composition: \", mycomp\n myenergy=energy\n print \"Energy: \", myenergy\n myPDEntry = PDEntry(mycomp, myenergy)\n\n elements = mycomp.elements\n ellist = map(str, elements)\n \n chemsys_entries = a.get_entries_in_chemsys(ellist)\n #For reference: other ways of getting entries\n #entries = a.mpquery(criteria={'elements':{'$in':['La','Mn'],'$all':['O']},'nelements':3})\n #entries = 
a.mpquery(criteria={'elements':{'$in':['La','Mn','O'],'$all':['O']}},properties=['pretty_formula'])\n #entries = a.get_entries_in_chemsys(['La', 'Mn', 'O', 'Sr'])\n \n if ostart==\"\": #Regular phase diagram\n entries = list(chemsys_entries)\n entries.append(myPDEntry)\n pd = PhaseDiagram(entries)\n #plotter = PDPlotter(gppd)\n #plotter.show()\n ppda = PDAnalyzer(pd)\n eabove=ppda.get_decomp_and_e_above_hull(myPDEntry)\n print \"Energy above hull: \", eabove[1]\n print \"Decomposition: \", eabove[0]\n return eabove\n else: #Grand potential phase diagram\n orange = np.arange(ostart, oend+ostep, ostep) #add ostep because otherwise the range ends before oend\n for o_chem_pot in orange:\n entries = list(chemsys_entries)\n myGrandPDEntry = GrandPotPDEntry(myPDEntry,{Element('O'): float(o_chem_pot)}) #need grand pot pd entry for GPPD\n entries.append(myGrandPDEntry)\n gppd = GrandPotentialPhaseDiagram(entries,{Element('O'): float(o_chem_pot)})\n gppda = PDAnalyzer(gppd)\n geabove=gppda.get_decomp_and_e_above_hull(myGrandPDEntry, True)\n print \"******** Decomposition for mu_O = %s eV ********\" % o_chem_pot\n print \"%30s%1.4f\" % (\"mu_O: \",o_chem_pot)\n print \"%30s%1.4f\" % (\"Energy above hull (eV): \",geabove[1])\n decomp=geabove[0]\n #print \"Decomp: \", decomp\n print \"%30s\" % \"Decomposition: \"\n for dkey in decomp.keys():\n print \"%30s:%1.4f\" % (dkey.composition,decomp[dkey])\n return", "title": "" }, { "docid": "3b413205971db6e766bcc0746bd353b1", "score": "0.5203844", "text": "def emission3(filename, data, tag_column):\n\n df = data\n df = df[df[0] != \"start\"].reset_index().drop(\"index\", 1)\n\n df = df.applymap(str.lower)\n\n # data disimpan di dict akan lebih cepat drpd akses langsung df\n dict_tag = {}\n dict_ner = {}\n for i in range(len(df)):\n dict_tag[i] = df[tag_column-1][i]\n dict_ner[i] = df[tag_column][i]\n count_tag = Counter(dict_tag.values())\n count_ner = Counter(dict_ner.values())\n\n # previous\n prev_frasa = [dict_tag[i - 1] + \" \" + dict_ner[i] for i in range(1, len(df))]\n prev = Counter(prev_frasa)\n previous = {}\n for item in prev.keys():\n previous[item] = prev[item] / count_tag[item.split()[0]]\n\n matrix_previous = daframe_em3(count_tag, count_ner, previous)\n pickel1 = filename.replace(\".txt\", \"\") + '_emission_previous.pkl'\n joblib.dump(matrix_previous, pickel1)\n\n # current\n current_frasa = [dict_tag[i] + \" \" + dict_ner[i] for i in range(len(df))]\n cur = Counter(current_frasa)\n current = {}\n for item in cur.keys():\n current[item] = cur[item] / count_ner[item.split()[1]]\n\n matrix_current = daframe_em3(count_tag, count_ner, current)\n pickel2 = filename.replace(\".txt\", \"\") + '_emission_current.pkl'\n joblib.dump(matrix_current, pickel2)\n\n # next\n next_frasa = [dict_tag[i + 1] + \" \" + dict_ner[i] for i in range(len(df) - 1)]\n ne = Counter(next_frasa)\n nextt = {}\n for item in ne.keys():\n nextt[item] = ne[item] / count_ner[item.split()[1]]\n\n matrix_next = daframe_em3(count_tag, count_ner, nextt)\n pickel3 = filename.replace(\".txt\", \"\") + '_emission_next.pkl'\n joblib.dump(matrix_next, pickel3)\n\n return [[matrix_current, matrix_previous, matrix_next],\n [pickel2, pickel1, pickel3]]", "title": "" }, { "docid": "27cd63f031b7ec0d98b28474760f3016", "score": "0.51931036", "text": "def em_process_multiseq(self, seqs):\n try:\n total_ll = 0\n initial_counts = self.initial_counts\n transition_counts = self.transition_counts\n final_counts = self.final_counts\n emission_counts = self.emission_counts\n\n c = 0\n for c, seq in 
enumerate(seqs, 1):\n # prepare trellis\n initial_scores, transition_scores, final_scores, emission_scores = self.trellis_scores(seq)\n # inference (obtain gammas (state and transition posteriors)and ll):\n state_posteriors, transition_posteriors, ll = self.inference.compute_posteriors(initial_scores,\n transition_scores,\n final_scores,\n emission_scores)\n\n length = len(seq.x)\n initial_counts += state_posteriors[0, :]\n for pos in range(length):\n x = seq.x[pos]\n emission_counts[x, :] += state_posteriors[pos, :]\n transition_counts += transition_posteriors.sum(axis=0)\n final_counts += state_posteriors[length - 1, :]\n total_ll += ll\n\n return initial_counts, transition_counts, final_counts, emission_counts, total_ll\n\n except KeyboardInterrupt:\n pass", "title": "" }, { "docid": "c3b9eee5b4b13a15dfa27a8e33dc831e", "score": "0.51633143", "text": "def calcMineralDamping(self) :\n self.dampMineral = {}\n for i in self.fuel.effMineralContent.items() :\n self.dampMineral[i[0]] = math.pow(i[1], -0.19) * 0.174", "title": "" }, { "docid": "f6d6e3a676c40b348ff9fc723243d0c3", "score": "0.51460826", "text": "def emission(filename, data, state, tag_column):\n # data_bersih = pd.read_csv(filename, sep, header = None, skip_blank_lines = True, quoting=csv.QUOTE_NONE)\n data = data[data[0] != \"start\"].reset_index().drop(\"index\", 1)\n\n data_emission = data.applymap(str.lower)\n data_emission[\"frase_emission\"] = data_emission[tag_column-1] + \" \" + data_emission[tag_column]\n\n statecount = Counter(list(data_emission[tag_column]))\n bigram_count = Counter(list(data_emission[\"frase_emission\"]))\n\n emission = {}\n for item in bigram_count.keys():\n n = item.split()\n emission[item] = bigram_count[item] / statecount[n[len(n)-1]]\n\n # unik kata\n kata = list(set(list(data_emission[tag_column-1])))\n\n fra = []\n nilai = []\n for i in range(1,len(state)):\n frame = []\n nilae = []\n for j in range(len(kata)):\n bigram = kata[j] + \" \" + state[i]\n val = emission.get(bigram, 0)\n frame.append(bigram)\n nilae.append(val)\n fra.append(frame)\n nilai.append(nilae)\n\n empty_emission = numpy.zeros(shape = (len(state)-1, len(kata)))\n for i in range(len(nilai)):\n empty_emission[i] = nilai[i]\n\n matrix_emission = pd.DataFrame(data = empty_emission,\n index = numpy.array(state[1:], dtype='|S10'),\n columns = numpy.array(kata, dtype='|S20'))\n\n pickel = filename.replace(\".txt\",\"\") + '_emission.pkl'\n joblib.dump(matrix_emission, pickel)\n\n return [matrix_emission, pickel]", "title": "" }, { "docid": "ae342525f0d249bf2543ecd43bf2db7c", "score": "0.51118296", "text": "def eval_energy(input):\n if input[0]==None:\n energy=0\n bul=0\n individ=0\n rank = MPI.COMM_WORLD.Get_rank()\n signal='Evaluated none individual on '+repr(rank)+'\\n'\n else:\n [Optimizer, individ]=input\n if Optimizer.calc_method=='MAST':\n energy = individ.energy\n bul = individ.energy\n signal = 'Recieved MAST structure\\n'\n else:\n if Optimizer.parallel: rank = MPI.COMM_WORLD.Get_rank()\n if not Optimizer.genealogy:\n STR='----Individual ' + str(individ.index)+ ' Optimization----\\n'\n else:\n STR='----Individual ' + str(individ.history_index)+ ' Optimization----\\n'\n indiv=individ[0]\n if 'EE' in Optimizer.debug:\n debug = True\n else:\n debug = False\n if debug: \n write_xyz(Optimizer.debugfile,indiv,'Recieved by eval_energy')\n Optimizer.debugfile.flush()\n if Optimizer.structure=='Defect':\n indi=indiv.copy()\n if Optimizer.alloy==True:\n bulk=individ.bulki\n else:\n bulk=individ.bulko\n 
nat=indi.get_number_of_atoms()\n csize=bulk.get_cell() \n totalsol=Atoms(cell=csize, pbc=True)\n totalsol.extend(indi)\n totalsol.extend(bulk)\n for sym,c,m,u in Optimizer.atomlist:\n nc=len([atm for atm in totalsol if atm.symbol==sym])\n STR+='Defect configuration contains '+repr(nc)+' '+repr(sym)+' atoms\\n'\n \n elif Optimizer.structure=='Surface':\n totalsol=Atoms()\n totalsol.extend(indiv)\n nat=indiv.get_number_of_atoms()\n totalsol.extend(individ.bulki)\n for sym,c,m,u in Optimizer.atomlist:\n nc=len([atm for atm in totalsol if atm.symbol==sym])\n STR+='Surface-Bulk configuration contains '+repr(nc)+' '+repr(sym)+' atoms\\n'\n cell=numpy.maximum.reduce(indiv.get_cell())\n totalsol.set_cell([cell[0],cell[1],500])\n totalsol.set_pbc([True,True,False])\n \n if Optimizer.constrain_position:\n ts = totalsol.copy()\n indc,indb,vacant,swap,stro = find_defects(ts,Optimizer.solidbulk,0)\n sbulk = Optimizer.solidbulk.copy()\n bcom = sbulk.get_center_of_mass()\n #totalsol.translate(-bulkcom)\n #indc.translate(-bulkcom)\n #totalsol.append(Atom(position=[0,0,0]))\n # \t\t\tfor one in indc:\n # \t\t\t\tindex = [atm.index for atm in totalsol if atm.position[0]==one.position[0] and atm.position[1]==one.position[1] and atm.position[2]==one.position[2]][0]\n # \t\t\t\tif totalsol.get_distance(-1,index) > Optimizer.sf:\n # \t\t\t\t\tr = random.random()\n # \t\t\t\t\ttotalsol.set_distance(-1,index,Optimizer.sf*r,fix=0)\n # \t\t\ttotalsol.pop()\n # \t\t\ttotalsol.translate(bulkcom)\n com = indc.get_center_of_mass()\n dist = (sum((bcom[i] - com[i])**2 for i in range(3)))**0.5\n if dist > Optimizer.sf:\n STR+='Shifting structure to within region\\n'\n r = random.random()*Optimizer.sf\n comv = numpy.linalg.norm(com)\n ncom = [one*r/comv for one in com]\n trans = [ncom[i]-com[i] for i in range(3)]\n indices = []\n for one in indc:\n id = [atm.index for atm in totalsol if atm.position[0]==one.position[0] and atm.position[1]==one.position[1] and atm.position[2]==one.position[2]][0]\n totalsol[id].position += trans\n \n # Check for atoms that are too close\n min_len=0.7\n #pdb.set_trace()\n if not Optimizer.fixed_region:\n if Optimizer.structure=='Defect' or Optimizer.structure=='Surface':\n cutoffs=[2.0 for one in totalsol]\n nl=NeighborList(cutoffs,bothways=True,self_interaction=False)\n nl.update(totalsol)\n for one in totalsol[0:nat]:\n nbatoms=Atoms()\n nbatoms.append(one)\n indices, offsets=nl.get_neighbors(one.index)\n for index, d in zip(indices,offsets):\n index = int(index)\n sym=totalsol[index].symbol\n pos=totalsol[index].position + numpy.dot(d,totalsol.get_cell())\n at=Atom(symbol=sym,position=pos)\n nbatoms.append(at)\n while True:\n dflag=False\n for i in range(1,len(nbatoms)):\n d=nbatoms.get_distance(0,i)\n if d < min_len:\n nbatoms.set_distance(0,i,min_len+.01,fix=0.5)\n STR+='--- WARNING: Atoms too close (<0.7A) - Implement Move ---\\n'\n dflag=True\n if dflag==False:\n break\n for i in range(len(indices)):\n totalsol[indices[i]].position=nbatoms[i+1].position\n totalsol[one.index].position=nbatoms[0].position\n nl.update(totalsol)\n if debug:\n write_xyz(Optimizer.debugfile,totalsol,'After minlength check')\n Optimizer.debugfile.flush()\n else:\n for i in range(len(indiv)):\n for j in range(len(indiv)):\n if i != j:\n d=indiv.get_distance(i,j)\n if d < min_len:\n indiv.set_distance(i,j,min_len,fix=0.5)\n STR+='--- WARNING: Atoms too close (<0.7A) - Implement Move ---\\n'\n if debug:\n write_xyz(Optimizer.debugfile,indiv,'After minlength check')\n Optimizer.debugfile.flush()\n \n # Set 
calculator to use to get forces/energies\n if Optimizer.parallel:\n calc = setup_calculator(Optimizer)\n if Optimizer.fixed_region:\n pms=copy.deepcopy(calc.parameters)\n try:\n pms['mass'][len(pms['mass'])-1] += '\\ngroup RO id >= '+repr(nat)+'\\nfix freeze RO setforce 0.0 0.0 0.0\\n'\n except KeyError:\n pms['pair_coeff'][0] += '\\ngroup RO id >= '+repr(nat)+'\\nfix freeze RO setforce 0.0 0.0 0.0\\n'\n calc = LAMMPS(parameters=pms, files=calc.files, keep_tmp_files=calc.keep_tmp_files, tmp_dir=calc.tmp_dir)\n lmin = copy.copy(Optimizer.lammps_min)\n Optimizer.lammps_min = None\n Optimizer.static_calc = setup_calculator(Optimizer)\n Optimizer.lammps_min = lmin\n else:\n calc=Optimizer.calc\n if Optimizer.structure=='Defect' or Optimizer.structure=='Surface':\n totalsol.set_calculator(calc)\n totalsol.set_pbc(True)\n else:\n indiv.set_calculator(calc)\n indiv.set_pbc(True)\t#Current bug in ASE optimizer-Lammps prevents pbc=false \n if Optimizer.structure=='Cluster':\n indiv.set_cell([500,500,500])\n indiv.translate([250,250,250])\n \n cwd=os.getcwd()\n # Perform Energy Minimization\n if not Optimizer.parallel:\n Optimizer.output.flush()\n if Optimizer.ase_min == True:\n try:\n if Optimizer.structure=='Defect' or Optimizer.structure=='Surface':\n dyn=BFGS(totalsol)\n else:\n dyn=BFGS(indiv)\n dyn.run(fmax=Optimizer.ase_min_fmax, steps=Optimizer.ase_min_maxsteps)\n except OverflowError:\n STR+='--- Error: Infinite Energy Calculated - Implement Random ---\\n'\n box=Atoms()\n indiv=gen_pop_box(Optimizer.natoms, Optimizer.atomlist, Optimizer.size)\n indiv.set_calculator(calc)\n dyn=BFGS(indiv)\n dyn.run(fmax=fmax, steps=steps)\n except numpy.linalg.linalg.LinAlgError:\n STR+='--- Error: Singular Matrix - Implement Random ---\\n'\n indiv=gen_pop_box(Optimizer.natoms, Optimizer.atomlist, Optimizer.size)\n indiv.set_calculator(calc)\n dyn=BFGS(indiv)\n dyn.run(fmax=fmax, steps=steps)\n # Get Energy of Minimized Structure\n if Optimizer.structure=='Defect' or Optimizer.structure=='Surface':\n en=totalsol.get_potential_energy()\n #force=numpy.maximum.reduce(abs(totalsol.get_forces()))\n if Optimizer.fitness_scheme == 'enthalpyfit':\n pressure=totalsol.get_isotropic_pressure(totalsol.get_stress())\n cell_max=numpy.maximum.reduce(totalsol.get_positions())\n cell_min=numpy.minimum.reduce(totalsol.get_positions())\n cell=cell_max-cell_min\n volume=cell[0]*cell[1]*cell[2]\n else:\n pressure=0\n volume=0\n na=totalsol.get_number_of_atoms()\n ena=en/na\n energy=en\n individ[0]=totalsol[0:nat]\n bul=totalsol[(nat):len(totalsol)]\n STR+='Number of positions = '+repr(len(bul)+len(individ[0]))+'\\n'\n individ[0].set_cell(csize)\n indiv=individ[0]\n else:\n en=indiv.get_potential_energy()\n if Optimizer.fitness_scheme == 'enthalpyfit':\n pressure=indiv.get_isotropic_pressure(indiv.get_stress())\n cell_max=numpy.maximum.reduce(indiv.get_positions())\n cell_min=numpy.minimum.reduce(indiv.get_positions())\n cell=cell_max-cell_min\n volume=cell[0]*cell[1]*cell[2]\n else: \n pressure=0\n volume=0\n na=indiv.get_number_of_atoms()\n ena=en/na\n energy=ena\n individ[0]=indiv\n bul=0\n else:\n if Optimizer.structure=='Defect' or Optimizer.structure=='Surface':\n if Optimizer.calc_method=='VASP':\n en=totalsol.get_potential_energy()\n calcb=Vasp(restart=True)\n totalsol=calcb.get_atoms()\n stress=calcb.read_stress()\n else:\n try:\n totcop=totalsol.copy()\n if debug: write_xyz(Optimizer.debugfile,totcop,'Individual sent to lammps')\n OUT=totalsol.calc.calculate(totalsol)\n totalsol=OUT['atoms']\n totalsol.set_pbc(True)\n 
if Optimizer.fixed_region:\n if debug:\n print 'Energy of fixed region calc = ', OUT['thermo'][-1]['pe']\n totalsol.set_calculator(Optimizer.static_calc)\n OUT=totalsol.calc.calculate(totalsol)\n totalsol=OUT['atoms']\n totalsol.set_pbc(True)\n if debug:\n print 'Energy of static calc = ', OUT['thermo'][-1]['pe']\n en=OUT['thermo'][-1]['pe']\n stress=numpy.array([OUT['thermo'][-1][i] for i in ('pxx','pyy','pzz','pyz','pxz','pxy')])*(-1e-4*GPa)\n #force=numpy.maximum.reduce(abs(totalsol.get_forces()))\n if debug:\n write_xyz(Optimizer.debugfile,totalsol,'After Lammps Minimization')\n Optimizer.debugfile.flush()\n except Exception, e:\n os.chdir(cwd)\n STR+='WARNING: Exception during energy eval:\\n'+repr(e)+'\\n'\n f=open('problem-structures.xyz','a')\n write_xyz(f,totcop,data='Starting structure hindex='+individ.history_index)\n write_xyz(f,totalsol,data='Lammps Min structure')\n en=10\n stress=0\n f.close()\n if Optimizer.fitness_scheme == 'enthalpyfit':\n pressure=totalsol.get_isotropic_pressure(stress)\n cell_max=numpy.maximum.reduce(totalsol.get_positions())\n cell_min=numpy.minimum.reduce(totalsol.get_positions())\n cell=cell_max-cell_min\n volume=cell[0]*cell[1]*cell[2]\n else:\n pressure=totalsol.get_isotropic_pressure(stress)\n volume=0\n na=totalsol.get_number_of_atoms()\n ena=en/na\n energy=en\n if Optimizer.structure=='Defect':\n if Optimizer.fixed_region==True or Optimizer.finddefects==False:\n individ[0]=totalsol[0:nat]\n bul=totalsol[(nat):len(totalsol)]\n individ[0].set_cell(csize)\n else:\n if 'FI' in Optimizer.debug:\n outt=find_defects(totalsol,Optimizer.solidbulk,Optimizer.sf,atomlistcheck=Optimizer.atomlist,trackvacs=Optimizer.trackvacs,trackswaps=Optimizer.trackswaps,debug=Optimizer.debugfile)\n else:\n outt=find_defects(totalsol,Optimizer.solidbulk,Optimizer.sf,atomlistcheck=Optimizer.atomlist,trackvacs=Optimizer.trackvacs,trackswaps=Optimizer.trackswaps,debug=False)\n individ[0]=outt[0]\n bul=outt[1]\n individ.vacancies = outt[2]\n individ.swaps = outt[3]\n STR += outt[4]\n indiv=individ[0]\n else:\n top,bul=find_top_layer(totalsol,Optimizer.surftopthick)\n indiv=top.copy()\n individ[0]=top.copy()\n else:\n if Optimizer.calc_method=='VASP':\n en=totalsol.get_potential_energy()\n calcb=Vasp(restart=True)\n totalsol=calcb.get_atoms()\n stress=calcb.read_stress()\n else:\n OUT=indiv.calc.calculate(indiv)\n en=OUT['thermo'][-1]['pe']\n #indiv.set_positions(OUT['atoms'].get_positions())\n #indiv.set_cell(OUT['atoms'].get_cell())\n indiv=OUT['atoms']\n indiv.set_pbc(True)\n stress=numpy.array([OUT['thermo'][-1][i] for i in ('pxx','pyy','pzz','pyz','pxz','pxy')])*(-1e-4*GPa)\n if Optimizer.fitness_scheme == 'enthalpyfit':\n pressure=indiv.get_isotropic_pressure(stress)\n cell_max=numpy.maximum.reduce(indiv.get_positions())\n cell_min=numpy.minimum.reduce(indiv.get_positions())\n cell=cell_max-cell_min\n volume=cell[0]*cell[1]*cell[2]\n else: \n pressure=indiv.get_isotropic_pressure(stress)\n volume=0\n na=indiv.get_number_of_atoms()\n ena=en/na\n energy=en\n individ[0]=indiv\n bul=0\n STR+='EnergypAtm = '+repr(ena)+'\\n'\n if Optimizer.structure=='Crystal':\n STR+='Cell structure = '+repr(check_cell_type(indiv))+'\\n'\n \n # Add concentration energy dependence\n if Optimizer.forcing=='Energy_bias':\n n=[0]*len(Optimizer.atomlist)\n for i in range(len(Optimizer.atomlist)):\n n[i]=len([inds for inds in indiv if inds.symbol==Optimizer.atomlist[i][0]])\n n[i]=abs(n[i]-Optimizer.atomlist[i][1])\n factor=sum(n)**3\n energy=(en+factor)/na\n elif Optimizer.forcing=='Chem_pot':\n 
n=[0]*len(Optimizer.atomlist)\n for i in range(len(Optimizer.atomlist)):\n if Optimizer.structure=='Defect':\n n[i]=len([inds for inds in totalsol if inds.symbol==Optimizer.atomlist[i][0]])\n else:\n n[i]=len([inds for inds in indiv if inds.symbol==Optimizer.atomlist[i][0]])\n n[i]=n[i]*Optimizer.atomlist[i][3]\n factor=sum(n)\n energy=(en+factor)/na\n STR+='Energy with Chemical Potential = '+repr(energy[0])+'\\n'\n\n # Add explosion prevention protection\n # \tif 'prevent_explosions' in globals():\n # \t\tdist=[10]*len(indiv)\n # \t\tfor i in range(len(indiv)):\n # \t\t\tfor j in range(len(indiv)):\n # \t\t\t\tif i != j:\n # \t\t\t\t\tdist[j]=indiv.get_distance(i,j,mic=True)\n # \t\t\tif min(dist) > 3.5:\n # \t\t\t\tenergy+=10\n \n #if Optimizer.structure=='Defect':\n #\tindivid.force=force\n individ.energy=energy\n individ.buli=bul\n individ.pressure=pressure\n individ.volume=volume\n \n if Optimizer.structure=='Cluster':\n indiv.translate([-250,-250,-250])\n if Optimizer.fingerprinting:\n individ.fingerprint=get_fingerprint(Optimizer,individ,Optimizer.fpbin,Optimizer.fpcutoff)\n if Optimizer.parallel:\n calc.clean()\n signal = 'Evaluated individual '+repr(individ.index)+' on '+repr(rank)+'\\n'\n signal +=STR\n else:\n signal=STR\n\n return energy, bul, individ, signal", "title": "" }, { "docid": "52915df9d5ea558e347730ebcebffe34", "score": "0.510851", "text": "def setup_opt_problem(self,m):\n # Set parent model \"pm\"\n self.pm = m\n prt = m.opt_print['setup']\n\n ## SETS\n self.set_TIME = pye.Set(initialize=m.idx_time)\n self.set_AREA = pye.Set(initialize=m.areas)\n self.set_COUNTRY = pye.Set(initialize=m.opt_countries)\n self.set_C2A = pye.Set(self.set_COUNTRY,within=self.set_AREA,initialize=m.country_to_areas)\n self.set_SYNCAREA = pye.Set(initialize=m.syncareas)\n self.set_ROR_AREA = pye.Set(initialize=m.ror_areas)\n self.set_ROR_COUNTRY = pye.Set(initialize=m.ror_countries)\n self.set_PUMP_AREA = pye.Set(initialize=m.pump_areas)\n self.set_PUMP_RES_AREA = pye.Set(initialize=m.pump_res_areas)\n self.set_PUMP_NORES_AREA = pye.Set(initialize=m.pump_nores_areas)\n\n self.set_DAYS = pye.Set(initialize=m.idx_day)\n\n # all gens\n self.set_GEN = pye.Set(initialize = [i for i in range(1,m.nGen+1)])\n # hydro gens\n self.set_HYDRO_GEN = pye.Set(within = self.set_GEN,initialize = [i for i in range(1,m.nGen+1) if m.gen_data.at[i,'gtype'] == 'Hydro'])\n # all gens - hydro gens\n self.set_THERMAL_GEN = pye.Set(within = self.set_GEN,initialize = [i for i in range(1,m.nGen+1) if not m.gen_data.at[i,'gtype'] == 'Hydro'])\n # nuclear\n self.set_NUCLEAR_GEN = pye.Set(within=self.set_GEN,initialize=[i for i in range(1,m.nGen+1) if m.gen_data.at[i,'gtype'] == 'Nuclear'])\n # define set of combined generators\n self.set_COMB_GEN = pye.Set(initialize = [i for i in range(1,m.nGenComb+1)])\n self.set_COMB_GEN_TO_GEN = pye.Set(self.set_COMB_GEN,within=self.set_GEN,initialize = m.gen_comb)\n self.set_RESERVE_AREA = pye.Set(initialize = m.resareas)\n self.set_RESERVE_AREA_TO_GEN = pye.Set(self.set_RESERVE_AREA,within=self.set_GEN,initialize = m.reserve_gens)\n self.set_RESERVE_COUNTRY = pye.Set(initialize = [c for c in m.opt_reserves_fcrn if m.opt_reserves_fcrn[c] > 0])\n self.set_RESERVE_COUNTRY_TO_GEN = pye.Set(self.set_RESERVE_COUNTRY,within=self.set_GEN,initialize=m.reserve_gens_country)\n\n self.set_WIND_AREA = pye.Set(domain=self.set_AREA,initialize =m.wind_areas)\n self.set_AREA_TO_GEN = pye.Set(self.set_AREA,within=self.set_GEN,initialize=m.gen_in_area)\n\n self.set_HYDRO_AREA = pye.Set(within = 
self.set_AREA,initialize = m.hydrores)\n self.set_HYDRO_AREA_TO_GEN = pye.Set(self.set_HYDRO_AREA,within=self.set_HYDRO_GEN,initialize=m.reservoir2hydro)\n # all areas with positive solar capacity\n self.set_SOLAR_AREA = pye.Set(domain=self.set_AREA,initialize=m.solar_areas)\n\n # internal connections\n self.set_XINT = pye.Set(initialize = [i for i in range(1,m.nXint+1)])\n self.set_XINT_FW = pye.Set(self.set_AREA,within=self.set_XINT,initialize=m.xintf)\n self.set_XINT_BW = pye.Set(self.set_AREA,within=self.set_XINT,initialize=m.xintr)\n\n # external connections\n self.set_XEXT = pye.Set(initialize = [i for i in range(1,m.nXext+1)])\n # divide external connections into fixed price and fixed transfer\n # fixed price connections\n self.set_XEXT_VAR = pye.Set(initialize = m.fixed_price_connections)\n # fixed transfer connections\n self.set_XEXT_PAR = pye.Set(initialize = m.fixed_transfer_connections)\n # connections to modeled external regions\n self.set_AREA_TO_XEXT_PAR = pye.Set(self.set_AREA,within=self.set_XEXT,initialize=m.xext_ft)\n self.set_AREA_TO_XEXT_VAR = pye.Set(self.set_AREA,within=self.set_XEXT,initialize=m.xext_fp)\n\n self.set_HVDC = pye.Set(initialize = [i for i in m.combined_hvdc.keys()])\n self.set_HVDC_TO_XINT = pye.Set(self.set_HVDC,within=self.set_XINT,initialize = m.combined_hvdc)\n\n ## PARAMETERS\n if m.opt_hydro_daily:\n self.param_INFLOW = pye.Param(self.set_HYDRO_AREA,self.set_DAYS,initialize=pparam_INFLOW,mutable=True)\n else:\n self.param_INFLOW = pye.Param(self.set_HYDRO_AREA,self.set_TIME,initialize=pparam_INFLOW,mutable=True)\n\n ## VARIABLES\n def tag_var():\n pass\n if prt:\n print('Setting up VARIABLES')\n\n # load shedding\n self.var_LS = pye.Var(self.set_AREA,self.set_TIME,domain=pye.Reals,bounds=plim_LS,initialize=pini_LS)\n ## all generators\n # production\n self.var_PG = pye.Var(self.set_GEN,self.set_TIME,domain=pye.Reals,bounds=plim_PG,initialize=pini_PG)\n # wind generation\n self.var_WIND = pye.Var(self.set_WIND_AREA,self.set_TIME,domain=pye.Reals,bounds=plim_W,initialize=pini_W)\n # solar generation\n self.var_SOLAR = pye.Var(self.set_SOLAR_AREA,self.set_TIME,domain=pye.Reals,bounds=plim_SOLAR,initialize=pini_SOLAR)\n ## hydro generators\n if m.opt_hydro_daily:\n # spillage\n self.var_SPILLAGE = pye.Var(self.set_HYDRO_AREA,self.set_DAYS,domain=pye.Reals,bounds=(0,None),initialize=0)\n # reservoir storage\n self.var_RES = pye.Var(self.set_HYDRO_AREA,self.set_DAYS,domain=pye.Reals,bounds=plim_RES,initialize=pini_RES)\n else:\n # spillage\n self.var_SPILLAGE = pye.Var(self.set_HYDRO_AREA,self.set_TIME,domain=pye.Reals,bounds=(0,None),initialize=0)\n # reservoir storage\n self.var_RES = pye.Var(self.set_HYDRO_AREA,self.set_TIME,domain=pye.Reals,bounds=plim_RES,initialize=pini_RES)\n # run of river hydro\n self.var_HROR = pye.Var(self.set_ROR_AREA,self.set_TIME,domain=pye.Reals,bounds=plim_HROR,initialize=pini_HROR)\n # pump hydro\n self.var_PUMP = pye.Var(self.set_PUMP_AREA,self.set_TIME,domain=pye.Reals,bounds=plim_PUMP,initialize=pini_PUMP)\n self.var_REL = pye.Var(self.set_PUMP_RES_AREA,self.set_TIME,domain=pye.Reals,bounds=plim_REL,initialize=0)\n self.var_PRES = pye.Var(self.set_PUMP_RES_AREA,self.set_TIME,domain=pye.Reals,bounds=plim_PRES,initialize=0)\n\n ## internal transmission\n self.var_X1 = pye.Var(self.set_XINT,self.set_TIME,domain=pye.Reals,bounds=plim_X1,initialize=pini_X1)\n self.var_X2 = pye.Var(self.set_XINT,self.set_TIME,domain=pye.Reals,bounds=plim_X2,initialize=pini_X2)\n # external transmission to areas with fixed price\n 
self.var_XEXT = pye.Var(self.set_XEXT_VAR,self.set_TIME,domain=pye.Reals,bounds=plim_XEXT,initialize=pini_XEXT)\n\n ## OBJECTIVE\n\n self.OBJ = pye.Objective(rule=pobj_THERMAL_COST)\n\n ## CONSTRAINTS\n if prt:\n print('Setting up CONSTRAINTS')\n self.constr_POWER_BALANCE = pye.Constraint(self.set_AREA,self.set_TIME,rule=pconstr_POWER_BALANCE)\n\n if m.opt_hydro_daily:\n self.constr_RESERVOIR_BALANCE = pye.Constraint(self.set_HYDRO_AREA,self.set_DAYS,rule=pconstr_RESERVOIR_BALANCE_DAILY)\n else:\n self.constr_RESERVOIR_BALANCE = pye.Constraint(self.set_HYDRO_AREA,self.set_TIME,rule=pconstr_RESERVOIR_BALANCE)\n if m.opt_hydro_daily:\n self.constr_FIX_RESERVOIR = pye.Constraint(self.set_HYDRO_AREA,rule=pconstr_FINAL_RESERVOIR_DAILY)\n else:\n self.constr_FIX_RESERVOIR = pye.Constraint(self.set_HYDRO_AREA,rule=pconstr_FINAL_RESERVOIR)\n\n self.constr_MIN_HYDRO = pye.Constraint(self.set_HYDRO_AREA,self.set_TIME,rule=pconstr_MIN_HYDRO)\n self.constr_MAX_HYDRO = pye.Constraint(self.set_HYDRO_AREA,self.set_TIME,rule=pconstr_MAX_HYDRO)\n\n self.constr_PUMP_BALANCE = pye.Constraint(self.set_PUMP_RES_AREA,self.set_TIME,rule=pconstr_PUMP_BALANCE)\n\n self.constr_HVDC_RAMP = pye.Constraint(self.set_HVDC,self.set_TIME,rule=pconstr_HVDC_RAMP)\n # #\n self.constr_THERMAL_RAMP = pye.Constraint(self.set_THERMAL_GEN,self.set_TIME,rule=pconstr_THERMAL_RAMP)\n self.constr_HYDRO_RAMP = pye.Constraint(self.set_HYDRO_AREA,self.set_TIME,rule=pconstr_HYDRO_RAMP)\n # #\n self.constr_HVDC_RAMP_EXT = pye.Constraint(self.set_XEXT_VAR,self.set_TIME,rule=pconstr_HVDC_RAMP_EXT)\n\n if m.opt_use_reserves:\n if m.opt_country_reserves: # reserves by country\n self.constr_RESERVES_UP = pye.Constraint(self.set_RESERVE_COUNTRY,self.set_TIME,rule=pconstr_RESERVES_UP)\n self.constr_RESERVES_DW = pye.Constraint(self.set_RESERVE_COUNTRY,self.set_TIME,rule=pconstr_RESERVES_DW)\n else: # reserves by area\n self.constr_RESERVES_UP = pye.Constraint(self.set_RESERVE_AREA,self.set_TIME,rule=pconstr_RESERVES_UP)\n self.constr_RESERVES_DW = pye.Constraint(self.set_RESERVE_AREA,self.set_TIME,rule=pconstr_RESERVES_DW)\n\n if m.opt_use_inertia_constr:\n self.constr_INERTIA = pye.Constraint(self.set_TIME,rule=pconstr_INERTIA)", "title": "" }, { "docid": "dff0f82f789a64ecd51b1546e8c7f87e", "score": "0.50920576", "text": "def algorithm_par_mp1(self):\n comm = MPI.COMM_WORLD\n rank = MPI.COMM_WORLD.Get_rank()\n if rank==0:\n if 'MA' in self.debug:\n debug = True\n else:\n debug = False\n self.algorithm_initialize()\n self.convergence = False\n convergence = False\n while not convergence:\n if rank==0:\n self.calc = tools.setup_calculator(self) #Set up calculator for atomic structures\n #Set up calculator for fixed region calculations\n if self.fixed_region:\n self.static_calc = self.calc #May need to copy this\n self.calc = tools.setup_fixed_region_calculator(self)\n pop = self.population\n offspring = self.generation_set(self,pop)\n # Identify the individuals with an invalid fitness\n invalid_ind = [ind for ind in offspring if ind.energy==0]\n #Evaluate the individuals with invalid fitness\n self.output.write('\\n--Evaluate Structures--\\n')\n proc_dist = int(comm.Get_size()/self.n_proc_eval)\n ntimes=int(math.ceil(float(len(invalid_ind))/float(proc_dist)))\n nadd=int(ntimes*proc_dist-len(invalid_ind))\n maplist=[[] for n in range(ntimes)]\n strt=0\n for i in range(len(maplist)):\n maplist[i]=[[self,indi] for indi in invalid_ind[strt:proc_dist+strt]]\n strt+=proc_dist\n for i in range(nadd):\n maplist[len(maplist)-1].append([None,None])\n 
masterlist = [i*self.n_proc_eval for i in range(proc_dist)]\n else:\n ntimes=None\n masterlist = None\n ntimes = comm.bcast(ntimes,root=0)\n outs=[]\n for i in range(ntimes):\n if rank==0:\n one=maplist[i]\n for j in range(len(one)):\n comm.send(ind, dest=1, tag=11)\n elif rank == 1:\n ind = comm.recv(source=0, tag=11)\n else:\n one=None\n ind =comm.scatter(one,root=0)\n out = switches.fitness_switch(ind)\n else:\n invalid_ind=[]\n poorlist = []\n for i in range(len(invalid_ind)):\n if self.fitness_scheme=='STEM_Cost':\n if self.stem_coeff==None:\n ind = invalid_ind.pop()\n from MAST.structopt_stem.tools.StemCalc import find_stem_coeff\n outs = find_stem_coeff(self,ind)\n ind = outs[1]\n self.stem_coeff = outs[0]\n self.output.write('stem_coeff Calculated to be: '+repr(self.stem_coeff)+'\\n')\n pop.append(ind)\n ind=invalid_ind[i]\n if 'MA' in self.debug: write_xyz(self.debugfile,ind[0],'Individual to fitness_switch')\n outs = switches.fitness_switch([self,ind])\n self.output.write(outs[1])\n invalid_ind[i]=outs[0]\n if invalid_ind[i].energy == float('inf'):\n poorlist.append(i)\n self.output.write('Removing infinite energy individual '+repr(ind.history_index)+'\\n')\n elif invalid_ind[i].energy == float('nan'):\n poorlist.append(i)\n self.output.write('Removing nan energy individual '+repr(ind.history_index)+'\\n')\n self.output.flush()\n if len(poorlist) != 0:\n poorlist.sort(reverse=True)\n for one in poorlist:\n del invalid_ind[one]\n if rank==0:\n pop.extend(invalid_ind)\n pop = self.generation_eval(pop)\n convergence = comm.bcast(self.convergence, root=0)\n \n if rank==0:\n end_signal = self.algorithm_stats(self.population)\n else:\n end_signal = None\n end_signal = comm.bcast(end_signal, root=0)\n return end_signal", "title": "" }, { "docid": "661e5d62630d753437a09be1cdab35d6", "score": "0.50640976", "text": "def do_energy_calculation(self, organism, dictionary, key):\n\n # make the job directory\n job_dir_path = str(os.getcwd()) + '/temp/' + str(organism.id)\n os.mkdir(job_dir_path)\n\n # copy the lammps input script to the job directory and get its path\n shutil.copy(self.input_script, job_dir_path)\n script_name = os.path.basename(self.input_script)\n input_script_path = job_dir_path + '/' + str(script_name)\n\n self.conform_to_lammps(organism)\n self.write_data_file(organism, job_dir_path) # write in.data file\n\n # just for testing, write out the unrelaxed structure to a poscar file\n # organism.structure.to(fmt='poscar', filename= job_dir_path +\n # '/POSCAR.' 
+ str(organism.id) + '_unrelaxed')\n\n # run 'calllammps' script as a subprocess to run LAMMPS\n print('Starting LAMMPS calculation on organism {} '.format(\n organism.id))\n try:\n lammps_output = subprocess.check_output(\n ['calllammps', input_script_path], stderr=subprocess.STDOUT)\n # convert from bytes to string (for Python 3)\n lammps_output = lammps_output.decode('utf-8')\n except subprocess.CalledProcessError as e:\n # write the output of a bad LAMMPS call to for the user's reference\n with open(job_dir_path + '/log.lammps', 'w') as log_file:\n log_file.write(e.output)\n print('Error running LAMMPS on organism {} '.format(organism.id))\n dictionary[key] = None\n return\n\n # write the LAMMPS output\n with open(job_dir_path + '/log.lammps', 'w') as log_file:\n log_file.write(lammps_output)\n\n # parse the relaxed structure from the atom.dump file\n symbols = organism.structure.symbol_set\n try:\n relaxed_structure = self.get_relaxed_structure(\n job_dir_path + '/dump.atom', job_dir_path + '/in.data',\n symbols)\n except:\n print('Error reading structure of organism {} from LAMMPS '\n 'output '.format(organism.id))\n dictionary[key] = None\n return\n\n # parse the total energy from the log.lammps file\n try:\n total_energy = self.get_energy(job_dir_path + '/log.lammps')\n except:\n print('Error reading energy of organism {} from LAMMPS '\n 'output '.format(organism.id))\n dictionary[key] = None\n return\n\n organism.structure = relaxed_structure\n organism.total_energy = total_energy\n organism.epa = total_energy/organism.structure.num_sites\n print('Setting energy of organism {} to {} eV/atom '.format(\n organism.id, organism.epa))\n dictionary[key] = organism", "title": "" }, { "docid": "d87e012a8a5981deabcb474b380a1bc6", "score": "0.50148493", "text": "def _extract_results(self):\n\t\tif rank==0: print \"Extracting the results\",\n\t\tfirings = {}\n\t\tself._meanFr = {}\n\t\tself._estimatedEMG = {}\n\t\tfor muscle in self._nn.actionPotentials:\n\t\t\tfirings[muscle]={}\n\t\t\tself._meanFr[muscle]={}\n\t\t\tself._estimatedEMG[muscle]={}\n\t\t\tfor cell in self._nn.actionPotentials[muscle]:\n\t\t\t\tfirings[muscle][cell] = tlsf.exctract_firings(self._nn.actionPotentials[muscle][cell])\n\t\t\t\tself._meanFr[muscle][cell] = tlsf.compute_mean_firing_rate(firings[muscle][cell])\n\t\t\t\tself._estimatedEMG[muscle][cell] = tlsf.synth_rat_emg(firings[muscle][cell])\n\t\t\t\tif rank==0: print \".\",\n\t\tif rank==0: print \"...completed.\"", "title": "" }, { "docid": "65f24f59656b011136d8db2887b74bdf", "score": "0.50113845", "text": "def algorithm_par_mp(self):\n global logger\n comm = MPI.COMM_WORLD\n rank = MPI.COMM_WORLD.Get_rank()\n if rank==0:\n if 'MA' in self.debug:\n debug = True\n else:\n debug = False\n self.algorithm_initialize()\n self.convergence = False\n convergence = False\n while not convergence:\n if rank==0:\n pop = self.population\n offspring = self.generation_set(self,pop)\n # Identify the individuals with an invalid fitness\n invalid_ind = [ind for ind in offspring if ind.energy==0]\n #Evaluate the individuals with invalid fitness\n self.output.write('\\n--Evaluate Structures--\\n')\n else:\n invalid_ind=[]\n for i in range(len(invalid_ind)):\n if self.fitness_scheme=='STEM_Cost':\n if self.stem_coeff==None:\n ind = invalid_ind.pop()\n from MAST.structopt_stem.tools.StemCalc import find_stem_coeff\n outs = find_stem_coeff(self,ind)\n ind = outs[1]\n self.stem_coeff = outs[0]\n self.output.write('stem_coeff Calculated to be: '+repr(self.stem_coeff)+'\\n')\n 
pop.append(ind)\n ind=invalid_ind[i]\n if 'MA' in self.debug: write_xyz(self.debugfile,ind[0],'Individual to fitness_switch')\n outs = switches.fitness_switch([self,ind])\n self.output.write(outs[1])\n invalid_ind[i]=outs[0]\n self.output.flush()\n if rank==0:\n pop.extend(invalid_ind)\n pop = self.generation_eval(pop)\n self.write()\n convergence = comm.bcast(self.convergence, root=0)\n \n if rank==0:\n end_signal = self.algorithm_stats(self.population)\n else:\n end_signal = None\n end_signal = comm.bcast(end_signal, root=0)\n return end_signal", "title": "" }, { "docid": "7b1487366424b15653c57c83493fb5fe", "score": "0.49805322", "text": "def calc_machine_output(self, embeddings):\r\n output = self.test_machine_m.eval({self.test_embA: embeddings,self.test_embB: embeddings})\r\n return output", "title": "" }, { "docid": "316b6c647908ca2a819cfc6616b0f28e", "score": "0.49549648", "text": "def do_stuff(self):\n #self.intersect_jobs()\n #self.gmes_weighted()\n #self.intersect_jobs_umland()\n #self.intersect_einwohner_umland()\n self.intersect_jobs_corine_umland()", "title": "" }, { "docid": "1f3aecc99278e088ada07e5b697eb7b3", "score": "0.49531397", "text": "def test_computation_table_ageing_mixed_requests_and_ready_computations(self):\n name = Name(\"/test/NFN\")\n name2 = Name(\"/data/NFN\")\n\n self.computationList.add_computation(name, 0, Interest(name))\n self.computationList.add_computation(name2, 0, Interest(name2))\n\n self.computationList.container[0].timeout = 1.0\n self.computationList.container[1].timeout = 1.0\n\n request_name = Name(\"/request/NFN\")\n request_name1 = Name(\"/request1\")\n request_name2 = Name(\"/request2/NFN\")\n\n self.computationList.container[0].add_name_to_await_list(request_name)\n self.computationList.container[0].add_name_to_await_list(request_name1)\n self.computationList.container[1].add_name_to_await_list(request_name2)\n\n self.assertEqual(len(self.computationList.container), 2)\n self.assertEqual(len(self.computationList.container[0].awaiting_data), 2)\n self.assertEqual(len(self.computationList.container[1].awaiting_data), 1)\n\n res = self.computationList.ageing()\n self.assertEqual(res, ([], []))\n time.sleep(2)\n\n res = self.computationList.ageing()\n\n self.assertEqual(len(self.computationList.container), 1)\n self.assertEqual(len(self.computationList.container[0].awaiting_data), 1)\n\n self.assertEqual(res, ([request_name2], [name]))\n\n v = self.computationList.push_data(Content(request_name2))\n self.assertTrue(v)\n ready_comps = self.computationList.get_ready_computations()\n self.assertEqual(len(ready_comps), 1)\n self.assertEqual(ready_comps[0].original_name, name2)", "title": "" }, { "docid": "96d84b60e948c810c7e29bf3fc21115a", "score": "0.49514353", "text": "def estimate_params(self, transition_counts, emission_counts):\r\n # now calculate the information\r\n ml_transitions = self.ml_estimator(transition_counts)\r\n ml_emissions = self.ml_estimator(emission_counts)\r\n\r\n return ml_transitions, ml_emissions", "title": "" }, { "docid": "41917203f98a7b147a8893deef8f4478", "score": "0.49424878", "text": "def compute(self):\n names,number_of_masses,times,readings = self.extract()\n writing_rows,supp_writ_rows,sigma = calc_circ.unknown_set(names,number_of_masses,times,readings)\n\n self.writing_rows = writing_rows\n self.supp_writ_rows = supp_writ_rows\n self.sigma = sigma\n self.show_results()\n self.get_tolerances()", "title": "" }, { "docid": "597e61904700c6e30f65f6bd30efca14", "score": "0.4934212", "text": "def 
cal_emission_param(self):\n for k in self.emission_counts.keys():\n # print k, self.emission_counts[k], tuple(k[-1:]), self.ngram_counts[0][tuple(k[-1:])]\n self.emission_params[k] = float(self.emission_counts[k]) / float(self.ngram_counts[0][tuple(k[-1:])])\n # print self.emission_params[k]", "title": "" }, { "docid": "8bbbc21c85d038d14669df9b3ccca1db", "score": "0.49304497", "text": "def process(self):\n\n # conduct initial setup/warmup period\n # self.order_gen.send_order(self.init_qty) \n # self.entity_ordered(self.init_qty) \n # yield self.hold(self.warmup_time)\n\n # prepopulate entitys\n if self.init_qty > 0:\n # # yield from self.populate()\n # self.on_order.set(self.init_qty)\n for _ in range(self.init_qty):\n self.queue.append(sim.Component(name='dummy'))\n # self.order_gen.populate_inv(location=self.env.objs[self._name])\n self.update_inv()\n\n while True:\n yield self.wait((self.total_inv, lambda v, c, s: v < self.order_point)) # wait until the inventory level falls below the indicated threshold\n self.order_gen.send_order(self.order_qty)\n self.entity_ordered(self.order_qty)", "title": "" }, { "docid": "38bd41f0a015ad2d60ccdd4447e0f11e", "score": "0.49298108", "text": "def do_energy_calculation(self, organism, dictionary, key):\n\n # make the job directory\n job_dir_path = str(os.getcwd()) + '/temp/' + str(organism.id)\n os.mkdir(job_dir_path)\n\n # just for testing, write out the unrelaxed structure to a poscar file\n # organism.structure.to(fmt='poscar', filename= job_dir_path +\n # '/POSCAR.' + str(organism.id) + '_unrelaxed')\n\n # write the GULP input file\n gin_path = job_dir_path + '/' + str(organism.id) + '.gin'\n self.write_input_file(organism, gin_path)\n\n # run 'calllgulp' script as a subprocess to run GULP\n print('Starting GULP calculation on organism {} '.format(organism.id))\n try:\n gulp_output = subprocess.check_output(['callgulp', gin_path],\n stderr=subprocess.STDOUT)\n # convert from bytes to string (for Python 3)\n gulp_output = gulp_output.decode('utf-8')\n except subprocess.CalledProcessError as e:\n # write the output of a bad GULP call to for the user's reference\n with open(job_dir_path + '/' + str(organism.id) + '.gout',\n 'w') as gout_file:\n gout_file.write(e.output)\n print('Error running GULP on organism {} '.format(organism.id))\n dictionary[key] = None\n return\n\n # write the GULP output for the user's reference\n with open(job_dir_path + '/' + str(organism.id) + '.gout',\n 'w') as gout_file:\n gout_file.write(gulp_output)\n\n # check if not converged (part of this is copied from pymatgen)\n conv_err_string = 'Conditions for a minimum have not been satisfied'\n gradient_norm = self.get_grad_norm(gulp_output)\n if conv_err_string in gulp_output and gradient_norm > 0.1:\n print('The GULP calculation on organism {} did not '\n 'converge '.format(organism.id))\n dictionary[key] = None\n return\n\n # parse the relaxed structure from the gulp output\n try:\n # TODO: change this line if pymatgen fixes the gulp parser\n relaxed_structure = self.get_relaxed_structure(gulp_output)\n except:\n print('Error reading structure of organism {} from GULP '\n 'output '.format(organism.id))\n dictionary[key] = None\n return\n\n # parse the total energy from the gulp output\n try:\n total_energy = self.get_energy(gulp_output)\n except:\n print('Error reading energy of organism {} from GULP '\n 'output '.format(organism.id))\n dictionary[key] = None\n return\n\n # sometimes gulp takes a supercell\n num_atoms = self.get_num_atoms(gulp_output)\n\n 
organism.structure = relaxed_structure\n organism.epa = total_energy/num_atoms\n organism.total_energy = organism.epa*organism.structure.num_sites\n print('Setting energy of organism {} to {} eV/atom '.format(\n organism.id, organism.epa))\n dictionary[key] = organism", "title": "" }, { "docid": "c5f60f186a94433b133cf1ef564c6f72", "score": "0.4911825", "text": "def process_itm_on_rxn(conf,itm,rxn,item,state,dampt1,dampt2) :\n \n if state=='is1' or state=='is2' : \n semirxn='d'\n sign='-' # Consume reactants \n elif state=='fs1' or state=='fs2' : \n semirxn='i'\n sign='+' # Increase products \n else : \n print(\"Wrong state for reaction\", item, \"\\nOnly 'is1', 'is2', 'fs1', and 'fs2' supported\") \n exit() \n \n # Get energy of the (initial/final) state \"i\" \n if rxn[item][state]=='None' or rxn[item][state]==None :\n G=0.0\n else:\n try:\n G=itm[rxn[item][state]]['G'] \n except: \n print(\"\\n Error!, reaction \",item, \" comes from \",state, rxn[item][state],\n \" whose energy was not found.\")\n exit()\n # If (initial/final) state \"i\" is on catalyst, include concentration in rxn equation\n # and add rxn to respective differential equation. \n if itm[rxn[item][state]]['phase']=='cat':\n rxn[item]['rt'+semirxn]+=\"*c\"+rxn[item][state]+\"(t)\"\n rxn[item]['srt'+semirxn]+=\"*sc\"+rxn[item][state]\n # Do not generate differential equation for site-balance species (empty site?). \n if rxn[item][state]!=conf['Catalyst']['sitebalancespecies'] :\n itm[rxn[item][state]]['diff']+=sign+\"r\"+item+\"(t)\"\n # If (initial/final) state \"i\" is \"gas\" (or aqueous) use P instead of c(t) \n # and do not generate any differential equation. \n elif itm[rxn[item][state]]['phase']=='gas':\n rxn[item]['rt'+semirxn]+= dampt1+\"*P\"+rxn[item][state]\n rxn[item]['srt'+semirxn]+=dampt2+\"*P\"+rxn[item][state] \n # If (initial/final) state \"i\" is \"aqu\" (or aqueous) use CSL instead of c(t) \n # and do not generate any differential equation. 
\n elif itm[rxn[item][state]]['phase']=='aqu': \n rxn[item]['rt'+semirxn]+= dampt1+\"*CSL\"+rxn[item][state] \n rxn[item]['srt'+semirxn]+=dampt2+\"*CSL\"+rxn[item][state] \n return G", "title": "" }, { "docid": "805bd5effa14d05cb23680bbcd20f55a", "score": "0.49114093", "text": "def process_results(master_state_map):\n # for state_num, state_data in master_state_map.items():\n # print(state_data)\n\n for state_num, state_data in master_state_map.items():\n #print(f\"state_num: {state_num}, this represents the item number in the dictionary.\")\n #print(f\"state_data: {state_data}, this contains all the state data\")\n # print(state_data)\n dem_elect = {'democratic': 0}\n rep_elect = {'republican': 0}\n lib_elect = {'libertarian': 0}\n ind_elect = {'independent': 0}\n for key, value in state_data.items():\n if key == 'party':\n if value == 'democratic':\n if key == 'electorate':\n for key,value in dem_elect:\n Democratic = key\n Electors = value\n print(dem_elect)\n elif 'party' == 'republican':\n if key == 'electorate':\n for key, value in rep_elect:\n Republican = key\n Electors = value\n print(rep_elect)\n elif 'party' == 'libertarian':\n if key == 'electorate':\n for key, value in lib_elect:\n Libertarian = key\n Electors = value\n print(lib_elect)\n elif 'party' == 'Independent':\n if key == 'electorate':\n for key, value in ind_elect:\n Libertarian = key\n Electors = value\n print(ind_elect)\n\n print(f\"state: {value}\")\n elif key == 'num_voters':\n print(f\"num_voters: {value}\")\n elif key == 'party':\n print(f\"party: {value}\")\n elif key == 'votes':\n print(f\"votes: {value}\")\n elif key == 'electorate':\n print(f\"electorate: {value}\")", "title": "" }, { "docid": "3d94165a3088a4c8aa9a41c427baac2f", "score": "0.48999926", "text": "def main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--data_name', default='our_imdb')\n parser.add_argument('--norm', default='cosine') # cosine / L2 Norm / L1 Norm\n args = parser.parse_args()\n norm = set(args.norm)\n all_micro = []\n all_macro = []\n all_acc = []\n all_auc = []\n number = 1000\n times = 2\n ratio_arr = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]\n graph_maker = GraphImporter(args.data_name)\n graph = graph_maker.import_imdb_multi_graph()\n # nodes = graph.nodes()\n # indexes = np.linspace(0, len(nodes)-1, 5000)\n # indexes = indexes.astype(int)\n # relevant_nodes = np.array(nodes)[indexes]\n # graph = nx.subgraph(graph, relevant_nodes)\n embeddings_maker = EmbeddingCreator(args.data_name, graph)\n dict_embeddings_event2vec = embeddings_maker.create_event2vec_embeddings()\n # dict_embeddings_node2vec = embeddings_maker.create_node2vec_embeddings()\n # dict_event2vec_embeddings = embedding_model.create_event2vec_embeddings()\n # nodes = list(dict_event2vec_embeddings.keys())\n # relevant_edges = edges_to_predict(multi_graph)\n # true_edges = choose_true_edges(relevant_edges, number)\n # false_edges = choose_false_edges(multi_graph, relevant_edges, number)\n true_edges = choose_true_edges(graph, number, times)\n false_edges = choose_false_edges(graph, number, args.data_name, times, True)\n dict_measures_event2vec = compute_final_measures(true_edges, false_edges, dict_embeddings_event2vec, ratio_arr, number, times, norm)\n # dict_measures_node2vec = compute_final_measures(true_edges, false_edges, dict_embeddings_node2vec, ratio_arr, number, times, norm)\n dict_measures = {}\n # dict_measures['node2vec'] = dict_measures_node2vec\n dict_measures['event2vec'] = dict_measures_event2vec\n plots_maker(dict_measures, 
ratio_arr, 'AUC', args.data_name.upper(), number, args.norm)\n print('avg acc e2v: ', dict_measures_event2vec['Accuracy'])\n print('avg auc e2v: ', dict_measures_event2vec['AUC'])\n print('avg micro e2v: ', dict_measures_event2vec['Micro-f1'])\n print('avg macro e2v: ', dict_measures_event2vec['Macro-f1'])\n print('std acc e2v: ', dict_measures_event2vec['std_acc'])\n print('std auc e2v: ', dict_measures_event2vec['std_auc'])\n print('std micro e2v: ', dict_measures_event2vec['std_micro'])\n print('std macro e2v: ', dict_measures_event2vec['std_macro'])\n # print('avg acc n2v: ', dict_measures_node2vec['Accuracy'])\n # print('avg auc n2v: ', dict_measures_node2vec['AUC'])\n # print('avg micro n2v: ', dict_measures_node2vec['Micro-f1'])\n # print('avg macro n2v: ', dict_measures_node2vec['Macro-f1'])\n # print('std acc n2v: ', dict_measures_node2vec['std_acc'])\n # print('std auc n2v: ', dict_measures_node2vec['std_auc'])\n # print('std micro n2v: ', dict_measures_node2vec['std_micro'])\n # print('std macro n2v: ', dict_measures_node2vec['std_macro'])\n # dict_embeddings = embeddings_maker.create_node2vec_embeddings()\n # micro, macro, acc, auc = exp_lp(X, Y, ratio_arr, 3)\n # avg_micro, avg_macro, avg_acc, avg_auc = calculate_all_avg_scores(micro, macro, acc, auc, 3)\n # all_micro.append(avg_micro)\n # all_macro.append(avg_macro)\n # all_acc.append(avg_acc)\n # all_auc.append(avg_auc)\n # fig1, fig2, fig3, fig4 = split_vs_score(all_micro[0], all_macro[0], all_micro[1], all_macro[1], all_acc[0],\n # all_acc[1], all_auc[0], all_auc[1], ratio_arr)\n # plt.show()", "title": "" }, { "docid": "253446caee50e5cbdade7e1b0adaae66", "score": "0.48959243", "text": "def _compute_qty(self):\n for rec in self:\n if rec.product_id:\n res = self.env['product.product'].search([('id', '=', rec.product_id.id)])._compute_quantities_dict(self._context.get('lot_id'), self._context.get('owner_id'), self._context.get('package_id'), self._context.get('from_date'), self._context.get('to_date'))\n if res:\n rec.qty_on_hand = res[rec.product_id.id]['qty_available']\n rec.forecasted_qty = res[rec.product_id.id]['virtual_available']\n # rec.available_to_sell = res[rec.product_id.id]['qty_available'] - res[rec.product_id.id]['outgoing_qty']\n else:\n rec.qty_on_hand = 0\n rec.forecasted_qty = 0\n # rec.available_to_sell = 0", "title": "" }, { "docid": "bca93d0275beb9cc9f56eb30da3d04a1", "score": "0.48722813", "text": "def computing_chunk(self, terms, start_date, end_date):", "title": "" }, { "docid": "47188e728ce6820b98ef53309182fc1d", "score": "0.48699966", "text": "def _calculate(self, nid_pt_dict, extras):\n pass", "title": "" }, { "docid": "91c585fac9d38793c93a7d5e199ec200", "score": "0.4866258", "text": "def process_intermediates(conf,itm,ltp) :\n \n # Initialize variables related to intermediates. \n sbalance=\"c\"+conf['Catalyst']['sitebalancespecies']+\":=(t)-> 1.0\" \n sodesolv=\"Solution:=dsolve({\" \n initialc=\"IC0:=\"\n rhsparse=\"\" \n index=1 \n # Initialize list-to-print for postprocessing\n ltp['prs']=[] # ltp of pressures and concentrations-in-second-layer. \n ltp['itm']=[\"sc\"+conf['Catalyst']['sitebalancespecies']] # ltp of interm.: init w/ s-b species \n #ltp['itm']=[conf['Catalyst']['sitebalancespecies']] # ltp of interm.: init w/ s-b species\n \n # Process intermediates, starting by adsorbed (cat), then gas. 
\n for item in sorted(itm) : \n # SERGIO: \n # for key,value in sorted(itm).items() : #key~item ; value~itM[item] (all line) \n # so the input of the sub-function will be the key and value\n if itm[item]['phase']=='cat' and item!=conf['Catalyst']['sitebalancespecies'] : \n # A surface species \n \n # Initialize diff equations to count in which reactions each species participate \n itm[item]['diff']=\"eqd\"+item+\":=diff(c\"+item+\"(t),t)=\" \n #value['diff']=\"eqd\"+key+\":=diff(c\"+key+\"(t),t)=\"\n \n # Prepare site balance \n sbalance+=\" -c\"+item+\"(t)\"\n \n # Prepare list of differential equations for the SODE solver \n sodesolv+=\"eqd\"+item+\", \"\n \n # Prepare list of default initial conditions as clean surface\n initialc+=\" c\"+item+\"(0.0)=0.0,\"\n \n # Prepare parser of concentrations after SODE is solved \n index+=1 # First element should be 1+1=2. Do not touch. \n rhsparse+=\"sc\"+item+\":=rhs(S[\"+str(index)+\"]) : \"\n \n # List of reactions for fprintf function in Maple \n ltp['itm'].append(\"sc\"+item)\n \n elif itm[item]['phase']=='gas' : \n # Get partial pressures \n try : \n itm[item]['pressure']=conf['Pressures'][item] \n except : \n itm[item]['pressure']=0 \n # Generate list of pressures\n ltp['prs'].append(\"P\"+item)\n \n elif itm[item]['phase']=='aqu' : \n # Get concentrations and convert to molecules/activesite. \n try : \n itm[item]['concentration']=(float(conf['Concentrations'][item])*\n float(conf['Catalyst']['areaactivesite'])*\n float(conf['Catalyst']['secondlayerthickness'])*\n avogadro*1E-27) \n except : \n itm[item]['concentration']=0.0 \n # Generate list-to-print of concentrations-in-the-second-layer; put along pressures. \n ltp['prs'].append(\"CSL\"+item) \n \n elif item!=conf['Catalyst']['sitebalancespecies'] : \n print(\"Unknown phase for \",item,itm[item]['phase'],\n \"\\n I only recognize 'aqu', 'cat', and 'gas'\") \n exit()\n \n # Close the site-balance equation \n sbalance=sbalance+\" : \" \n \n # Close the sodesolv\n sodesolv=sodesolv+\"IC0}, numeric, method=rosenbrock, maxfun=0, abserr=1E-16, interr=false);\"\n \n # In the initial conditions, replace the last comma by a colon \n initialc=initialc[:-1]+\" : \"\n \n return itm, sbalance, sodesolv, initialc, rhsparse", "title": "" }, { "docid": "77b40a2431201d569349f0a527c09ec4", "score": "0.48496044", "text": "def main():\n citation='This study'\n MeasRecs=[]\n units='cgs'\n meth=\"LP-HYS\"\n version_num=pmag.get_version()\n args=sys.argv\n fmt='old'\n er_sample_name,er_site_name,er_location_name=\"\",\"\",\"\"\n inst=\"\"\n er_location_name=\"unknown\"\n er_synthetic_name=\"\"\n user=\"\"\n er_site_name=\"\"\n dir_path='.'\n dm=3\n if \"-WD\" in args:\n ind=args.index(\"-WD\")\n dir_path=args[ind+1]\n if \"-ID\" in args:\n ind = args.index(\"-ID\")\n input_dir_path = args[ind+1]\n else:\n input_dir_path = dir_path\n output_dir_path = dir_path\n specfile = output_dir_path+'/er_specimens.txt'\n output = output_dir_path+\"/agm_measurements.txt\"\n if \"-h\" in args:\n print(main.__doc__)\n sys.exit()\n if \"-bak\" in args:\n meth=\"LP-IRM-DCD\"\n output = output_dir_path+\"/irm_measurements.txt\"\n if \"-new\" in args: fmt='new'\n if \"-usr\" in args:\n ind=args.index(\"-usr\")\n user=args[ind+1]\n if '-F' in args:\n ind=args.index(\"-F\")\n output = output_dir_path+'/'+args[ind+1]\n if '-f' in args:\n ind=args.index(\"-f\")\n agm_file= input_dir_path+'/'+args[ind+1]\n er_specimen_name=args[ind+1].split('.')[0]\n else:\n print(\"agm_file field is required option\")\n print(main.__doc__)\n 
sys.exit()\n if '-Fsp' in args:\n ind=args.index(\"-Fsp\")\n specfile= output_dir_path+'/'+args[ind+1]\n specnum,samp_con,Z=0,'1',1\n if \"-spc\" in args:\n ind=args.index(\"-spc\")\n specnum=int(args[ind+1])\n if specnum!=0:specnum=-specnum\n if \"-spn\" in args:\n ind=args.index(\"-spn\")\n er_specimen_name=args[ind+1]\n #elif \"-syn\" not in args:\n # print \"you must specify a specimen name\"\n # sys.exit()\n if \"-syn\" in args:\n ind=args.index(\"-syn\")\n er_synthetic_name=args[ind+1]\n er_specimen_name=\"\"\n if \"-loc\" in args:\n ind=args.index(\"-loc\")\n er_location_name=args[ind+1]\n if \"-fsa\" in args:\n ind=args.index(\"-fsa\")\n sampfile = input_dir_path+'/'+args[ind+1]\n Samps,file_type=pmag.magic_read(sampfile)\n print('sample_file successfully read in')\n if \"-ncn\" in args:\n ind=args.index(\"-ncn\")\n samp_con=sys.argv[ind+1]\n if \"4\" in samp_con:\n if \"-\" not in samp_con:\n print(\"option [4] must be in form 4-Z where Z is an integer\")\n sys.exit()\n else:\n Z=samp_con.split(\"-\")[1]\n samp_con=\"4\"\n if \"7\" in samp_con:\n if \"-\" not in samp_con:\n print(\"option [7] must be in form 7-Z where Z is an integer\")\n sys.exit()\n else:\n Z=samp_con.split(\"-\")[1]\n samp_con=\"7\"\n if \"-ins\" in args:\n ind=args.index(\"-ins\")\n inst=args[ind+1]\n if \"-u\" in args:\n ind=args.index(\"-u\")\n units=args[ind+1]\n dm = pmag.get_named_arg(\"-DM\", 2)\n ErSpecRecs,filetype=pmag.magic_read(specfile)\n ErSpecRec,MeasRec={},{}\n ErSpecRec['er_citation_names']=\"This study\"\n ErSpecRec['er_specimen_name']=er_specimen_name\n ErSpecRec['er_synthetic_name']=er_synthetic_name\n if specnum!=0:\n ErSpecRec[\"er_sample_name\"]=er_specimen_name[:specnum]\n else:\n ErSpecRec[\"er_sample_name\"]=er_specimen_name\n if \"-fsa\" in args and er_synthetic_name==\"\":\n for samp in Samps:\n if samp[\"er_sample_name\"] == ErSpecRec[\"er_sample_name\"]:\n ErSpecRec[\"er_location_name\"]=samp[\"er_location_name\"]\n ErSpecRec[\"er_site_name\"]=samp[\"er_site_name\"]\n break\n elif int(samp_con)!=6 and int(samp_con)!=8:\n site=pmag.parse_site(ErSpecRec['er_sample_name'],samp_con,Z)\n ErSpecRec[\"er_site_name\"]=site\n ErSpecRec[\"er_location_name\"]=er_location_name\n ErSpecRec['er_scientist_mail_names']=user.strip()\n insert=1\n for rec in ErSpecRecs:\n if rec['er_specimen_name']==er_specimen_name:\n insert=0\n break\n if insert==1:\n ErSpecRecs.append(ErSpecRec)\n ErSpecRecs,keylist=pmag.fillkeys(ErSpecRecs)\n pmag.magic_write(specfile,ErSpecRecs,'er_specimens')\n print(\"specimen name put in \",specfile)\n f=open(agm_file,'r')\n Data=f.readlines()\n if \"ASCII\" not in Data[0]:fmt='new'\n measnum,start=1,\"\"\n if fmt=='new': # new Micromag formatted file\n end=2\n for skip in range(len(Data)):\n line=Data[skip]\n rec=line.split()\n if 'Units' in line:units=rec[-1]\n if \"Raw\" in rec:\n start=skip+2\n if \"Field\" in rec and \"Moment\" in rec and start==\"\":\n start=skip+2\n break\n else:\n start = 2\n end=1\n for i in range(start,len(Data)-end): # skip header stuff\n\n MeasRec={}\n for key in list(ErSpecRec.keys()):\n MeasRec[key]=ErSpecRec[key]\n MeasRec['magic_instrument_codes']=inst\n MeasRec['magic_method_codes']=meth\n if 'er_synthetic_name' in list(MeasRec.keys()) and MeasRec['er_synthetic_name']!=\"\":\n MeasRec['magic_experiment_name']=er_synthetic_name+':'+meth\n else:\n MeasRec['magic_experiment_name']=er_specimen_name+':'+meth\n line=Data[i]\n rec=line.split(',') # data comma delimited\n if rec[0]!='\\n':\n if units=='cgs':\n field =float(rec[0])*1e-4 # convert from 
oe to tesla\n else:\n field =float(rec[0]) # field in tesla\n if meth==\"LP-HYS\":\n MeasRec['measurement_lab_field_dc']='%10.3e'%(field)\n MeasRec['treatment_dc_field']=''\n else:\n MeasRec['measurement_lab_field_dc']=''\n MeasRec['treatment_dc_field']='%10.3e'%(field)\n if units=='cgs':\n MeasRec['measurement_magn_moment']='%10.3e'%(float(rec[1])*1e-3) # convert from emu to Am^2\n else:\n MeasRec['measurement_magn_moment']='%10.3e'%(float(rec[1])) # Am^2\n MeasRec['treatment_temp']='273' # temp in kelvin\n MeasRec['measurement_temp']='273' # temp in kelvin\n MeasRec['measurement_flag']='g'\n MeasRec['measurement_standard']='u'\n MeasRec['measurement_number']='%i'%(measnum)\n measnum+=1\n MeasRec['magic_software_packages']=version_num\n MeasRecs.append(MeasRec)\n# now we have to relabel LP-HYS method codes. initial loop is LP-IMT, minor loops are LP-M - do this in measurements_methods function\n if meth=='LP-HYS':\n recnum=0\n while float(MeasRecs[recnum]['measurement_lab_field_dc'])<float(MeasRecs[recnum+1]['measurement_lab_field_dc']) and recnum+1<len(MeasRecs): # this is LP-IMAG\n MeasRecs[recnum]['magic_method_codes']='LP-IMAG'\n MeasRecs[recnum]['magic_experiment_name']=MeasRecs[recnum]['er_specimen_name']+\":\"+'LP-IMAG'\n recnum+=1\n#\n if int(dm)==2:\n pmag.magic_write(output,MeasRecs,'magic_measurements')\n else:\n print ('MagIC 3 is not supported yet')\n sys.exit()\n pmag.magic_write(output,MeasRecs,'measurements')\n\n print(\"results put in \", output)", "title": "" }, { "docid": "fcffad81a8ae02174d2f5af1859f3766", "score": "0.48480514", "text": "def _parallel_mc(processes, path_stem, calc_formula, l, M, build_output, variable_name, sigma_function, interp_fn, angles_fn, Lmin, Lmax, wavetype, dictionary_type, iteration): \n \n # putting runs into queues\n in_queue = mp.Queue()\n out_queue = mp.Queue()\n future_res = []\n for i in range(processes):\n path = path_stem + str(i) + '/'\n if not os.path.exists(path):\n\t os.makedirs(path)\n \n future_res.append(mp.Process(target = multilevel, args = (path, in_queue, out_queue)))\n future_res[-1].start()\n\n for j in range(iteration):\n in_queue.put((l, M, build_output, sigma_function, variable_name, Lmin, Lmax, wavetype, dictionary_type, interp_fn))\n # send stop signals\n for i in range(processes):\n in_queue.put('stop')\n \n # collect output \n results = []\n for i in range(iteration):\n if (i+1)%1000 == 0:\n print(i)\n results.append(out_queue.get())\n\n outputf = [f[0] for f in results]\n outputc = [f[1] for f in results]\n outputfc = 0\n outputfc2 = 0\n outputfc3 = 0\n outputfc4 = 0\n outputfsum = 0\n outputf2sum = 0\n \n # record output values in csv (necessary for doing inverse sampling)\n pd.DataFrame(outputf).to_csv('outputf' + str(l) + '.csv')\n pd.DataFrame(outputc).to_csv('outputc' + str(l) + '.csv') \n \n for i in range(len(outputf)):\n outputfc += (outputf[i] - outputc[i])\n outputfc2 += ((outputf[i] - outputc[i])**2)\n outputfc3 += ((outputf[i] - outputc[i])**3)\n outputfc4 += ((outputf[i] - outputc[i])**4)\n outputfsum += outputf[i]\n outputf2sum += outputf[i]**2\n \n sums = np.array([outputfc, outputfc2, outputfc3, outputfc4, outputfsum, outputf2sum])\n return sums", "title": "" }, { "docid": "95fea25777fc655115e2962788af36a4", "score": "0.48464578", "text": "def run(self, generations):\n self.calculate_fitnesses()\n\n #-----------------------------------------------------------------------\n # Print a short summary\n #-----------------------------------------------------------------------\n print 'population size=%d, 
representation=%s, ' \\\n 'crossover probability=%f, mutation probability=%f, ' \\\n 'elite count=%d' \\\n % (len(self.population), self.representation,\n self.crossover_probability, self.mutation_probability,\n self.elite_count)\n print 'selection scheme=%s, crossover scheme=%s, mutation scheme=%s, ' \\\n 'fitness function=%s, natural_fitness=%s' \\\n % (self.selection_func, self.crossover_func,\n self.mutation_func,\n self.fitness_func, self.natural_fitness)\n\n print 'generation=0, total fitness=%d, mean fitness=%s, ' \\\n 'min individual=%s (len=%d), max individual=%s (len=%d)' \\\n % (self.total_fitness(), self.mean_fitness(),\n self.min_individual().fitness(),\n len(self.min_individual()),\n self.max_individual().fitness(),\n len(self.max_individual()))\n\n self.plotter.update(self.mean_fitness(),\n self.max_individual().fitness(),\n self.min_individual().fitness())\n\n for i in self.population:\n if hasattr(i, 'average_sigmas') and i.average_sigmas is not None:\n self.average_sigmas.append(sum(i.average_sigmas)\n / len(i.average_sigmas))\n\n # print self.average_sigmas\n # print len(self.average_sigmas)\n\n #-----------------------------------------------------------------------\n # Loop for each generation\n #-----------------------------------------------------------------------\n for i in xrange(1, generations):\n #-------------------------------------------------------------------\n # Perform elitism\n #-------------------------------------------------------------------\n self.store_elites()\n\n #-------------------------------------------------------------------\n # Select the mating pool\n #-------------------------------------------------------------------\n self.select_parents()\n\n #-------------------------------------------------------------------\n # Apply crossover\n #-------------------------------------------------------------------\n self.crossover(self.crossover_probability)\n\n #-------------------------------------------------------------------\n # Apply mutation\n #-------------------------------------------------------------------\n self.mutate(self.mutation_probability)\n\n #-------------------------------------------------------------------\n # Re-add the elites to the population\n #-------------------------------------------------------------------\n self.load_elites()\n\n #-------------------------------------------------------------------\n # Recalculate fitnesses\n #-------------------------------------------------------------------\n self.calculate_fitnesses()\n\n min_individual = self.min_individual()\n max_individual = self.max_individual()\n\n # print 'generation=%d, total fitness=%d, mean fitness=%s, ' \\\n # 'min individual=%s (%s), max individual=%s (%s)' \\\n # % (i, self.total_fitness(), self.mean_fitness(),\n # min_individual.genes,\n # min_individual.raw_fitness(),\n # max_individual.genes,\n # max_individual.raw_fitness())\n\n print 'generation=%d, total fitness=%d, mean fitness=%s, ' \\\n 'min individual=%s (len=%d), max individual=%s (len=%d)' \\\n % (i, self.total_fitness(), self.mean_fitness(),\n self.min_individual().fitness(),\n len(self.min_individual()),\n self.max_individual().fitness(),\n len(self.max_individual()))\n\n self.plotter.update(self.mean_fitness(),\n max_individual.fitness(),\n min_individual.fitness())\n\n for i in self.population:\n if hasattr(i, 'average_sigmas') and i.average_sigmas is not None:\n self.average_sigmas.append(sum(i.average_sigmas)\n / len(i.average_sigmas))\n\n # print self.average_sigmas\n # print 
len(self.average_sigmas)", "title": "" }, { "docid": "6ea960b501edf7928fac80cd732c89be", "score": "0.4834861", "text": "def createSequenceDict(submitted_data):\n\n start_time = datetime.now()\n sys.stdout.write(\"\\nStarted analysis on: \" +str(start_time)+ \" for the job with id: \" +str(submitted_data.uuid)+\"\\n\")\n\n inputDIR = os.path.join(MEDIA_ROOT, 'species/')\n outputDIRPATH = os.path.join(MEDIA_ROOT, 'output/')\n workingDIR = os.path.join(outputDIRPATH, str(submitted_data.uuid)) + '/'\n\n variety_list = []\n variety_consensus_files = []\n data_dict = {}\n\n jobid = str(submitted_data.uuid)\n outfmts = submitted_data.outputfmt.split(',')\n\n \n\n for entry in submitted_data.varieties.all():\n if not os.path.exists(inputDIR): # make input directory\n os.makedirs(inputDIR)\n\n if not os.path.exists(workingDIR): # make output directory\n os.makedirs(workingDIR)\n\n varietiesDIR = os.path.join(MEDIA_ROOT, entry.location)\n if not os.path.exists(varietiesDIR):\n os.makedirs(varietiesDIR)\n\n\n\n\n sequence_file = os.path.join(workingDIR, str(submitted_data.uuid) + '.fa')\n reference_basename = os.path.basename(str(entry.species.species_file))\n reference_file = os.path.join(inputDIR, reference_basename)\n variety_consensus_basename = os.path.basename(str(entry.variety_consensus))\n variety_consensus_file = os.path.join(varietiesDIR, variety_consensus_basename)\n\n\n # get varieties selected\n variety_list.append(entry.variety_name)\n variety_consensus_files.append(variety_consensus_file)\n\n # get sequence submitted\n sequenceContent = StringIO(str(submitted_data.sequence)) # get the content of the gene ids submitted\n sequenceData = sequenceContent.getvalue()\n if not os.path.exists(sequence_file):\n with open(sequence_file, 'w') as fhandle:\n fhandle.write(sequenceData)\n data_dict[jobid] = [variety_list, variety_consensus_files, reference_file, sequence_file, workingDIR, outfmts]\n return data_dict", "title": "" }, { "docid": "7506c144da01e2660f4886ca8e61000c", "score": "0.48324946", "text": "def demands_from_args(args: Dict, graphs: List[nx.DiGraph]) -> List[List[\n List[Tuple[np.ndarray, float]]]]:\n demands_per_graph = []\n for graph in graphs:\n num_demands = graph.number_of_nodes() * (graph.number_of_nodes() - 1)\n # because the first n demands are used to build the history\n sequence_length = args['sequence_length'] + args['memory_length']\n\n # select demand type:\n if args['demand_type'] == 'bimodal':\n dm_getter = lambda rs_l: dm.bimodal_demand(num_demands, rs_l)\n elif args['demand_type'] == 'gravity':\n dm_getter = lambda _: dm.gravity_demand(graph)\n else:\n raise Exception(\"No such demand type\")\n\n # select sequence type:\n if args['sequence_type'] == 'cyclical':\n dm_sequence_getter = lambda seed, q: dm.cyclical_sequence(\n dm_getter,\n sequence_length, q, args['sparsity'],\n seed=seed)\n elif args['sequence_type'] == 'average':\n dm_sequence_getter = lambda seed, q: dm.average_sequence(\n dm_getter,\n sequence_length, q, args['sparsity'],\n seed=seed)\n elif args['sequence_type'] == 'totem':\n dm_sequence_getter = lambda seed, _: dm.totem_sequence(\n sequence_length, seed=seed)\n else:\n raise Exception(\"No such sequence type\")\n\n # combine cycle lengths with the seeds\n demand_specs = args['demand_seeds']\n if 'demand_qs' in args:\n demand_specs = list(zip(demand_specs, args['demand_qs']))\n elif 'cycle_length' in args:\n demand_specs = [(d, args['cycle_length']) for d in demand_specs]\n\n mlu = MaxLinkUtilisation(graph)\n demand_sequences = 
[dm_sequence_getter(s, q) for (s, q) in demand_specs]\n demands_with_opt = [[(demand, mlu.opt(demand)) for demand in sequence]\n for\n sequence in demand_sequences]\n demands_per_graph.append(demands_with_opt)\n return demands_per_graph", "title": "" }, { "docid": "cd3458b8d7e3d976283e48788e9b8b25", "score": "0.48322526", "text": "def _do_em_iteration(\n scene,\n observation_reduction,\n transition_reductions,\n state_reductions):\n process_count = scene['process_count']\n node_count = scene['node_count']\n edge_count = node_count - 1\n edge_rates = [None] * edge_count\n edge_processes = scene['tree']['edge_processes']\n for i in range(process_count):\n\n # Extract the transition reduction and the state reduction.\n # Use these to define expectation requests.\n if observation_reduction is not None:\n trans_request = dict(\n property = 'WDNTRAN',\n observation_reduction = observation_reduction,\n transition_reduction = transition_reductions[i])\n dwell_request = dict(\n property = 'WDWDWEL',\n observation_reduction = observation_reduction,\n state_reduction = state_reductions[i])\n else:\n trans_request = dict(\n property = 'SDNTRAN',\n transition_reduction = transition_reductions[i])\n dwell_request = dict(\n property = 'SDWDWEL',\n state_reduction = state_reductions[i])\n\n # Compute the expectations.\n j_in = dict(\n scene = scene,\n requests = [trans_request, dwell_request])\n j_out = interface.process_json_in(j_in)\n trans_response, dwell_response = j_out['responses']\n\n # Update the edge rates for edges whose associated process index is i.\n for edge_idx in range(edge_count):\n if edge_processes[edge_idx] == i:\n transitions = trans_response[edge_idx]\n opportunity = dwell_response[edge_idx]\n edge_rate = transitions / opportunity\n edge_rates[edge_idx] = edge_rate\n\n # Assert that every edge rate has been defined.\n for edge_rate in edge_rates:\n assert_(edge_rate is not None)\n\n # Return the new edge rates for this EM iteration.\n return edge_rates", "title": "" }, { "docid": "c3744c0c4d2e401829e0b50eb40fcee2", "score": "0.48256755", "text": "def main():\n\t#Datasets\n\t#exp_list = [,'Snoek_2012','Keurentjes_2007']\n\texp_list = ['Ligterink_2014']\n\n\t#Variables\n\tchromosome = [1,2,3,4,5]\n\tcutoff_list = [3]#[6.7, 4.3, 3]\n\tgxe_bool = True\n\n\trestart_necessary = False\n\t\n\tfor dataset in exp_list:\n\t\t\n\t\t#make a new folder for the dataset\n\t\tif not gxe_bool:\n\t\t\tstorage_folder = \"%s/%s/genelist_%s\"%(mr_folder, glist_folder, dataset)\n\t\telse:\n\t\t\tstorage_folder = \"%s/%s/genelist_%s_gxe\"%(mr_folder, glist_folder, dataset)\n\t\tif not os.path.exists(storage_folder):\n\t\t\tos.mkdir(storage_folder)\n\t\t\n\t\tfor cutoff in cutoff_list:\n\t\t\t\n\t\t\tif not gxe_bool:\n\t\t\t\ttraitfile = \"%s/%s/emr_traitlist_%s_co%s.txt\"%(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tmr_folder, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\ttrait_folder, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tdataset, cutoff\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t)\n\t\t\t\tfname = \"%s/eqtls_%s_co%s.txt\"%(storage_folder, dataset, cutoff)\n\t\t\t\n\t\t\telse:\n\t\t\t\ttraitfile = \"%s/%s/emr_traitlist_%s_gxe_co%s.txt\"%(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tmr_folder, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\ttrait_folder, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tdataset, cutoff\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t)\n\t\t\t\tfname = \"%s/eqtls_%s_gxe_co%s.txt\"%(storage_folder, dataset, cutoff)\t\n\n\n\t\t\ttraitlist = get_traits(dataset, cutoff, chromosome, traitfile)\n\t\t\t##################################\n\t\t\tif 
restart_necessary:\n\t\t\t\tlast_trait = \"AT3G47450\"\n\t\t\t\ttrait_index = traitlist.index(last_trait)\n\t\t\t\tnew_traitlist = traitlist[trait_index:]\n\t\t\t##################################\n\t\t\tprint \"--------------------------------------------------\"\n\t\t\t\n\t\t\tte_data = []\n\t\t\tfor trait in traitlist:\n\t\t\t\tprint trait\n\t\t\t\teQTLs = give_eqtls_for_trait(trait, cutoff, dataset, gxe_bool)\n\t\t\t\tif eQTLs:\n\t\t\t\t\tte_data.append([trait, eQTLs])\n\t\t\t\t\n\t\t\twrite_eqtls_to_file(fname, te_data)\n\t\t\t\n\t\t\tdel te_data\n\t\t\tdel traitlist\t\t\t\t\t\n\t\t\tgc.collect()", "title": "" }, { "docid": "4b9aa39137577a75d54cb5ab853a091c", "score": "0.48130324", "text": "def initialize(self,t0=0.0):\n \n # An connection_distribution_list (store unique connection(defined by weight,syn,prob))\n self.connection_distribution_collection = ConnectionDistributionCollection() # this is \n self.t = t0\n\n # Matrix to record \n numCGPatch = self.Net_settings['nmax'] * 2 # excitatory and inhibitory\n # 2 * numCGPatch = External Population and Recurrent Population\n # set Matrix to record only Internal Population\n self.m_record = np.zeros((numCGPatch+1, self.ntt + 10)) \n \n # put all subpopulation and all connections into the same platform\n for subpop in self.population_list:\n subpop.simulation = self # .simulation = self(self is what we called 'simulation')\n for connpair in self.connection_list:\n connpair.simulation = self\n \n # initialize population_list, calculate \n for p in self.population_list:\n p.initialize() # 2 \n \n for c in self.connection_list:\n #print 'initialize population'\n c.initialize() # 1\n \n # Calculate MFE-probability\n self.iteration_max = self.ntt + 100\n iteration_max = self.iteration_max\n self.tbin_tmp = 0\n self.tbinsize = 1.0\n dtperbin = int(self.tbinsize/self.dt)\n \n iteration_bin = int(iteration_max/dtperbin)\n NPATCH,NE,NI = self.Net_settings['hyp_num'],self.NE,self.NI\n \n # Parameters and Variables recorded\n self.mEbin_ra = np.zeros((iteration_bin,NPATCH))\n self.mIbin_ra = np.zeros((iteration_bin,NPATCH))\n# self.HNMDAEbin_ra = np.zeros((iteration_bin,NPATCH))\n# self.HNMDAIbin_ra = np.zeros((iteration_bin,NPATCH))\n self.NMDAEbin_ra = np.zeros((iteration_bin,NPATCH))\n self.NMDAIbin_ra = np.zeros((iteration_bin,NPATCH))\n self.P_MFEbin_ra = np.zeros((iteration_bin,NPATCH))\n # MFE or Not\n self.P_MFE_eff = np.zeros((iteration_max,2)) # ,0] Prob and ,1] index\n # rhov \n self.rEbin_ra = np.zeros((NPATCH,200,iteration_bin))\n self.rIbin_ra = np.zeros((NPATCH,200,iteration_bin))\n # STILL NEED _RA \n self.LE_ra = np.zeros((iteration_max,NPATCH))\n self.LI_ra = np.zeros((iteration_max,NPATCH))\n \n # Prepare for MFE\n DEE,DIE,DEI,DII = self.DEE,self.DIE,self.DEI,self.DII \n vT = 1.0\n dv = self.Net_settings['dv']\n self.Vedges = util.get_v_edges(-1.0,1.0,dv)\n # Nodes(Vbins) = Nodes(Vedges)-1\n self.Vbins = 0.5*(self.Vedges[:-1] + self.Vedges[1:])\n # >>>>>>>>>>>>>>>>>>> splin\n self.Vedgesintp = util.get_v_edges(-1.0,1.0,1e-3)\n self.Vbinsintp = 0.5*(self.Vedgesintp[:-1] + self.Vedgesintp[1:])\n # >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n Vedges = self.Vedges.copy()\n Vbins = self.Vbins.copy()\n \n Vedgesintp = self.Vedgesintp.copy()\n Vbinsintp = self.Vbinsintp.copy()\n \n idx_vT = len(Vedgesintp) - 1\n idx_kickE,idx_kickI = np.zeros((NPATCH,NPATCH),dtype= int), np.zeros((NPATCH,NPATCH),dtype= int)\n for it in range(self.NPATCH):\n for js in range(self.NPATCH):\n value_kickE = vT - DEE[it,js]\n value_kickI = 
vT - DIE[it,js]\n \n Ind_k1 = np.where(Vedgesintp>value_kickE)\n IndI_k1 = np.where(Vedgesintp>value_kickI)\n if np.shape(Ind_k1)[1]>0:\n idx_kickE[it,js] = Ind_k1[0][0]\n else:\n idx_kickE[it,js] = idx_vT\n if np.shape(IndI_k1)[1]>0:\n idx_kickI[it,js] = IndI_k1[0][0]\n else:\n idx_kickI[it,js] = idx_vT\n self.idx_kickE,self.idx_kickI = idx_kickE,idx_kickI\n print('kick!>>>',self.idx_kickE)\n self.idx_vT = idx_vT\n self.MFE_pevent = np.zeros(self.NPATCH)\n self.p_single = np.zeros(self.NPATCH)", "title": "" }, { "docid": "8feacbc53d48fa1e77cea4c83914b6f3", "score": "0.48046803", "text": "def algorithm_parallel1(self):\n global logger\n comm = MPI.COMM_WORLD\n rank = MPI.COMM_WORLD.Get_rank()\n if 'MA' in self.debug:\n debug = True\n else:\n debug = False\n if rank==0:\n self.algorithm_initialize()\n logger.info('Beginning main algorithm loop')\n #Begin main algorithm loop\n self.convergence = False\n convergence=False\n while not convergence:\n if rank==0:\n pop = self.population\n offspring = self.generation_set(pop)\n # Identify the individuals with an invalid fitness\n invalid_ind = [ind for ind in offspring if ind.fitness==0]\n #Evaluate the individuals with invalid fitness\n self.output.write('\\n--Evaluate Structures--\\n')\n ntimes=int(math.ceil(float(len(invalid_ind))/float(comm.Get_size())))\n nadd=int(ntimes*comm.Get_size()-len(invalid_ind))\n maplist=[[] for n in range(ntimes)]\n strt=0\n for i in range(len(maplist)):\n maplist[i]=[[self,indi] for indi in invalid_ind[strt:comm.Get_size()+strt]]\n strt+=comm.Get_size()\n for i in range(nadd):\n maplist[len(maplist)-1].append([None,None])\n# for i in range(ntimes):\n# for j in range(\n else:\n ntimes=None\n# worker = MPI.COMM_SELF.Spawn(cmd, None, 5)\n# \n# n = array('i', [100])\n# worker.Bcast([n,MPI.INT], root=MPI.ROOT)\n# \n# pi = array('d', [0.0])\n# worker.Reduce(sendbuf=None,\n# recvbuf=[pi, MPI.DOUBLE],\n# op=MPI.SUM, root=MPI.ROOT)\n# pi = pi[0]\n# \n# worker.Disconnect()\n# ntimes = comm.bcast(ntimes,root=0)\n outs=[]\n for i in range(ntimes):\n if rank==0:\n one=maplist[i]\n else:\n one=None\n ind =comm.scatter(one,root=0)\n out = switches.fitness_switch(ind)\n outt = comm.gather(out,root=0)\n if rank==0:\n outs.extend(outt)\n if rank==0:\n for i in range(len(invalid_ind)):\n invalid_ind[i] = outs[i][0]\n for i in range(len(outs)):\n self.output.write(outs[i][1])\n pop.extend(invalid_ind)\n pop = self.generation_eval(pop)\n self.write()\n convergence =comm.bcast(self.convergence, root=0)\n \n if rank==0:\n logger.info('Run algorithm stats')\n end_signal = self.algorithm_stats(self.population)\n else:\n end_signal = None\n end_signal = comm.bcast(end_signal, root=0)\n return end_signal", "title": "" }, { "docid": "d9204f6d3fd23471b73da33626e54006", "score": "0.48038998", "text": "def simulations_fish_micro(Tech_opdict_distributions,\n Biodict_distributions,\n Locationdict_distributions,\n Physicdict_distributions,\n Fish_farm_and_compound_effect_dict_distributions,\n Production_mix_dict_distributions,\n size, \n Tech_opdict,\n Biodict,\n Locationdict,\n Physicdict,\n Fish_farm_and_compound_effect_dict,\n Production_mix_dict,\n LCIdict_micro,\n months_suitable_for_cultivation,\n fishfeed_table_withNP,\n elemental_contents,\n size_nested_sample,\n list_points_grid,\n demand_vector,\n Techno_Matrix_Fish,\n list_FU_combined_names_mc,\n list_array_total_mc_sorted,\n activities_fish_background,\n filter_names_ok_activities_fish,\n list_processes_electricity_micro,\n list_meth,\n 
Dict_incumbent_losses_growth_stages_loss_level,\n Dict_incumbent_losses_growth_stages_loss_red,\n Dict_incumbent_outputs_growth_stages_loss_red,\n Dict_incumbent_outputs_growth_stages_loss_level,\n biochem_profile_feed_incumbent,\n N_P_profile_feed_incumbent,\n ingredient_profile_incumbent,\n index_growth_stages,\n index_feed,\n index_dead_fish,\n digestibility_list,\n index_Nemissions,\n index_Pemissions,\n index_growth_stages_no_filter_laguna,\n index_sludge,\n electricity_low_voltage_input_per_m3_wastewater,\n electricity_high_voltage_input_per_m3_wastewater,\n index_roe,\n Dict_FCR_bio,\n list_cfs,\n ECO_FCR_0,\n ratio_loss_biological_INC_0,\n index_micro_compound,\n index_growing_DK,\n index_300g,\n drug_inputs_names,\n index_FU,\n dict_correspondance_techno_growth_stagenames,\n index_growth_stages_to_modif,\n bio_FCR_0,\n index_biogas_updgrade,\n index_N_substitution,\n index_P_substitution,\n index_heat_substitution): \n \n \n\n # Generate microalgae sample\n \n timeA=time()\n\n \n output_sample_fish_micro = sampling_func_total_montecarlo(Tech_opdict_distributions,\n Biodict_distributions,\n Locationdict_distributions, \n Physicdict_distributions,\n Fish_farm_and_compound_effect_dict_distributions,\n Production_mix_dict_distributions,\n size)\n \n\n \n sample_total_input=output_sample_fish_micro[0]\n \n \n names_param_set=output_sample_fish_micro[1]\n \n \n names_param_op=output_sample_fish_micro[2]\n \n names_param_bio=output_sample_fish_micro[3]\n \n names_param_geo=output_sample_fish_micro[4]\n \n names_param_phy=output_sample_fish_micro[5]\n \n names_param_fish_farm_compound_effect=output_sample_fish_micro[6]\n \n names_param_prod_mix=output_sample_fish_micro[7]\n \n nested_list_techno_op=output_sample_fish_micro[8]\n \n\n\n names_values_simu = ['bioact_molec_dbio',\n 'surfaceyield',\n 'tubelength',\n 'facilityvolume',\n 'totalcooling_thermal',\n 'volumetricyield',\n 'total_production_kg_dw',\n 'total_production_harvested_kg_dw']\n\n \n time1=time()\n \n \"\"\" Calculate LCIs in parallel\"\"\"\n\n # Start Ray.\n ray.shutdown()\n \n ray.init()\n \n # Inputs common to all tasks\n constant_inputs = ray.put([Tech_opdict,\n Biodict,\n Locationdict,\n Physicdict,\n Fish_farm_and_compound_effect_dict,\n Production_mix_dict,\n LCIdict_micro,\n months_suitable_for_cultivation,\n fishfeed_table_withNP,\n elemental_contents,\n names_param_op,\n names_param_bio,\n names_param_geo,\n names_param_phy,\n names_param_fish_farm_compound_effect,\n names_param_prod_mix,\n names_param_set,\n size_nested_sample,\n nested_list_techno_op,\n list_points_grid,\n demand_vector,\n Techno_Matrix_Fish,\n names_values_simu,\n biochem_profile_feed_incumbent,\n index_growth_stages,\n index_feed,\n index_dead_fish,\n index_Nemissions,\n index_Pemissions,\n index_growth_stages_no_filter_laguna,\n index_sludge,\n electricity_low_voltage_input_per_m3_wastewater,\n electricity_high_voltage_input_per_m3_wastewater,\n ingredient_profile_incumbent,\n N_P_profile_feed_incumbent,\n digestibility_list,\n index_roe,\n Dict_FCR_bio,\n ECO_FCR_0,\n ratio_loss_biological_INC_0,\n index_micro_compound,\n index_growing_DK,\n index_300g,\n index_FU,\n list_meth,\n dict_correspondance_techno_growth_stagenames,\n index_growth_stages_to_modif,\n bio_FCR_0,\n index_biogas_updgrade,\n index_N_substitution,\n index_P_substitution,\n index_heat_substitution]) \n\n \n # Calculate all the LCIs (foreground supply vectors)\n arrayresult_raw =ray.get([calculateLCI_1param_parallel.remote(constant_inputs,\n param_set) for param_set in 
sample_total_input]) \n\n \n time_B=time()\n time_LCI =time_B-timeA\n print(\"time_LCI\",time_LCI)\n\n\n \"\"\" Calculate LCIAs in parallel by combining LCis with mc results for the background\"\"\"\n \n list_micro_algae_names = [a for a in LCIdict_micro]\n \n # Inputs common to all tasks\n constant_inputs_LCIA = ray.put([list_micro_algae_names,\n filter_names_ok_activities_fish,\n list_FU_combined_names_mc,\n activities_fish_background,\n list_processes_electricity_micro,\n list_meth,\n Dict_incumbent_losses_growth_stages_loss_level,\n Dict_incumbent_losses_growth_stages_loss_red,\n Dict_incumbent_outputs_growth_stages_loss_red,\n Dict_incumbent_outputs_growth_stages_loss_level,\n names_param_set,\n Fish_farm_and_compound_effect_dict,\n nested_list_techno_op,\n list_cfs,\n drug_inputs_names])\n \n\n\n # Calculate all the LCIAs \n arrayresult_LCIA = ray.get([LCIA_parallel.remote(constant_inputs_LCIA,\n row_LCI,row_mc) for row_LCI,row_mc in zip(arrayresult_raw,list_array_total_mc_sorted)])\n \n \n ray.shutdown()\n \n print(\"Done with LCIAS\")\n time_LCIA=time()-time_B\n print(\"time_LCIA\",time_LCIA)\n \n\n # Rebuild a proper dataframe\n \n # Separate the contributions and the absolute results\n \n table_contrib=[pd.DataFrame(np.zeros([len(arrayresult_LCIA),len(activities_fish_background)]),columns=activities_fish_background)for meth in range(len(list_meth))]\n\n \n list_result_LCIA_without_contrib =[]\n\n\n\n print(\"Now sorting results\")\n \n for row_index in range(len(arrayresult_LCIA)):\n \n row_LCIA =arrayresult_LCIA[row_index] # Collect the row containing the LCIA\n \n\n row_performance_indicators = arrayresult_raw[row_index][-1] # Collect the row containing the performance indicators\n \n list_result_LCIA_without_contrib.append(row_LCIA[0]+row_performance_indicators)\n\n\n \n for index_meth in range(len(list_meth)):\n \n table_contrib[index_meth] = row_LCIA[-1][index_meth] \n\n\n # Collect the rest\n\n \n activities_fish_background_inc = [name+\"_inc\" for name in activities_fish_background]\n \n list_meth_short = [meth[-1] for meth in list_meth]\n \n names_total_INC = [meth+\"INC\" for meth in list_meth_short]\n names_total_AH = [meth+\"AH\" for meth in list_meth_short]\n names_ratio = [meth+\"AH/INC\" for meth in list_meth_short]\n \n\n\n names_nested = [a[0]+\"mean\" for a in nested_list_techno_op ]\n \n \n # Columns'names for the performance indicators\n list_name_impact_drugs=[]\n \n for meth in list_meth:\n name= \"impact_drug_\"+meth[-1]\n \n list_name_impact_drugs.append(name)\n\n name_indicators =[\"Loss_fraction_increase_INC_indic\",\n \"Loss_fraction_increase_AH_indic\",\n \"Input_micro_comp_per_FU_indic\",\n \"Economic_FCR_red_0_indic\",\n \"Economic_FCR_red_1_indic\",\n \"Economic_FCR_red_rate_1\",\n \"ECO_FCR_AH_1\",\n \"Biological_FCR_red_1_indic\",\n \"Biological_FCR_red_0_indic\",\n \"Biological_FCR_red_rate_1\",\n \"dose_per_total_bio_AH_1\",\n \"Economic_FCR_red_rate_1_tot_bio\",\n \"Biological_FCR_red_rate_1_tot_bio\"]+list_name_impact_drugs\n \n\n\n names_col_dataframe = names_param_set + names_nested + list_micro_algae_names + names_values_simu + names_total_AH + names_total_INC + names_ratio + name_indicators\n\n\n\n\n results_table_df = pd.DataFrame(np.array(list_result_LCIA_without_contrib), columns=names_col_dataframe) \n\n\n return results_table_df, table_contrib", "title": "" }, { "docid": "1825a806b6c6f69dff9a130cb13a928c", "score": "0.48025692", "text": "def algorithm_parallel(self):\n global logger\n comm = MPI.COMM_WORLD\n rank = 
MPI.COMM_WORLD.Get_rank()\n if 'MA' in self.debug:\n debug = True\n else:\n debug = False\n if rank==0:\n self.algorithm_initialize()\n logger.info('Beginning main algorithm loop')\n #Begin main algorithm loop\n self.convergence = False\n convergence=False\n while not convergence:\n if rank==0:\n pop = self.population\n offspring = self.generation_set(pop)\n # Identify the individuals with an invalid fitness\n invalid_ind = [ind for ind in offspring if ind.fitness==0]\n #Evaluate the individuals with invalid fitness\n self.output.write('\\n--Evaluate Structures--\\n')\n ntimes=int(math.ceil(float(len(invalid_ind))/float(comm.Get_size())))\n nadd=int(ntimes*comm.Get_size()-len(invalid_ind))\n maplist=[[] for n in range(ntimes)]\n strt=0\n for i in range(len(maplist)):\n maplist[i]=[indi for indi in invalid_ind[strt:comm.Get_size()+strt]]\n # maplist[i]=[[self,indi] for indi in invalid_ind[strt:comm.Get_size()+strt]]\n strt+=comm.Get_size()\n for i in range(nadd):\n maplist[len(maplist)-1].append(None)\n else:\n ntimes=None\n ntimes = comm.bcast(ntimes,root=0)\n outs=[]\n for i in range(ntimes):\n if rank==0:\n one=maplist[i]\n else:\n one=None\n one = comm.scatter(one,root=0)\n logger.info('M:geometry scattering')\n out = switches.fitness_switch(self,one)\n logger.info('M:fitness evaluation done')\n out = comm.gather(out,root=0)\n logger.info('M:gethering done')\n if rank==0:\n outs.extend(out)\n if rank==0:\n for i in range(len(invalid_ind)):\n invalid_ind[i] = outs[i][0]\n for i in range(len(outs)):\n self.output.write(outs[i][1])\n pop.extend(invalid_ind)\n pop = self.generation_eval(pop)\n self.write()\n convergence =comm.bcast(self.convergence, root=0)\n if rank==0:\n logger.info('Run algorithm stats, generation:{0}'.format(self.generation))\n end_signal = self.algorithm_stats(self.population)\n else:\n end_signal = None\n end_signal = comm.bcast(end_signal, root=0)\n return end_signal", "title": "" }, { "docid": "d1ababc5bd1c44162845df345b5a232b", "score": "0.4797549", "text": "def requires(self):\n for samp, fastq in self.fastq_dic.items():\n map_dir = os.path.join(self.workdir, \"processes\", \"mapping\", samp)\n trim_dir = os.path.join(self.workdir, \"processes\", \"qc\", samp)\n stng_dir = os.path.join(self.workdir, \"processes\", \"stringtie\", samp)\n if os.path.isdir(stng_dir) is False:\n os.makedirs(stng_dir)\n if self.kingdom in ['prokarya', 'eukarya']:\n if self.kingdom == 'prokarya':\n apd = '_prok'\n elif self.kingdom == 'eukarya':\n apd = '_euk'\n yield StringTieScores(num_cpus=self.num_cpus,\n gff_file=self.gff_file,\n out_gtf=os.path.join(stng_dir, samp + apd + \"_sTie.gtf\"),\n out_cover=os.path.join(stng_dir, samp + apd + \"_covered_sTie.gtf\"),\n out_abun=os.path.join(stng_dir, samp + apd + \"_sTie.tab\"),\n in_bam_file=os.path.join(map_dir, samp + \"_srt.bam\"))\n elif self.kingdom == 'both':\n prok_gff = self.gff_file.split(\",\")[0]\n euk_gff = self.gff_file.split(\",\")[1]\n yield StringTieScores(gff_file=self.gff_file.split(\",\")[0],\n num_cpus=self.num_cpus,\n out_gtf=stng_dir + \"/\" + samp + \"_prok\" + \"_sTie.gtf\",\n out_cover=stng_dir + \"/\" + samp + \"_prok\" + \"_covered_sTie.gtf\",\n out_abun=stng_dir + \"/\" + samp + \"_prok\" + \"_sTie.tab\",\n in_bam_file=map_dir + \"/\" + samp + \"_srt_prok.bam\")\n yield StringTieScores(gff_file=self.gff_file.split(\",\")[1],\n num_cpus=self.num_cpus,\n out_gtf=os.path.join(stng_dir, samp + \"_euk_sTie.gtf\"),\n out_cover=os.path.join(stng_dir, samp + \"_euk_covered_sTie.gtf\"),\n out_abun=os.path.join(stng_dir, 
samp + \"_euk_sTie.tab\"),\n in_bam_file=map_dir + \"/\" + samp + \"_srt_euk.bam\")", "title": "" }, { "docid": "0616f8b6df5b15fe2707dd96ce3fe154", "score": "0.4796133", "text": "def compute_transport_map(x0, polynomial_order=2, MPIsetup=None):\n\n Dim = x0.shape[1]\n\n # create the distribution object for my samples \n pi = DistributionFromSamples(x0)\n \n # create reference distribution\n rho = DIST.StandardNormalDistribution(Dim)\n\n\n # first we pack the distribution into neighborhood of origin \n # via a linear adjustment\n beta = 1.0 \n b= beta/np.std(x0,0)\n a= - b * np.mean(x0,0) # centering\n L = MAPS.FrozenLinearDiagonalTransportMap(a,b)\n\n\n # we use Square Integral formulation of transport maps (robust and fast)\n S = TM.Default_IsotropicIntegratedSquaredTriangularTransportMap(Dim, polynomial_order, 'total')\n print(30*\"-\")\n print(\"Computing transport maps with polynomial order: %d\" % polynomial_order)\n print(\"Number of coefficients: %d\" % S.n_coeffs)\n \n\n push_L_pi = DIST.PushForwardTransportMapDistribution(L, pi)\n push_SL_pi = DIST.PushForwardTransportMapDistribution(S, push_L_pi)\n qtype = 0 # Monte-Carlo quadratures from pi\n qparams = np.size(x0,0) # Number of MC points = all available points\n reg = None # No regularization\n tol = 1e-10 # Optimization tolerance\n ders = 2 # Use gradient and Hessian\n\n # MPI setup\n if MPIsetup is None:\n MyPool = Dim*[None]\n else:\n npools=len(MPIsetup)\n MyPool= (Dim-npools)*[None] \n\n pool_dic={}\n for jp in range(0,npools):\n pool_dic[\"mpi_pool{0}\".format(jp+1)]=TM.get_mpi_pool()\n pool_dic[\"mpi_pool{0}\".format(jp+1)].start(MPIsetup[jp])\n MyPool.append(pool_dic[\"mpi_pool{0}\".format(jp+1)])\n # print('MyPool for parallization is '+str(MyPool))\n print('Number of cores for parallel dimensions is '+ str(MPIsetup))\n\n # compute the polynomial map \n log = push_SL_pi.minimize_kl_divergence(rho, qtype=qtype, qparams=qparams, regularization=reg, tol=tol, ders=ders,maxit=300,mpi_pool=MyPool)\n\n # compose with the linear part \n SL = MAPS.CompositeMap(S,L)\n\n return SL", "title": "" }, { "docid": "3702e61b3ee75cbbdaa1373d96bb9f5f", "score": "0.47891903", "text": "def OptimizeVariablesTraining():\n\n\n democtratic_xsect=GetXSection(tanb,mass_for_mix)\n\n # signal processing\n signal_files=[]\n files=[ GetListFiles(trees_path+\"/*\"+m+scenario_template,trees_path) for m in mass ]\n for item in files:\n# print item\n if (len(item)==0): continue\n (item,masses)=GetMassFromName(item)\n (item,numevts)=GetNumEvt(item)\n (item,lumiwgts)=GetLumiWeight(item)\n if (democtratic_mix):\n xsects=map(lambda x: democtratic_xsect,masses)\n else:\n xsects=map(lambda x: GetXSection(tanb,float(x)),masses)\n newwgts=GetNewWeight(1e3,lumiwgts,numevts,xsects)\n filesnew=map(lambda x: item[x].replace(\".root\",\"_update.root\") ,range(len(item)))\n if (not os.path.exists(filesnew[0])): ReWeight(tree_name='KinVarsBDT', list_of_files=item, weight=newwgts)\n signal_files+=filesnew\n\n # background processing\n bkg_files=[]\n# files=[ GetListFiles(trees_path+\"/*\"+ptbin+\"*/*/*/TripleBtagAnalysis.root\",trees_path) for ptbin in ptbins ]\n files=[ GetListFiles(trees_path+\"/*\"+ptbin+scenario_template,trees_path) for ptbin in ptbins ]\n for item in files:\n# print item\n if (len(item)==0): continue\n (item,numevts)=GetNumEvt(item)\n (item,lumiwgts)=GetLumiWeight(item)\n newwgts=GetNewWeight(1e3,lumiwgts,numevts)\n filesnew=map(lambda x: item[x].replace(\".root\",\"_update.root\") ,range(len(item)))\n if (not os.path.exists(filesnew[0])): 
ReWeight(tree_name='KinVarsBDT', list_of_files=item, weight=newwgts)\n bkg_files+=filesnew\n\n\n #create db\n if (not os.path.isfile(\"./\"+mva_method+\"_bestvariables.gz\")): CreateBestVariablesDB(mva_method)\n\n\n #create config\n\n (config,_vars)=CreateConfigForVariables(mva_method)\n\n\n# print signal_files,bkg_files,mva_method,config\n\n #run mva\n RunMVAFromConfig(signal_files,bkg_files,mva_method,config)\n\n# get results\n _separation=GetSeparationMVA(mva_method,config)\n\n (_sgnKS,_bkgKS)=GetKSMVA(mva_method,config)\n\n\n mode=0\n\n if os.path.exists(\"variables.mode\"):\n file=open(\"variables.mode\",\"r\")\n mode=int(file.readline())\n file.close()\n \n if (mode==0):\n result=config+(_separation,_sgnKS,_bkgKS,{})\n else:\n KS=GetKSIntputVariables(mva_method,config[0])\n result=config+(_separation,_sgnKS,_bkgKS,KS)\n\n# save to DB\n UpLoadBestVariables(mva_method,result)\n\n# check results\n print \"CONFIGSSSSS 22222!!!\"\n print result\n# print ReadResults(mva_method)\n# ROOT.gROOT.ProcessLine('.L TMVAGui.C')\n# ROOT.TMVAGu()\n return", "title": "" }, { "docid": "799315de1a86d67b13d885f6f78b09dc", "score": "0.47882664", "text": "def execute(self):\n if 'poes' not in self.datastore: # for short report\n return\n oq = self.oqparam\n num_rlzs = self.datastore['csm_info'].get_num_rlzs()\n if num_rlzs == 1: # no stats to compute\n return {}\n elif not oq.hazard_stats():\n if oq.hazard_maps or oq.uniform_hazard_spectra:\n logging.warn('mean_hazard_curves was false in the job.ini, '\n 'so no outputs were generated.\\nYou can compute '\n 'the statistics without repeating the calculation'\n ' with the --hc option')\n return {}\n # initialize datasets\n N = len(self.sitecol.complete)\n L = len(oq.imtls.array)\n pyclass = 'openquake.hazardlib.probability_map.ProbabilityMap'\n all_sids = self.sitecol.complete.sids\n nbytes = N * L * 4 # bytes per realization (32 bit floats)\n totbytes = 0\n if num_rlzs > 1:\n for name, stat in oq.hazard_stats():\n self.datastore.create_dset(\n 'hcurves/%s/array' % name, F32, (N, L, 1))\n self.datastore['hcurves/%s/sids' % name] = all_sids\n self.datastore.set_attrs(\n 'hcurves/%s' % name, __pyclass__=pyclass)\n totbytes += nbytes\n if 'hcurves' in self.datastore:\n self.datastore.set_attrs('hcurves', nbytes=totbytes)\n self.datastore.flush()\n\n with self.monitor('sending pmaps', autoflush=True, measuremem=True):\n ires = parallel.Starmap(\n self.core_task.__func__, self.gen_args()\n ).submit_all()\n nbytes = ires.reduce(self.save_hcurves)\n return nbytes", "title": "" }, { "docid": "951ccbbc8bdb9d81126e519a9152edbe", "score": "0.4787463", "text": "def compute(self) :\n self.calcWeightingParameters()\n self.aggregateIntoCategories()\n self.aggregateIntoComplex()", "title": "" }, { "docid": "d16ef81ed41b568a203d305721fd4178", "score": "0.4783896", "text": "def update_masses(self, masses_dict):\n\n self.launch_vehicle.M0 = masses_dict['m0']/1000. 
# convert from kg to Mg\n\n for element in self.launch_vehicle.element_list:\n\n if isinstance(element, VTOStageFlybackVehicle):\n owe = masses_dict['s1'] + self.num_engines_dict['e1'] * masses_dict['e1']\n\n if 'ab' in masses_dict:\n owe += self.num_engines_dict['ab'] * masses_dict['ab']\n\n element.m = owe\n\n else:\n element.m = masses_dict[element.name]\n\n for prop in self.vehicle_props_dict:\n self.vehicle_props_dict[prop] = masses_dict[prop]", "title": "" }, { "docid": "44419f946fdc97abcd4311b3085394e2", "score": "0.47807807", "text": "def main(args):\n\n\n print args.subjects\n subject_list = args.subjects\n\n\n \"\"\"\n Map field names to individual subject runs\n \"\"\"\n\n info = dict(dwi=[['subject_id', 'data']],\n bvecs=[['subject_id', 'bvecs']],\n bvals=[['subject_id', 'bvals']],\n struct=[['subject_id', 'brain']],\n seed_file=[['subject_id', 'thalamus']],\n target_masks=[['subject_id', \n ['LPFC', 'LTC', 'MPFC','MTC','OCC','OFC','PC','SMC']]],\n bedpost = [['subject_id','dti.bedpostx']],\n thsample = [['subject_id','merged_th2samples']],\n phsample = [['subject_id','merged_ph2samples']],\n fsample = [['subject_id','merged_f2samples']],\n matrix = [['subject_id','FREESURFERT1toNodif.mat']],\n bet_mask = [['subject_id','nodif_brain_mask']],\n fsLoc = [['subject_id','freesurfer']],\n aseg = [['subject_id','aseg']],\n aparc_aseg= [['subject_id','aparc+aseg']],\n )\n #seed_file = [['subject_id','MASK_average_thal_right']],\n #target_masks = [['subject_id',['MASK_average_M1_right',\n #'MASK_average_S1_right',\n #'MASK_average_occipital_right',\n #'MASK_average_pfc_right',\n #'MASK_average_pmc_right',\n #'MASK_average_ppc_right',\n #'MASK_average_temporal_right']]])\n\n infosource = pe.Node(interface=util.IdentityInterface(fields=['subject_id']),\n name=\"infosource\")\n\n \"\"\"\n Here we set up iteration over all the subjects. The following line\n is a particular example of the flexibility of the system. The\n ``datasource`` attribute ``iterables`` tells the pipeline engine that\n it should repeat the analysis on each of the items in the\n ``subject_list``. In the current example, the entire first level\n preprocessing and estimation will be repeated for each subject\n contained in subject_list.\n \"\"\"\n\n infosource.iterables = ('subject_id', subject_list)\n\n \"\"\"\n Now we create a :class:`nipype.interfaces.io.DataGrabber` object and\n fill in the information from above about the layout of our data. 
The\n :class:`nipype.pipeline.engine.Node` module wraps the interface object\n and provides additional housekeeping and pipeline specific\n functionality.\n \"\"\"\n\n #datasource = pe.Node(interface=nio.DataGrabber(infields=['subject_id'],\n #outfields=info.keys()),\n #name = 'datasource')\n\n datasource = pe.Node(interface=nio.DataGrabber(infields=['subject_id'],\n outfields=info.keys()),\n name = 'datasource')\n\n #datasource.inputs.template = \"%s/%s\"\n datasource.inputs.template = \"%s/%s\"\n\n # This needs to point to the fdt folder you can find after extracting\n # http://www.fmrib.ox.ac.uk/fslcourse/fsl_course_data2.tar.gz\n datasource.inputs.base_directory = os.path.abspath('/Volumes/CCNC_3T_2/kcho/ccnc/GHR_project')\n\n datasource.inputs.field_template = dict(dwi='%s/dti/%s.nii.gz',\n bvecs='%s/dti/%s',\n bvals='%s/dti/%s',\n struct='%s/freesurfer/mri/%s.nii.gz',\n seed_file='%s/ROI/{side}_%s.nii.gz'.format(side=args.side),\n target_masks='%s/ROI/{side}_%s.nii.gz'.format(side=args.side),\n matrix='%s/registration/%s',\n bet_mask='%s/dti/%s.nii.gz',\n thsample='%s/DTI.bedpostX/%s.nii.gz',\n fsample='%s/DTI.bedpostX/%s.nii.gz',\n phsample='%s/DTI.bedpostX/%s.nii.gz',\n fsLoc='%s/%s',\n aseg='%s/FREESURFER/mri/%s.mgz',\n aparc_aseg='%s/FREESURFER/mri/%s.mgz',\n #datasource.inputs.field_template = SelectFiles(dict(dwi='%s/{timeline}/DTI/%s'.format(timeline=args.timeline),))\n )\n datasource.inputs.template_args = info\n datasource.inputs.sort_filelist = True\n\n\n \"\"\"\n FREESURFER read\n \"\"\"\n\n insulaExtract= pe.Node(interface=fs.Binarize(out_type='nii.gz'), name='insula')\n if args.side == 'lh':\n insulaExtract.inputs.match = [2035]\n else:\n insulaExtract.inputs.match = [1035]\n\n\n brainStemExtract = pe.Node(interface=fs.Binarize(out_type='nii.gz'), name='brainStem')\n brainStemExtract.inputs.match = [16, 6, 7, 8, 45, 46, 47]\n\n terminationRoiMaker = pe.Node(interface=fsl.MultiImageMaths(), name='add_masks')\n terminationRoiMaker.inputs.op_string = '-add %s '*8\n\n\n\n ##(infosource, datasink,[('subject_id','container')]),\n ##(('subject_id', getstripdir),'strip_dir')]),\n\n\n\n probtrackx = pe.Node(interface=fsl.ProbTrackX(), name='probtrackx')\n probtrackx.inputs.mode = 'seedmask'\n probtrackx.inputs.c_thresh = 0.2\n probtrackx.inputs.n_steps = 2000\n probtrackx.inputs.step_length = 0.5\n probtrackx.inputs.n_samples = 5000\n probtrackx.inputs.opd = True\n probtrackx.inputs.os2t = True\n probtrackx.inputs.loop_check = True\n\n\n #tractography.add_nodes([bedpostx, flirt])\n \"\"\"\n Setup data storage area\n \"\"\"\n\n datasink = pe.Node(interface=nio.DataSink(),name='datasink')\n datasink.inputs.base_directory = os.path.abspath('/Volumes/CCNC_3T_2/kcho/ccnc/GHR_project/prac')\n\n datasink.inputs.substitutions = [('_variable', 'variable'),\n ('_subject_id_', '')]\n\n def get_opposite(roiList):\n import os\n print '******************'\n print '******************'\n print '******************'\n print '******************'\n print '******************'\n print '******************'\n if os.path.basename(roiList[0]).startswith('lh'):\n newList = [x.replace('lh','rh') for x in roiList]\n print newList\n else:\n newList = [x.replace('rh','lh') for x in roiList]\n print newList\n return newList\n\n dwiproc = pe.Workflow(name=\"dwiproc\")\n dwiproc.base_dir = os.path.abspath('tractography')\n dwiproc.connect([\n (infosource,datasource,[('subject_id', 'subject_id')]),\n (datasource, insulaExtract, [('aparc_aseg', 'in_file')]),\n (datasource, brainStemExtract, [('aseg', 
'in_file')]),\n (insulaExtract,terminationRoiMaker,[('binary_file', 'in_file')]),\n (datasource,terminationRoiMaker,[(('target_masks',get_opposite), 'operand_files')]),\n (brainStemExtract, probtrackx,[('binary_file', 'avoid_mp')]),\n (terminationRoiMaker, probtrackx,[('out_file', 'stop_mask')]),\n (datasource, probtrackx,[('matrix', 'xfm')]),\n (datasource, datasink,[('struct', 't1')]),\n (datasource, datasink,[(('target_masks',get_opposite), 'masks.opposite_mask')]),\n (datasource, datasink,[('target_masks','masks.target_mask')]),\n (datasource, datasink,[('seed_file','masks.seed_file')]),\n (terminationRoiMaker, datasink,[('out_file', 'masks.stop_mask')]),\n (brainStemExtract, datasink,[('binary_file', 'masks.avoid_mp')]),\n (datasource,probtrackx,[('seed_file','seed'),\n ('target_masks','target_masks'),\n ('bet_mask','mask'),\n ('phsample','phsamples'),\n ('fsample','fsamples'),\n ('thsample','thsamples'),\n ]),\n\n\n (probtrackx,datasink,[('fdt_paths','probtrackx.@fdt_paths'),\n ('log', 'probtrackx.@log'),\n ('particle_files', 'probtrackx.@particle_files'),\n ('targets', 'probtrackx.@targets'),\n ('way_total', 'probtrackx.@way_total'),\n ])\n ])\n\n dwiproc.run(plugin='MultiProc', plugin_args={'n_procs' : 8})\n subprocess.call(\"osascript -e '{}'\".format(applescript), shell=True)\n #ccncDti.write_graph()\n\n\n\n\n #\"\"\"\n #Setup for Diffusion Tensor Computation\n #--------------------------------------\n\n #Here we will create a generic workflow for DTI computation\n #\"\"\"\n\n #computeTensor = pe.Workflow(name='computeTensor')\n\n #\"\"\"\n #extract the volume with b=0 (nodif_brain)\n #\"\"\"\n\n #fslroi = pe.Node(interface=fsl.ExtractROI(),name='fslroi')\n #fslroi.inputs.t_min=0\n #fslroi.inputs.t_size=1\n\n #\"\"\"\n #create a brain mask from the nodif_brain\n #\"\"\"\n\n #bet = pe.Node(interface=fsl.BET(),name='bet')\n #bet.inputs.mask=True\n #bet.inputs.frac=0.35\n\n #\"\"\"\n #correct the diffusion weighted images for eddy_currents\n #\"\"\"\n\n #eddycorrect = create_eddy_correct_pipeline('eddycorrect')\n #eddycorrect.inputs.inputnode.ref_num=0\n\n #\"\"\"\n #compute the diffusion tensor in each voxel\n #\"\"\"\n\n #dtifit = pe.Node(interface=fsl.DTIFit(),name='dtifit')\n\n #\"\"\"\n #connect all the nodes for this workflow\n #\"\"\"\n\n #computeTensor.connect([\n #(fslroi,bet,[('roi_file','in_file')]),\n ##(eddycorrect, dtifit,[('outputnode.eddy_corrected','dwi')]),\n #(infosource, dtifit,[['subject_id','base_name']]),\n #(bet,dtifit,[('mask_file','mask')]),\n #])\n\n\n\n #\"\"\"\n #Setup for Tracktography\n #-----------------------\n\n #Here we will create a workflow to enable probabilistic tracktography\n #and hard segmentation of the seed region\n #\"\"\"\n\n #tractography = pe.Workflow(name='tractography')\n #tractography.base_dir = os.path.abspath('fsl_dti_tutorial')\n\n #\"\"\"\n #estimate the diffusion parameters: phi, theta, and so on\n #\"\"\"\n\n #bedpostx = create_bedpostx_pipeline()\n #bedpostx.get_node(\"xfibres\").iterables = (\"n_fibres\",[1,2])\n\n\n #flirt = pe.Node(interface=fsl.FLIRT(), name='flirt')\n #flirt.inputs.in_file = fsl.Info.standard_image('MNI152_T1_2mm_brain.nii.gz')\n #flirt.inputs.dof = 12\n\n #\"\"\"\n #perform probabilistic tracktography\n #\"\"\"\n\n #probtrackx = pe.Node(interface=fsl.ProbTrackX(),name='probtrackx')\n #probtrackx.inputs.mode='seedmask'\n #probtrackx.inputs.c_thresh = 0.2\n #probtrackx.inputs.n_steps=2000\n #probtrackx.inputs.step_length=0.5\n #probtrackx.inputs.n_samples=5000\n #probtrackx.inputs.opd=True\n 
#probtrackx.inputs.os2t=True\n #probtrackx.inputs.loop_check=True\n\n\n #\"\"\"\n #perform hard segmentation on the output of probtrackx\n #\"\"\"\n\n #findthebiggest = pe.Node(interface=fsl.FindTheBiggest(),name='findthebiggest')\n\n\n #\"\"\"\n #connect all the nodes for this workflow\n #\"\"\"\n\n\n\n #\"\"\"\n #Setup the pipeline that combines the two workflows: tractography and computeTensor\n #----------------------------------------------------------------------------------\n #\"\"\"\n\n\n #ccncDti = pe.Workflow(name=\"CCNC_DTI_tensor\")\n #ccncDti.connect([\n #(infosource,datasource, [('subject_id', 'subject_id')]),\n #(datasource,computeTensor,[('dwi','fslroi.in_file'),\n #('bvals','dtifit.bvals'),\n #('bvecs','dtifit.bvecs'),\n #('dwi','dtifit.dwi')]),\n ##('dwi','eddycorrect.inputnode.in_file')]),\n ##(infosource, datasink,[('subject_id','container')]),\n ##(('subject_id', getstripdir),'strip_dir')]),\n #(computeTensor,datasink,[('dtifit.FA','dtifit.@FA'),\n #('dtifit.MD','dtifit.@MD')]),\n ##(datasource,fs,[('freesurfer','subject_id')]),\n ##(fs,datasink,[('annot','brainmask')]),\n #])", "title": "" }, { "docid": "958d84ea82e1548401ad31b44c25a456", "score": "0.4779367", "text": "def execute(self, parameters, messages):\r\n \r\n # helper function\r\n def getCombs(arr):\r\n \r\n combs=[]\r\n for L in range(0, len(arr) +1):\r\n for subset in itertools.combinations(arr, L):\r\n combs.append(subset)\r\n \r\n return combs\r\n \r\n arcpy.AddMessage(\"entered script\")\r\n \r\n # specify path to MGET toolbox\r\n mget_path = r\"C:\\Program Files\\GeoEco\\ArcGISToolbox\\Marine Geospatial Ecology Tools.tbx\"\r\n try:\r\n arcpy.AddMessage(\"importing toolbox\")\r\n # arcpy.ImportToolbox(mget_path,\"GeoEco\")\r\n gp = arcgisscripting.create()\r\n gp.AddToolbox(mget_path, \"GeoEco\")\r\n \r\n except Exception as e:\r\n arcpy.AddError(e.message)\r\n \r\n # Get the traceback object\r\n tb = sys.exc_info()[2]\r\n tbinfo = traceback.format_tb(tb)[0]\r\n\r\n # Concatenate information together concerning the error into a message string\r\n pymsg = \"PYTHON ERRORS:\\nTraceback info:\\n\" + tbinfo + \"\\nError Info:\\n\" + str(sys.exc_info()[1])\r\n\r\n # Print Python error messages for use in Python / Python Window\r\n arcpy.AddError(pymsg + \"\\n\")\r\n \r\n return\r\n \r\n # gather parameters\r\n arcpy.AddMessage(\"gathering parameters\")\r\n fc = parameters[0].valueAsText\r\n model = parameters[1].valueAsText\r\n out_dir = parameters[2].valueAsText\r\n \r\n try:\r\n # check for output folder\r\n if os.path.exists(out_dir):\r\n arcpy.AddMessage(\"directory exists...\")\r\n else:\r\n arcpy.AddWarning(\"Output folder does not exist. 
Creating..\")\r\n os.mkdir(out_dir)\r\n \r\n # add the binomial test variable to an in_memory feature set\r\n arcpy.AddMessage(\"copy feature class\")\r\n fc_test = \"in_memory/test_fc\"\r\n arcpy.CopyFeatures_management(fc, fc_test)\r\n arcpy.AddField_management(fc_test, \"TEST\", \"DOUBLE\")\r\n\r\n # calculate the test feature based on change code\r\n arcpy.AddMessage(\"changing classification codes\")\r\n wc_change = \"changeDesc2 <> 0\"\r\n templyr = 'templyr'\r\n arcpy.MakeFeatureLayer_management(fc_test, templyr)\r\n arcpy.SelectLayerByAttribute_management(templyr, \"NEW_SELECTION\", wc_change)\r\n arcpy.CalculateField_management(templyr, \"TEST\", 1, \"PYTHON_9.3\")\r\n\r\n wc_noChange = \"changeDesc2 = 0\"\r\n arcpy.SelectLayerByAttribute_management(templyr, \"NEW_SELECTION\", wc_noChange)\r\n arcpy.CalculateField_management(templyr, \"TEST\", 0, \"PYTHON_9.3\")\r\n\r\n # clear the selection\r\n arcpy.AddMessage(\"clearing selection\")\r\n arcpy.SelectLayerByAttribute_management(templyr,\"CLEAR_SELECTION\")\r\n\r\n # DEBUG\r\n arcpy.AddMessage(\"copying to disk...\")\r\n temp_fc = \"in_memory/temp_fc\"\r\n arcpy.CopyFeatures_management(templyr, temp_fc)\r\n\r\n # get field names for model fitting. TEST will be the last field added, so don't look at it\r\n flds = arcpy.ListFields(temp_fc)\r\n field_names = [f.name for f in flds[:-1] if (f.type.upper() == \"DOUBLE\" and (\"SHAPE\" not in f.name.upper()))]\r\n\r\n # #construct the string for the python call\r\n # contPreds = \"\"\r\n # for v in field_names:\r\n # contPreds += \"{} # # #;\".format(v)\r\n \r\n # # strip the last semicolon\r\n # contPreds = contPreds[:-1]\r\n \r\n arcpy.AddMessage(\"calling MGET\")\r\n \r\n # if import toolbox worked, use arcpy.GAMFitToArcGISTable_GeoEco() otherwise...\r\n var_combs = getCombs(field_names)\r\n \r\n for i,com in enumerate(var_combs):\r\n \r\n if len(com)>1:\r\n arcpy.AddMessage(\"on iteration {} with variables {}\\n\".format(i+1, com))\r\n \r\n model_dir = \"model_{}_{}\".format(model, i)\r\n model_fol = os.path.join(out_dir, model_dir)\r\n if not os.path.exists(model_fol):\r\n os.mkdir(model_fol)\r\n \r\n rdatafile = \"{}_{}.RData\".format(model, i)\r\n outModelFile = os.path.join(model_fol, rdatafile)\r\n \r\n contPreds = \"\"\r\n for v in com:\r\n contPreds += \"{} # # #;\".format(v)\r\n \r\n # strip the last semicolon\r\n contPreds = contPreds[:-1] \r\n \r\n # gp.GAMFitToArcGISTable_GeoEco(inputTable=templyr, outputModelFile=\"V:/DBS_ChangeDetection/MGET_models/test/GAM_fit_test.RData\", responseVariable=\"TEST\", family=\"binomial\", rPackage=\"mgcv\", continuousPredictors=\"grayStats_corr # # #;grayStats_energy # # #;grayStats_entropy # # #;grayStats_kurtosis # # #;grayStats_skewness # # #;grayStats_mean # # #;grayStats_variance # # #\", categoricalPredictors=\"\",offsetVariable=\"\", offsetTransform=\"\",bivariateInteractions=\"\",where=\"\",link=\"\",variance=\"\",theta=\"\", method=\"GCV.Cp\",optimizer=\"outer\",alternativeOptimizer=\"newton\",select=\"false\",gamma=\"1\",selectionMethod=\"\",logSelectionDetails=\"true\", writeSummaryFile=\"true\",writeDiagnosticPlots=\"true\",writeTermPlots=\"true\",residuals=\"false\",xAxis=\"true\",commonScale=\"true\",plotFileFormat=\"png\",res=\"1000\",width=\"3000\",height=\"3000\", pointSize=\"10\", bg=\"white\")\r\n gp.GAMFitToArcGISTable_GeoEco(templyr, outModelFile, \"TEST\", \"binomial\", \"mgcv\", contPreds) \r\n \r\n \r\n \r\n \r\n except Exception as e:\r\n arcpy.AddError(e.message)\r\n arcpy.AddError(arcpy.GetMessages())\r\n \r\n 
# Get the traceback object\r\n tb = sys.exc_info()[2]\r\n tbinfo = traceback.format_tb(tb)[0]\r\n\r\n # Concatenate information together concerning the error into a message string\r\n pymsg = \"PYTHON ERRORS:\\nTraceback info:\\n\" + tbinfo + \"\\nError Info:\\n\" + str(sys.exc_info()[1])\r\n\r\n # Print Python error messages for use in Python / Python Window\r\n arcpy.AddError(pymsg + \"\\n\")\r\n return\r\n \r\n return", "title": "" }, { "docid": "38b5eb4c4780c2e80f3f4a0bd7be6e3b", "score": "0.476969", "text": "def update_minerals():\n now = dt.strftime(dt.utcnow(), \"%Y-%m-%dT%H:%M:%S%Z\")\n sleep(1)\n for region_id in MARKET_REGIONS:\n for type_id in [m[1] for m in MINERALS]:\n params = {\n \"datasource\": \"tranquility\",\n \"order_type\": \"all\",\n \"type_id\": type_id\n }\n path = API_URL + \"markets/{}/orders/\".format(region_id)\n res = rq.get(path, params=params)\n rows = res.json()\n for r in rows:\n r[\"queried_on\"] = now\n r[\"type\"] = \"buy\" if r[\"is_buy_order\"] else \"sell\"\n _save_page(rows)\n print(\"Updated mineral prices for region {}\".format(str(region_id)))", "title": "" }, { "docid": "2a439d494c3f78148fae3626771b2775", "score": "0.4769084", "text": "def _yield_thermochemistry(self): # noqa: ANN101\n scheme = rx.core._check_scheme(self.model.scheme) # noqa: SLF001\n\n molecular_masses = np.array(\n [np.sum(data.atommasses) for name, data in self.model.compounds.items()],\n )\n energies = np.array(\n [data.energy for name, data in self.model.compounds.items()],\n )\n internal_energies = rx.get_internal_energies(\n self.model.compounds,\n qrrho=self.qrrho,\n temperature=self.temperature,\n )\n enthalpies = rx.get_enthalpies(\n self.model.compounds,\n qrrho=self.qrrho,\n temperature=self.temperature,\n )\n entropies = rx.get_entropies(\n self.model.compounds,\n qrrho=self.qrrho,\n temperature=self.temperature,\n )\n freeenergies = enthalpies - self.temperature * entropies\n assert np.allclose(\n freeenergies,\n rx.get_freeenergies(\n self.model.compounds,\n qrrho=self.qrrho,\n temperature=self.temperature,\n pressure=self.pressure,\n ),\n ), \"free energies do not match enthalpies and entropies\"\n\n compounds_table = Table(\n Column(\"no\", justify=\"right\"),\n Column(\"compound\", justify=\"left\"),\n Column(\"mass\\n〈amu〉\", justify=\"center\"),\n Column(\"Gᶜᵒʳʳ\\n〈kcal/mol〉\", justify=\"center\", style=\"bright_green\"),\n Column(\"Uᶜᵒʳʳ\\n〈kcal/mol〉\", justify=\"center\"),\n Column(\"Hᶜᵒʳʳ\\n〈kcal/mol〉\", justify=\"center\"),\n Column(\"S\\n〈cal/mol·K〉\", justify=\"center\"),\n title=\"estimated thermochemistry (compounds)\",\n box=self.box_style,\n )\n for i, (name, data) in enumerate(self.model.compounds.items()):\n compounds_table.add_row(\n f\"{i:d}\",\n name,\n f\"{molecular_masses[i]:6.2f}\",\n f\"{(freeenergies[i] - data.energy) / constants.kcal:14.2f}\",\n f\"{(internal_energies[i] - data.energy) / constants.kcal:14.2f}\",\n f\"{(enthalpies[i] - data.energy) / constants.kcal:14.2f}\",\n f\"{entropies[i] / constants.calorie:10.2f}\",\n )\n yield compounds_table\n\n delta_mass = rx.get_delta(scheme.A, molecular_masses)\n delta_energies = rx.get_delta(scheme.A, energies)\n delta_internal_energies = rx.get_delta(scheme.A, internal_energies)\n delta_enthalpies = rx.get_delta(scheme.A, enthalpies)\n # TODO(schneiderfelipe): log the contribution of reaction symmetry\n delta_entropies = rx.get_delta(scheme.A, entropies) + rx.get_reaction_entropies(\n scheme.A,\n temperature=self.temperature,\n pressure=self.pressure,\n )\n delta_freeenergies = 
delta_enthalpies - self.temperature * delta_entropies\n assert np.allclose(\n delta_freeenergies,\n rx.get_delta(scheme.A, freeenergies)\n - self.temperature\n * rx.get_reaction_entropies(\n scheme.A,\n temperature=self.temperature,\n pressure=self.pressure,\n ),\n ), \"reaction free energies do not match reaction enthalpies and reaction entropies\" # noqa: E501\n\n delta_activation_mass = rx.get_delta(scheme.B, molecular_masses)\n delta_activation_energies = rx.get_delta(scheme.B, energies)\n delta_activation_internal_energies = rx.get_delta(scheme.B, internal_energies)\n delta_activation_enthalpies = rx.get_delta(scheme.B, enthalpies)\n # TODO(schneiderfelipe): log the contribution of reaction symmetry\n delta_activation_entropies = rx.get_delta(\n scheme.B,\n entropies,\n ) + rx.get_reaction_entropies(\n scheme.B,\n temperature=self.temperature,\n pressure=self.pressure,\n )\n delta_activation_freeenergies = (\n delta_activation_enthalpies - self.temperature * delta_activation_entropies\n )\n assert np.allclose(\n delta_activation_freeenergies,\n rx.get_delta(scheme.B, freeenergies)\n - self.temperature\n * rx.get_reaction_entropies(\n scheme.B,\n temperature=self.temperature,\n pressure=self.pressure,\n ),\n ), \"activation free energies do not match activation enthalpies and activation entropies\" # noqa: E501\n\n circ_table = Table(\n Column(\"no\", justify=\"right\"),\n Column(\"reaction\", justify=\"left\"),\n Column(\"Δmass°\\n〈amu〉\", justify=\"center\"),\n Column(\"ΔG°\\n〈kcal/mol〉\", justify=\"center\", style=\"bright_green\"),\n Column(\"ΔE°\\n〈kcal/mol〉\", justify=\"center\"),\n Column(\"ΔU°\\n〈kcal/mol〉\", justify=\"center\"),\n Column(\"ΔH°\\n〈kcal/mol〉\", justify=\"center\"),\n Column(\"ΔS°\\n〈cal/mol·K〉\", justify=\"center\"),\n title=\"estimated (reaction°) thermochemistry\",\n box=self.box_style,\n )\n dagger_table = Table(\n Column(\"no\", justify=\"right\"),\n Column(\"reaction\", justify=\"left\"),\n Column(\"Δmass‡\\n〈amu〉\", justify=\"center\"),\n Column(\"ΔG‡\\n〈kcal/mol〉\", justify=\"center\", style=\"bright_green\"),\n Column(\"ΔE‡\\n〈kcal/mol〉\", justify=\"center\"),\n Column(\"ΔU‡\\n〈kcal/mol〉\", justify=\"center\"),\n Column(\"ΔH‡\\n〈kcal/mol〉\", justify=\"center\"),\n Column(\"ΔS‡\\n〈cal/mol·K〉\", justify=\"center\"),\n title=\"estimated (activation‡) thermochemistry\",\n box=self.box_style,\n )\n for i, reaction in enumerate(scheme.reactions):\n if scheme.is_half_equilibrium[i]:\n circ_row = [\n f\"{i:d}\",\n reaction,\n f\"{delta_mass[i]:6.2f}\",\n f\"{delta_freeenergies[i] / constants.kcal:10.2f}\",\n f\"{delta_energies[i] / constants.kcal:10.2f}\",\n f\"{delta_internal_energies[i] / constants.kcal:10.2f}\",\n f\"{delta_enthalpies[i] / constants.kcal:10.2f}\",\n f\"{delta_entropies[i] / constants.calorie:11.2f}\",\n ]\n dagger_row = [\n f\"{i:d}\",\n reaction,\n None,\n None,\n None,\n None,\n None,\n None,\n ]\n else:\n circ_row = [\n f\"{i:d}\",\n reaction,\n f\"{delta_mass[i]:6.2f}\",\n f\"{delta_freeenergies[i] / constants.kcal:10.2f}\",\n f\"{delta_energies[i] / constants.kcal:10.2f}\",\n f\"{delta_internal_energies[i] / constants.kcal:10.2f}\",\n f\"{delta_enthalpies[i] / constants.kcal:10.2f}\",\n f\"{delta_entropies[i] / constants.calorie:11.2f}\",\n ]\n dagger_row = [\n f\"{i:d}\",\n reaction,\n f\"{delta_activation_mass[i]:6.2f}\",\n f\"{delta_activation_freeenergies[i] / constants.kcal:10.2f}\",\n f\"{delta_activation_energies[i] / constants.kcal:10.2f}\",\n f\"{delta_activation_internal_energies[i] / constants.kcal:10.2f}\",\n 
f\"{delta_activation_enthalpies[i] / constants.kcal:10.2f}\",\n f\"{delta_activation_entropies[i] / constants.calorie:11.2f}\",\n ]\n\n circ_table.add_row(*circ_row)\n dagger_table.add_row(*dagger_row)\n yield circ_table\n yield dagger_table", "title": "" }, { "docid": "1f62bc04eebea9361e2253fbc8e92a23", "score": "0.476703", "text": "def setup_basis_models(self):\n\n par_table = self.par.output_dir + '/' + self.par.obj + '.par_table.fits'\n with fits.open(par_table) as hdul:\n t = Table(hdul[1].data)\n\n comp_models = {}\n\n for component in self.components:\n\n # acquire elines that belong to component from AGN fit output file\n compmodels = np.full(len(self.components.keys()), models.Gaussian1D())\n for idx, eline in enumerate(self.components[component].elines):\n row = self.components[component].elines[eline].idx\n eline = EmissionLine(name=t['name'][row],\n component=t['component'][row],\n tied=t['tied'][row],\n idx=idx,\n amplitude= t['amplitude'][row],\n vel=t['vel'][row],\n disp=t['disp'][row]\n )\n compmodels[idx] = eline.model\n\n # combine the eline models\n for idx in range(len(compmodels))[1:]:\n compmodels[0] += compmodels[idx]\n\n comp_models[component] = compmodels[0]\n\n self.models = comp_models", "title": "" }, { "docid": "19fa94c897b239fb945929f4e344ead3", "score": "0.47616148", "text": "def execute(self):\n nbays=self.JcktGeoIn.nbays\n\n #X-brace precalculations: Next two useful in case Dbrc0 is the only optimization variable in optimization problems\n self.Xbrcouts=self.Xbrcinputs #initialize\n\n if self.Xbrcinputs.Dbrc0:\n self.Xbrcouts.Dbrc=np.array([self.Xbrcinputs.Dbrc0]*nbays)\n if self.Xbrcinputs.tbrc0:\n self.Xbrcouts.tbrc=np.array([self.Xbrcinputs.tbrc0]*nbays)\n\n if any(self.Xbrcouts.Dbrc == 0.) :\n self.Xbrcouts.Dbrc=self.Xbrcouts.Dbrc[0].repeat(self.Xbrcinputs.Dbrc.size)\n if any(self.Xbrcouts.tbrc == 0.) 
:\n self.Xbrcouts.tbrc=self.Xbrcouts.tbrc[0].repeat(self.Xbrcinputs.tbrc.size)\n\n #TP precalculations\n self.BuildTPouts=self.TPinputs #here I transfer all the other inputs before expanding them with new info\n PreBuildLvl=self.PreBuildTPLvl #shorten name\n\n #Set central stem following tower base and bump up thikness\n if PreBuildLvl:\n self.BuildTPouts.Dstem=np.asarray([self.TwrDb]).repeat(self.TPinputs.hstem.size)\n self.BuildTPouts.tstem=np.asarray([self.TwrDb/self.TwrDTRb*self.TPinputs.stemwallfactor]).repeat(self.TPinputs.hstem.size)\n\n if PreBuildLvl==1 or PreBuildLvl==2 or PreBuildLvl==5:\n #Set struts equal to top of leg\n self.BuildTPouts.Dstrut=self.LegtD\n self.BuildTPouts.tstrut=self.Legtt\n self.BuildTPouts.Dstump=self.LegtD\n self.BuildTPouts.tstump=self.Legtt\n\n if PreBuildLvl==2 or PreBuildLvl==3:\n #set girders and diagonals equal to top braces\n self.BuildTPouts.Dgir=self.Xbrcinputs.Dbrc[-1]\n self.BuildTPouts.tgir=self.Xbrcinputs.tbrc[-1]\n self.BuildTPouts.Dbrc=self.Xbrcinputs.Dbrc[-1]\n self.BuildTPouts.tbrc=self.Xbrcinputs.tbrc[-1]\n if PreBuildLvl==4 or PreBuildLvl==5:\n self.BuildTPouts.Dbrc=self.TPinputs.Dgir\n self.BuildTPouts.tbrc=self.TPinputs.tgir\n\n #Leg bottom stump height set here if we have leg data and the user did not put a different value\n self.legbot_stmph=self.legbot_stmphin #default value\n if (self.LegbD) and not(self.legbot_stmphin): #User can never select =0, since it is forbidden\n self.legbot_stmph=1.5 * self.LegbD\n\n #Deck width set here if we have tower data and the user did not put a different fixed value for the deck, but a value function of Db\n if (self.TwrDb) and not(self.JcktGeoIn.dck_width) and self.JcktGeoIn.dck_widthfrac:\n self.dck_width = self.JcktGeoIn.dck_widthfrac * self.TwrDb\n else:\n self.dck_width = self.JcktGeoIn.dck_width\n\n #shorten names of class Jacket's attributes and calculates a few more parameters\n #nbays\n nlegs=self.JcktGeoIn.nlegs #legs\n batter=self.JcktGeoIn.batter #2D batter (=tg(angle w.r.t. vertical))\n\n self.al_bat2D= np.arctan(1./batter) #angle between vertical and leg in projection (2D) [rad]\n self.al_bat3D=np.arctan(np.sqrt(2.)/batter) #angle between vertical and leg (3D) [rad]\n\n dck_botz=self.JcktGeoIn.dck_botz #deck-bottom height\n weld2D=self.JcktGeoIn.weld2D #fraction of chord OD used in the weldment\n\n wdpth=self.wdepth #water depth\n #pileZtop=self.JcktGeoIn.pileZtop #top of pile z coordinate\n\n legZbot=self.legZbot #bottom of leg z coordinate\n\n #Calculate some basic properties THAT CONTAIN SOME ASSUMPTIONS ON GEOMETRY\n self.innr_ang=pi/6. * (nlegs==3) + pi/4. *(nlegs==4) #Angle between radial direction and base side\n\n self.JcktH= dck_botz -self.BuildTPouts.hstump + wdpth - legZbot #Jckt Height, from legtop to legbot\n Hbays= self.JcktH-self.legbot_stmph#-self.legtop_stmp #Jacket Height available for bays,\n #removing 1D for stump at bottom and 0.5D at top and bottom for welds to brace\n\n #other params\n self.wbase=self.dck_width-2.*self.BuildTPouts.Dstump/2.*(1.+weld2D) +2.*(wdpth+dck_botz)/batter #virtual width at sea bed\n self.wbas0=self.wbase-2*np.tan(self.al_bat2D)*(legZbot+self.legbot_stmph) #width at 1st horiz. 
brace joint\n\n #Calculate bay width, height, brace angle, and angle between X-braces (larger of the two)\n self.bay_bs,self.bay_hs,self.beta2D=FindBrcAng(Hbays,nbays,self.wbas0,self.al_bat2D)\n self.beta3D, al_Xbrc=FindBeta3D(self.beta2D, self.al_bat2D, self.al_bat3D, self.innr_ang, self.wbas0) #[rad],[rad]", "title": "" }, { "docid": "5bc6f1332d86375483c286f0ed3cc89f", "score": "0.4759062", "text": "def minimisation_function(self, stellar_parameters):\n generated_met = []\n if self.mgcog.interp_method != \"SKIGP\":\n for i in range(len(self.mgcog.models)):\n result = self.generate_met(self.mgcog.models[i], stellar_parameters[0],\n stellar_parameters[1], stellar_parameters[2],\n self.mgcog.obs_ew[i])\n\n generated_met.append(result[0])\n\n if self.limited_feii:\n generated_ew = []\n generated_ewerror = []\n for i in range(len(self.mgcog.models)):\n line = str(round(self.mgcog.obs_wavelength[i], 2)) \\\n +\"_\"+ str(round(self.mgcog.obs_ep[i], 2)) +\"_\" + self.mgcog.obs_ele[i]\n ewdiff_file = self.mgcog.ewdiffpath + self.mgcog.stellar_type + \"/\" + line + \".csv\"\n ewdiff_df = pd.read_csv(ewdiff_file)\n generated_ew.append(self.generate_ew(self.mgcog.models[i], stellar_parameters[0],\n stellar_parameters[1], stellar_parameters[2],\n stellar_parameters[3], self.mgcog.obs_ew[i]))\n generated_ewerror.append(self.generate_ewerror(line, ewdiff_df))\n Achi1, AREW1 = self.obs_calculation(generated_met, self.mgcog.obs_ele, self.mgcog.obs_ew, self.mgcog.obs_wavelength, self.mgcog.obs_ep)\n generated_ew = np.array(generated_ew)\n generated_ewerror = np.array(generated_ewerror)\n\n return (Achi1[0]/Achi1[1]**0.5)**2+(AREW1[0]/Achi1[1]**0.5)**2+np.sum((generated_ew/generated_ewerror)**2)\n\n Achi1, AREW1, dFe = self.obs_calculation(generated_met, self.mgcog.obs_ele, self.mgcog.obs_ew, self.mgcog.obs_wavelength, self.mgcog.obs_ep)\n return (Achi1[0]/Achi1[1]**0.5)**2+(AREW1[0]/Achi1[1]**0.5)**2+(dFe[0]/dFe[1])**2.\n\n if self.mgcog.interp_method == \"SKIGP\":\n generated_met_err = []\n pred_xs = []\n for i in range(len(self.mgcog.obs_ew)):\n pred_x = torch.from_numpy(np.array([[stellar_parameters[0],\n stellar_parameters[1], stellar_parameters[2],\n self.mgcog.obs_ew[i]]])).to(torch.float)\n pred_xs.append(pred_x)\n\n self.mgcog.models.eval()\n self.mgcog.likelihoods.eval()\n with torch.no_grad(), gpytorch.settings.fast_pred_var():\n predictions = self.mgcog.likelihoods(*self.mgcog.models(*pred_xs))\n\n for p in predictions:\n generated_met.append(p.mean.tolist()[0])\n generated_met_err.append(p.variance.tolist()[0]**0.5)\n\n Achi1, AREW1, dFe = self.obs_calculation(generated_met, self.mgcog.obs_ele,\n self.mgcog.obs_ew, self.mgcog.obs_wavelength,\n self.mgcog.obs_ep, generated_met_err)\n return (Achi1[0]/Achi1[1]**0.5)**2+(AREW1[0]/Achi1[1]**0.5)**2+(dFe[0]/dFe[1])**2.", "title": "" }, { "docid": "85b13d364bf65e8c4e2cdc49bf245e96", "score": "0.47590497", "text": "def return_results(self):\n distancelist = []\n t_energylist = []\n t_energylist_peratom = []\n vol_peratom_success = []\n outnodedict = {}\n natoms = len(self.inputs.structure.sites)\n e_u = 'eV'\n dis_u = 'me/bohr^3'\n for label in self.ctx.labels:\n calc = self.ctx[label]\n\n if not calc.is_finished_ok:\n message = f'One SCF workflow was not successful: {label}'\n self.ctx.warnings.append(message)\n self.ctx.successful = False\n continue\n\n try:\n outputnode_scf = calc.outputs.output_scf_wc_para\n except KeyError:\n message = f'One SCF workflow failed, no scf output node: {label}. 
I skip this one.'\n self.ctx.errors.append(message)\n self.ctx.successful = False\n continue\n\n outnodedict[label] = outputnode_scf\n\n outpara = outputnode_scf.get_dict()\n\n t_e = outpara.get('total_energy', float('nan'))\n e_u = outpara.get('total_energy_units', 'eV')\n if e_u in ['Htr', 'htr']:\n t_e = t_e * HTR_TO_EV\n dis = outpara.get('distance_charge', float('nan'))\n dis_u = outpara.get('distance_charge_units', 'me/bohr^3')\n t_energylist.append(t_e)\n t_energylist_peratom.append(t_e / natoms)\n vol_peratom_success.append(self.ctx.volume_peratom[label])\n distancelist.append(dis)\n\n not_ok, an_index = check_eos_energies(t_energylist_peratom)\n\n if not_ok:\n message = f'Abnormality in Total energy list detected. Check entr(ies) {an_index}.'\n hint = ('Consider refining your basis set.')\n self.ctx.info.append(hint)\n self.ctx.warnings.append(message)\n\n en_array = np.array(t_energylist_peratom)\n vol_array = np.array(vol_peratom_success)\n\n write_defaults_fit = False\n # TODO: different fits\n if len(en_array): # for some reason just en_array does not work\n volume, bulk_modulus, bulk_deriv, residuals = birch_murnaghan_fit(en_array, vol_array)\n\n # something went wrong with the fit\n for i in volume, bulk_modulus, bulk_deriv, residuals:\n if isinstance(i, complex):\n write_defaults_fit = True\n\n if all(i is not None for i in (volume, bulk_modulus, bulk_deriv, residuals)):\n # cast float, because np datatypes are sometimes not serialable\n volume, bulk_modulus = float(volume), float(bulk_modulus)\n bulk_deriv, residuals = float(bulk_deriv), residuals.tolist()\n\n volumes = self.ctx.volume\n gs_scale = volume * natoms / self.ctx.org_volume\n bulk_modulus = bulk_modulus * 160.217733 # *echarge*1.0e21,#GPa\n if (volume * natoms < volumes[0]) or (volume * natoms > volumes[-1]):\n warn = ('Groundstate volume was not in the scaling range.')\n hint = f'Consider rerunning around point {gs_scale}'\n self.ctx.info.append(hint)\n self.ctx.warnings.append(warn)\n # TODO maybe make it a feature to rerun with centered around the gs.\n else:\n write_defaults_fit = True\n\n if write_defaults_fit:\n volumes = None\n gs_scale = None\n residuals = None\n volume = 0\n bulk_modulus = None\n bulk_deriv = None\n\n out = {\n 'workflow_name': self.__class__.__name__,\n 'workflow_version': self._workflowversion,\n 'scaling': self.ctx.scalelist,\n 'scaling_gs': gs_scale,\n 'initial_structure': self.inputs.structure.uuid,\n 'volume_gs': volume * natoms,\n 'volumes': volumes,\n 'volume_units': 'A^3',\n 'natoms': natoms,\n 'total_energy': t_energylist,\n 'total_energy_units': e_u,\n 'structures': self.ctx.structures_uuids,\n 'calculations': [], # self.ctx.calcs1,\n 'scf_wfs': [], # self.converge_scf_uuids,\n 'distance_charge': distancelist,\n 'distance_charge_units': dis_u,\n 'nsteps': self.ctx.points,\n 'guess': self.ctx.guess,\n 'stepsize': self.ctx.step,\n # 'fitresults' : [a, latticeconstant, c],\n # 'fit' : fit_new,\n 'residuals': residuals,\n 'bulk_deriv': bulk_deriv,\n 'bulk_modulus': bulk_modulus,\n 'bulk_modulus_units': 'GPa',\n 'info': self.ctx.info,\n 'warnings': self.ctx.warnings,\n 'errors': self.ctx.errors\n }\n\n if self.ctx.successful:\n self.report('Done, Equation of states calculation complete')\n else:\n self.report('Done, but something went wrong.... 
Probably some individual calculation failed or'\n ' a scf-cycle did not reach the desired distance.')\n\n outnode = Dict(out)\n outnodedict['results_node'] = outnode\n\n # create links between all these nodes...\n outputnode_dict = create_eos_result_node(**outnodedict)\n outputnode = outputnode_dict.get('output_eos_wc_para')\n outputnode.label = 'output_eos_wc_para'\n outputnode.description = ('Contains equation of states results and information of an FleurEosWorkChain run.')\n\n returndict = {}\n returndict['output_eos_wc_para'] = outputnode\n\n outputstructure = outputnode_dict.get('gs_structure', None)\n if outputstructure:\n outputstructure.label = 'output_eos_wc_structure'\n outputstructure.description = ('Structure with the scaling/volume of the lowest total '\n 'energy extracted from FleurEosWorkChain')\n\n returndict['output_eos_wc_structure'] = outputstructure\n\n # create link to workchain node\n for link_name, node in returndict.items():\n self.out(link_name, node)", "title": "" }, { "docid": "362e666bccfe96090fe18262ac6e4c12", "score": "0.475875", "text": "def do_energy_calculation(self, organism, dictionary, key):\n\n # make the job directory\n job_dir_path = str(os.getcwd()) + '/temp/' + str(organism.id)\n os.mkdir(job_dir_path)\n\n # copy the INCAR and KPOINTS files to the job directory\n shutil.copy(self.incar_file, job_dir_path)\n shutil.copy(self.kpoints_file, job_dir_path)\n\n # sort the structure of the organism and write to POSCAR file\n organism.structure.sort()\n organism.structure.to(fmt='poscar', filename=job_dir_path + '/POSCAR')\n\n # get a list of the element symbols in the sorted order\n symbols = []\n for site in organism.structure.sites:\n symbols.append(site.specie.symbol)\n symbols = list(set(symbols))\n\n # write the POTCAR file by concatenating the appropriate elemental\n # POTCAR files\n total_potcar_path = job_dir_path + '/POTCAR'\n with open(total_potcar_path, 'w') as total_potcar_file:\n for symbol in symbols:\n with open(self.potcar_files[symbol], 'r') as potcar_file:\n for line in potcar_file:\n total_potcar_file.write(line)\n\n # run 'callvasp' script as a subprocess to run VASP\n print('Starting VASP calculation on organism {} '.format(organism.id))\n try:\n subprocess.call(['callvasp', job_dir_path],\n stderr=subprocess.STDOUT)\n except:\n print('Error running VASP on organism {} '.format(organism.id))\n dictionary[key] = None\n return\n\n # parse the relaxed structure from the CONTCAR file\n try:\n relaxed_structure = Structure.from_file(job_dir_path + '/CONTCAR')\n except:\n print('Error reading structure of organism {} from CONTCAR '\n 'file '.format(organism.id))\n dictionary[key] = None\n return\n\n # make a Vasprun object by reading the vasprun.xml file\n try:\n with warnings.catch_warnings(): # don't need warning, so suppress\n warnings.simplefilter('ignore')\n vasprun = Vasprun(job_dir_path + '/vasprun.xml',\n ionic_step_skip=None, ionic_step_offset=None,\n parse_dos=False, parse_eigen=False,\n parse_projected_eigen=False,\n parse_potcar_file=False)\n except:\n print('Error parsing vasprun.xml file for organism '\n '{} '.format(organism.id))\n dictionary[key] = None\n return\n\n # check if the vasp calculation converged\n if not vasprun.converged:\n print('VASP relaxation of organism {} did not converge '.format(\n organism.id))\n dictionary[key] = None\n return\n\n # get the total energy from the vasprun\n try:\n total_energy = float(vasprun.final_energy)\n except:\n print('Error reading energy of organism {} from vasprun.xml '\n 'file 
'.format(organism.id))\n dictionary[key] = None\n return\n\n organism.structure = relaxed_structure\n organism.total_energy = total_energy\n organism.epa = total_energy/organism.structure.num_sites\n print('Setting energy of organism {} to {} '\n 'eV/atom '.format(organism.id, organism.epa))\n dictionary[key] = organism", "title": "" }, { "docid": "efeb5de9a6f6f2c45e2eccf9b9e82b97", "score": "0.47573277", "text": "def __call__(self, days=1):\n period_gp = 0\n for day in range(days):\n daily_gp = 0\n print(\"day {0}\".format(self.day))\n \n for location in self.locations:\n for product in self.products:\n supply = 0\n cost = 0\n for producer in self.locations[location].producers:\n producer_supply,producer_cost = self.producers[producer](product) #TODO expose producers to logs\n supply += producer_supply\n cost += producer_cost\n product_gp,product_message = self.products[product](self.locations[location],supply,cost)\n daily_gp += product_gp\n print(product_message)\n print(\"for day {0} in {1} the total profit was {2}\".format(self.day,location,daily_gp))\n self.day += 1 #increment day\n period_gp += daily_gp\n self.total_gp += period_gp\n print(\"in the last {0} days, {1} gp was earned. The total balance is {2}\".format(days,period_gp, self.total_gp))", "title": "" }, { "docid": "8f41aef727926101d605fe300263d47b", "score": "0.47550377", "text": "def calc_inputs(self) -> dict:\n\n # Initialize input_dict\n input_dict = {}\n\n # Add subset inputs\n for subset in self.subsets:\n subset.calc_inputs(input_dict)\n\n # Remove unused subset inputs\n del input_dict['transform']\n del input_dict['ucell']\n\n # Add calculation-specific inputs\n input_dict['strainrange'] = self.strainrange\n\n # Return input_dict\n return input_dict", "title": "" }, { "docid": "0bcfd98d1087dfb6ba22819064369745", "score": "0.47541288", "text": "def applysLoad(self,tranform):\n # Number of beams\n N = self.geo.nFuselage + self.geo.nWings\n for i in range(N):\n M = len(self.geo.aircraftNodesPoints[i])\n for j in range(M):\n name = self.geo.aircraftNodesNames[i][j]\n\n # Distributes loads due to inertia. 
m__ for mass, _f_ for\n # force, _m_ for moment, __* direction\n if self.geo.settings['G_loads']:\n if j < int(np.floor(M/2)):\n coef = 1\n else:\n coef = 1\n mfx = np.round(tranform.smf[i][j,0],decimals=15)\n mfy = np.round(tranform.smf[i][j,1],decimals=15)\n mfz = np.round(tranform.smf[i][j,2],decimals=15)\n mmx = coef * np.round(tranform.smm[i][j,0],decimals=15)\n mmy = coef * np.round(tranform.smm[i][j,1],decimals=15)\n mmz = coef * np.round(tranform.smm[i][j,2],decimals=15)\n else:\n mfx = 0\n mfy = 0\n mfz = 0\n mmx = 0\n mmy = 0\n mmz = 0\n\n # Distributes loads due to aerodynamics if CFD solver is Pytornado\n if (self.geo.settings['CFD_solver'] == 'Pytornado'\n and i >= self.geo.nFuselage\n ):\n if j < int(np.floor(M/2)):\n coef = 1\n else:\n coef = 1\n fx = np.round(tranform.sfx[i - self.geo.nFuselage][j],decimals=15)\n fy = np.round(tranform.sfy[i - self.geo.nFuselage][j],decimals=15)\n fz = np.round(tranform.sfz[i - self.geo.nFuselage][j],decimals=15)\n mx = np.round(tranform.smx[i - self.geo.nFuselage][j],decimals=15)\n my = coef * np.round(tranform.smy[i - self.geo.nFuselage][j],decimals=15)\n mz = np.round(tranform.smz[i - self.geo.nFuselage][j],decimals=15)\n elif self.geo.settings['CFD_solver'] == 'Pytornado':\n fx = 0\n fy = 0\n fz = 0\n mx = 0\n my = 0\n mz = 0\n # logger.debug(fx)\n # logger.debug(fz)\n # Distributes loads due to aerodynamics if CFD solver is SU2\n if self.geo.settings['CFD_solver'] == 'SU2':\n if j < int(np.floor(M/2)):\n coef = 1\n else:\n coef = 1\n fx = np.round(tranform.sfx[i][j],decimals=15)\n fy = np.round(tranform.sfy[i][j],decimals=15)\n fz = np.round(tranform.sfz[i][j],decimals=15)\n mx = np.round(tranform.smx[i][j],decimals=15)\n my = coef * np.round(tranform.smy[i][j],decimals=15)\n mz = np.round(tranform.smz[i][j],decimals=15)\n # Warning the frame of reference of the structure mesh is\n # rotated from the one of the airplane so:\n # airplaine x -> structure y\n # airplaine y -> structure x\n # airplaine z -> structure z\n if i >= self.geo.nFuselage:\n load = [(fx-mfx), (fy-mfy), (fz-mfz), (mx-mmx), (my-mmy), (mz-mmz)]\n self.beams[i].add('point_load', {'at': name, 'load': load})\n \n # logger.debug('Forces : '+ str(fx) + ' ' + str(fy) + ' ' + str(fz))\n # logger.debug('Forces : '+ str(mfx) + ' ' + str(mfy) + ' ' + str(mfz))\n # logger.debug('load: ' + str(load))\n # logger.debug('Moments: '+ str( mx) + ' ' + str( my) + ' ' + str( mz))\n # logger.debug('Moments: '+ str(mmx) + ' ' + str(mmy) + ' ' + str(mmz))\n\n \n # else:\n # # Fuselage\n # load = [0*fx+mfx, 0*fy+mfy, 0*fz+mfz, 0*mx+mmx, 0*my+mmy, 0*mz+mmz]\n # self.beams[i].add('point_load', {'at': name, 'load': load})\n \n # load = [0*fx+mfx, 0*fy+mfy, 0*fz+mfz, mx+mmx, my+mmy, mz+mmz]\n # self.beams[i].add('point_load', {'at': name, 'load': load})\n \n logger.debug('iteration finised')\n # sys.exit()\n # sys.exit()\n # for removing them at the end of the simulation but keeping\n # the same mesh for computation time efficiency.\n # self.minusLoads = []\n # for t in load:\n # self.minusLoads.append(-2*t)\n # self.minusLoads = [-fx-mfx, -fy-mfy, -fz-mfz, -mx-mmx, -my-mmy, -mz-mmz]", "title": "" }, { "docid": "f303ad75169cde03d3d04886773b7b06", "score": "0.47492307", "text": "def main():\n # fatal sanity checks\n test_list_types()\n test_scalar_types()\n test_list_lengths()\n # add the input entry of effective collection time\n INPUT[\"Effective Collection Time\"] = INPUT[\"Collection Time\"] -\\\n INPUT[\"Voltage Rampdown Time\"]\n # non-fatal sanity checks:\n test_value_sanity()\n # now 
call the function that does stuff\n calculate_beam_rates()", "title": "" }, { "docid": "06a19061006609f33a8736653f9eec02", "score": "0.4749173", "text": "def ProcessSimulationaer(airmass_num,pwv_num,oz_num,wl0_num,tau0_num):\n \n print('--------------------------------------------')\n print(' 1) airmass = ', airmass_num)\n print(' 2) pwv = ', pwv_num)\n print(' 3) oz = ', oz_num)\n print(' 4) wl0 = ',wl0_num)\n print(' 5) tau0 = ',tau0_num)\n print('--------------------------------------------') \n \n \n ensure_dir(TOPDIR)\n\n \n # build the part 1 of filename\n BaseFilename_part1=Prog+'_'+Obs+'_'+Rte+'_'\n \n\n # Set up type of run\n runtype='aerosol_special' #'no_scattering' #aerosol_special #aerosol_default# #'clearsky'# \n if Proc == 'sc':\n runtype='no_absorption'\n outtext='no_absorption'\n elif Proc == 'ab':\n runtype='no_scattering'\n outtext='no_scattering'\n elif Proc == 'sa':\n runtype=='clearsky'\n outtext='clearsky'\n elif Proc == 'ae': \n runtype='aerosol_default'\n outtext='aerosol_default'\n elif Proc == 'as': \n runtype='aerosol_special'\n outtext='aerosol_special'\n else:\n runtype=='clearsky'\n outtext='clearsky'\n\n# Selection of RTE equation solver \n if Rte == 'pp': # parallel plan\n rte_eq='disort'\n elif Rte=='ps': # pseudo spherical\n rte_eq='sdisort'\n \n \n# Selection of absorption model \n molmodel='reptran'\n if Mod == 'rt':\n molmodel='reptran'\n if Mod == 'lt':\n molmodel='lowtran'\n if Mod == 'kt':\n molmodel='kato'\n if Mod == 'k2':\n molmodel='kato2'\n if Mod == 'fu':\n molmodel='fu' \n if Mod == 'cr':\n molmodel='crs' \n \n\n\n \t \n # for simulation select only two atmosphere \n #theatmospheres = np.array(['afglus','afglms','afglmw','afglt','afglss','afglsw'])\n atmosphere_map=dict() # map atmospheric names to short names \n atmosphere_map['afglus']='us'\n atmosphere_map['afglms']='ms'\n atmosphere_map['afglmw']='mw' \n atmosphere_map['afglt']='tp' \n atmosphere_map['afglss']='ss' \n atmosphere_map['afglsw']='sw' \n \n theatmospheres= []\n for skyindex in Atm:\n if re.search('us',skyindex):\n theatmospheres.append('afglus')\n if re.search('sw',skyindex):\n theatmospheres.append('afglsw')\n \n \n \n\n # 1) LOOP ON ATMOSPHERE\n for atmosphere in theatmospheres:\n #if atmosphere != 'afglus': # just take us standard sky\n # break\n atmkey=atmosphere_map[atmosphere]\n \n # manage input and output directories and vary the ozone\n TOPDIR2=TOPDIR+'/'+Rte+'/'+atmkey+'/'+Proc+'/'+Mod\n ensure_dir(TOPDIR2)\n INPUTDIR=TOPDIR2+'/'+'in'\n ensure_dir(INPUTDIR)\n OUTPUTDIR=TOPDIR2+'/'+'out'\n ensure_dir(OUTPUTDIR)\n \n \n # loop on molecular model resolution\n #molecularresolution = np.array(['COARSE','MEDIUM','FINE']) \n # select only COARSE Model\n molecularresolution = np.array(['COARSE']) \n for molres in molecularresolution:\n if molres=='COARSE':\n molresol ='coarse'\n elif molres=='MEDIUM':\n molresol ='medium'\n else:\n molresol ='fine'\n \n \n #water vapor \n pwv_val=pwv_num\n pwv_str='H2O '+str(pwv_val)+ ' MM'\n wvfileindex=int(10*pwv_val)\n \n #aerosols\n aerosol_str=str(wl0_num)+ ' '+str(tau0_num)\n aer_index=int(tau0_num*100.)\n \n # airmass\n airmass=airmass_num\n amfileindex=int(airmass_num*10)\n \n # Ozone \n oz_str='O3 '+str(oz_num)+ ' DU'\n ozfileindex=int(oz_num/10.)\n \n \n BaseFilename=BaseFilename_part1+atmkey+'_'+Proc+'_'+Mod+'_z'+str(amfileindex)+'_'+WVXX+str(wvfileindex) +'_'+OZXX+str(ozfileindex)+'_'+AEXX+str(aer_index) \n \n verbose=True\n uvspec = UVspec3.UVspec()\n uvspec.inp[\"data_files_path\"] = libradtranpath+'data'\n \n 
uvspec.inp[\"atmosphere_file\"] = libradtranpath+'data/atmmod/'+atmosphere+'.dat'\n uvspec.inp[\"albedo\"] = '0.2'\n \n uvspec.inp[\"rte_solver\"] = rte_eq\n \n \n \n if Mod == 'rt':\n uvspec.inp[\"mol_abs_param\"] = molmodel + ' ' + molresol\n else:\n uvspec.inp[\"mol_abs_param\"] = molmodel\n\n # Convert airmass into zenith angle \n am=airmass\n sza=math.acos(1./am)*180./math.pi\n\n # Should be no_absorption\n if runtype=='aerosol_default':\n uvspec.inp[\"aerosol_default\"] = ''\n elif runtype=='aerosol_special':\n uvspec.inp[\"aerosol_default\"] = ''\n uvspec.inp[\"aerosol_set_tau_at_wvl\"] = aerosol_str\n \n if runtype=='no_scattering':\n uvspec.inp[\"no_scattering\"] = ''\n if runtype=='no_absorption':\n uvspec.inp[\"no_absorption\"] = ''\n \n # set up the ozone value \n uvspec.inp[\"mol_modify\"] = pwv_str\n uvspec.inp[\"mol_modify2\"] = oz_str\n \n \n uvspec.inp[\"output_user\"] = 'lambda edir'\n uvspec.inp[\"altitude\"] = OBS_Altitude # Altitude LSST observatory\n uvspec.inp[\"source\"] = 'solar '+libradtranpath+'data/solar_flux/kurudz_1.0nm.dat'\n #uvspec.inp[\"source\"] = 'solar '+libradtranpath+'data/solar_flux/kurudz_0.1nm.dat'\n uvspec.inp[\"sza\"] = str(sza)\n uvspec.inp[\"phi0\"] = '0'\n uvspec.inp[\"wavelength\"] = '250.0 1200.0'\n uvspec.inp[\"output_quantity\"] = 'reflectivity' #'transmittance' #\n# uvspec.inp[\"verbose\"] = ''\n uvspec.inp[\"quiet\"] = ''\n\n \n\n if \"output_quantity\" in uvspec.inp.keys():\n outtextfinal=outtext+'_'+uvspec.inp[\"output_quantity\"]\n\n \n \n inputFilename=BaseFilename+'.INP'\n outputFilename=BaseFilename+'.OUT'\n inp=os.path.join(INPUTDIR,inputFilename)\n out=os.path.join(OUTPUTDIR,outputFilename)\n \n \n uvspec.write_input(inp)\n uvspec.run(inp,out,verbose,path=libradtranpath)\n \n \n return OUTPUTDIR,outputFilename", "title": "" }, { "docid": "7f5391d813006b5437a7d42485b91129", "score": "0.47438195", "text": "def OptimizeMVA():\n\n democtratic_xsect=GetXSection(tanb,mass_for_mix)\n\n # signal processing\n signal_files=[]\n# files=[ GetListFiles(trees_path+\"/*\"+m+\"*/*/*/TripleBtagAnalysis.root\",trees_path) for m in mass ]\n files=[ GetListFiles(trees_path+\"/*\"+m+scenario_template,trees_path) for m in mass ]\n\n for item in files:\n# print item\n if (len(item)==0): continue\n (item,masses)=GetMassFromName(item)\n (item,numevts)=GetNumEvt(item)\n (item,lumiwgts)=GetLumiWeight(item)\n if (democtratic_mix):\n xsects=map(lambda x: democtratic_xsect,masses)\n else:\n xsects=map(lambda x: GetXSection(tanb,float(x)),masses)\n newwgts=GetNewWeight(1e3,lumiwgts,numevts,xsects)\n filesnew=map(lambda x: item[x].replace(\".root\",\"_update.root\") ,range(len(item)))\n if (not os.path.exists(filesnew[0])): ReWeight(tree_name='KinVarsBDT', list_of_files=item, weight=newwgts)\n signal_files+=filesnew\n\n # background processing\n bkg_files=[]\n# files=[ GetListFiles(trees_path+\"/*\"+ptbin+\"*/*/*/TripleBtagAnalysis.root\",trees_path) for ptbin in ptbins ]\n files=[ GetListFiles(trees_path+\"/*\"+ptbin+scenario_template,trees_path) for ptbin in ptbins ]\n\n\n\n for item in files:\n# print item\n if (len(item)==0): continue\n (item,numevts)=GetNumEvt(item)\n (item,lumiwgts)=GetLumiWeight(item)\n newwgts=GetNewWeight(1e3,lumiwgts,numevts)\n filesnew=map(lambda x: item[x].replace(\".root\",\"_update.root\") ,range(len(item)))\n if (not os.path.exists(filesnew[0])): ReWeight(tree_name='KinVarsBDT', list_of_files=item, weight=newwgts)\n bkg_files+=filesnew\n\n #create config\n\n (config,_vars)=CreateConfigForOptimization(mva_method,\"\",0.2)\n\n# 
print \"CONFIG\"\n# print config\n\n #run mva\n RunMVAFromConfig(signal_files,bkg_files,mva_method,config)\n\n #save config to db\n UpLoadConfig(mva_method,config) \n print \"CONFIGSSSSS 22222!!!\"\n# print ReadConfigs(mva_method)\n\n\n# get results\n _id=config[0]\n _separation=GetSeparationMVA(mva_method,config)\n (_sgnKS,_bkgKS)=GetKSMVA(mva_method,config)\n result=(_id,_vars,_separation,_sgnKS,_bkgKS)\n# save results\n\n print \"result=\",result\n UploadResult(mva_method,result)\n\n# check results\n print \"CONFIGSSSSS 22222!!!\"\n# print ReadResults(mva_method)\n\n# ROOT.gROOT.ProcessLine('.L TMVAGui.C')\n# ROOT.TMVAGu()\n return", "title": "" }, { "docid": "76d272cb7b80ce2253e8252ee4a61aac", "score": "0.47434103", "text": "def ess_analysis(language):\n k = 3\n\n training_path = '../Datasets/' + language + '/train'\n test_path = '../Datasets/' + language + '/dev.in'\n output_path = '../EvalScript/' + language\n\n optimal_y_dict = {}\n\n train_data = read_in_file(training_path)\n print('done reading training file')\n s_emission_count, s_transition_count, s_y_count, s_x_count = count_sentiment_only(train_data, k)\n print('done counting x, y, emissions for sentiment only')\n\n s_b, s_a = get_parameters(s_emission_count, s_transition_count, s_y_count)\n print('done getting all transition and emission parameters for sentiment only')\n\n e_emission_count, e_transition_count, e_y_count, e_x_count = count_entity_only(train_data, k)\n print('done counting x, y, emissions for entity only')\n\n e_b, e_a = get_parameters(e_emission_count, e_transition_count, e_y_count)\n print('done getting all transition and emission parameters for entity only')\n\n\n test_data = read_in_file(test_path)\n print('done reading test file')\n #\n main_path = os.path.dirname(__file__)\n save_path = os.path.join(main_path, output_path)\n with codecs.open(os.path.join(save_path,'dev.p5.out'), 'w', 'utf-8') as file:\n for sentence in test_data:\n mod_sentence = []\n for word in sentence:\n # To check if word in test data appears in training data\n if word not in s_x_count or s_x_count[word] < k:\n mod_word = '#UNK#'\n else:\n mod_word = word\n mod_sentence.append(mod_word)\n\n # Run viterbi but only to get the sentiments\n sentiment_pi = viterbi_sentiment_only(mod_sentence, s_a, s_b)\n output_states_sentiment = back_propagation_sentiment_only(sentiment_pi)\n\n #Run viterbi but only to get the entities\n entity_pi = viterbi_entity_only(mod_sentence, e_a, e_b)\n output_states_entity = back_propagation_entity_only(entity_pi)\n\n # print('sentiment: ', output_states_sentiment)\n # print('entity: ', output_states_entity)\n\n fixed_output_states = output_states_sentiment\n\n # Compare output states from the viterbi_entity_only and viterbi_sentiment_only\n for i in range(len(sentence)):\n entity_label = output_states_entity[i+1]\n sentiment_label = output_states_sentiment[i+1]\n\n if(entity_label != 'O'):\n if(sentiment_label[0] != 'O'):\n fixed_output_states[i+1][0] = entity_label + sentiment_label[0]\n\n elif(sentiment_label[1] != 'O'):\n fixed_output_states[i+1][0] = entity_label + sentiment_label[1]\n\n else:\n fixed_output_states[i+1][0] = entity_label + 'neutral'\n\n #Check if the beginning of the entity is a B-, not I-\n for j in range(len(sentence)):\n curr_state = fixed_output_states[j+1][0]\n prev_state = fixed_output_states[j][0]\n #Check if all previous entries in the state sequence are Os\n if(curr_state != 'O'):\n if(prev_state == 'O'):\n if('I-' in curr_state):\n curr_state.replace('I-','B-')\n 
fixed_output_states[j+1][0] = curr_state\n if('I-' in prev_state or 'B-' in prev_state):\n if('B-' in curr_state):\n curr_state.replace('B-','I-')\n fixed_output_states[j+1][0] = curr_state\n\n for i in range(len(sentence)):\n output = sentence[i] + ' ' + fixed_output_states[i+1][0] + '\\n'\n # output = word + ' ' + optimum_y + '\\n'\n file.write(output)\n file.write('\\n')\n\n print('Done!')\n file.close()", "title": "" }, { "docid": "2ad8cc7fa9a6159ed0029c516e3a5fb2", "score": "0.47332442", "text": "def eval_sol(input_file, model, universal):\n biomass = get_biomass_equation(model)\n model.objective = biomass\n logging.debug(model.objective.expression)\n logging.debug(biomass.bounds)\n #right_model = deepcopy(model)\n iterations = iter_number(input_file) \n logging.debug('passed here')\n print('Starting reaction search with GapFilling . . .')\n solutions = iter_gf(input_file, model, universal)\n metabs = get_metabolites(input_file)\n reacts = get_reactions(input_file)\n #substitute the following two lines with consumption_metabs and get_production_objectives\n #ex_c_source = get_ex_c_source_metab(input_file, model)\n #ex_non_c_metabs = get_ex_non_c_source_metabs(input_file, model)\n to_consume = consumption_metabs(input_file)\n to_produce = get_production_objectives(input_file)\n output = {}\n models={}\n for n in range(1,iterations+1): #number of iterations\n consume ={}\n produce ={}\n models={}\n added_reacts={}\n print('\\n---Model {}---'.format(n))\n logging.debug(type(model.solver))\n models['model']='Model{}'.format(n)\n if type(solutions)==dict and len(solutions.keys())!=0: #runs the analysis of the results only if reaction finding was needed.\n sol = solutions['Run {}'.format(n)]\n m = len(sol) #number of reactions as solution fo each iteration\n for i in range(m):\n identifier = sol[i]\n react = universal.reactions.get_by_id(identifier)\n model.add_reaction(react)\n print('\\nReaction {}, solution of round {} has been added to the model'.format(react.id, n))\n model.solver = 'glpk'\n logging.debug(type(model.solver))\n fba_model_x = model.optimize()\n print('\\nGrowth rate: ', biomass.flux)\n for x in metabs:\n r=model.reactions.get_by_id('EX_'+x+'_e')\n print('\\nThe flux throughr {} is: '.format(r.id), r.flux)\n for i in range(m):\n identifier = sol[i]\n r = model.reactions.get_by_id(identifier)\n print('\\nThe flux throughr {} is: '.format(identifier), r.flux)\n added_reacts[sol[i]]=r.flux\n for compound in to_consume:\n exchange = model.reactions.get_by_id('EX_'+compound+'_e')\n consume[exchange.id] = exchange.flux\n for target in to_produce:\n exch = model.reactions.get_by_id('EX_'+target+'_e')\n produce[exch.id] = exch.flux\n\n #carbon_flux[ex_c_source.id] = ex_c_source.flux\n #for x in ex_non_c_metabs:\n # flux_non_c_metabs[x.id] = x.flux\n info_models = (sol, biomass.flux, added_reacts, consume, produce)\n #(reactions, growth_rate, added_reactions, carbon, non_carbon)=info_models\n output[n] = (models, info_models)\n remove_rlist(sol, model)\n else: #if the solution of iter_gf was the flux value though biomass \n exch_c_source = get_ex_c_source_metab(input_file, model)\n fba_model = model.optimize()\n for target in to_produce:\n exch = model.reactions.get_by_id('EX_'+target+'_e')\n output[1] = ({'model':'Model1'}, (None, solutions, None, {exch_c_source.id: exch_c_source.flux}, {exch.id: exch.flux}))\n return output", "title": "" }, { "docid": "5a0e2bc680c481933674c307ce4de564", "score": "0.47317028", "text": "def marketSimulation(numTourists, numAgents, maxPop, 
maxBirthProb, clearProb,\n numTrials, timesteps=60):\n\n populations = []\n for i in range(timesteps):\n populations.append(0)\n\n tireds = np.zeros(timesteps)\n agent_money = np.zeros(timesteps)\n\n temp_tourists = []\n temp_agents = []\n\n for trial in range(numTrials):\n calendar = Calendar()\n\n for num in range(numTourists):\n temp_tourists.append(SimpleTourist(maxBirthProb, clearProb))\n for num in range(numAgents):\n temp_agents.append(SimpleAgent())\n\n bazar = Market(calendar, temp_tourists, temp_agents, maxPop)\n\n for i in range(timesteps):\n p, t, m = bazar.update()\n populations[i] += p\n tireds[i] += t\n agent_money[i] += m\n print(trial)\n\n for i in range(timesteps):\n populations[i] /= numTrials\n\n tireds = tireds / numTrials\n agent_money = agent_money / numTrials\n\n plotProgress(timesteps, populations, \"SimpleTourist\",\n \"Average Tourist Population\", \"Travel Market simulation\")\n\n plotProgress(timesteps, tireds, \"Tiredness Percent\",\n \"Average Tourist Tiredness\", \"Tourist Tiredness\")\n\n plotProgress(timesteps, agent_money, \"Money amount\",\n \"Average Agency's money\", \"Agency's money\")\n\n pylab.show()", "title": "" }, { "docid": "0a31095b6d1708c872ab26f55f4dbf24", "score": "0.4730213", "text": "def __init__(self, cat, installed_fmris, pub_ranks, variants, progtrack):\n self.__catalog = cat\n self.__installed_fmris = {}\t# indexed by stem\n self.__publisher = {}\t\t# indexed by stem\n self.__possible_dict = {}\t# indexed by stem\n self.__pub_ranks = pub_ranks # rank indexed by pub\n self.__trim_dict = {} # fmris trimmed from\n \t\t\t\t# consideration\n\n self.__pub_trim = {}\t\t# pkg names already\n # trimmed by pub.\n self.__installed_fmris = installed_fmris.copy()\n\n for f in installed_fmris.values(): # record only sticky pubs\n pub = f.get_publisher()\n if self.__pub_ranks[pub][1]:\n self.__publisher[f.pkg_name] = f.get_publisher()\n\n self.__id2fmri = {} \t\t# map ids -> fmris\n self.__fmri2id = {} \t\t# and reverse\n\n self.__solver = pkg.solver.msat_solver()\n\n self.__poss_set = set() # possible fmris after assign\n self.__progtrack = progtrack # progress tracker\n\n self.__addclause_failure = False\n\n self.__variant_dict = {} # fmris -> variant cache\n self.__variants = variants # variants supported by image\n\n self.__cache = {}\n self.__trimdone = False # indicate we're finished\n # trimming\n self.__fmri_state = {} # cache of obsolete, renamed\n # bits so we can print something\n # reasonable\n self.__state = SOLVER_INIT\n self.__iterations = 0\n self.__clauses = 0\n self.__variables = 0\n self.__timings = []\n self.__start_time = 0\n self.__failure_info = \"\"\n self.__dep_dict = {}\n self.__inc_list = []\n self.__dependents = None", "title": "" }, { "docid": "394d7129692233a15c246901f69603d6", "score": "0.4724813", "text": "def get_emissions_testing_runs():\n# folder = get_file_locations('earth0_home_dir')\n folder = ''\n folder += '/data/all_model_simulations/iodine_runs/iGEOSChem_4.0_v10/'\n # Locations of model runs with different iodide fields\n RFR_dir = 'run.XS.UPa.FP.EU.BC.II.FP.2014.NEW_OFFLINE_IODIDE.several_months/'\n Chance_dir = '/run.XS.UPa.FP.EU.BC.II.FP.2014.re_run4HEMCO_diag/'\n MacDonald_dir = 'run.XS.UPa.FP.EU.BC.II.FP.2014.Chance_iodide/'\n extr_dir = '/'\n# extr_dir = '/spin_up/'\n# extr_dir = '/test_dates/'\n wd_dict = {\n 'Chance2014': folder + MacDonald_dir + extr_dir,\n 'MacDonald2014': folder + Chance_dir,\n 'RFR(offline)': folder + RFR_dir + extr_dir,\n }\n return wd_dict", "title": "" }, { "docid": 
"d97e4abd57aa6cb88d00c86072d65f74", "score": "0.4709739", "text": "def generate(self):\n manager = multiprocessing.Manager()\n return_dict = manager.dict()\n\n jobs = []\n for func in self._generate_hue_band, self._generate_sat_band, self._generate_val_band:\n p = multiprocessing.Process(target=func, args=(return_dict,))\n jobs.append(p)\n p.start()\n for proc in jobs:\n proc.join()\n\n self.bands = return_dict[\"hue_band\"], return_dict[\"sat_band\"], return_dict[\"val_band\"]", "title": "" }, { "docid": "95597c4670d40bbbd42b0230bf9a28f1", "score": "0.4707817", "text": "def qmca_energies(dir_match, fu=1,root='.'):\n \"\"\"Equivalent to doing qmca -q ev, ee, mpc on first sub of current directory\"\"\"\n #TODO: tidy up\n data = Averaged(fu)\n #The dir format: vmc-supertwist[TWIST_SIZE]-supershift[TWIST_GRID_SHIFT]-S[SUPERCELL_SIZE]\n paths = sorted(sorted([ os.path.join(root,path)\n for path in os.listdir(root) if dir_match in path ],\\\n key=lambda path: int(path.split('-')[1].replace('supertwist',''))),\\\n key=lambda path: int(path.split('-')[3].replace('S','')))\n #print('#Path LocalEnergy +/- Variance +/- Electron-Electron +/- MPC +/-')\n for path in paths:\n def rel(x): return os.path.join(path,x)\n scalar_files = [ rel(filename) for filename in os.listdir(path) if filename.endswith('scalar.dat') ]\n qmca_ev = shlex.split(qmca+' -e 2 -q ev --sac -a '+' '.join(scalar_files))\n qmca_ee = shlex.split(qmca+' -e 2 -q ee --sac -a '+' '.join(scalar_files))\n qmca_mpc = shlex.split(qmca+' -e 2 -q mpc --sac -a '+' '.join(scalar_files))\n ev,ee,mpc = ( subprocess.Popen(x,stdout=PIPE) for x in (qmca_ev,qmca_ee,qmca_mpc) )\n #Skip header of qmca output\n ev_out,ee_out,mpc_out = ( x.communicate()[0].decode('utf-8').split('\\n')[y].split()\\\n for x,y in zip([ev,ee,mpc],[1,0,0]) )\n #Make it in the format of '[TWIST_SIZE]-S[SUPERCELL_SIZE]\n def trim(x): return x[-3:] if x.startswith('supertwist') else x\n try:\n ev_out[0] = '-'.join([ trim(path.split('-')[i]) for i in (1,3) ])\n except IndexError:\n print(os.path.basename(__file__)+': No scalar.dat in '+path)\n continue\n #print(' '.join(ev_out))\n thing = [ ev_out[i] for i in (0,3,5,7,9) ]+[ ee_out[i] for i in (5,7) ]+[ mpc_out[i] for i in (5,7) ]\n data.append(thing)\n data.append_line(' '.join(ev_out))\n #TODO: do not overwrite existing file\n\n dat_all = open('dat-all.dat', 'w')\n dat_all.write('#Path LocalEnergy +/- Variance +/- Electron-Electron +/- MPC +/-'+\"\\n\")\n for datapoint in data.get_line():\n dat_all.write(datapoint+\"\\n\")\n dat_all.close()\n\n dat_mpc = open('dat-mpc.dat', 'w')\n dat_mpc.write('#MPC corrected energies\\n#S T MPCCorrectedEnergy +/-'+\"\\n\")\n for datapoint in data.get_corrected_energy():\n dat_mpc.write(' '.join(map(str,datapoint))+\"\\n\")\n dat_mpc.close()\n\n dat_norm = open('dat-norm.dat', 'w')\n dat_norm.write('#Norm energies\\n#S T LocalEnergy(f.u.) +/-'+\"\\n\")\n for datapoint in data.get_norm_energy():\n dat_norm.write(' '.join(map(str,datapoint))+\"\\n\")\n dat_norm.close()\n\n dat_mpc_dake = open('dat-mpc-dake.dat', 'w')\n dat_mpc_dake.write('#MPC correction\\n#S T LocalEnergy(f.u.) 
+/-'+\"\\n\")\n for datapoint in data.get_mpc_corr():\n dat_mpc_dake.write(' '.join(map(str,datapoint))+\"\\n\")\n dat_mpc_dake.close()", "title": "" }, { "docid": "2d96d6eea2fab41a966a30740dde5081", "score": "0.47038466", "text": "def solve_part1(self, args):\n input_file = self.open_input_file(args)\n required_total_fuel = 0\n for line in input_file:\n mass_of_module = int(line.rstrip())\n required_total_fuel += _calculate_fuel(mass_of_module)\n print(\"What is the sum of the fuel requirements for all \"\n \"of the modules on your spacecraft?\\n{}\".format(required_total_fuel))", "title": "" }, { "docid": "f122c369ed0f1cea454059b0f3eaacfb", "score": "0.47012764", "text": "def Neuman_problem_7(self):\n # self.set_of_collocation_points_elems = set()\n #0\n\n volumes_in_primal_set = self.mb.tag_get_data(self.volumes_in_primal_tag, 0, flat=True)[0]\n volumes_in_primal_set = self.mb.get_entities_by_handle(volumes_in_primal_set)\n dict_wells_n = dict(zip(self.wells_n, self.set_q))\n\n for primal in self.primals:\n #1\n primal_id = self.mb.tag_get_data(self.primal_id_tag, primal, flat=True)[0]\n fine_elems_in_primal = self.mb.get_entities_by_handle(primal)\n volumes_in_primal = set(fine_elems_in_primal) & set(volumes_in_primal_set)\n dim = len(fine_elems_in_primal)\n map_volumes = dict(zip(fine_elems_in_primal, range(dim)))\n std_map = Epetra.Map(dim, 0, self.comm)\n b = Epetra.Vector(std_map)\n A = Epetra.CrsMatrix(Epetra.Copy, std_map, 3)\n for elem in fine_elems_in_primal:\n #2\n if elem in self.wells_d or elem in self.set_of_collocation_points_elems:\n #3\n pvol = self.mb.tag_get_data(self.pms_tag, elem, flat=True)[0]\n temp_k = [1.0]\n temp_id = [map_volumes[elem]]\n b[map_volumes[elem]] = pvol\n # b_np[map_volumes[volume]] = value\n #2\n elif elem in volumes_in_primal:\n #3\n temp_k, temp_id, local_elems = self.mount_lines_3(elem, map_volumes)\n q_in = self.mb.tag_get_data(self.qpms_coarse_tag, elem, flat=True)\n b[map_volumes[elem]] += q_in\n\n if elem in self.wells_n:\n #4\n if elem in self.wells_inj:\n #5\n b[map_volumes[elem]] += dict_wells_n[elem]\n #4\n else:\n #5\n b[map_volumes[elem]] -= dict_wells_n[elem]\n\n #2\n else:\n #3\n temp_k, temp_id, local_elems = self.mount_lines_3(elem, map_volumes)\n if elem in self.wells_n:\n #4\n if elem in self.wells_inj:\n #5\n b[map_volumes[elem]] += dict_wells_n[elem]\n #4\n else:\n #5\n b[map_volumes[elem]] -= dict_wells_n[elem]\n\n #2\n A.InsertGlobalValues(map_volumes[elem], temp_k, temp_id)\n # A_np[map_volumes[volume], temp_id] = temp_k\n # print('primal_id')\n # print(self.ident_primal[primal_id])\n # print('gid: {0}'.format(gid1))\n # print('temp_id:{0}'.format(temp_id))\n # print('temp_k:{0}'.format(temp_k))\n # print(A_np[map_volumes[volume]])\n # print('b_np:{0}'.format(b_np[map_volumes[volume]]))\n #1\n A.FillComplete()\n x = self.solve_linear_problem(A, b, dim)\n # x_np = np.linalg.solve(A_np, b_np)\n # print(x_np)\n self.mb.tag_set_data(self.pcorr_tag, fine_elems_in_primal, np.asarray(x))", "title": "" }, { "docid": "e23d567f79a2ad254d1f50ec50062d50", "score": "0.4695813", "text": "def find_metx(dd, min_metformin=1):\n # Load the metformin items\n metonly = set(pd.read_csv(os.path.join(home[0], 'data', 'metformin_items.csv'),\n header=0).values.ravel())\n metx = set(pd.read_csv(os.path.join(home[0], 'data', 'metformin+x_items.csv'),\n header=0).values.ravel())\n\n def condition(group):\n \"\"\"Filtering condition.\"\"\"\n if len(group) <= min_metformin:\n return False\n else:\n sorted_group = group.sort_values(by='SPPLY_DT')\n # 
these must all be metformin for cond1\n head = set(sorted_group.head(min_metformin)['ITM_CD'].values)\n # there should be both in here for cond2\n tail = set(sorted_group.tail(-min_metformin)['ITM_CD'].values)\n # implementing conditions\n cond1 = head.issubset(metonly)\n cond2 = len(tail.intersection(metonly)) > 0 and len(tail.intersection(metonly)) < len(tail)\n # handling special case of met+x items introduced after 2014\n cond3 = len(tail.intersection(metx)) > 0\n return cond1 and (cond2 or cond3)\n\n grouped = dd.groupby(by='PTNT_ID')\n filtered = grouped.filter(condition).groupby(by='PTNT_ID')\n\n # Init return items\n idx, start_date, end_date = list(), list(), list()\n\n # Build output variables\n for name, group in tqdm(filtered, desc='Finalizing', leave=False):\n idx.append(name)\n sorted_group = group.sort_values(by='SPPLY_DT')\n # first date FIXME: max() added for consistency\n start_date.append(sorted_group.head(1)['SPPLY_DT'].max().strftime('%Y-%m-%d'))\n # get the non metformins\n filtered_group = sorted_group[~sorted_group['ITM_CD'].isin(metonly)]\n end_date.append(filtered_group['SPPLY_DT'].min().strftime('%Y-%m-%d'))\n\n return idx, start_date, end_date", "title": "" }, { "docid": "94ce168344e5e4a92222c427d18a467a", "score": "0.4690701", "text": "def data_model(input_dict, results_dict=None): \n \n #Create the root of the DataModelDict\n output = DM()\n output['calculation-dislocation-monopole'] = calc = DM()\n \n #Assign uuid\n calc['calculation'] = DM()\n calc['calculation']['id'] = input_dict['uuid']\n calc['calculation']['script'] = __calc_name__\n \n calc['calculation']['run-parameter'] = run_params = DM()\n run_params['a-multiplyer'] = input_dict['a_mult']\n run_params['b-multiplyer'] = input_dict['b_mult']\n run_params['c-multiplyer'] = input_dict['c_mult']\n run_params['anneal_temperature'] = input_dict['anneal_temperature']\n run_params['boundary_width'] = input_dict['boundary_width']\n run_params['boundary_shape'] = input_dict['boundary_shape']\n run_params['energy_tolerance'] = input_dict['energy_tolerance']\n run_params['force_tolerance'] = input_dict['force_tolerance']\n run_params['maximum_iterations'] = input_dict['maximum_iterations']\n run_params['maximum_evaluations'] = input_dict['maximum_evaluations']\n \n #Copy over potential data model info\n calc['potential'] = input_dict['potential']['LAMMPS-potential']['potential']\n \n #Save info on system file loaded\n system_load = input_dict['load'].split(' ') \n calc['system-info'] = DM()\n calc['system-info']['artifact'] = DM()\n calc['system-info']['artifact']['file'] = os.path.basename(' '.join(system_load[1:]))\n calc['system-info']['artifact']['format'] = system_load[0]\n calc['system-info']['artifact']['family'] = input_dict['system_family']\n calc['system-info']['symbols'] = input_dict['symbols']\n \n calc['elastic-constants'] = input_dict['elastic_constants_model'].find('elastic-constants')\n \n #Save data model of the initial ucell\n calc['dislocation-monopole-parameters'] = input_dict['dislocation_model']['dislocation-monopole-parameters']\n \n if results_dict is None:\n calc['status'] = 'not calculated'\n else:\n calc['defect-free-system'] = DM()\n calc['defect-free-system']['artifact'] = DM()\n calc['defect-free-system']['artifact']['file'] = 'base.dat'\n calc['defect-free-system']['artifact']['format'] = 'atom_data'\n old_symbols = input_dict['symbols'][:len(input_dict['symbols'])/2]\n if len(old_symbols) == 1: old_symbols = old_symbols[0]\n calc['defect-free-system']['symbols'] = old_symbols\n 
\n calc['defect-system'] = DM()\n calc['defect-system']['artifact'] = DM()\n calc['defect-system']['artifact']['file'] = 'disl.dump'\n calc['defect-system']['artifact']['format'] = 'atom_dump'\n calc['defect-system']['symbols'] = input_dict['symbols']\n\n \n #Save the final cohesive energy\n calc['potential-energy'] = DM([('value', uc.get_in_units(results_dict['potential_energy'], \n input_dict['energy_unit'])), \n ('unit', input_dict['energy_unit'])])\n calc['pre-ln-factor'] = DM([('value', uc.get_in_units(results_dict['pre-ln_factor'], \n input_dict['energy_unit']+'/'+input_dict['length_unit'])), \n ('unit', input_dict['energy_unit']+'/'+input_dict['length_unit'])])\n\n return output", "title": "" }, { "docid": "246183d2b18d173ab5b2c61af9182d23", "score": "0.4688903", "text": "def emission_prob():\n for i in range(vocab_count):\n for j in range(12):\n print j\n tag = tags[j]\n x = count_tag[j]\n y = count3(vocab[i],tag)\n emission_table[i].append(float(y)/float(x))\n writer1.writerow(emission_table[i])\n for i in range(12):\n for j in range(vocab_count):\n emission_table[j][i] = 0\n\n print \"Initialization complete\"\n\n for i in m:\n x = vocab.index(i[0])\n y = tags.index(i[1])\n emission_table[x][y] = emission_table[x][y] + 1\n\n print\"vocab table partial\"\n\n for i in range(12):\n for j in range(vocab_count):\n emission_table[j][i] = float(emission_table[j][i])/float(count_tag[i])\n\n return", "title": "" }, { "docid": "6f27f425f54c88b3221d9b44696bd7c3", "score": "0.46868345", "text": "def scenario_dict_maker(json1, json2, rows, columns, desired_pct, scenario_tag): #remember to define optimization\n df = pd.read_json(json1)\n future_procurement = pd.read_json(json2)\n\n starting_demand = int(list(df['demand'])[0])\n\n lcoe_df = pd.DataFrame(rows, columns=[c['name'] for c in columns])\n cols = lcoe_df.columns.drop(['Generation Source'])\n lcoe_df[cols] = lcoe_df[cols].apply(pd.to_numeric, errors='coerce') #convert all columns to numeric\n\n desired_pct = desired_pct/100\n\n start_year = list(df.index)[0]\n end_year = list(df.index)[-1]\n start_demand = list(df.demand)[0]\n end_demand = list(df.demand)[-1]\n start_recs = list(df.rec_change)[0] #RECs currently being created\n\n lcoe_df['current_MWh'] = (lcoe_df['Percent of Utility Energy Mix'] / 100) * starting_demand\n\n lcoe_df['start_price'] = lcoe_df['Levelized Cost of Energy (₱ / kWh)'] * lcoe_df['current_MWh'] * 1000\n \n lcoe_df['fuel_emissions'] = lcoe_df['Generation Source'].map(resources.emissions_dict)\n lcoe_df['emissions'] = lcoe_df['fuel_emissions'] * lcoe_df['current_MWh']\n\n # --- Merge on planned procurement ---\n future_procurement = future_procurement.groupby('Generation Source', as_index=False)['generation'].sum()\n lcoe_df = lcoe_df.merge(future_procurement, on=['Generation Source'], how = 'left')\n lcoe_df['generation'] = lcoe_df['generation'].fillna(0)\n lcoe_df = lcoe_df.rename({'generation':'planned_generation'}, axis='columns')\n\n start_re = lcoe_df.loc[lcoe_df['Generation Source'].isin(resources.re_tech)]['current_MWh'].sum()\n start_re_pct = round(start_re / start_demand,2)\n start_expense = round(lcoe_df['start_price'].sum(),0)\n start_fossil = lcoe_df.loc[lcoe_df['Generation Source'].isin(resources.fossil_tech)]['current_MWh'].sum()\n\n planned_re = lcoe_df['planned_generation'].sum()\n new_re_Need = (end_demand * desired_pct) - start_re - planned_re #include losses, because this is interms of generation pct, not RECS\n new_re_Need = max(new_re_Need, 0)\n fossil_Need = end_demand - new_re_Need - 
start_re - planned_re\n scenario = resources.scenario_pct_dict[scenario_tag]\n\n curr_total_fossil_gen = lcoe_df.loc[lcoe_df['Generation Source'].isin(resources.fossil_tech), 'current_MWh'].sum() #df of fossil fuels\n fossil_discrepancy = fossil_Need - curr_total_fossil_gen #if negative, indicates the amount that can be retired, if positive, the amount of new fossil gen needed\n\n lcoe_df['future_generation'] = 0\n for f in resources.re_tech:\n current_gen_f = lcoe_df.loc[lcoe_df['Generation Source'] == f, 'current_MWh'][0:1].item()\n if f in scenario.keys():\n lcoe_df.loc[lcoe_df['Generation Source'] == f, 'future_generation'] = (scenario[f] * new_re_Need) + current_gen_f\n else:\n lcoe_df.loc[lcoe_df['Generation Source'] == f, 'future_generation'] = current_gen_f\n\n\n for f in resources.fossil_tech:\n current_pct_f = lcoe_df.loc[lcoe_df['Generation Source'] == f, 'current_MWh'][0:1].item() / start_fossil\n lcoe_df.loc[lcoe_df['Generation Source'] == f, 'future_generation'] = fossil_Need * current_pct_f\n\n # --- Add Planned RE Generation from input ---\n lcoe_df['future_generation'] += lcoe_df['planned_generation']\n\n lcoe_df['future_price'] = lcoe_df['Levelized Cost of Energy (₱ / kWh)'] * lcoe_df['future_generation'] * 1000\n end_re = lcoe_df.loc[lcoe_df['Generation Source'].isin(resources.re_tech)]['future_generation'].sum()\n end_recs = end_re - (start_re - start_recs)\n end_re_pct = end_re / lcoe_df['future_generation'].sum()\n\n\n output_dict = dict()\n output_dict['start_year'] = int(start_year)\n output_dict['start_demand'] = int(start_demand)\n output_dict['start_re'] = int(start_re)\n output_dict['start_recs'] = int(start_recs)\n output_dict['start_re_pct'] = float(start_re_pct)\n output_dict['start_expense'] = int(start_expense)\n output_dict['start_generation_list'] = [int(i) for i in list(lcoe_df['current_MWh'])]\n output_dict['end_year'] = int(end_year)\n output_dict['end_demand'] = int(end_demand)\n output_dict['end_re'] = int(end_re)\n output_dict['end_recs'] = float(end_recs)\n output_dict['end_re_pct'] = float(end_re_pct)\n output_dict['end_expense'] = int(lcoe_df['future_price'].sum())\n output_dict['end_generation_list'] = [int(i) for i in list(lcoe_df['future_generation'])]\n output_dict['techs'] = list(lcoe_df['Generation Source'])\n output_dict['rps_min_increase'] = df['rps_marginal_req'].sum()\n output_dict['scenario_lcoe_df'] = lcoe_df.to_json()\n\n return json_func.dumps(output_dict)", "title": "" }, { "docid": "8ecd655094e752eb97f4bd21d57acf74", "score": "0.46852925", "text": "def map_summarize(self):\n if self.aligner == \"hisat2\":\n build([hisat2.SummarizeHisatMap(fastq_dic=self.fastq_dic,\n workdir=self.workdir,\n indexfile=self.hisat_index,\n num_cpus=self.num_cpus,\n kingdom=self.kingdom)],\n local_scheduler=self.local_scheduler, workers=1)\n elif self.aligner == \"STAR\":\n build([star.SummarizeStarMap(fastq_dic=self.fastq_dic,\n workdir=self.workdir,\n stardb_dir=self.stardb_dir,\n num_cpus=self.num_cpus)],\n local_scheduler=self.local_scheduler, workers=1)", "title": "" }, { "docid": "fad8773ab4545dcdfa2874bad9321ce1", "score": "0.468279", "text": "def set_up():\n nthr = int(os.getenv('OMP_NUM_THREADS','1'))\n z,D = 0.8,0.6819\n klin,plin = np.loadtxt(\"pk.dat\",unpack=True)\n plin *= D**2\n mome = MomentExpansion(klin,plin,threads=nthr,\\\n cutoff=10,extrap_min=-4,extrap_max=3,jn=10)\n return(mome)\n #", "title": "" }, { "docid": "4de1c25e2f03a6430cf7d5aa2b40d3a4", "score": "0.46821862", "text": "def __call__(\n\t\tself, 
\n\t\tweight,\n\t\tmin_protein, max_protein,\n\t\tmin_fat, max_fat,\n\t\tmin_carb, max_carb,\n\t\tdaily_nutrient_intake,\n\t\tage):\n\n\t\tresult = {}\n\t\t\t\t\n\t\t# User calculated\n\t\tresult[\"prot\"] = self.goal_protein_fn(\n\t\t\tmin_protein=min_protein,\n\t\t\tmax_protein=max_protein,\n\t\t\tweight=weight)\n\t\t\n\t\tresult[\"fat\"] = self.goal_fat_fn(\n\t\t\tmin_fat=min_fat,\n\t\t\tmax_fat=max_fat,\n\t\t\tweight=weight)\n\t\t\n\t\tresult[\"carb\"] = self.goal_carb_fn(\n\t\t\tmin_carb=min_carb,\n\t\t\tmax_carb=max_carb,\n\t\t\tweight=weight)\n\t\t\t\n\t\tresult[\"kcal\"] = self.goal_kcal_fn(\n\t\t\tmin_protein=min_protein, max_protein=max_protein,\n\t\t\tmin_fat=min_fat, max_fat=max_fat,\n\t\t\tmin_carb=min_carb, max_carb=max_carb,\n\t\t\tweight=weight)\n\n\n\t\t# Derived nutrient amounts\n\t\tresult[\"tsat\"] = result[\"fat\"] * 0.25\n\n\t\t# Static nutrient amounts\n\t\tresult[\"tdf\"] = 25.0\n\t\tresult[\"tsug\"] = 0.0\n\t\tresult[\"na\"] = 1700.0\n\t\tresult[\"trfa\"] = 1.0\n\n\t\t# print('Planner nutrients {}'.format(len(self.planner_nutrients)))\n\n\t\t# Fill in the nutrients amounts left unfilled from the daily intake chart\n\t\tfor nutrient_id, nutrient_amount in daily_nutrient_intake.items():\n\t\t\tif nutrient_id in result:\n\t\t\t\t# print('Nutrient {} already calculated'.format(nutrient_symbol))\n\t\t\t\tcontinue\n\t\t\tresult[nutrient_id] = nutrient_amount\n\n\t\treturn result", "title": "" }, { "docid": "40fc1b49e6b8d76981ffa58f4010a0e5", "score": "0.4678697", "text": "def run(self):\n inventory_levels = [self.current_inventory_and_orders.value[0]]\n order_ledger = []\n costs = []\n\n n = len(self.demand_ts)- self.max_lead_time\n l = len(self.demand_ts)\n\n # for j in range(n):\n for j in range(l):\n self.current_step = j\n self.receive_orders()\n\n demand_window = self.demand_ts[j:j+self.max_lead_time]\n lead_time_window = self.lead_times_ts[j:j+self.max_lead_time]\n if j>n:\n demand_window = np.concatenate((demand_window, np.zeros(j-n, dtype=np.int64)))\n lead_time_window = np.concatenate((lead_time_window, np.ones(j-n, dtype=np.int64)))\n orders, inventory_forecast = self.set_and_solve(demand_window, lead_time_window)\n\n self.place_order(orders[0], lead_time_window[0], order_ledger)\n current_inventory = self.use_inventory(demand_window[0])\n cost = self.compute_cost(current_inventory, orders[0])\n inventory_levels.append(current_inventory)\n costs.append(cost)\n\n return inventory_levels, order_ledger, costs", "title": "" }, { "docid": "8257e0d045ddf913624923b10e46277d", "score": "0.4677085", "text": "def preprocess(self):\n\n self.final_elec_energy_tol = self.workflow_params.get(\n \"final_elec_energy_tol\", 1e-11\n )\n # default todo\n todo = {\n \"relax\": True,\n \"scf\": True,\n \"dos\": True,\n \"pdos\": True,\n \"broadening\": True,\n \"magres\": True,\n }\n # definition of steps and names\n steps = {\n \"relax\": castep_prerelax,\n \"scf\": partial(\n castep_magres_scf, elec_energy_tol=self.final_elec_energy_tol\n ),\n \"dos\": castep_spectral_dos,\n \"pdos\": optados_pdos,\n \"broadening\": optados_dos_broadening,\n \"magres\": partial(\n castep_magres, elec_energy_tol=self.final_elec_energy_tol\n ),\n }\n\n exts = {\n \"relax\": {\n \"input\": [\".cell\", \".param\"],\n \"output\": [\".castep\", \"-out.cell\", \".*err\"],\n },\n \"scf\": {\"input\": [\".cell\", \".param\"], \"output\": [\".castep\", \".bands\"]},\n \"magres\": {\"input\": [\".cell\", \".param\"], \"output\": [\".castep\", \".magres\"]},\n \"dos\": {\n \"input\": [\".cell\", 
\".param\"],\n \"output\": [\n \".castep\",\n \".bands\",\n \".pdos_bin\",\n \".dome_bin\",\n \".*err\",\n \"-out.cell\",\n ],\n },\n \"pdos\": {\n \"input\": [\".odi\", \".pdos_bin\", \".dome_bin\"],\n \"output\": [\".odo\", \".*err\"],\n },\n \"broadening\": {\n \"input\": [\".odi\", \".pdos_bin\", \".dome_bin\"],\n \"output\": [\".odo\", \".*err\"],\n },\n }\n\n odi_fname = _get_optados_fname(self.seed)\n if odi_fname is not None:\n odi_dict, _ = arbitrary2dict(odi_fname)\n if todo[\"dos\"]:\n todo[\"broadening\"] = \"broadening\" in odi_dict\n todo[\"pdos\"] = \"pdos\" in odi_dict\n else:\n todo[\"dos\"] = False\n todo[\"pdos\"] = False\n todo[\"broadening\"] = False\n\n # prepare to do pre-relax if there's no check file\n if os.path.isfile(self.seed + \".check\"):\n todo[\"scf\"] = True\n todo[\"relax\"] = False\n LOG.info(\n \"Restarting from {}.check, so not performing re-relaxation\".format(\n self.seed\n )\n )\n\n # If geom force tol is not set, do not perform a relaxation\n if self.calc_doc.get(\"geom_force_tol\") is None:\n todo[\"relax\"] = False\n\n for key in todo:\n if todo[key]:\n self.add_step(\n steps[key],\n key,\n input_exts=exts[key].get(\"input\"),\n output_exts=exts[key].get(\"output\"),\n )", "title": "" }, { "docid": "71ca9b7fa65c019d5e5bb4a5bca8b1b8", "score": "0.4676832", "text": "def initialize(self, inDict):\n self.sourceData = []\n for agrosindex in range(self.numberAggregatedOS):\n foundData = False\n for output in inDict['Output']:\n if output.name.strip() == self.sourceName[agrosindex] and output.type in DataObjects.knownTypes():\n self.sourceData.append(output)\n foundData = True\n if not foundData:\n for inp in inDict['Input']:\n if not type(inp) == type(\"\"):\n if inp.name.strip() == self.sourceName[agrosindex] and inp.type in DataObjects.knownTypes():\n self.sourceData.append(inp)\n foundData = True\n elif type(inp) == Models.ROM:\n self.sourceData.append(inp)\n foundData = True # good enough\n if not foundData and 'TargetEvaluation' in inDict.keys():\n if inDict['TargetEvaluation'].name.strip() == self.sourceName[agrosindex] and inDict['TargetEvaluation'].type in DataObjects.knownTypes():\n self.sourceData.append(inDict['TargetEvaluation'])\n foundData = True\n if not foundData and 'SolutionExport' in inDict.keys():\n if inDict['SolutionExport'].name.strip() == self.sourceName[agrosindex] and inDict['SolutionExport'].type in DataObjects.knownTypes():\n self.sourceData.append(inDict['SolutionExport'])\n foundData = True\n if not foundData:\n self.raiseAnError(IOError, 'the Data named ' + self.sourceName[agrosindex] + ' has not been found!!!!')", "title": "" }, { "docid": "e874aa526e2fc06265416671a28c7e52", "score": "0.46726528", "text": "def main():\n hmm = HMM()\n #transition_matrix,emission_prob,initial_state = hmm.train_HMM(s,state_list)\n #hmm.save_params(\"HMM_params_21_pseudocount.txt\")\n hmm.load_params(\"HMM_params_21.txt\")\n print(hmm.transition_matrix)\n print(\"log trans\",np.log(hmm.transition_matrix))\n print(hmm.emission_prob)\n print(hmm.initial_state)\n s22 = read_sequence(\"chr22.fa\")\n print(len(s22))\n sry = s22[38000000:39000000]\n #sry = s22[38000000:38020000]\n f = open(\"sry_seq.txt\",\"w\")\n f.write(sry)\n f.close()\n state_list_22 = read_pos_regions(\"cpgIslandExt.txt\",len(s22),\"22\")\n print(len(state_list_22))\n state_list_sry = state_list_22[38000000:39000000]\n #state_list_sry = state_list_22[38000000:38020000]\n hmm2 = HMM()\n transition_matrix2,emission_prob2,initial_state2 = hmm2.train_HMM(sry,state_list_sry)\n 
print(hmm2.transition_matrix)\n print(\"log trans\",np.log(hmm2.transition_matrix))\n print(hmm2.emission_prob)\n print(hmm2.initial_state)\n \n \"\"\"predicted_cpg_raw, predicted_cpg= viterbi(sry,hmm)\n f = open(\"predicted_cpg_sry.txt\",\"w\")\n f.write(str(predicted_cpg))\n f.close()\n f2 = open(\"actual_cpg_sry.txt\",\"w\")\n f2.write(str(state_list_sry))\n f2.close()\n\n predicted_intervals,actual_intervals = intervals(predicted_cpg),intervals(state_list_sry)\n f = open(\"predicted_cpg_sry_intervals.txt\",\"w\")\n for i in range(len(predicted_intervals)):\n f.write(str(predicted_intervals[i]))\n f.write(\"\\n\")\n f.close()\n f2 = open(\"actual_cpg_sry_intervals.txt\",\"w\")\n for i in range(len(actual_intervals)):\n f2.write(str(actual_intervals[i]))\n f2.write(\"\\n\")\n f2.close()\n \n print(sklearn.metrics.confusion_matrix(state_list_sry,predicted_cpg))\n #plot_regions(predicted_cpg,state_list_sry)\n plot_intervals(predicted_intervals,actual_intervals)\n \"\"\"\n s20 = read_sequence(\"chr20.fa\")\n s20_subreg = s20[5000000:6000000]\n state_list_chrom20 = read_pos_regions(\"cpgIslandExt.txt\",len(s20),\"20\")\n state_list_sub20 = state_list_chrom20[5000000:6000000]\n print(\"params set\")\n predicted_cpg_raw, predicted_cpg = viterbi(s20_subreg,hmm)\n predicted_intervals,actual_intervals = intervals(predicted_cpg),intervals(state_list_sub20)\n corrected_intervals = filter(lambda i:i[1]-i[0]>600,predicted_intervals)\n corrected_binary = intervals_to_binary(corrected_intervals,len(s20_subreg))\n print(sklearn.metrics.confusion_matrix(state_list_sub20,predicted_cpg))\n print(sklearn.metrics.confusion_matrix(state_list_sub20,corrected_binary))\n plot_intervals(predicted_intervals,actual_intervals)\n plot_intervals(corrected_intervals,actual_intervals)", "title": "" }, { "docid": "d1a6bbcb7095979ecd18107264746b0f", "score": "0.4672431", "text": "def __init__(self):\r\n # ramp entry data will be used to determine income for a car \r\n self.on_ramps_south = {'I5 North': 'Everett', 'I5 South1': 'Lynnwood', \\\r\n 'I5 South2': 'Mountlake Terrace', 'Canyon Park':\\\r\n 'Bothell', 'WA_522': 'Bothell'} \r\n self.on_ramps_north = {'Bellevue 4th St': 'Bellevue', 'Redmond Way': \\\r\n 'Redmond', 'Central Way': 'Kirkland', 'WA 527': \\\r\n 'Bothell'}\r\n \r\n # income breakdown follows the wealth distribution for Washington State.\r\n # low-upper values will differ based on city data. 
\r\n # income medians for average household (Pew Research): \r\n # low: <$40000\r\n # mid low: $40k-$70k\r\n # mid: $70k-$100k\r\n # mid upper: $100k-$130k\r\n # upper: <$130k\r\n # income breakdowns given in percentages\r\n self.income_breakdown = {'low': 9.89+11.1+13+12.2, 'low mid': 10.2+8.52+ \\\r\n 7.34,'mid': 5.64+4.36+2.97, 'upper mid': 3.18+ \\\r\n 1.83 +1.99, 'upper': 1.1+.872+1.09+.628+.506+.469+\\\r\n .141+2.93}\r\n \r\n self.everett_income = {'low': [22800, 39300], 'low mid': [39301, 51000], \\\r\n 'mid': [51001, 68400], 'upper mid': [68401, 94500], \\\r\n 'upper': [94501, 124000]}\r\n \r\n self.lynnwood_income = {'low': [43400, 45700], 'low mid': [45701, 48700], \\\r\n 'mid': [48701, 61200], 'upper mid': [61201, 64400],\\\r\n 'upper': [64401, 67100]}\r\n \r\n self.mterrace_income = {'low': [32100, 40400], 'low mid': [40401, 56500], \\\r\n 'mid': [56501, 65700], 'upper mid': [65701, 71300],\\\r\n 'upper': [71301, 75700]}\r\n \r\n self.bothell_income = {'low': [59600, 70000], 'low mid': [70001, 80800], \\\r\n 'mid': [80801, 92200], 'upper mid': [92201, 110000],\\\r\n 'upper': [110001, 136000]}\r\n \r\n self.bellevue_income = {'low': [45800, 80800], 'low mid': [80801, 98400], \\\r\n 'mid': [98401, 117000], \\\r\n 'upper mid': [117001, 143000],\\\r\n 'upper': [143001, 160000]}\r\n \r\n self.redmond_income = {'low': [68600, 94800], 'low mid': [94801, 115000], \\\r\n 'mid': [115001, 147000], \\\r\n 'upper mid': [147001, 200000],\\\r\n 'upper': [200001, 209000]}\r\n \r\n self.kirkland_income = {'low': [58600, 78400], 'low mid': [78401, 90900], \\\r\n 'mid': [90901, 107000], \\\r\n 'upper mid': [107001, 120000], \\\r\n 'upper': [120001, 132000]}", "title": "" }, { "docid": "fb0252da2dec6831d49d3837219467b9", "score": "0.46696728", "text": "def get_weight_estimations(cpacs_path, cpacs_out_path):\n\n # Removing and recreating the ToolOutput folder.\n if os.path.exists('ToolOutput'):\n shutil.rmtree('ToolOutput')\n os.makedirs('ToolOutput')\n\n\n # Classes\n # TODO: Use only one class or subclasses\n ui = weightconvclass.UserInputs()\n mw = weightconvclass.MassesWeights()\n out = weightconvclass.WeightOutput()\n\n if not os.path.exists(cpacs_path):\n raise ValueError ('No \"ToolInput.xml\" file in the ToolInput folder.')\n\n name = aircraft_name(cpacs_path)\n\n shutil.copyfile(cpacs_path, cpacs_out_path) # TODO: shoud not be like that\n newpath = 'ToolOutput/' + name\n if not os.path.exists(newpath):\n os.makedirs(newpath)\n\n ag = geometry.geometry_eval(cpacs_path, name)\n fuse_length = round(ag.fuse_length[0],3)\n fuse_width = round(np.amax(ag.fuse_sec_width[:,0]),3)\n ind = weightconvclass.InsideDimensions(fuse_length, fuse_width)\n ind.nose_length = round(ag.fuse_nose_length[0],3)\n ind.tail_length = round(ag.fuse_tail_length[0],3)\n ind.cabin_length = round(ag.fuse_cabin_length[0],3)\n wing_area = round(ag.wing_plt_area_main,3)\n wing_span = round(ag.wing_span[ag.main_wing_index-1],3)\n wing_area_tot = np.sum(ag.wing_plt_area)\n\n #Has been replace by classes function\n # (ind, ui) = getinput.get_user_inputs(ind, ui, ag, cpacs_out_path)\n ui.get_user_inputs(cpacs_out_path)\n ind.get_inside_dim(cpacs_out_path)\n\n if ui.MAX_FUEL_VOL>0 and ui.MAX_FUEL_VOL<ag.wing_fuel_vol:\n max_fuel_vol = ui.MAX_FUEL_VOL\n else:\n max_fuel_vol = ag.wing_fuel_vol\n\n out.PILOT_NB = ui.PILOT_NB # Number of pilot [-].\n\n # Massimum payload allowed, set 0 if equal to max passenger mass.\n mw.MAX_PAYLOAD = ui.MAX_PAYLOAD\n\n # Adding extra length in case of aircraft with second floor [m].\n if 
ui.IS_DOUBLE_FLOOR == 1:\n cabin_length2 = ind.cabin_length*1.91\n elif ui.IS_DOUBLE_FLOOR == 2:\n cabin_length2 = ind.cabin_length*1.20\n elif ui.IS_DOUBLE_FLOOR == 0:\n cabin_length2 = ind.cabin_length\n else:\n log.warning('Warning, double floor index can be only 0 (1 floor),\\\n 2 (B747-2nd floor type) or 3 (A380-2nd floor type).\\\n Set Default value (0)')\n cabin_length2 = ind.cabin_length\n\n\n # Maximum Take Off Mass Evaluation\n mw.maximum_take_off_mass = estimate_mtom(fuse_length,fuse_width,wing_area,\n wing_span,name)\n\n # Wing loading\n out.wing_loading = mw.maximum_take_off_mass/wing_area_tot\n\n # Operating Empty Mass evaluation\n mw.operating_empty_mass = estimate_operating_empty_mass(\n mw.maximum_take_off_mass, fuse_length,\n fuse_width, wing_area, wing_span,\n ui.TURBOPROP)\n\n # Passengers and Crew mass evaluation\n if ((fuse_width / (1+(ind.fuse_thick/100))) > (ind.seat_width + ind.aisle_width)):\n (out.pass_nb, out.row_nb, out.abreast_nb, out.aisle_nb,\\\n out.toilet_nb, ind) = estimate_passengers(ui.PASS_PER_TOILET,\\\n cabin_length2, fuse_width, ind)\n\n get_seat_config(out.pass_nb, out.row_nb, out.abreast_nb,\n out.aisle_nb, ui.IS_DOUBLE_FLOOR, out.toilet_nb,\n ui.PASS_PER_TOILET, fuse_length, ind, name)\n else:\n out.pass_nb = 0\n raise Exception('The aircraft can not transport passengers, increase'\\\n + ' fuselage width.' + '\\nCabin Width [m] = '\\\n + str((fuse_width/(1 + ind.fuse_thick)))\\\n + ' is less than seat width [m]'\\\n + ' + aisle width [m] = '\\\n + str(ind.seat_width + ind.aisle_width))\n\n (out.crew_nb, out.cabin_crew_nb, mw.mass_crew)\\\n = estimate_crew(out.pass_nb, ui.MASS_PILOT, ui.MASS_CABIN_CREW,\\\n mw.maximum_take_off_mass, out.PILOT_NB)\n\n mw.mass_payload = out.pass_nb * ui.MASS_PASS + ui.MASS_CARGO\n\n mw.mass_people = mw.mass_crew + out.pass_nb * ui.MASS_PASS\n\n maxp = False\n if (mw.MAX_PAYLOAD > 0 and mw.mass_payload > mw.MAX_PAYLOAD):\n mw.mass_payload = mw.MAX_PAYLOAD\n maxp = True\n log.info('With the fixed payload, passenger nb reduced to: '\\\n + str(round(mw.MAX_PAYLOAD / (ui.MASS_PASS),0)))\n\n # Fuel Mass evaluation\n # Maximum fuel that can be stored with maximum number of passengers.\n\n if not ui.MAX_FUEL_VOL: # TODO while retesting, redo fitting\n if ui.TURBOPROP:\n if wing_area > 55.00:\n mw.mass_fuel_max = round(mw.maximum_take_off_mass/4.6,3)\n else:\n mw.mass_fuel_max = round(mw.maximum_take_off_mass/3.6,3)\n elif wing_area < 90.00:\n if fuse_length < 60.00:\n mw.mass_fuel_max = round(mw.maximum_take_off_mass/4.3,3)\n else:\n mw.mass_fuel_max = round(mw.maximum_take_off_mass/4.0,3)\n elif wing_area < 300.00:\n if fuse_length < 35.00:\n mw.mass_fuel_max = round(mw.maximum_take_off_mass/3.6,3)\n else:\n mw.mass_fuel_max = round(mw.maximum_take_off_mass/3.8,3)\n elif wing_area < 400.00:\n mw.mass_fuel_max = round(mw.maximum_take_off_mass/2.2,3)\n elif wing_area < 600.00:\n mw.mass_fuel_max = round(mw.maximum_take_off_mass/2.35,3)\n else:\n mw.mass_fuel_max = round(mw.maximum_take_off_mass/2.8,3)\n else:\n mw.mass_fuel_max = round(max_fuel_vol*ui.FUEL_DENSITY,3)\n\n mw.mass_fuel_maxpass = round(mw.maximum_take_off_mass \\\n - mw.operating_empty_mass \\\n - mw.mass_payload, 3)\n\n if (mw.MAX_FUEL_MASS > 0 and mw.mass_fuel_maxpass > mw.MAX_FUEL_MASS):\n mw.mass_fuel_maxpass = mw.MAX_FUEL_MASS\n log.info('Maximum fuel ammount allowed reached [kg]: ' + str(mw.mass_fuel_maxpass))\n if (mw.maximum_take_off_mass > (mw.mass_fuel_maxpass\\\n + mw.operating_empty_mass + mw.mass_payload)):\n mw.mass_cargo = 
mw.maximum_take_off_mass - (mw.mass_fuel_maxpass\\\n + mw.operating_empty_mass + mw.mass_payload)\n if not maxp:\n log.info('Adding extra payload mass [kg]: '\\\n + str(mw.mass_cargo))\n mw.mass_payload = mw.mass_payload + mw.mass_cargo\n else:\n maximum_take_off_mass = maximum_take_off_mass - mw.mass_cargo\n log.info('With all the constrains on the fuel and payload, '\\\n + 'the maximum take off mass is not reached.'\\\n + '\\n Maximum take off mass [kg]: '\\\n + str(maximum_take_off_mass))\n else:\n log.info('Fuel mass with maximum passengers [kg]: '\\\n + str(mw.mass_fuel_maxpass))\n\n if (mw.MAX_FUEL_MASS > 0 and mw.mass_fuel_max > mw.MAX_FUEL_MASS):\n mw.mass_fuel_max = mw.MAX_FUEL_MASS\n\n # Zero Fuel Mass evaluation\n mw.zero_fuel_mass = mw.maximum_take_off_mass - mw.mass_fuel_maxpass\\\n + (ui.RES_FUEL_PERC)*mw.mass_fuel_max\n\n # Log writting (TODO: maybe create a separate function)\n log.info('---- Geometry evaluation from CPACS file ----')\n log.info('Fuselage length [m]: ' + str(round(fuse_length,3)))\n log.info('Fuselage width [m]: ' + str(round(fuse_width,3)))\n log.info('Fuselage mean width [m]: ' + str(round(ag.fuse_mean_width,3)))\n log.info('Wing Span [m]: ' + str(round(wing_span,3)))\n\n log.info('--------- Masses evaluated: -----------')\n log.info('Maximum Take Off Mass [kg]: ' + str(int(round(mw.maximum_take_off_mass))))\n log.info('Operating Empty Mass [kg]: ' + str(int(round(mw.operating_empty_mass))))\n log.info('Zero Fuel Mass [kg]: ' + str(int(round(mw.zero_fuel_mass))))\n log.info('Wing loading [kg/m^2]: ' + str(int(round(out.wing_loading))))\n log.info('Maximum ammount of fuel allowed with no passengers [kg]: ' + str(int(round(mw.mass_fuel_max))))\n log.info('Maximum ammount of fuel allowed with no passengers [l]: ' + str(int(round(mw.mass_fuel_max/ui.FUEL_DENSITY))))\n log.info('--------- Passegers evaluated: ---------')\n log.info('Passengers: ' + str(out.pass_nb))\n log.info('Lavatory: ' + str(out.toilet_nb))\n log.info('Payload mass [kg]: ' + str(mw.mass_payload))\n log.info('------- Crew members evaluated: --------')\n log.info('Pilots: ' + str(out.PILOT_NB))\n log.info('Cabin crew members: ' + str(out.cabin_crew_nb))\n log.info('############### Weight estimation completed ###############')\n\n # Outptu writting\n log.info('-------- Generating output text file --------')\n outputweightgen.output_txt(out, mw, ind, ui, name)\n\n # CPACS writting\n cpacsweightupdate.cpacs_update(mw, out, cpacs_path, cpacs_out_path)", "title": "" }, { "docid": "a0c66eb21aad2b72bbe5117e5b2eb0ab", "score": "0.46680772", "text": "def __get_process(self) :\n #process, materials, and ingredients for mixing epoxy\n mixing_epoxy_params = []\n if self.mixing_time!='' :\n if type(self.mixing_time)==str :\n val = int(self.mixing_time.split(':')[0]) # assume an int # of mins like 05:00:00\n elif type(self.mixing_time)==int :\n val = self.mixing_time\n else :\n self.logger.error(f'ERROR: receivied a mixing time value of type {type(self.mixing_time)}',exc_type=TypeError)\n mixing_epoxy_params.append(\n Parameter(\n name='Mixing Time',\n value=NominalInteger(val), \n template=self.templates.attr('Mixing Time'),\n origin='specified',\n )\n )\n if self.resting_time!='' :\n if type(self.resting_time)==str :\n val = int(self.resting_time.split(':')[0]) # ^ same with resting time\n elif type(self.resting_time)==int :\n val = self.resting_time\n else :\n self.logger.error(f'ERROR: receivied a resting time value of type {type(self.resting_time)}',exc_type=TypeError)\n mixing_epoxy_params.append(\n 
Parameter(\n name='Resting Time',\n value=NominalInteger(val), \n template=self.templates.attr('Resting Time'),\n origin='specified',\n )\n )\n mixing_epoxy = ProcessSpec(\n name='Mixing Epoxy',\n parameters=mixing_epoxy_params,\n template=self.templates.obj('Mixing Epoxy')\n )\n aq = NominalReal(float(self.part_a),'g') if self.part_a!='' else None\n IngredientSpec(name='Epoxy Part A',material=self.epoxyID if self.epoxyID is not None else None,\n process=mixing_epoxy,absolute_quantity=aq)\n aq = NominalReal(float(self.part_b),'g') if self.part_b!='' else None\n IngredientSpec(name='Epoxy Part B',material=self.epoxyID if self.epoxyID is not None else None,\n process=mixing_epoxy,absolute_quantity=aq)\n MaterialSpec(name='Mixed Epoxy',process=mixing_epoxy)\n mixing_epoxy = self.specs.unique_version_of(mixing_epoxy)\n #process and ingredients for making the glass/epoxy/foil stack\n epoxying_conds = []\n if self.comp_method!='' :\n epoxying_conds.append(\n Condition(\n name='Compression Method',\n value=NominalCategorical(str(self.comp_method)),\n template=self.templates.attr('Compression Method'),\n origin='specified',\n )\n )\n epoxying_params = []\n if self.comp_weight!='' :\n epoxying_params.append(\n Parameter(\n name='Compression Weight',\n value=NominalReal(float(self.comp_weight),'lb'),\n template=self.templates.attr('Compression Weight'),\n origin='specified',\n )\n )\n if self.comp_time!='' :\n epoxying_params.append(\n Parameter(\n name='Compression Time',\n value=NominalReal(float(self.comp_time),'hr'),\n template=self.templates.attr('Compression Time'),\n origin='specified',\n )\n )\n epoxying = ProcessSpec(\n name='Epoxying a Flyer Stack',\n conditions=epoxying_conds,\n parameters=epoxying_params,\n template=self.templates.obj('Epoxying a Flyer Stack'),\n )\n IngredientSpec(name='Glass ID',material=self.glassID if self.glassID is not None else None,process=epoxying)\n IngredientSpec(name='Foil ID',material=self.foilID if self.foilID is not None else None,process=epoxying)\n IngredientSpec(name='Mixed Epoxy',material=mixing_epoxy.output_material,process=epoxying)\n MaterialSpec(name='Glass Epoxy Foil Stack',process=epoxying)\n epoxying = self.specs.unique_version_of(epoxying)\n #process and ingredients for cutting flyer discs into the glass/epoxy/foil stack\n if self.cutting is not None :\n cutting = copy.deepcopy(self.cutting)\n if self.specs.encoder.scope in cutting.uids.keys() :\n _ = cutting.uids.pop(self.specs.encoder.scope)\n else :\n new_cutting_proc_name = self.cutting_proc_name if self.cutting_proc_name!='' else 'Unknown Flyer Cutting'\n cutting = ProcessSpec(name=new_cutting_proc_name,template=self.templates.obj('Flyer Cutting Program'))\n if self.s!='' :\n cutting.parameters.append(\n Parameter(\n name='Flyer Spacing',\n value=NominalReal(float(self.s),'mm'),\n template=self.templates.attr('Flyer Spacing'),\n origin='specified',\n )\n )\n if self.d!='' :\n cutting.parameters.append(\n Parameter(\n name='Flyer Diameter',\n value=NominalReal(float(self.d),'mm'),\n template=self.templates.attr('Flyer Diameter'),\n origin='specified',\n )\n )\n if self.n!='' :\n cutting.parameters.append(\n Parameter(\n name='Rows X Columns',\n value=NominalInteger(float(self.n)),\n template=self.templates.attr('Rows X Columns'),\n origin='specified',\n )\n )\n if self.cutting_energy!='' :\n temp = self.templates.attr('Laser Cutting Energy')\n laser_cutting_energy_par = None\n for p in cutting.parameters :\n if p.name=='LaserCuttingEnergy' :\n laser_cutting_energy_par = p\n break\n if 
laser_cutting_energy_par is not None :\n new_value=NominalReal(float(self.cutting_energy),temp.bounds.default_units)\n #msg = 'WARNING: replacing laser cutting energy in a created Spec based on new information in the '\n #msg+= f'Flyer Stack layout. Old value = {p.value}, new value = {new_value}'\n #self.logger.warning(msg)\n p.value=new_value\n else :\n cutting.parameters.append(\n Parameter(\n name='LaserCuttingEnergy',\n value=NominalReal(float(self.cutting_energy),temp.bounds.default_units),\n template=temp,\n origin='specified',\n )\n )\n if self.n_passes!='' :\n n_passes_par = None\n for p in cutting.parameters :\n if p.name=='NumberofPasses' :\n n_passes_par = p\n break\n if n_passes_par is not None :\n new_value=NominalInteger(int(self.n_passes))\n #msg = 'WARNING: replacing number of passes in a created Spec based on new information in the '\n #msg+= f'Flyer Stack layout. Old value = {p.value}, new value = {new_value}'\n #self.logger.warning(msg)\n p.value=new_value\n else :\n cutting.parameters.append(\n Parameter(\n name='NumberofPasses',\n value=NominalInteger(int(self.n_passes)),\n template=self.templates.attr('Number of Passes'),\n origin='specified',\n )\n )\n IngredientSpec(name='Glass Epoxy Foil Stack',material=epoxying.output_material,process=cutting)\n cutting = self.specs.unique_version_of(cutting)\n if cutting!=self.cutting and 'ObjectType::LaserShockFlyerCuttingProgram' in cutting.tags :\n cutting.tags.remove('ObjectType::LaserShockFlyerCuttingProgram')\n return cutting", "title": "" }, { "docid": "a716c22d525c3c6f6b1dfaf9c85435bb", "score": "0.46621966", "text": "def calculate(self):\n self.semantics = self.pool()", "title": "" }, { "docid": "1221cdd27abf9ded71c64d6831ac9920", "score": "0.46614337", "text": "def main():\n global sim_num_molds, sim_which_molds, sim_magnitude\n avg_mag()\n sim_num_molds.append(len(mold_id) - num_removed)\n sim_magnitude.append(avgmag)\n sim_which_molds.append(list(kept_molds))", "title": "" }, { "docid": "8706e42e5795e5a0327f3c9a34e23980", "score": "0.46610305", "text": "def run_analysis_iteration(self):\n self.process_analysis_queue()\n self.fetch_backend_packets()", "title": "" }, { "docid": "944ead0dc52c8e48ce69d8ad50bcd8d4", "score": "0.46597704", "text": "def __init__(self, organism_name, fasta_name, \n code=\"\", KO=False, merged=False, work_dir=\"./\"):\n if type(organism_name) == list:\n organism_name = \"_\".join(organism_name)\n\n self.organism_name = organism_name\n self.directory = work_dir + organism_name.replace(\" \", \"_\") #directory - name of the directory with KEGG gene files\n self.code = code #KEGG organism code\n self.number_genes = 0 #Number of genes kept (valid)\n self.valid_ecs = [] #list of one or multiple EC codes as strings\n self.enzs_parsed = [] #List of KEGG enzyme entry file parsers\n self.genes = []\n self.multiple = [] #List of starting indexes of the genes in other species\n self.KO = KO\n \n self.reaction_graph = GraphClass(\"reaction graph\") #reaction graph, filtered\n self.unfiltered_reaction_graph = GraphClass(\"unfiltered reaction graph\") #unfiltered reaction graph (all metabolites)\n self.pathway_reaction_graph = GraphClass(\"pathway reaction graph\") #reaction graph, filtered\n self.pathway_unfiltered_reaction_graph = GraphClass(\"pathway unfiltered reaction graph\") #unfiltered reaction graph (all metabolites)\n \n self.substrate_product_graph = GraphClass(\"substrate-product graph\") # substrate-product graph\n self.unfiltered_substrate_product_graph = GraphClass(\"unfiltered 
substrate-product graph\") #unfiltered substrate-product graph (all metabolites)\n self.pathway_substrate_product_graph = GraphClass(\"pathway substrate-product graph\") # substrate-product graph\n self.pathway_unfiltered_substrate_product_graph = GraphClass(\"pathway unfiltered substrate-product graph\") #unfiltered substrate-product graph (all metabolites)\n \n self.in_out_graph = GraphClass(\"in-out graph\") # in-out graph\n self.pathway_in_out_graph = GraphClass(\"pathway in-out graph\")\n \n if not KO and not merged: #If not building graphs with KO, need organism code and fastas\n if type(fasta_name) == list: #Hybrid species\n assert type(self.code) == list and len(self.code) > 1, \"Missing multiple codes as list\"\n for i, fname in enumerate(fasta_name) :\n self.multiple.append(len(self.genes))\n self.load_data_file(fname) #Get gene names\n else:\n self.load_data_file(fasta_name) #Get gene names\n \n if self.code == \"\" : #Find organism code\n self.findandtest_organism()\n elif type(self.code) == str or type(self.code) == unicode or type(self.code) == np.unicode or type(self.code) == np.unicode_ :\n self.test_code() #Testing gene name - organism code correspondance (tests a single gene)\n else :\n self.test_multiple_codes()", "title": "" }, { "docid": "cacb98b35e5245249e39256ba4a81967", "score": "0.46596926", "text": "def calculate_final_energy(self, treatment_systems_pop, treatment_systems_agri, time=''):\n\n self.df[f'Final{time}PumpingEnergy'] = self.df['GWPumpingEnergy'] * self.df[f'Final{time}WaterWithdrawals']\n self.df[f'Final{time}DesalinationEnergy'] = self.df['DesalinationEnergy'] * self.df[f'Final{time}WaterWithdrawals']\n self.df[f'Final{time}Energy'] = self.df[f'Final{time}PumpingEnergy'] + self.df[f'Final{time}DesalinationEnergy']\n\n systems_vector_pop = self.df['PopulationLeastCostTechnology'].apply(\n lambda row: treatment_systems_pop[row] + f'{time}Energy')\n systems_vector_agri = self.df['IrrigationLeastCostTechnology'].apply(\n lambda row: treatment_systems_agri[row] + f'{time}Energy')\n self.df[f'Final{time}PopTreatmentEnergy'] = None\n self.df[f'Final{time}AgriTreatmentEnergy'] = None\n systems_vector_pop.loc[systems_vector_pop == f'Na{time}Energy'] = None\n systems_vector_agri.loc[systems_vector_agri == f'Na{time}Energy'] = None\n\n for value in set(systems_vector_pop.dropna()):\n index_vec = systems_vector_pop == value\n self.df.loc[index_vec, f'Final{time}PopTreatmentEnergy'] = self.df.loc[index_vec, value]\n\n for value in set(systems_vector_agri.dropna()):\n index_vec = systems_vector_agri == value\n self.df.loc[index_vec, f'Final{time}AgriTreatmentEnergy'] = self.df.loc[index_vec, value]\n\n self.df[f'Final{time}TreatmentEnergy'] = self.df[[f'Final{time}PopTreatmentEnergy', f'Final{time}AgriTreatmentEnergy']].sum(axis=1)", "title": "" }, { "docid": "3ba7b3393c3294362e9287191229e5a1", "score": "0.46530926", "text": "def analysis_start(self):\n \n \n # Analysis is done after this function returns\n self.CONTINUE_EXECUTION = True\n \n # Extract some important variables\n volatility_profile = self.machine.config.volatility_profile\n machine = self.machine\n \n # Create a queue and data handler\n # This enables us to do many to many data flows\n self.data_queue = multiprocessing.Queue()\n self.data_handler = DataHandler(self.data_queue)\n \n # RabbitMQ queue name\n self.rabbitmq = LOPHI_RabbitMQ_Producer(self.services_host,\n self.data_handler.new_queue(),\n G.RabbitMQ.SENSOR,\n exchange_type=G.RabbitMQ.TYPE_FANOUT,\n exchange=G.RabbitMQ.EXCHANGE_FANOUT)\n # 
Start data paths\n self.data_handler.start()\n self.rabbitmq.start()\n \n \n # Memory Analysis \n print \"Starting memory analysis...\"\n self.mem_analysis = MemoryAnalysisEngine(self.machine,\n self.data_queue,\n plugins=['pslist'])\n self.mem_analysis.start()\n \n # Disk analysis Analysis \n print \"Starting disk analysis...\"\n self.disk_analysis = DiskAnalysisEngine(self.machine,\n self.data_queue)\n self.disk_analysis.start()", "title": "" }, { "docid": "f4464b0ffdc87aa739201a5bd2e180c3", "score": "0.46489635", "text": "def cal_emission_prob(self):\n res = dict()\n s_res = dict()\n\n for e in self.n_entity_word.keys():\n # Initialize the keys and dicts\n if e not in res.keys():\n res[e] = dict()\n s_res[e] = dict()\n\n total = sum(self.n_entity_word[e].values())\n for w in self.n_entity_word[e].keys():\n res[e][w] = self.n_entity_word[e][w] / total\n s_res[e][w] = (self.n_entity_word[e][w] + 1) / (total + self.unique_word_count)\n s_res[e]['NaN'] = 1 / (total + self.unique_word_count)\n return res, s_res", "title": "" }, { "docid": "4ea6402ac63a9d8fc7123648fca8f2d0", "score": "0.46447662", "text": "def create_process_chain_entry(input_object: DataObject, geometries: str):\n\n rn = randint(0, 1000000)\n pc = []\n\n importer = {\n \"id\": \"v_in_geojson_%i\" % rn,\n \"module\": \"v.in.geojson\",\n \"inputs\": [{\"param\": \"input\",\n \"value\": geometries},\n {\"param\": \"output\",\n \"value\": \"geometries\"},\n ]\n }\n\n g_region_1 = {\n \"id\": \"g_region_1_%i\" % rn,\n \"module\": \"g.region\",\n \"inputs\": [{\"param\": \"save\",\n \"value\": \"previous_region\"}],\n \"flags\": \"g\"}\n\n g_region_2 = {\n \"id\": \"g_region_2_%i\" % rn,\n \"module\": \"g.region\",\n \"inputs\": [{\"param\": \"vector\",\n \"value\": \"polygon\"}],\n \"flags\": \"g\"}\n\n v_to_rast_1 = {\n \"id\": \"v_to_rast_1_%i\" % rn,\n \"module\": \"v.to.rast\",\n \"inputs\": [{\"param\": \"input\",\n \"value\": \"geometries\"},\n {\"param\": \"output\",\n \"value\": \"geometries\"},\n {\"param\": \"type\",\n \"value\": \"point,line,area\"},\n {\"param\": \"use\",\n \"value\": \"cat\"}]\n }\n\n r_mask_1 = {\n \"id\": \"r_mask_1_%i\" % rn,\n \"module\": \"r.mask\",\n \"inputs\": [{\"param\": \"raster\",\n \"value\": \"geometries\"}]\n }\n\n t_rast_univar = {\n \"id\": \"t_rast_univar_%i\" % rn,\n \"module\": \"t.rast.univar\",\n \"inputs\": [{\"param\": \"input\",\n \"value\": input_object.grass_name()}]\n }\n\n r_mask_2 = {\n \"id\": \"r_mask_2_%i\" % rn,\n \"module\": \"r.mask\",\n \"flags\": \"r\"\n }\n\n g_region_3 = {\n \"id\": \"g_region_3_%i\" % rn,\n \"module\": \"g.region\",\n \"inputs\": [{\"param\": \"region\",\n \"value\": \"previous_region\"}],\n \"flags\": \"g\"}\n\n pc.append(importer)\n pc.append(g_region_1)\n pc.append(g_region_2)\n pc.append(v_to_rast_1)\n pc.append(r_mask_1)\n pc.append(t_rast_univar)\n pc.append(r_mask_2)\n pc.append(g_region_3)\n\n return pc", "title": "" }, { "docid": "d4df3a3c639784064402cacee23d25df", "score": "0.46420428", "text": "def EMEP_Create_dict(actual_year, old_year):\n resolutions =['year', 'month', 'hour', 'day']\n\n DATASET_QUERY = '?service=WMS&version=1.3.0&request=GetCapabilities'\n\n Catalog = download_Catalog_Datasets(global_settings.CATALOG_EMEP_URL.replace(\"XXXX\", actual_year))\n old_Catalog = download_Catalog_Datasets(global_settings.CATALOG_EMEP_URL.replace(\"XXXX\", old_year))\n\n dict_EMEP = {}\n dict_EMEP['services'] = {}\n dict_EMEP['resolutions'] = resolutions\n dict_EMEP['datasets'] = {}\n dict_EMEP['ListGases'] = {}\n dict_EMEP['ListDates'] 
= {}\n dict_EMEP['styles'] = []\n\n dict_EMEP['LimitsBounds'] = {}\n dict_EMEP['LimitsBounds']['west'] = -10.65\n dict_EMEP['LimitsBounds']['east'] = -6.15\n dict_EMEP['LimitsBounds']['south'] = 36.25\n dict_EMEP['LimitsBounds']['north'] = 42.25\n\n for res in resolutions:\n dict_EMEP['datasets'][res] = {}\n\n for res in resolutions:\n dict_EMEP['ListGases'][res] = []\n dict_EMEP['ListDates'][res] = {}\n dict_EMEP['ListDates'][res]['General'] = []\n dict_EMEP['ListDates'][res]['Leap'] = []\n dict_EMEP['ListDates'][res]['NoLeap'] = []\n\n catalog = Catalog['catalog']\n\n for serv in catalog['service']['service']:\n dict_EMEP['services'][serv['@serviceType'].lower()] = serv['@base']\n \n recent_datasets = catalog['dataset']['dataset']\n\n old_datasets = old_Catalog['catalog']['dataset']['dataset']\n\n datasets = recent_datasets + old_datasets\n \n for dataset in datasets:\n for res in resolutions:\n if res in dataset['@name']:\n resolution = res\n break\n meteo_year = dataset['@name'].split(resolution+'.')[1].split('met')[0]\n\n emissions_year = dataset['@name'].split(resolution+'.')[1].split('met_')[1].split('emis')[0]\n\n if emissions_year != meteo_year:\n continue\n\n year = meteo_year\n\n if year not in dict_EMEP['datasets'][resolution].keys():\n dict_EMEP['datasets'][resolution][year] = {}\n dict_EMEP['datasets'][resolution][year]['EMEPSite'] = dataset['@urlPath']\n dict_EMEP['datasets'][resolution][year]['Geoserver'] = \"%s-%s\"%(resolution,year)\n\n infodataset =download_Catalog_Datasets(dict_EMEP['services']['wms']+dataset['@urlPath'] + DATASET_QUERY)\n\n ListGases = dict_EMEP['ListGases'][resolution]\n\n gases = ListGases + list(set(retrieveGases(infodataset))-set(ListGases))\n\n dict_EMEP['ListGases'][resolution] = gases\n\n ListDatesNoLeap = len(dict_EMEP['ListDates'][resolution]['NoLeap'])\n\n ListDatesLeap = len(dict_EMEP['ListDates'][resolution]['Leap'])\n\n if ListDatesLeap == 0 or ListDatesNoLeap == 0 or ListDatesLeap == 0:\n if dict_EMEP['styles'] == []:\n dict_EMEP['styles'] = retrieveStyles(infodataset)\n\n if ListDatesNoLeap == 0:\n if int(year)%4 != 0:\n dates = []\n dates_02 = []\n dates = retrieveDates(infodataset, year, dates, dates_02, resolution)\n dict_EMEP['ListDates'][resolution]['NoLeap'] = dates_02\n\n if len(dict_EMEP['ListDates'][res]['General']) == 0:\n dict_EMEP['ListDates'][res]['General'] = dates\n\n if ListDatesLeap == 0:\n if int(year)%4 == 0:\n dates = []\n dates_02 = []\n dates = retrieveDates(infodataset, year, dates, dates_02, resolution)\n dict_EMEP['ListDates'][resolution]['Leap'] = dates_02\n\n if len(dict_EMEP['ListDates'][res]['General']) == 0:\n dict_EMEP['ListDates'][res]['General'] = dates\n\n dict_EMEP['max_min'] = {}\n for res in resolutions:\n dict_EMEP['max_min'][res] = {}\n dict_EMEP['ListGases'][res] += add_pollutants()\n for gas in dict_EMEP['ListGases'][res]:\n dict_EMEP['max_min'][res][gas] = None\n if gas == 'TDEP_N_critical_load':\n dict_EMEP['max_min'][res][gas] = {}\n dict_EMEP['max_min'][res][gas]['max'] = 2\n dict_EMEP['max_min'][res][gas]['min'] = 0\n\n save_EMEP_dict(dict_EMEP)\n return dict_EMEP", "title": "" }, { "docid": "ac097f8b9fd12b6dfd8740444c53d272", "score": "0.46404865", "text": "def main(argdict):\n\n # load configuration from logfile:\n with open(argdict[\"log\"], 'r') as f:\n argdict = json.load(f)\n\n # load system initial configuration:\n pdb = pdb_file_nonstandard_bonds(argdict[\"pdb\"])\n print(\"--> input topology: \", end=\"\")\n print(pdb.topology)\n\n # physical parameters of simulation:\n 
sim_temperature = argdict[\"temperature\"] * kelvin\n sim_andersen_coupling = 1/picosecond\n sim_pressure = (\n (argdict[\"pressure\"], argdict[\"pressure\"], argdict[\"pressure\"])*bar\n )\n sim_scale_x = True\n sim_scale_y = True\n sim_scale_z = True\n\n # simulation control parameters:\n sim_timestep = argdict[\"timestep\"]*femtoseconds\n\n # restraints parameters:\n sim_restr_fc = argdict[\"restr_fc\"]*kilojoule_per_mole/nanometer**2\n\n # create force field object:\n ff = ForceField(*argdict[\"ff\"])\n\n # build a simulation system from topology and force field:\n # (note that AMOEBA is intended to be run without constraints)\n # (note that mutualInducedtargetEpsilon defaults to 0.01 unlike what is\n # specified in the documentation which claims 0.00001)\n system = ff.createSystem(\n pdb.topology,\n nonbondedMethod=PME,\n nonbondedCutoff=argdict[\"nonbonded_cutoff\"]*nanometer,\n vdwCutoff=argdict[\"vdw_cutoff\"]*nanometer,\n ewaldErrorTolerance=argdict[\"ewald_error_tolerance\"],\n polarisation=argdict[\"polarisation\"],\n mutualInducedTargetEpsilon=argdict[\"mutual_induced_target_epsilon\"],\n constraints=None,\n rigidWater=False,\n removeCMMotion=True # removes centre of mass motion\n )\n\n # overwrite the polarisation method set at system creation; this is\n # necessary as openMM always sets polarisation method to \"mutual\" of the\n # target epsilon is specified at system creation; this way, target epsilon\n # is ignored for all but the mutual method\n multipole_force = [\n f for f in system.getForces() if isinstance(f, AmoebaMultipoleForce)\n ][0]\n print(\"--> using polarisation method \" + str(argdict[\"polarisation\"]))\n if argdict[\"polarisation\"] == \"mutual\":\n multipole_force.setPolarizationType(multipole_force.Mutual)\n if argdict[\"polarisation\"] == \"extrapolated\":\n multipole_force.setPolarizationType(multipole_force.Extrapolated)\n if argdict[\"polarisation\"] == \"direct\":\n multipole_force.setPolarizationType(multipole_force.Direct)\n\n # will use Andersen thermostat here:\n # (Inhibits particle dynamics somewhat, but little or no ergodicity\n # issues (from Gromacs documenation). However, only alternative is full\n # Langevin dynamics, which is even worse wrt dynamics. 
Bussi/v-rescale is\n # not available at the moment, it seems (it is available in tinker, but\n # without GPU acceleration))\n system.addForce(AndersenThermostat(\n sim_temperature,\n sim_andersen_coupling))\n\n # use anisotropic barostat:\n # (note that this corresponds to semiisotropic pressure coupling in Gromacs\n # if the pressure is identical for the x- and y/axes)\n # (note that by default this attempts an update every 25 steps)\n system.addForce(MonteCarloAnisotropicBarostat(\n sim_pressure,\n sim_temperature,\n sim_scale_x,\n sim_scale_y,\n sim_scale_z))\n\n # prepare harmonic restraining potential:\n # (note that periodic distance is absolutely necessary here to prevent\n # system from blowing up, as otherwise periodic image position may be used\n # resulting in arbitrarily large forces)\n force = CustomExternalForce(\"k*periodicdistance(x, y, z, x0, y0, z0)^2\")\n force.addGlobalParameter(\"k\", sim_restr_fc)\n force.addPerParticleParameter(\"x0\")\n force.addPerParticleParameter(\"y0\")\n force.addPerParticleParameter(\"z0\")\n\n # apply harmonic restraints to C-alphas:\n if argdict[\"restr\"] == \"capr\":\n print(\"--> applying harmonic positional restraints to CA atoms\")\n for atm in pdb.topology.atoms():\n if atm.name == \"CA\":\n force.addParticle(atm.index, pdb.positions[atm.index])\n elif argdict[\"restr\"] == \"hapr\":\n sys.exit(\n \"Restraints mode \" + str(argdict[\"restr\"]) + \"is not implemented.\"\n )\n elif argdict[\"restr\"] == \"none\":\n print(\"--> applying no harmonic positional restraints to any atom\")\n else:\n sys.exit(\n \"Restraints mode \" + str(argdict[\"restr\"]) + \"is not implemented.\"\n )\n\n # add restraining force to system:\n system.addForce(force)\n\n # make special group for nonbonded forces:\n for f in system.getForces():\n if (\n isinstance(f, AmoebaMultipoleForce)\n or isinstance(f, AmoebaVdwForce)\n or isinstance(f, AmoebaGeneralizedKirkwoodForce)\n or isinstance(f, AmoebaWcaDispersionForce)\n ):\n f.setForceGroup(1)\n\n # select integrator:\n if argdict[\"integrator\"] == \"mts\":\n # use multiple timestep RESPA integrator:\n print(\"--> using RESPA/MTS integrator\")\n integrator = MTSIntegrator(\n sim_timestep, [(0, argdict[\"inner_ts_frac\"]), (1, 1)]\n )\n if argdict[\"integrator\"] == \"verlet\":\n # use Leapfrog Verlet integrator here:\n print(\"--> using Verlet integrator\")\n integrator = VerletIntegrator(sim_timestep)\n\n # select a platform (should be CUDA, otherwise VERY slow):\n platform = Platform.getPlatformByName(argdict[\"platform\"])\n properties = {\"CudaPrecision\": argdict[\"precision\"], \"CudaDeviceIndex\": \"0\"}\n\n # create simulation system:\n sim = Simulation(pdb.topology, system, integrator, platform, properties)\n\n # unit conversion factors:\n ang2nm = 0.1\n\n # create MDA universe:\n u = mda.Universe(args.s, args.f)\n\n # selection for overall system will be needed to set OpenMM positions\n # accordingt to trajectory:\n allsystem = u.select_atoms(\"all\")\n\n # get parameters to define cylinder around protein center of geometry:\n # (the cylinder spans the entire box in the z-direction)\n protein = u.select_atoms(\"protein\")\n radius = str(args.r)\n z_margin = args.z_margin\n z_min = str(\n protein.bbox()[0, 2] - protein.center_of_geometry()[2] - z_margin\n )\n z_max = str(\n protein.bbox()[1, 2] - protein.center_of_geometry()[2] + z_margin\n )\n\n # select all solvent atoms, note that AMOEBA residue name is HOH:\n # (these must be updating, as water may move in and out of pore!)\n solvent = 
u.select_atoms(\n \"byres (resname HOH SOL) and cyzone \"\n + radius + \" \"\n + z_max + \" \"\n + z_min + \" protein\",\n updating=True\n )\n solvent_ow = solvent.select_atoms(\"name O OW\", updating=True)\n\n # lambda function for converting atomic dipoles to molecular dipoles:\n # (this only works on 1D arrays, hence use apply_along_axis if quantity is\n # vector-valued, e.g. positions and dipoles)\n def atomic2molecular_sum(arr): return np.bincount(allsystem.resindices, arr)\n\n def atomic2molecular_avg(arr): return np.bincount(\n allsystem.resindices, arr) / np.bincount(allsystem.resindices)\n\n # create lambda function for obtaining charges in vectorisable way:\n # (units are elementary_charge)\n get_atomic_charges = np.vectorize(\n lambda index: multipole_force.getMultipoleParameters(int(index))[0]\n .value_in_unit(elementary_charge)\n )\n\n # obtain atomic charges:\n # (charges are static, so need this only once; units are elementary charge)\n atomic_charges = get_atomic_charges(allsystem.ix)\n\n # obtain start and end time as will as time step:\n dt = float(args.dt)\n t_start = float(args.b)\n t_end = float(args.e)\n\n # prepare results dictionary:\n res = {\n \"t\": [],\n \"x\": [],\n \"y\": [],\n \"z\": [],\n \"indu_rho\": [],\n \"indu_costheta\": [],\n \"indu_cosphi\": [],\n \"perm_rho\": [],\n \"perm_costheta\": [],\n \"perm_cosphi\": [],\n \"mono_rho\": [],\n \"mono_costheta\": [],\n \"mono_cosphi\": [],\n \"total_rho\": [],\n \"total_costheta\": [],\n \"total_cosphi\": []\n }\n\n # loop over trajectory:\n for ts in u.trajectory:\n\n # skip all frames before starting frame:\n if ts.time < t_start:\n continue\n\n # only analyse relevant time frames:\n if round(ts.time, 4) % dt == 0:\n\n # inform user:\n print(\n \"analysing frame: \"\n + str(ts.frame)\n + \" at time: \"\n + str(ts.time)\n )\n print(\n \"number of selected solvent molecules in this frame: \"\n + str(solvent.n_residues)\n )\n\n # convert mda positions to OpenMM positions and set context:\n omm_positions = Quantity(\n [tuple(pos) for pos in list(allsystem.positions)],\n unit=angstrom\n )\n sim.context.setPositions(omm_positions)\n\n # calculate molecular positions (or molecular centre of geometry) by\n # averaging over all atomic positions within a residue:\n # (units are Angstrom in MDAnalysis!)\n molecular_positions = np.apply_along_axis(\n atomic2molecular_avg, 0, allsystem.positions) * ang2nm\n\n # calculate charge-weighted positions by multiplying the relative\n # atomic positions with the atomic charges (relative positions are\n # necessary to account for charged residues/molecules, where the\n # dipole moment is calculated relative to the center of geometry of\n # the residue):\n # (units are elementary charge * nanometer)\n atomic_charge_weighted_positions = (\n allsystem.positions - molecular_positions[allsystem.resindices]\n )\n atomic_charge_weighted_positions *= (\n atomic_charges[np.newaxis].T * ang2nm\n )\n\n # obtain induced and permanent atomic dipoles from OpenMM:\n # (units are elementary charge * nm)\n atomic_dipoles_indu = np.array(\n multipole_force.getInducedDipoles(sim.context)\n )\n atomic_dipoles_perm = np.array(\n multipole_force.getLabFramePermanentDipoles(sim.context)\n )\n\n # convert atomic to molecular quantities and calculate total dipole:\n molecular_dipoles_indu = np.apply_along_axis(\n atomic2molecular_sum, 0, atomic_dipoles_indu)\n molecular_dipoles_perm = np.apply_along_axis(\n atomic2molecular_sum, 0, atomic_dipoles_perm)\n molecular_dipoles_mono = np.apply_along_axis(\n 
atomic2molecular_sum, 0, atomic_charge_weighted_positions)\n molecular_dipoles_total = (\n molecular_dipoles_indu\n + molecular_dipoles_perm\n + molecular_dipoles_mono\n )\n\n # convert to spherical coordinates:\n molecular_dipoles_indu = cartesian2spherical(molecular_dipoles_indu)\n molecular_dipoles_perm = cartesian2spherical(molecular_dipoles_perm)\n molecular_dipoles_mono = cartesian2spherical(molecular_dipoles_mono)\n molecular_dipoles_total = cartesian2spherical(\n molecular_dipoles_total\n )\n\n # insert into results dictionary:\n res[\"t\"].append(np.repeat(ts.time, solvent.n_residues))\n res[\"x\"].append(molecular_positions[solvent_ow.resindices, 0])\n res[\"y\"].append(molecular_positions[solvent_ow.resindices, 1])\n res[\"z\"].append(molecular_positions[solvent_ow.resindices, 2])\n res[\"indu_rho\"].append(\n molecular_dipoles_indu[solvent_ow.resindices, 0]\n )\n res[\"indu_costheta\"].append(\n molecular_dipoles_indu[solvent_ow.resindices, 1]\n )\n res[\"indu_cosphi\"].append(\n molecular_dipoles_indu[solvent_ow.resindices, 2]\n )\n res[\"perm_rho\"].append(\n molecular_dipoles_perm[solvent_ow.resindices, 0]\n )\n res[\"perm_costheta\"].append(\n molecular_dipoles_perm[solvent_ow.resindices, 1]\n )\n res[\"perm_cosphi\"].append(\n molecular_dipoles_perm[solvent_ow.resindices, 2]\n )\n res[\"mono_rho\"].append(\n molecular_dipoles_mono[solvent_ow.resindices, 0]\n )\n res[\"mono_costheta\"].append(\n molecular_dipoles_mono[solvent_ow.resindices, 1]\n )\n res[\"mono_cosphi\"].append(\n molecular_dipoles_mono[solvent_ow.resindices, 2]\n )\n res[\"total_rho\"].append(\n molecular_dipoles_total[solvent_ow.resindices, 0]\n )\n res[\"total_costheta\"].append(\n molecular_dipoles_total[solvent_ow.resindices, 1]\n )\n res[\"total_cosphi\"].append(\n molecular_dipoles_total[solvent_ow.resindices, 2]\n )\n\n # stop iterating through trajectory after end time:\n if ts.time > t_end:\n break\n\n # convert lists of arrays to arrays:\n for k in res.keys():\n res[k] = np.concatenate(res[k])\n\n # convert units of dipole magnitude to Debye:\n eNm2debye = 48.03205\n res[\"indu_rho\"] = eNm2debye*res[\"indu_rho\"]\n res[\"perm_rho\"] = eNm2debye*res[\"perm_rho\"]\n res[\"mono_rho\"] = eNm2debye*res[\"mono_rho\"]\n res[\"total_rho\"] = eNm2debye*res[\"total_rho\"]\n\n # load spline curve data:\n with open(args.j, \"r\") as f:\n chap_data = json.load(f)\n\n # create spline curve from CHAP data:\n spline_curve = BSplineCurve(chap_data)\n\n # calculate s-coordinate from z-coordinate:\n res[\"s\"] = spline_curve.z2s(res[\"z\"])\n\n # convert results to data frame:\n df_res = pd.DataFrame(res)\n\n # loop over various numbers of bins:\n df = []\n for nbins in args.nbins:\n\n # create a temporary data frame:\n tmp = df_res\n\n # drop positional coordinates:\n tmp = tmp.drop([\"x\", \"y\", \"z\", \"t\"], axis=1)\n\n # bin by value of s-coordinate:\n tmp = tmp.groupby(pd.cut(tmp.s, nbins))\n\n # aggregate variables:\n tmp = tmp.agg(\n [np.mean, np.std, sem, np.size, np.median, qlo, qhi]\n ).reset_index()\n\n # rename columns (combines variable name with aggregation method):\n tmp.columns = [\"_\".join(x) for x in tmp.columns.ravel()]\n\n # remove grouping key:\n tmp = tmp.drop(\"s_\", axis=1)\n\n # add column wit number of bins:\n tmp[\"nbins\"] = nbins\n\n # append to list of data frames:\n df.append(tmp)\n\n # combine list of data frames into single data frame:\n df = pd.concat(df)\n\n # write to JSON file:\n df.to_json(args.o, orient=\"records\")\n\n # need to add newline for POSIX compliance:\n with 
open(args.o, \"a\") as f:\n f.write(\"\\n\")", "title": "" }, { "docid": "2f848bb9ca9e333fd2a112bc1f4c7371", "score": "0.46396163", "text": "def ProcessSimulation(airmass_num,pwv_num,oz_num):\n \n \n print('--------------------------------------------')\n print(' 1) airmass = ', airmass_num)\n print(' 2) pwv = ', pwv_num)\n print(' 3) oz = ', oz_num)\n print('--------------------------------------------') \n \n \n ensure_dir(TOPDIR)\n\n \n # build the part 1 of filename\n BaseFilename_part1=Prog+'_'+Obs+'_'+Rte+'_'\n \n\n # Set up type of run\n runtype='clearsky' #'no_scattering' #aerosol_special #aerosol_default# #'clearsky'# \n if Proc == 'sc':\n runtype='no_absorption'\n outtext='no_absorption'\n elif Proc == 'ab':\n runtype='no_scattering'\n outtext='no_scattering'\n elif Proc == 'sa':\n runtype=='clearsky'\n outtext='clearsky'\n elif Proc == 'ae': \n runtype='aerosol_default'\n outtext='aerosol_default'\n elif Proc == 'as': \n runtype='aerosol_special'\n outtext='aerosol_special'\n else:\n runtype=='clearsky'\n outtext='clearsky'\n\n# Selection of RTE equation solver \n if Rte == 'pp': # parallel plan\n rte_eq='disort'\n elif Rte=='ps': # pseudo spherical\n rte_eq='sdisort'\n \n \n# Selection of absorption model \n molmodel='reptran'\n if Mod == 'rt':\n molmodel='reptran'\n if Mod == 'lt':\n molmodel='lowtran'\n if Mod == 'kt':\n molmodel='kato'\n if Mod == 'k2':\n molmodel='kato2'\n if Mod == 'fu':\n molmodel='fu' \n if Mod == 'cr':\n molmodel='crs' \n \n\n\n \t \n # for simulation select only two atmosphere \n #theatmospheres = np.array(['afglus','afglms','afglmw','afglt','afglss','afglsw'])\n atmosphere_map=dict() # map atmospheric names to short names \n atmosphere_map['afglus']='us'\n atmosphere_map['afglms']='ms'\n atmosphere_map['afglmw']='mw' \n atmosphere_map['afglt']='tp' \n atmosphere_map['afglss']='ss' \n atmosphere_map['afglsw']='sw' \n \n theatmospheres= []\n for skyindex in Atm:\n if re.search('us',skyindex):\n theatmospheres.append('afglus')\n if re.search('sw',skyindex):\n theatmospheres.append('afglsw')\n \n \n \n\n # 1) LOOP ON ATMOSPHERE\n for atmosphere in theatmospheres:\n #if atmosphere != 'afglus': # just take us standard sky\n # break\n atmkey=atmosphere_map[atmosphere]\n \n # manage input and output directories and vary the ozone\n TOPDIR2=TOPDIR+'/'+Rte+'/'+atmkey+'/'+Proc+'/'+Mod\n ensure_dir(TOPDIR2)\n INPUTDIR=TOPDIR2+'/'+'in'\n ensure_dir(INPUTDIR)\n OUTPUTDIR=TOPDIR2+'/'+'out'\n ensure_dir(OUTPUTDIR)\n \n \n # loop on molecular model resolution\n #molecularresolution = np.array(['COARSE','MEDIUM','FINE']) \n # select only COARSE Model\n molecularresolution = np.array(['COARSE']) \n for molres in molecularresolution:\n if molres=='COARSE':\n molresol ='coarse'\n elif molres=='MEDIUM':\n molresol ='medium'\n else:\n molresol ='fine'\n \n \n #water vapor \n pwv_val=pwv_num\n pwv_str='H2O '+str(pwv_val)+ ' MM'\n wvfileindex=int(10*pwv_val)\n \n \n # airmass\n airmass=airmass_num\n amfileindex=int(airmass_num*10)\n \n # Ozone \n oz_str='O3 '+str(oz_num)+ ' DU'\n ozfileindex=int(oz_num/10.)\n \n \n BaseFilename=BaseFilename_part1+atmkey+'_'+Proc+'_'+Mod+'_z'+str(amfileindex)+'_'+WVXX+str(wvfileindex) +'_'+OZXX+str(ozfileindex) \n \n verbose=True\n uvspec = UVspec3.UVspec()\n uvspec.inp[\"data_files_path\"] = libradtranpath+'data'\n \n uvspec.inp[\"atmosphere_file\"] = libradtranpath+'data/atmmod/'+atmosphere+'.dat'\n uvspec.inp[\"albedo\"] = '0.2'\n \n uvspec.inp[\"rte_solver\"] = rte_eq\n \n \n \n if Mod == 'rt':\n uvspec.inp[\"mol_abs_param\"] = molmodel + 
' ' + molresol\n else:\n uvspec.inp[\"mol_abs_param\"] = molmodel\n\n # Convert airmass into zenith angle \n am=airmass\n sza=math.acos(1./am)*180./math.pi\n\n # Should be no_absorption\n if runtype=='aerosol_default':\n uvspec.inp[\"aerosol_default\"] = ''\n elif runtype=='aerosol_special':\n uvspec.inp[\"aerosol_default\"] = ''\n uvspec.inp[\"aerosol_set_tau_at_wvl\"] = '500 0.02'\n \n if runtype=='no_scattering':\n uvspec.inp[\"no_scattering\"] = ''\n if runtype=='no_absorption':\n uvspec.inp[\"no_absorption\"] = ''\n \n # set up the ozone value \n uvspec.inp[\"mol_modify\"] = pwv_str\n uvspec.inp[\"mol_modify2\"] = oz_str\n \n \n uvspec.inp[\"output_user\"] = 'lambda edir'\n uvspec.inp[\"altitude\"] = OBS_Altitude # Altitude LSST observatory\n uvspec.inp[\"source\"] = 'solar '+libradtranpath+'data/solar_flux/kurudz_1.0nm.dat'\n #uvspec.inp[\"source\"] = 'solar '+libradtranpath+'data/solar_flux/kurudz_0.1nm.dat'\n uvspec.inp[\"sza\"] = str(sza)\n uvspec.inp[\"phi0\"] = '0'\n uvspec.inp[\"wavelength\"] = '250.0 1200.0'\n # reflectivity do not depends on sun-earth distance\n #\n uvspec.inp[\"output_quantity\"] = 'reflectivity' #'transmittance' #\n #uvspec.inp[\"output_quantity\"] = 'trasmittance' #'transmittance' #\n# uvspec.inp[\"verbose\"] = ''\n uvspec.inp[\"quiet\"] = ''\n\n \n\n if \"output_quantity\" in uvspec.inp.keys():\n outtextfinal=outtext+'_'+uvspec.inp[\"output_quantity\"]\n\n \n \n inputFilename=BaseFilename+'.INP'\n outputFilename=BaseFilename+'.OUT'\n inp=os.path.join(INPUTDIR,inputFilename)\n out=os.path.join(OUTPUTDIR,outputFilename)\n \n \n uvspec.write_input(inp)\n uvspec.run(inp,out,verbose,path=libradtranpath)\n \n \n return OUTPUTDIR,outputFilename", "title": "" }, { "docid": "c068c6d56ecc238332c3dc34421ead48", "score": "0.46354723", "text": "def find_signal_at_electrodes(self, neur_dict, ext_sim_dict, mapping):\n \n print '\\033[1;35mFinding signal at electrodes from %s ...\\033[1;m' % neur_dict['name']\n neur_input = join(ext_sim_dict['neural_input'],\n neur_dict['name'], 'imem.npy')\n imem = np.load(neur_input)\n ntsteps = len(imem[0,:])\n n_elecs = ext_sim_dict['n_elecs']\n n_compartments = len(imem[:,0])\n #signals = np.zeros((n_elecs, ntsteps))\n #for elec in xrange(n_elecs):\n # for comp in xrange(n_compartments):\n # signals[elec,:] += mapping[elec, comp] * imem[comp,:]\n signals = np.dot(mapping, imem)\n try:\n os.mkdir(join(ext_sim_dict['output_folder'], 'signals'))\n except OSError:\n pass\n np.save(join(ext_sim_dict['output_folder'], 'signals', \\\n 'signal_%s.npy' %(neur_dict['name'])), signals) \n return signals", "title": "" } ]
a5c96308d71b0c760e1e640269275490
Gzip a given string content.
[ { "docid": "ee9d130fab1feae0dc17187cfa91f5f9", "score": "0.7249553", "text": "def _compress_content(self, content):\n zbuf = StringIO()\n zfile = GzipFile(mode='wb', compresslevel=6, fileobj=zbuf)\n try:\n zfile.write(content.read())\n finally:\n zfile.close()\n content.file = zbuf\n content.seek(0)\n return content", "title": "" } ]
[ { "docid": "c92d8854cb03b0dee3bae0841a817b12", "score": "0.75606656", "text": "def compress_string(self, s):\n import cStringIO, gzip\n zbuf = cStringIO.StringIO()\n zfile = gzip.GzipFile(mode='wb', compresslevel=6, fileobj=zbuf)\n zfile.write(s)\n zfile.close()\n return zbuf.getvalue()", "title": "" }, { "docid": "2793235b25af7992913682d40db510a4", "score": "0.7311923", "text": "def gzip_str(g_str):\n compressed_str = io.BytesIO()\n with gzip.GzipFile(fileobj=compressed_str, mode=\"w\") as file_out:\n file_out.write((json.dumps(g_str).encode()))\n bytes_obj = compressed_str.getvalue()\n return bytes_obj", "title": "" }, { "docid": "c9b67fd5133b2c1580faca248f98b612", "score": "0.721868", "text": "def GzipEncode(s):\n with closing(StringIO()) as sio:\n with gzip.GzipFile(fileobj=sio, mode='wb') as gzfile:\n gzfile.write(escape.utf8(s))\n return sio.getvalue()", "title": "" }, { "docid": "faf0a2ab15abc1440e595edb0338a8f0", "score": "0.6872013", "text": "def doGzip(data, compresslevel=1):\n log_internal.debug(\"data len {}\".format(len(data)))\n\n str_io = cStringIO.StringIO()\n gz_file = gzip.GzipFile(mode='wb', compresslevel=1, fileobj=str_io)\n\n for offset in range(0, len(data), 2**30):\n gz_file.write(data[offset:offset+2**30])\n gz_file.close()\n\n return str_io.getvalue()", "title": "" }, { "docid": "e82c47f6bafebc6734ac473876d651d8", "score": "0.67945224", "text": "def _compress_string(self, decompressed_string):\n try:\n before_bytes = codecs.encode(decompressed_string, \"utf-8\")\n zip = gzip.compress(before_bytes)\n return base64.b64encode(zip)\n except:\n raise", "title": "" }, { "docid": "a40867834cd55ca957439ba05f15cc95", "score": "0.67526", "text": "def string2gzip(item):\n formatted_item = hash_format(item)\n return zlib.compress(formatted_item)", "title": "" }, { "docid": "5aa1c0a443899f66bb44d391c8c56ec8", "score": "0.6737803", "text": "def compress_string(text):\n return", "title": "" }, { "docid": "611ca4be019cb194543a6cbc9e5b3c7b", "score": "0.65627545", "text": "def is_gzipped(text):\n return text[:2] == b\"\\x1f\\x8b\"", "title": "" }, { "docid": "b518154e3f88621078a30725f342a935", "score": "0.65559685", "text": "def compress(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n result = func(*args, **kwargs)\n # pylint: disable=no-member\n if ('gzip' in bottle.request.headers.get('Accept-Encoding', '') and\n isinstance(result, str) and\n len(result) > 1024):\n if isinstance(result, str):\n result = result.encode('utf-8')\n tmp_fo = BytesIO()\n with gzip.GzipFile(mode='wb', fileobj=tmp_fo) as gzip_fo:\n gzip_fo.write(result)\n result = tmp_fo.getvalue()\n bottle.response.add_header('Content-Encoding', 'gzip')\n return result\n return wrapper", "title": "" }, { "docid": "cd4c48f7db2c6e7f604934987d04cef7", "score": "0.64803034", "text": "def get_gzip(self, service_id, version_number, name):\n\t\tcontent = self._fetch(\"/service/%s/version/%d/gzip/%s\" % (service_id, version_number, urllib.quote(name, safe='')))\n\t\treturn FastlyGzip(self, content)", "title": "" }, { "docid": "db58aa66406efa1248b2444a209acbeb", "score": "0.64555407", "text": "def compressStringToBytes(inputString):\n buf = BytesIO()\n buf.write(inputString.encode(\"utf-8\"))\n buf.seek(0)\n stream = BytesIO()\n compressor = gzip.GzipFile(fileobj=stream, mode='w')\n while True: # until EOF\n chunk = buf.read(8192)\n if not chunk: # EOF?\n compressor.close()\n return stream.getvalue()\n compressor.write(chunk)", "title": "" }, { "docid": "2e175f32a46b99ddf96c62797d824de5", "score": "0.6367349", "text": 
"def compressStringToBytes(inputString):\n bio = BytesIO()\n bio.write(inputString.encode(\"utf-8\"))\n bio.seek(0)\n stream = BytesIO()\n compressor = gzip.GzipFile(fileobj=stream, mode='w')\n while True: # until EOF\n chunk = bio.read(8192)\n if not chunk: # EOF?\n compressor.close()\n return stream.getvalue()\n compressor.write(chunk)", "title": "" }, { "docid": "4e039fa3a39ffacfe2a98e13647aa34e", "score": "0.6346414", "text": "def gzip(sourcefile, template=None, runas=None, options=None):\n cmd = [\"gzip\"]\n if options:\n cmd.append(options)\n cmd.append(\"{}\".format(sourcefile))\n\n return __salt__[\"cmd.run\"](\n cmd, template=template, runas=runas, python_shell=False\n ).splitlines()", "title": "" }, { "docid": "1b8267a619e41561539c66553d753e58", "score": "0.6341247", "text": "def compress(string, method=zlib):\r\n data = method.compress(string)\r\n return base64.encodestring(data)", "title": "" }, { "docid": "88e5e98a484f8a185986421baabc1db3", "score": "0.62888336", "text": "def encode_content(encoding='gzip', lazy=False):", "title": "" }, { "docid": "d1b6751cec0a29e66c80c96d95b02179", "score": "0.62672687", "text": "def GzipDecode(s):\n with closing(StringIO(s)) as sio:\n with gzip.GzipFile(fileobj=sio, mode='rb') as gzfile:\n return gzfile.read()", "title": "" }, { "docid": "f6b63a7d1691b8140535afaef249b92c", "score": "0.6264841", "text": "def zip_gz(job, input_path, input_id):\n work_dir = job.fileStore.getLocalTempDir()\n fa_path = os.path.join(work_dir, os.path.basename(input_path))\n if fa_path.endswith('.gz'):\n fa_path = fa_path[:-3]\n job.fileStore.readGlobalFile(input_id, fa_path, mutable=True)\n cactus_call(parameters=['gzip', '-f', os.path.basename(fa_path)], work_dir=work_dir)\n return job.fileStore.writeGlobalFile(fa_path + '.gz')", "title": "" }, { "docid": "c7562aaa88a5cb123eec7205f8836bd1", "score": "0.6159084", "text": "def get_compressed(data_str, reset_position=None):\n\n reset_position = True if reset_position is None else reset_position\n\n try:\n data_str_utf = data_str.encode(\"utf-8\")\n except AttributeError:\n try:\n data_str_utf = json.dumps(data_str).encode(\"utf-8\")\n except TypeError:\n if reset_position:\n data_str.seek(0)\n if not isinstance(data_str, io.TextIOBase): # file was opened in a binary mode\n data_str_utf = data_str.read()\n else: # file was opened in a text mode and need to be \"utf-8\" encoded\n data_str_utf = data_str.read().encode(\"utf-8\")\n return base64.b64encode(\n gzip.compress(data=data_str_utf)\n ).decode(\"utf-8\")", "title": "" }, { "docid": "00481ff0cf140c87fbb97e6f6a302f4d", "score": "0.6126108", "text": "def gzip_response(resp):\n web.webapi.header('Content-Encoding','gzip')\n zbuf = StringIO.StringIO()\n zfile = GzipFile(mode='wb',fileobj=zbuf,compresslevel=9)\n zfile.write(resp)\n zfile.close()\n data = zbuf.getvalue()\n web.webapi.header('Content-Length',str(len(data)))\n web.webapi.header('Vary','Accept-Encoding',unique=True)\n return data", "title": "" }, { "docid": "041921551050ab9a8c11ad56f9384f97", "score": "0.61215", "text": "def compress_gzip(self, filename):\n with open(filename, 'rb') as input_file:\n with gzip.open(filename + '.gz', 'wb') as output_file:\n shutil.copyfileobj(input_file, output_file)", "title": "" }, { "docid": "a1f6d371263e96a788fd17fa03ab065b", "score": "0.6061627", "text": "def _get_gz_data(data, filepath):\n data_obj = StringIO()\n filename = filepath.split(\"/\")[-1].replace(\".gz\",\"\")\n with gzip.GzipFile(filename=filename, mode='wb', fileobj=data_obj) as gzip_outfile:\n 
gzip_outfile.write(data)\n return data_obj.getvalue()", "title": "" }, { "docid": "42c43db5b62ebac1128a879f8d7a27ff", "score": "0.6033381", "text": "def _compress_fragments(self, fragments):\n compressed_fragments = [zlib.compress(fragment) for fragment in fragments]\n return compressed_fragments", "title": "" }, { "docid": "9256fe72d5f24225cca29a7fffa94837", "score": "0.5987921", "text": "def string_compress(data, compression_level=4, **kwargs):\n import base64\n import msgpack\n import brotli\n\n compressed = brotli.compress(\n msgpack.packb(\n data,\n use_bin_type=kwargs.pop(\"use_bin_type\", True),\n strict_types=kwargs.pop(\"strict_types\", True),\n **kwargs,\n ),\n quality=compression_level,\n )\n return base64.b64encode(compressed).decode(\"ascii\")", "title": "" }, { "docid": "15138d0394d3c92bc2a09faaa954f533", "score": "0.5957165", "text": "def compress_string(s):\n return base64.b64encode(compress(s.encode(\"utf-8\"))).decode(\"utf-8\")", "title": "" }, { "docid": "1eb7c7f29292ae2c022b3c4e33d82850", "score": "0.5945101", "text": "def download_gzip(url, output_path, cache=0):\n\n if is_cached(output_path, cache, 1):\n return\n\n response = requests.get(url)\n\n try:\n # Human-readable text, to ease debugging\n content = gzip.decompress(response.content).decode()\n except gzip.BadGzipFile as e:\n print(\"URL did not respond with a gzipped file: \" + url)\n raise(e)\n\n with open(output_path, \"w\") as f:\n f.write(content)", "title": "" }, { "docid": "977280d0b0c3fe3bc9294823091fc69b", "score": "0.5936043", "text": "def gzip_compress_file(filepath):\n import subprocess\n former_file = filepath + '.gz'\n if os.path.isfile(former_file):\n os.remove(former_file)\n subprocess.run([\"gzip\", filepath])", "title": "" }, { "docid": "7312019d96014c952ae9a73c9ec22e59", "score": "0.5893837", "text": "def write(self, data):\n self.gzip.write(data)", "title": "" }, { "docid": "ba43acec6f76f8a0118e6d607dbd13b5", "score": "0.5881855", "text": "def transparent_gzip(afile):\n if afile.read(2) == b'\\x1f\\x8b':\n afile.seek(0)\n return gzip.GzipFile(fileobj=afile, mode='r')\n else:\n afile.seek(0)\n return afile", "title": "" }, { "docid": "b5a54abdce83e2e2c372eb2dc68ffa79", "score": "0.58785707", "text": "def compress_json(data):\n json_data = json.dumps(data)\n encoded = json_data.encode('utf-8')\n compressed = gzip.compress(encoded)\n response = make_response(compressed)\n response.headers['Content-length'] = len(compressed)\n response.headers['Content-Encoding'] = 'gzip'\n response.headers['Content-Type'] = 'application/json'\n\n return response", "title": "" }, { "docid": "dc14ff9596f00a0a41619ff0715d59b8", "score": "0.5876226", "text": "def file_compress(fname):\n print ('Running gzip on', fname)\n subprocess.run (['gzip', fname])\n return", "title": "" }, { "docid": "689b2f6bb0fb0fb375b6ce679e4ed893", "score": "0.58075106", "text": "def _compress(data: NamedTag) -> bytes:\n data = data.save_to(compressed=False)\n return b\"\\x02\" + zlib.compress(data)", "title": "" }, { "docid": "7fe826ad304bf453b7b367919cdc6752", "score": "0.57567596", "text": "def gunzip(gzipfile, template=None, runas=None, options=None):\n cmd = [\"gunzip\"]\n if options:\n cmd.append(options)\n cmd.append(\"{}\".format(gzipfile))\n\n return __salt__[\"cmd.run\"](\n cmd, template=template, runas=runas, python_shell=False\n ).splitlines()", "title": "" }, { "docid": "82d81be1d56b4a7ac50a0e75ff7eefa1", "score": "0.57303536", "text": "def doGunzip(data):\n log_internal.debug(\"data len {}\".format(len(data)))\n\n str_io = 
cStringIO.StringIO(data)\n gz_file = gzip.GzipFile(mode='rb', fileobj=str_io)\n read_csio = cStringIO.StringIO()\n\n while True:\n uncompressed_data = gz_file.read(2**30)\n if uncompressed_data:\n read_csio.write(uncompressed_data)\n else:\n break\n\n return read_csio.getvalue()", "title": "" }, { "docid": "b2ee3dd2f40bffd6b2dc5d32eb2bd4b4", "score": "0.5722497", "text": "def _gzip_file(self):\n\n with open(self.path, 'rb') as f_in, gzip.open(\n os.path.join(self._dir,\n self.hosted_name), 'wb') as f_out:\n\n f_out.writelines(f_in)\n\n utils.delete_paths(self.path)", "title": "" }, { "docid": "dcc3c6a27e59e489d29286455cc34bfa", "score": "0.5700953", "text": "def folderer(gzfile):\n # Define the gzip command line call\n gzipcommand = \"gzip -d --force %s\" % gzfile\n # Run the call\n subprocess.call(gzipcommand, shell=True, stdout=open(os.devnull, 'wb'), stderr=open(os.devnull, 'wb'))\n dotter()", "title": "" }, { "docid": "6a96eb75b51c7bb7911e4a76f2f9a050", "score": "0.5682337", "text": "def gzip2string(item):\n formatted_item = hash_format(item)\n return zlib.decompress(formatted_item)", "title": "" }, { "docid": "c6a040684a3b035a7c1fc421ac2dd229", "score": "0.567579", "text": "def zip_compress(content_generator, level=7):\n compressor = zlib.compressobj(level)\n for chunk in content_generator:\n compressed = compressor.compress(chunk)\n if compressed:\n yield compressed\n tail = compressor.flush(zlib.Z_FINISH)\n if tail:\n yield tail", "title": "" }, { "docid": "350db40a2256d2aae1a4741efa32ee42", "score": "0.56527686", "text": "def compress_gzip(fromfile, tofile):\n with gzip.open(tofile, 'wb') as outfile:\n with open(fromfile, 'rb') as infile:\n shutil.copyfileobj(infile, outfile)", "title": "" }, { "docid": "a2523e0c3366b92190380401a47b46ac", "score": "0.5647768", "text": "def compress_dump(self, data):\n with gzip.open(self.dump_path, 'wt') as f:\n dump(data, f)", "title": "" }, { "docid": "e25eb5eaaeab35fa3f36379fcec2eb11", "score": "0.564502", "text": "def _decompress(compressed):\n gzippedstream = StringIO.StringIO(compressed)\n gzipper = gzip.GzipFile(fileobj=gzippedstream)\n data = gzipper.read()\n return data", "title": "" }, { "docid": "9974666698cd3605799f965a269d07d9", "score": "0.56319463", "text": "def decompress_string(text):\n return", "title": "" }, { "docid": "8f97b87e5577800070d11c32c35c43a0", "score": "0.5617337", "text": "def decompress_gzip(self, filename):\n with gzip.open(filename, 'rb') as input_file:\n with open(filename.replace('.gz', ''), 'wb') as output_file:\n shutil.copyfileobj(input_file, output_file)\n\n os.remove(filename)", "title": "" }, { "docid": "c63bf14c2f7645dc0371c8cfe22284c6", "score": "0.56157184", "text": "def gzipped(f: T) -> T:\n\n @functools.wraps(f)\n def view_func(*args, **kwargs):\n @after_this_request\n def zipper(response):\n accept_encoding = request.headers.get(\"Accept-Encoding\", \"\")\n\n if \"gzip\" not in accept_encoding.lower():\n return response\n\n response.direct_passthrough = False\n\n if (\n response.status_code < 200\n or response.status_code >= 300\n or \"Content-Encoding\" in response.headers\n ):\n return response\n gzip_buffer = IO()\n gzip_file = gzip.GzipFile(mode=\"wb\", fileobj=gzip_buffer)\n gzip_file.write(response.data)\n gzip_file.close()\n\n response.data = gzip_buffer.getvalue()\n response.headers[\"Content-Encoding\"] = \"gzip\"\n response.headers[\"Vary\"] = \"Accept-Encoding\"\n response.headers[\"Content-Length\"] = len(response.data)\n\n return response\n\n return f(*args, **kwargs)\n\n return cast(T, 
view_func)", "title": "" }, { "docid": "6ef966d572f336a2494abdf07a9e9d31", "score": "0.5612811", "text": "def unzip_gz(job, input_path, input_id):\n work_dir = job.fileStore.getLocalTempDir()\n assert input_path.endswith('.gz')\n fa_path = os.path.join(work_dir, os.path.basename(input_path))\n job.fileStore.readGlobalFile(input_id, fa_path, mutable=True)\n cactus_call(parameters=['gzip', '-fd', os.path.basename(fa_path)], work_dir=work_dir)\n return job.fileStore.writeGlobalFile(fa_path[:-3])", "title": "" }, { "docid": "e59eb22df29b16e5904cbaf72efcccdd", "score": "0.555883", "text": "def deflate_and_base64_encode(string):\n data = zlib.compress(string.encode(\"utf-8\"))\n return base64.b64encode(data).decode(\"utf-8\")", "title": "" }, { "docid": "0e6e6c75aba2e06abdbc4b5f90f9f5e7", "score": "0.5556114", "text": "def list_gzip(self, service_id, version_number):\n\t\tcontent = self._fetch(\"/service/%s/version/%d/gzip\" % (service_id, version_number))\n\t\treturn map(lambda x: FastlyGzip(self, x), content)", "title": "" }, { "docid": "51dc9d3ee67fee71a2e902eedd05ba76", "score": "0.55481076", "text": "def gzip_xml(fname):\n with open(fname, 'r') as f:\n fdata = f.read()\n with gzip.open(fname + '.gz', 'w') as gf:\n gf.write(fdata)\n os.remove(fname)\n print (fname + '.gz successfully archived')", "title": "" }, { "docid": "ced78c25be3412c5fbf050ea2f96db99", "score": "0.55017775", "text": "def delete_gzip(self, service_id, version_number, name):\n\t\tcontent = self._fetch(\"/service/%s/version/%d/gzip/%s\" % (service_id, version_number, urllib.quote(name, safe='')), method=\"DELETE\")\n\t\treturn self._status(content)", "title": "" }, { "docid": "ca87588ba5aeacedd7a9f49f18a15083", "score": "0.54942983", "text": "def update_gzip(self, service_id, version_number, name_key, **kwargs):\n\t\tbody = self._formdata(kwargs, FastlyGzip.FIELDS)\n\t\tcontent = self._fetch(\"/service/%s/version/%d/gzip/%s\" % (service_id, version_number, urllib.quote(name_key, safe='')), method=\"PUT\", body=body)\n\t\treturn FastlyGzip(self, content)", "title": "" }, { "docid": "1b866684f5cca5b862ebee05e393c085", "score": "0.54734886", "text": "def ungzip(path):\n assert path.endswith(\".gz\")\n dest_path = path.replace(\".gz\", \"\")\n if not os.path.isfile(dest_path):\n with gzip.open(path, \"rb\") as f_in:\n with open(dest_path, \"wb\") as f_out:\n shutil.copyfileobj(f_in, f_out)\n return dest_path", "title": "" }, { "docid": "72b356d0bebdfac0af6145fde897c231", "score": "0.54157346", "text": "def compressFile(file_path):\n log(\"Compressing %s ...\" % file_path)\n f_in = open(file_path, 'rb')\n gzip_file_path = file_path + '.gz'\n f_out = gzip.open(gzip_file_path, 'wb')\n f_out.writelines(f_in)\n f_out.close()\n os.remove(file_path)\n log(\"Compressing done : %s\" % gzip_file_path)", "title": "" }, { "docid": "2ad93f0632b7a1651d0aef29c0f954ce", "score": "0.5390739", "text": "def test_gzip(self):\n dset = self.f.create_dataset('foo', (20, 30), compression='gzip',\n compression_opts=9)\n self.assertEqual(dset.compression, 'gzip')\n self.assertEqual(dset.compression_opts, 9)", "title": "" }, { "docid": "685980334ea10fb7b79263c0564eaf4f", "score": "0.5382202", "text": "def _decompress_string(self, compressed_string):\n try:\n before_bytes = base64.b64decode(compressed_string)\n zip = gzip.decompress(before_bytes)\n return codecs.decode(zip, \"utf-8\")\n except:\n raise", "title": "" }, { "docid": "ea4d0b661c40de2d5bcd30bc08f51245", "score": "0.5379161", "text": "def should_save_gzipped_copy(path):\n\n return 
matches_patterns(path, GZIP_PATTERNS)", "title": "" }, { "docid": "91b1ea82c7d2c80d336bef21a9722fb8", "score": "0.53713596", "text": "def encode_data(self, data):\r\n \r\n # The Accept-Encoding header sent by the client.\r\n encoding = self.headers.get(\"Accept-Encoding\")\r\n \r\n if encoding.find(\"gzip\") != -1:\r\n # If the client accepts gzip, we encode the response in gzip.\r\n self.send_header(\"Content-Encoding\", \"gzip\")\r\n \r\n bytes_io = io.BytesIO()\r\n gzip_file = gzip.GzipFile(fileobj=bytes_io, mode=\"wb\")\r\n gzip_file.write(data)\r\n gzip_file.close()\r\n \r\n result = bytes_io.getvalue()\r\n bytes_io.close()\r\n \r\n return result\r\n else:\r\n return data", "title": "" }, { "docid": "8b3383eec71972c0f59ebf62bdc86f5e", "score": "0.5370667", "text": "def backup_to_gzip():\n from fabdeploy import apache\n\n # Gzip folder and remove\n with settings(warn_only = True):\n run('tar -cvzpf ' + env.today_backup_gzip + ' -C ' + env.today_backup_folder + ' . >/dev/null 2>&1')\n run('rm -rf ' + env.today_backup_folder)\n run('rm -f '+ env.log_path + '/apache/*')\n run(\"echo '*\\n!.gitignore' > \"+ env.log_path + \"/apache/.gitignore\")\n apache.restart()", "title": "" }, { "docid": "7004541643da1f6c9714df3d21c4b4b4", "score": "0.5367365", "text": "def compress(input_file, output_file):\n\n with open(input_file, \"rb\") as in_file, gzip.open(output_file, \"wb\") as out_file:\n shutil.copyfileobj(in_file, out_file)", "title": "" }, { "docid": "292981900034edf4e8773aeb0cd6d72a", "score": "0.5363993", "text": "def compress(parser, token):\r\n from compressor.templatetags.compress import compress\r\n return compress(parser, token)", "title": "" }, { "docid": "1cc1437c4ac63267f0a303961e67b2a4", "score": "0.53273726", "text": "def load_gz(data_source, chunk_size=None, verbose=False):\n raise NotImplementedError", "title": "" }, { "docid": "ba5b25502756a1e5f202c0d33e19398e", "score": "0.5324681", "text": "def retrieve_gzip(url, cache):\n head, tail = split(url[6:])\n path = join(cache, tail)\n gzpath = path + '.gz'\n if not exists(gzpath):\n print 'Accessing', tail, 'at CGRO SSC...'\n name, hdrs = urllib.urlretrieve(url, path)\n if name != path:\n raise ValueError('URL target/name mismatch!')\n check_call(['gzip', path])\n return GzipFile(gzpath, 'r')\n return GzipFile(gzpath, 'r')", "title": "" }, { "docid": "beb28db6d7c6976a300fef09dfe966b8", "score": "0.5314302", "text": "def test_compress(string: str, expected: str):\n assert compress(string) == expected", "title": "" }, { "docid": "22f91fe55bfcec59bd956b94fb3bcb1c", "score": "0.5299723", "text": "def compress_javascript_data(data):\n tmp_fname = tempfile.mktemp(\"urfastr-player-air-min.js\")\n open(tmp_fname, \"w+\").write(data)\n cmdline = [\"yui-compressor\", tmp_fname]\n compressed_data = subprocess.Popen(cmdline, stdout=subprocess.PIPE).communicate()[0]\n os.remove(tmp_fname) \n return compressed_data", "title": "" }, { "docid": "39041f8cb79cf5b16d00a86f7224e7ad", "score": "0.5277539", "text": "def decompressBytesToString(inputBytes):\n buf = BytesIO()\n stream = BytesIO(inputBytes)\n decompressor = gzip.GzipFile(fileobj=stream, mode='r')\n while True: # until EOF\n chunk = decompressor.read(8192)\n if not chunk:\n decompressor.close()\n buf.seek(0)\n return buf.read().decode(\"utf-8\")\n buf.write(chunk)\n return None", "title": "" }, { "docid": "7cc5e80b568b6cd40f8c0dcc2906f33c", "score": "0.52567154", "text": "def extract_gzip(fromfile, tofile):\n with gzip.open(fromfile, 'rb') as infile:\n with open(tofile, 'wb') as 
outfile:\n shutil.copyfileobj(infile, outfile)", "title": "" }, { "docid": "f6083347045939d1e722f7ddc5fc21cb", "score": "0.5251583", "text": "def gz_compress(file_to_compress, clean=True):\n # Check if the input file exists\n if not os.path.isfile(file_to_compress):\n raise ConnectomistBadFileError(file_to_compress)\n\n # Zip the input file\n gz_file = file_to_compress + \".gz\"\n with open(file_to_compress, 'rb') as f_in, gzip.open(gz_file, 'wb') as f_out:\n shutil.copyfileobj(f_in, f_out)\n if clean:\n os.remove(file_to_compress)\n if not os.path.isfile(gz_file):\n raise ConnectomistBadFileError(gz_file)\n\n return gz_file", "title": "" }, { "docid": "7121ee449bb5906f9223a02c054de947", "score": "0.52508414", "text": "def gzipped_url_download(url: str, write_location: str, filename: str) -> None:\n\n print('Downloading Gzipped Data from {}'.format(url))\n\n with open(write_location + '{filename}'.format(filename=filename), 'wb') as outfile:\n outfile.write(gzip.decompress(requests.get(url, allow_redirects=True, verify=False).content))\n outfile.close()\n\n return None", "title": "" }, { "docid": "0533865b0549f827d3a73abfc2caccec", "score": "0.5248998", "text": "def _is_gzipped(path):\n with open(path, 'rb') as fin:\n return fin.read(len(_GZIP_MAGIC)) == _GZIP_MAGIC", "title": "" }, { "docid": "0018837b4b81695d84b5da998066202b", "score": "0.52472335", "text": "def func_handle_gzip( self, lstr_files ):\n\n if not lstr_files:\n return lstr_files\n\n # Handle in case a string is accidently given\n if isinstance( lstr_files, basestring ):\n lstr_files = [ lstr_files ]\n\n # If gzipped files are used then let pipeline know so the bash shell is used\n lstr_return_string = []\n for str_uncompress_files in lstr_files:\n if os.path.splitext( str_uncompress_files )[1] == STR_GZIPPED_EXT:\n lstr_return_string.append( \" \".join( [ \"<( zcat\", str_uncompress_files, \")\" ] ) )\n self.f_use_bash = True\n else:\n lstr_return_string.append( str_uncompress_files )\n return lstr_return_string", "title": "" }, { "docid": "c33b8090f631ae2a726b6e9185ab3513", "score": "0.52445495", "text": "def compress(self, data):\n \n datasize = len(data)\n \n if datasize == 0:\n return self.space.wrap(\"\")\n \n if not self.running:\n raise OperationError(self.space.w_ValueError,\n self.space.wrap(\"this object was already flushed\"))\n \n out_bufsize = SMALLCHUNK\n out_buf = create_string_buffer(out_bufsize)\n \n in_bufsize = datasize\n in_buf = create_string_buffer(in_bufsize)\n in_buf.value = data\n \n self.bzs.next_in = cast(in_buf, POINTER(c_char))\n self.bzs.avail_in = in_bufsize\n self.bzs.next_out = cast(out_buf, POINTER(c_char))\n self.bzs.avail_out = out_bufsize\n \n temp = []\n while True:\n bzerror = libbz2.BZ2_bzCompress(byref(self.bzs), BZ_RUN)\n if bzerror != BZ_RUN_OK:\n _catch_bz2_error(self.space, bzerror)\n\n if self.bzs.avail_in == 0:\n break\n elif self.bzs.avail_out == 0:\n total_out = _bzs_total_out(self.bzs)\n data = \"\".join([out_buf[i] for i in range(total_out)])\n temp.append(data)\n \n out_bufsize = _new_buffer_size(out_bufsize)\n out_buf = create_string_buffer(out_bufsize)\n self.bzs.next_out = cast(out_buf, POINTER(c_char))\n self.bzs.avail_out = out_bufsize\n\n if temp:\n total_out = _bzs_total_out(self.bzs)\n data = \"\".join([out_buf[i] for i in range(total_out - len(temp[0]))])\n temp.append(data)\n return self.space.wrap(\"\".join(temp))\n\n total_out = _bzs_total_out(self.bzs)\n res = \"\".join([out_buf[i] for i in range(total_out)])\n return self.space.wrap(res)", "title": "" }, { 
"docid": "6f8c74400de03857c71c8032bd705a8b", "score": "0.52398807", "text": "def test_gzip_implicit(self):\n dset = self.f.create_dataset('foo', (20, 30), compression='gzip')\n self.assertEqual(dset.compression, 'gzip')\n self.assertEqual(dset.compression_opts, 4)", "title": "" }, { "docid": "7d6abf38396aeb5c042a814880fa892d", "score": "0.52324885", "text": "def is_gzipped(filename):\t\n\ttry:\n\t\timport magic\n\t\tms = magic.open(magic.MAGIC_NONE)\n\t\tms.load()\n\t\tif re.search(\"^gzip compressed data.*\", ms.file(filename)):\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\texcept:\n\t\tfrom os.path import splitext\n\t\t\n\t\tif not QUIET:\n\t\t\tprint(\"Using fallback detection... please install python-magic for better gzip detection.\")\n\t\t\n\t\tif splitext(filename)[1] == \".gz\":\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False", "title": "" }, { "docid": "2e7797b88bd1ed0247ab195365853302", "score": "0.5224181", "text": "def flimage_enable_gzip():\n _flimage_enable_gzip = library.cfuncproto(\n library.load_so_libflimage(), \"flimage_enable_gzip\",\n None, [],\n \"\"\"void flimage_enable_gzip()\"\"\")\n library.check_if_flimageinitialized()\n _flimage_enable_gzip()", "title": "" }, { "docid": "e26fb4a821580059d46eb3f9e4ddf771", "score": "0.5219681", "text": "def compressed_string(string: str) -> str:\n if len(string) < 3:\n return string\n\n compressed_chars = []\n count = 1\n for i, c in enumerate(string):\n try:\n if c == string[i + 1]:\n count += 1\n else:\n compressed_chars.append(c)\n compressed_chars.append(str(count))\n count = 1\n except IndexError:\n compressed_chars.append(c)\n compressed_chars.append(str(count))\n\n compressed_str = ''.join(compressed_chars)\n if len(compressed_str) < len(string):\n return compressed_str\n return string", "title": "" }, { "docid": "fda0dab455f0e4bffd7b508eae713b82", "score": "0.520576", "text": "def compress(text):\n components = []\n current = None\n count = 0\n\n for c in text:\n if not current:\n current = c\n\n if c == current:\n count += 1\n else:\n components.append(current + str(count))\n current = c\n count = 1\n\n components.append(current + str(count))\n compressed = \"\".join(components)\n\n # Only return the compressed string if it's actually shorter\n return compressed if len(compressed) < len(text) else text", "title": "" }, { "docid": "78bf0797def5e67c7b7ba1779c6f75b6", "score": "0.52039593", "text": "def compress(data):\n p32 = lambda x: struct.pack(\"<I\", x)\n return hexlify(p32(len(data)) + zlib.compress(data))", "title": "" }, { "docid": "f7723ec6779747609647b90130da0be7", "score": "0.5177382", "text": "def gunzip_bytes_obj(bytes_obj):\n\n binary_stream = io.BytesIO()\n binary_stream.write(bytes_obj)\n binary_stream.seek(0)\n\n with gzip.GzipFile(fileobj=binary_stream, mode='rb') as f:\n gunzipped_bytes_obj = f.read()\n\n return gunzipped_bytes_obj.decode('utf-8')", "title": "" }, { "docid": "eaaef87d5c5cb2529e6f1e23c76ec6fe", "score": "0.5165345", "text": "def s3_gzip_writer(element_tree, output_file, logger=None):\n if logger is None:\n logger = writer_logger\n url = urlparse(output_file)\n s3 = boto3.resource(\"s3\")\n bucket = s3.Bucket(url.netloc)\n key_path = url.path.lstrip(\"/\")\n with BytesIO() as compressed_fp:\n # Write to Gzip file handle in order to compress directly to the\n # compressed_fp in memory buffer\n with gzip.GzipFile(fileobj=compressed_fp, mode=\"wb\") as gz:\n element_tree.write(gz, encoding=\"UTF-8\")\n compressed_fp.seek(0)\n bucket.upload_fileobj(\n compressed_fp,\n key_path,\n 
{\"ContentType\": \"text/xml\", \"ContentEncoding\": \"gzip\"},\n )\n logger.info(\n \"Augmented Diff written to: Bucket {}, Path: {} (gzip=True)\".format(\n url.netloc, key_path\n )\n )", "title": "" }, { "docid": "cb9e577a88f430667f46253e23d8c5fb", "score": "0.5161484", "text": "def compress(self, chunk):\n return _compress(self._context, chunk)", "title": "" }, { "docid": "34bd9eb29702ea761546a54663bff36a", "score": "0.5161296", "text": "def decompress_gz(filename, whereTo=\".\", fLOG=noLOG):\n if not filename.endswith(\".gz\"):\n raise NameError(\"the file should end with .gz: \" + filename)\n dest = os.path.join(whereTo, filename[:-3])\n with gzip.open(filename, 'rb') as f:\n with open(dest, \"wb\") as g:\n g.write(f.read())\n return [dest]", "title": "" }, { "docid": "aa1a11aef7a14ac398968e88fea927a2", "score": "0.51484096", "text": "def test_compress():\n # Spec test\n assert_equals('c1o17l1k1a1n1g1a1r1o3',\n hw1.compress('cooooooooooooooooolkangarooo'))\n assert_equals('a3', hw1.compress('aaa'))\n assert_equals('', hw1.compress(''))\n # Addtional test\n assert_equals('c1a1b2a1g1e1', hw1.compress('cabbage'))\n assert_equals('b4a2o1r1o3', hw1.compress('bbbbaaorooo'))", "title": "" }, { "docid": "f12b18ef1404c205a9ed281448693754", "score": "0.5142059", "text": "def get_gzipped_name(name):\n\n file_ext_index = name.rfind('.')\n file_name, file_ext = name[:file_ext_index], name[file_ext_index:]\n return '%s.gz%s' % (file_name, file_ext)", "title": "" }, { "docid": "a82be0c2b7a565ac9f55ece8ab1961a9", "score": "0.5134156", "text": "def gzip_compression_level(self: \"Options\") -> int:\n return self._gzip_compression_level", "title": "" }, { "docid": "5b15592681626219e91f6167994f4fd6", "score": "0.51333696", "text": "def ungzip_stream(stream):\n try:\n gzipped_stream = GzipFile(fileobj=StringIO(stream))\n return gzipped_stream.read()\n except IOError:\n return stream", "title": "" }, { "docid": "cbcee486a5e67601072cb418c3463d2e", "score": "0.50814444", "text": "def compress_file(file_path):\n with open(file_path, \"rb\") as f_in:\n with gzip.open(file_path + \".gz\", \"wb\", compresslevel=6) as f_out:\n shutil.copyfileobj(f_in, f_out)\n os.unlink(file_path)", "title": "" }, { "docid": "387ea2aa398152d2589bc0782fbf9840", "score": "0.50783753", "text": "def decompressBytesToString(inputBytes):\n bio = BytesIO()\n stream = BytesIO(inputBytes)\n decompressor = gzip.GzipFile(fileobj=stream, mode='r')\n while True: # until EOF\n chunk = decompressor.read(8192)\n if not chunk:\n decompressor.close()\n bio.seek(0)\n return bio.read().decode(\"utf-8\")\n bio.write(chunk)\n return None", "title": "" }, { "docid": "089169730b8ba06fd1b4ebe74f828209", "score": "0.50472313", "text": "def gunzip_file(gz_path, txt_path):\n print(\"Unpacking %s to %s\" % (gz_path, txt_path))\n with gzip.open(gz_path +\"/\"+ data + \".txt.gz\", \"r\") as gz_file:\n full_data = gz_file.readlines()\n print(\"origianl size\", len(full_data))\n length_all = len(full_data)\n length_subset = int(length_all * keep_rate/2) * 2\n with open(txt_path +\"/\"+ data + \".txt\", \"wb\") as new_file:\n new_file.writelines(full_data[:length_subset])\n print(\"subsetted size\", length_subset)", "title": "" }, { "docid": "b5b80c333f49349cec083be77dba2208", "score": "0.50327855", "text": "def optionally_gzipped_file(arg):\n if not os.path.isfile(arg):\n raise argparse.ArgumentTypeError(arg + \" does not exist\")\n dir_name = os.path.dirname(os.path.realpath(arg))\n with open(arg, \"rb\") as fh:\n magic_number = fh.read(2)\n open_func = 
gzip.open if magic_number == \"\\x1f\\x8b\" else open\n return open_func(arg), dir_name", "title": "" }, { "docid": "15a406fefd43450a0589a2efd713393f", "score": "0.5016486", "text": "def compress_and_hex(value):\n assert type(value) == str, \\\n \"Non-str argument passed to compress_and_hex, maybe Python 2/3 compatibility issue\\n\" \\\n \"Argument was type {} with value {}\".format(value.__class__.__name__, value)\n compr = zlib.compress(bytes(value) if six.PY2 else bytes(value, \"utf-8\"))\n return binascii.hexlify(compr)", "title": "" }, { "docid": "ae4be06b507f31c265474d71d63de359", "score": "0.50087065", "text": "def decompressFile(path1, path2):\n\n with gzip.open(path1, \"rb\") as f1:\n with open(path2, \"wb\") as f2:\n contents = f1.read()\n f2.write(contents)\n\n return()", "title": "" }, { "docid": "2f40b31f4cb58a7de2f25c295a805fbc", "score": "0.4996566", "text": "def htmllibmanager_gzip(self, htmllibmanager_gzip: ConfigNodePropertyBoolean):\n\n self._htmllibmanager_gzip = htmllibmanager_gzip", "title": "" }, { "docid": "95138ca6fb4399c964f94db9ec10d66c", "score": "0.49587753", "text": "def gzipped_ftp_url_download(url: str, write_location: str, filename: str) -> None:\n\n # get ftp server info\n server = url.replace('ftp://', '').split('/')[0]\n directory = '/'.join(url.replace('ftp://', '').split('/')[1:-1])\n file = url.replace('ftp://', '').split('/')[-1]\n write_loc = write_location + '{filename}'.format(filename=file)\n print('Downloading Gzipped data from FTP Server: {}'.format(url))\n with closing(ftplib.FTP(server)) as ftp, open(write_loc, 'wb') as fid:\n ftp.login(); ftp.cwd(directory); ftp.retrbinary('RETR {}'.format(file), fid.write)\n print('Decompressing and Writing Gzipped Data to File')\n with gzip.open(write_loc) as fid_in:\n with open(write_loc.replace('.gz', ''), 'wb') as file_loc:\n file_loc.write(fid_in.read())\n # change filename and remove gzipped and original files\n if filename != '': os.rename(re.sub(zip_pat, '', write_loc), write_location + filename)\n os.remove(write_loc) # remove compressed file\n\n return None", "title": "" }, { "docid": "a3121a4ec71e18c71663ed0aea9173c3", "score": "0.49579114", "text": "def gz(lon, lat, height, model, dens=None, ratio=RATIO_G,\n njobs=1, pool=None, delta=DELTA):\n field = 'gz'\n result = _dispatcher(field, lon, lat, height, model, dens=dens,\n ratio=ratio, njobs=njobs, pool=pool,\n delta=delta)\n result *= SI2MGAL*G\n return result", "title": "" }, { "docid": "730b7ccba54b7b34994dd177d169389e", "score": "0.4955858", "text": "def compress(text, hcode):\n assert isinstance(text, str)\n return \"\".join((hcode[ch] for ch in text))", "title": "" }, { "docid": "4c4c347f02388ee543b2f7de199a77e1", "score": "0.49543616", "text": "def compress(*fnames):\n for fname in fnames:\n if fname.endswith(\".gz\"): continue\n if os.path.exists(fname + '.gz'): continue\n subprocess.call(['gzip', fname])", "title": "" }, { "docid": "2a1095e06035edce42e7384ee3fcb6c5", "score": "0.4950033", "text": "def compress(content, tree=None):\n if tree is None:\n # Generate code from input file\n tree = generate_tree(content)\n code = huffman_code(tree)\n\n # First 6 bytes = size of the uncompressed data\n bits = bin(len(content))[2:].rjust(48, '0')\n # Convert content to bits using code\n for c in content:\n if c in code:\n bits += code[c]\n else:\n # If byte not found in code\n bits += code[0]\n\n # Transform bit string to bytes\n bytestring = bytes([int(bits[i:i+8].ljust(8, '0'), 2) for i in range(0, len(bits), 8)])\n\n return bytestring, tree", 
"title": "" }, { "docid": "9a927425db8e43c1c0e90b73e74eed89", "score": "0.49311826", "text": "def get_axz_content(fname):\n with gzip.open(fname, mode='rb') as f:\n content = f.read().decode('UTF-16')\n return content", "title": "" }, { "docid": "2d621a6765fe011dce30166b18ca8507", "score": "0.4925961", "text": "def string_compressor(string):\n previous = None\n count = 0\n compressed_list = []\n for char in string:\n if previous is None:\n count += 1\n previous = char\n elif char == previous:\n count += 1\n else:\n compressed_list.append(previous + str(count))\n count = 1\n previous = char\n compressed_list.append(previous + str(count))\n\n # Convert list to string\n compressed_string = ''.join(compressed_list)\n\n if len(compressed_string) > len(string):\n compressed_string = string\n\n return compressed_string", "title": "" }, { "docid": "477cec4058cc34b72ecd9a2e392b3ada", "score": "0.49255037", "text": "def open_maybe_gzipped(filename):\n with open(filename, 'rb') as test_read:\n byte1, byte2 = ord(test_read.read(1)), ord(test_read.read(1))\n if byte1 == 0x1f and byte2 == 0x8b:\n f = gzip.open(filename, mode='rt')\n else:\n f = open(filename, 'rt')\n return f", "title": "" }, { "docid": "55c893eb60a9731c04b8c09a76c5f568", "score": "0.4916086", "text": "def gz(lons, lats, heights, tesseroids, dens=None, ratio=1.):\n # Multiply by -1 so that z is pointing down for gz and the gravity anomaly\n # doesn't look inverted (ie, negative for positive density)\n return -1*SI2MGAL*_optimal_discretize(tesseroids, lons, lats, heights,\n _kernels.gz, ratio, dens)", "title": "" }, { "docid": "e992d294e48d87107f329d00848b6e53", "score": "0.49145454", "text": "def zipper(in_fn):\n print(\"zipping file...\")\n out_fn = in_fn + '.gz'\n if not os.path.exists(in_fn):\n raise ValueError(\"{} does not exist\".format(in_fn))\n if os.path.exists(out_fn):\n print('zipping tool not run, {} already exsists.'.format(out_fn))\n return out_fn\n cmd = 'gzip {}'.format(in_fn)\n subprocess.check_call(cmd, shell = True)\n return out_fn", "title": "" } ]
6200d473f1718524d9d2c9c3a1298ca0
For the most part, reformatting of
[ { "docid": "53b89fad17638ca30b49dff33075ec2b", "score": "0.0", "text": "def plot_county_errors(model, svg_file=Path(\"data/counties.svg\"), save_colorbar=True):\n\n model_sd = torch.load(model, map_location=\"cpu\")\n\n model_dir = model.parents[0]\n\n real_values = model_sd[\"test_real\"]\n pred_values = model_sd[\"test_pred\"]\n\n gp = True\n try:\n gp_values = model_sd[\"test_pred_gp\"]\n except KeyError:\n gp = False\n\n indices = model_sd[\"test_indices\"]\n\n pred_err = pred_values - real_values\n pred_dict = {}\n for idx, err in zip(indices, pred_err):\n state, county = idx\n\n state = str(state).zfill(2)\n county = str(county).zfill(3)\n\n pred_dict[state + county] = err\n\n model_info = model.name[:-8].split(\"_\")\n\n colors = [\n \"#b2182b\",\n \"#d6604d\",\n \"#f4a582\",\n \"#fddbc7\",\n \"#d1e5f0\",\n \"#92c5de\",\n \"#4393c3\",\n \"#2166ac\",\n ]\n\n _single_plot(\n pred_dict, svg_file, model_dir / f\"{model_info[0]}_{model_info[1]}.svg\", colors\n )\n\n if gp:\n gp_pred_err = gp_values - real_values\n gp_dict = {}\n for idx, err in zip(indices, gp_pred_err):\n state, county = idx\n\n state = str(state).zfill(2)\n county = str(county).zfill(3)\n\n gp_dict[state + county] = err\n\n _single_plot(\n gp_dict, svg_file, model_dir / f\"{model_info[0]}_{model_info[1]}_gp.svg\", colors\n )\n\n if save_colorbar:\n _save_colorbar(model_dir / \"colorbar.png\", colors)", "title": "" } ]
[ { "docid": "c71ddc9cfaee61c31bb33d4b6057df39", "score": "0.6501458", "text": "def Format():", "title": "" }, { "docid": "6a147248e93e4251e8b209992430c030", "score": "0.61220425", "text": "def format(self):\n ...", "title": "" }, { "docid": "df179818f68aaad3ea35380912ff6532", "score": "0.57476217", "text": "def convert_or():\r\n lines = list()\r\n lines.append(\"@SP\")\r\n lines.append(\"M=M-1\")\r\n lines.append(\"A=M\")\r\n lines.append(\"D=M\")\r\n lines.append(\"A=A-1\")\r\n lines.append(\"M=D|M\")\r\n return lines", "title": "" }, { "docid": "8ccbaa20db9a4390a6b541e606e9ddab", "score": "0.5713759", "text": "def formatOutput(self,data):", "title": "" }, { "docid": "db16baed27eac84fccefee232d6a9b91", "score": "0.5606879", "text": "def format():\n pass", "title": "" }, { "docid": "7759ca2808f431d245258302363265dd", "score": "0.5603964", "text": "def Format(self):\n pass", "title": "" }, { "docid": "4cc8294366b278ad313ac4ad8b3754a8", "score": "0.5599693", "text": "def _convert(self, raw):", "title": "" }, { "docid": "ce2536e81a745f907a112f6a42177a68", "score": "0.55576646", "text": "def tidyup():\n pass", "title": "" }, { "docid": "ce2536e81a745f907a112f6a42177a68", "score": "0.55576646", "text": "def tidyup():\n pass", "title": "" }, { "docid": "22510bc4ded6a3688591060b25eaf310", "score": "0.55304396", "text": "def convert_sub():\r\n lines = list()\r\n lines.append(\"@SP\")\r\n lines.append(\"M=M-1\")\r\n lines.append(\"A=M-1\")\r\n lines.append(\"D=M\")\r\n lines.append(\"A=A+1\")\r\n lines.append(\"D=D-M\")\r\n lines.append(\"A=A-1\")\r\n lines.append(\"M=D\")\r\n return lines", "title": "" }, { "docid": "732cfb872b0d46e1a68498573e8772b2", "score": "0.5491052", "text": "def parse(self):", "title": "" }, { "docid": "35e99268ba002df131cb848b2fa0a5f1", "score": "0.54908115", "text": "def fixformat(self, data, delta=0):\n\t\treturn data # most format don't need relocations", "title": "" }, { "docid": "d2f1e91f278ef85a463645998c6a5ade", "score": "0.54617196", "text": "def main():\n format_data()", "title": "" }, { "docid": "56015ac5b66d6bf9a29760673f63b889", "score": "0.5415842", "text": "def format(self, fmt):\n ...", "title": "" }, { "docid": "b66bf2249191e750e03026b5413bbb00", "score": "0.5409906", "text": "def format(computer):", "title": "" }, { "docid": "b29bf444b2f1e55cd38f171295bd6ec5", "score": "0.53969765", "text": "def _str_format(self):", "title": "" }, { "docid": "0fbd79d7fc708a959844df3d01bf44a9", "score": "0.53943026", "text": "def format_output(output):\n # Feel free to format the same", "title": "" }, { "docid": "adef492ca71fcb506f2bdaa3365326b8", "score": "0.53793705", "text": "def _preprocess(self):\n pass", "title": "" }, { "docid": "e2d70c25ed7ca7086ea10af06cdd9443", "score": "0.5358006", "text": "def parse():", "title": "" }, { "docid": "2878a10a2856db202e3d82d31246db36", "score": "0.53386277", "text": "def extract_raw(self):", "title": "" }, { "docid": "58ac6012b3abbdc3a995a3702880e9ea", "score": "0.53290373", "text": "def organizing():", "title": "" }, { "docid": "7b5fed2215f3f2f0727f0c4b7600bca4", "score": "0.5322732", "text": "def format_by_timings(self,db_res):\n res_str = []\n res_str.append(TABLE_OPEN)\n res_str.append(THEAD_OPEN)\n res_str.append(ROW_OPEN)\n res_str.append(THEAD_ROW_OPEN)\n res_str.append('DAY ID')\n res_str.append(THEAD_ROW_CLOSE)\n res_str.append(THEAD_ROW_OPEN)\n res_str.append('FILE NAME')\n res_str.append(THEAD_ROW_CLOSE)\n res_str.append(THEAD_ROW_OPEN)\n res_str.append('FILE ID')\n res_str.append(THEAD_ROW_CLOSE)\n 
res_str.append(THEAD_ROW_OPEN)\n res_str.append('TIME TO PARSE IN MINS')\n res_str.append(THEAD_ROW_CLOSE)\n res_str.append(THEAD_ROW_OPEN)\n res_str.append('TIME TO LOAD IN MINS')\n res_str.append(THEAD_ROW_CLOSE)\n res_str.append(THEAD_ROW_OPEN)\n res_str.append('RECORDS LOADED - WT_LOGS')\n res_str.append(THEAD_ROW_CLOSE)\n res_str.append(THEAD_ROW_OPEN)\n res_str.append('RECORDS LOADED - WT_LOG_PARTS')\n res_str.append(THEAD_ROW_CLOSE)\n res_str.append(ROW_CLOSE)\n res_str.append(THEAD_CLOSE)\n res_str.append(TBODY_OPEN)\n for row in db_res:\n res_str.append(ROW_OPEN)\n for elem in row:\n res_str.append(COLUMN_OPEN)\n res_str.append(str(elem)) # do this gracefully\n res_str.append(COLUMN_CLOSE)\n res_str.append(ROW_CLOSE)\n res_str.append(TBODY_CLOSE)\n res_str.append(TABLE_CLOSE)\n return \"\".join(res_str)", "title": "" }, { "docid": "f62915bd95746bb6031675e82e649e5c", "score": "0.53091556", "text": "def parseKeyFromToGloud ( text ):\n\tpat = re.compile( \"(%d|%s)\" )\n\ti = 0\n\tformatters= []\n\tcopyFrom = 0\n\tnewText= \"\"\n\n\tfor m in pat.finditer( text ) :\n\t\tcopyTill = m.start() # slicing operator will adjust by -1 automatically\n\t\tformatters.append( m.group() )\n\t\t# _dbx( copyFrom ) _dbx( copyTill )\n\t\tnewText += text [ copyFrom : copyTill ] \n\t\t#_dbx( newText )\n\n\t\tnewText += \"{%d\\}\" % i\n\t\t# _dbx( newText )\n\t\tcopyFrom = m.start() + len( m.group() ) # re-init for next pass\n\t\ti += 1\n\n\tif len( formatters ) == 0: # no formatter found, return the original key\n\t\tnewText = text\n\telse: # take care trailing text after the last formatter\n\t\tnewText += text [ copyFrom : ] \n\n\t# _dbx( text ); _dbx( newText )\n\treturn newText, formatters", "title": "" }, { "docid": "41d05ca26a1eba32fd83f005df1f4367", "score": "0.5304262", "text": "def process_text_out(filename_in, filename_out):\n with open(filename_in, 'r') as file_in:\n with open(filename_out, 'w') as file_out:\n lines = []\n for line in file_in:\n line = line.rstrip('\\r\\n')\n line = line.replace('<s> summary </s>', '')\n line = line.replace('<s> title </s>', '')\n line = line.replace('<s>', '')\n line = line.replace('</s>', '')\n line = line.replace('<sec>', '\\n')\n line = line.replace('<stop>', '')\n line = line.replace('<pad>', '')\n line = detokenize_line(line)\n file_out.write(line)\n lines.append(line)\n return lines", "title": "" }, { "docid": "d6bb22295ab6be1edfe63661f0b97e0e", "score": "0.5285234", "text": "def make_columns_string():", "title": "" }, { "docid": "d664f08fd98b15000283cd8574e34ab6", "score": "0.527464", "text": "def exo2():", "title": "" }, { "docid": "6389767f3d72d59ad9f459d4b4f93b86", "score": "0.52678597", "text": "def _raw_prettify(self):\n\t\tstring = \"\"\n\t\tfor name, msg, date in self.convo:\n\t\t\tstring += name.title() + \": \" + msg + \" | \" + str(date)\n\t\t\tstring += '\\n'\n\t\treturn string", "title": "" }, { "docid": "019ec7c2cb76401014d8273aeb15300f", "score": "0.5263639", "text": "def split(self):", "title": "" }, { "docid": "cc71e87591b88b0265518acee7c61116", "score": "0.5258223", "text": "def formated(self):\n return self._formatloop()[1]", "title": "" }, { "docid": "c388f99da50a984d4b4a6bd1d85138e1", "score": "0.5254367", "text": "def make_format(s_p,line_split,i,hash_dic):\n\n cand_list = []\n\n if i-2 == 0 and i+3 < len(line_split)-1:\n\n cand_list.append( (line_split[i-2].split(\"_\"))[0] )\n cand_list.append( (line_split[i-1].split(\"_\"))[0] )\n \n cand_list.append( (line_split[i+3].split(\"_\"))[0] )\n cand_list.append( 
(line_split[i+2].split(\"_\"))[0] )\n cand_list.append( (line_split[i+1].split(\"_\"))[0] )\n\n if i-1 == 0 and i+3 < len(line_split)-1:\n\n cand_list.append( (line_split[i-1].split(\"_\"))[0] )\n \n cand_list.append( (line_split[i+3].split(\"_\"))[0] )\n cand_list.append( (line_split[i+2].split(\"_\"))[0] )\n cand_list.append( (line_split[i+1].split(\"_\"))[0] )\n\n if i == 0 and i+3 < len(line_split)-1:\n\n cand_list.append( (line_split[i+3].split(\"_\"))[0] )\n cand_list.append( (line_split[i+2].split(\"_\"))[0] )\n cand_list.append( (line_split[i+1].split(\"_\"))[0] )\n\n if i-3 >= 0 and i+2 == len(line_split)-1:\n\n cand_list.append( (line_split[i-3].split(\"_\"))[0] )\n cand_list.append( (line_split[i-2].split(\"_\"))[0] )\n cand_list.append( (line_split[i-1].split(\"_\"))[0] )\n \n cand_list.append( (line_split[i+1].split(\"_\"))[0] )\n cand_list.append( (line_split[i+2].split(\"_\"))[0] )\n\n if i-3 >= 0 and i+1 == len(line_split)-1:\n\n cand_list.append( (line_split[i-3].split(\"_\"))[0] )\n cand_list.append( (line_split[i-2].split(\"_\"))[0] )\n cand_list.append( (line_split[i-1].split(\"_\"))[0] )\n \n cand_list.append( (line_split[i+1].split(\"_\"))[0] )\n\n\n if i-3 >= 0 and i == len(line_split)-1:\n\n cand_list.append( (line_split[i-3].split(\"_\"))[0] )\n cand_list.append( (line_split[i-2].split(\"_\"))[0] )\n cand_list.append( (line_split[i-1].split(\"_\"))[0] ) \n \n\n #if i-3 has more than 0\n if i-3 >= 0 and i+3 <= len(line_split)-1:\n\n #対象の語が文頭から3単語以上の場合\n # look up 3 previous 3 words\n cand_list.append( (line_split[i-3].split(\"_\"))[0] )\n cand_list.append( (line_split[i-2].split(\"_\"))[0] )\n cand_list.append( (line_split[i-1].split(\"_\"))[0] )\n #look up POS tag of word itself\n #cand_list.append( (line_split[i].split(\"_\"))[1] )\n\n #対象の語が文末から3単語以上の場合\n cand_list.append( (line_split[i+3].split(\"_\"))[0] )\n cand_list.append( (line_split[i+2].split(\"_\"))[0] )\n cand_list.append( (line_split[i+1].split(\"_\"))[0] )\n\n \n\n if not cand_list == []:\n\n sort_list = []\n print_list = []\n # 0 is label for singular\n if s_p == \"s\":\n label = \"1\"\n #i is label for plural\n if s_p == \"p\":\n label = \"2\"\n\n for e_word in cand_list: \n f_id = hash_dic[e_word]\n sort_list.append(f_id)\n #delete repetition elements\n sort_list = list(set(sort_list))\n # sort by order\n sort_list.sort()\n \n print_list.append(label)\n for id in sort_list:\n format = str(id) + \":\" + str(1)\n print_list.append(format)\n\n print \" \".join(print_list)", "title": "" }, { "docid": "39a320b34b0c7f2c879696dee7cc0805", "score": "0.5222473", "text": "def grouping():", "title": "" }, { "docid": "fd95aeb1a31c852b5347bcb9b7c0eb2a", "score": "0.52077144", "text": "def __preprocess_bib_904(record):\n for field in record.get_fields('904'):\n assert 'a' in field or 'x' in field, f\"{record.get_control_number()}: 904 with no $a or $x: {field}\"\n record.remove_field(field)\n for code, val in field.get_subfields('a','x', with_codes=True):\n new_subfields = ['i',\"Normalized sort title\",'a',val]\n if code == 'x':\n new_subfields += ['@',\"Exception to algorithm\"]\n record.add_field(Field('246',' ',new_subfields))", "title": "" }, { "docid": "06391fbf353a9f2d1ded8ddb8b408ff0", "score": "0.51978743", "text": "def formatter(x):\n savee_copy = x.copy()\n savee_copy.fillna(0, inplace = True)\n savee_copy.drop(['Ei','Ef','mat_unc','Ei_unc','Ef_unc', 'Eerr', 'matrix', 'old_unc', 'Berr', 'precise_wave', 'precise_Eerr'], axis = 1, inplace = True)\n\n\n\n ini_hold = []\n dec_hold = []\n n_holdI, l_holdI, 
s_holdI = [], [], []\n n_holdD, l_holdD, s_holdD = [], [], []\n for i in range(len(savee_copy)):\n #Initial\n n = str(savee_copy.Initial[i][0])\n if savee_copy.Initial[i][1] == 0:\n l = 's'\n elif savee_copy.Initial[i][1] == 1:\n l = 'p'\n elif savee_copy.Initial[i][1] == 2:\n l = 'd'\n elif savee_copy.Initial[i][1] == 3:\n l = 'f'\n if savee_copy.Initial[i][2] == 0.5:\n s = '1/2'\n elif savee_copy.Initial[i][2] == 1.5:\n s = '3/2'\n elif savee_copy.Initial[i][2] == 2.5:\n s = '5/2'\n elif savee_copy.Initial[i][2] == 3.5:\n s = '7/2'\n elif savee_copy.Initial[i][2] == 4.5:\n s = '9/2'\n ini = n+l+s\n ini_hold.append(ini)\n n_holdI.append(n)\n l_holdI.append(l)\n s_holdI.append(s)\n\n #Decay\n n = str(savee_copy.Decay[i][0])\n if savee_copy.Decay[i][1] == 0:\n l = 's'\n elif savee_copy.Decay[i][1] == 1:\n l = 'p'\n elif savee_copy.Decay[i][1] == 2:\n l = 'd'\n elif savee_copy.Decay[i][1] == 3:\n l = 'f'\n if savee_copy.Decay[i][2] == 0.5:\n s = '1/2'\n elif savee_copy.Decay[i][2] == 1.5:\n s = '3/2'\n elif savee_copy.Decay[i][2] == 2.5:\n s = '5/2'\n elif savee_copy.Decay[i][2] == 3.5:\n s = '7/2'\n elif savee_copy.Decay[i][2] == 4.5:\n s = '9/2'\n dec = n+l+s\n dec_hold.append(dec)\n n_holdD.append(n)\n l_holdD.append(l)\n s_holdD.append(s)\n \n savee_copy['Initial'] = ini_hold\n savee_copy['Decay'] = dec_hold\n savee_copy['nI'] = n_holdI\n savee_copy['lI'] = l_holdI\n savee_copy['sI'] = s_holdI\n savee_copy['nD'] = n_holdD\n savee_copy['lD'] = l_holdD\n savee_copy['sD'] = s_holdD\n \n savee_copy.drop(['nI', 'lI', 'sI', 'nD', 'sD', 'lD'], axis = 1, inplace = True)\n savee_copy = savee_copy[['Initial','Decay','mat_werr','wavelength','transition_rate s-1', 'branching ratio']]\n savee_copy.rename(columns = {\"mat_werr\": \"Matrix element (a.u.)\", \"wavelength\": \"Wavelength (nm)\", \n \"transition_rate s-1\": \"Transition Rate (s-1)\", \n 'branching ratio': \"Branching ratio\"}, inplace = True)\n return savee_copy", "title": "" }, { "docid": "d53dcd0d550059215b12bc990212f1c2", "score": "0.5179776", "text": "def extract(self):", "title": "" }, { "docid": "e17908349010312af186b59b4b4d410e", "score": "0.5168919", "text": "def _format_data(self, data):\n\n return data", "title": "" }, { "docid": "b56f1e007ca8f7d7c41fe8c5ca2d2eb2", "score": "0.5168714", "text": "def reformat_db_info(info):\n\n temp = \"\"\n for i in info:\n temp += i\n\n temp_list = temp.split(\".\\n\")\n temp_list.pop()\n\n return [i + \".\\n\" for i in temp_list]", "title": "" }, { "docid": "a3ed699239b04e3614d56aa8121336b9", "score": "0.51680255", "text": "def format(value):", "title": "" }, { "docid": "2e7873799afddd062702661a997bc3af", "score": "0.51596135", "text": "def apply_general_formatting(datum):\n datum = fmt.format_tarako(datum)\n datum = fmt.general_formatting(datum)\n return datum", "title": "" }, { "docid": "631b7f858ffc1aea33810fe92a698507", "score": "0.51483935", "text": "def _scad(self):", "title": "" }, { "docid": "8f9b4c43bc6a523865b6970a66bea9fa", "score": "0.51473963", "text": "def format_by_fileID(self,db_res):\n res_str = []\n res_str.append(TABLE_OPEN)\n res_str.append(THEAD_OPEN)\n res_str.append(ROW_OPEN)\n res_str.append(THEAD_ROW_OPEN)\n res_str.append('DAY ID')\n res_str.append(THEAD_ROW_CLOSE)\n res_str.append(THEAD_ROW_OPEN)\n res_str.append('FILE NAME')\n res_str.append(THEAD_ROW_CLOSE)\n res_str.append(THEAD_ROW_OPEN)\n res_str.append('FILE_ID')\n res_str.append(THEAD_ROW_CLOSE)\n res_str.append(THEAD_ROW_OPEN)\n res_str.append('TOTAL RECORDS')\n res_str.append(THEAD_ROW_CLOSE)\n 
res_str.append(THEAD_ROW_OPEN)\n res_str.append('RECORDS LOADED - WT_LOGS')\n res_str.append(THEAD_ROW_CLOSE)\n res_str.append(THEAD_ROW_OPEN)\n res_str.append('RECORDS LOADED - WT_LOG_PARTS')\n res_str.append(THEAD_ROW_CLOSE)\n res_str.append(ROW_CLOSE)\n res_str.append(THEAD_CLOSE)\n res_str.append(TBODY_OPEN)\n for row in db_res:\n res_str.append(ROW_OPEN)\n for elem in row:\n res_str.append(COLUMN_OPEN)\n res_str.append(str(elem)) # do this gracefully\n res_str.append(COLUMN_CLOSE)\n res_str.append(ROW_CLOSE)\n res_str.append(TBODY_CLOSE)\n res_str.append(TABLE_CLOSE)\n return \"\".join(res_str)", "title": "" }, { "docid": "abf9f0dfa488306c8eaea2c2f0fdc963", "score": "0.51368934", "text": "def _str_format_arguments(self):", "title": "" }, { "docid": "8d0db05be0c6b442f178037852155b12", "score": "0.511411", "text": "def _verbose_format(self):\n ...", "title": "" }, { "docid": "486f779beb6d9e62bb4a175add522d23", "score": "0.51014507", "text": "def reformat():\n hitchpylibrarytoolkit.reformat(DIR.project, \"strictyamljsonschema\")", "title": "" }, { "docid": "70aee1ecf709594d867559fc77d62b22", "score": "0.51009136", "text": "def normalize(self):\n\t\treturn", "title": "" }, { "docid": "d1a15ab0b21bfe936875b5fb430ae2b4", "score": "0.5099555", "text": "def _format_str_base(self,first,surname,prefix,suffix,patronymic,\n title,call,format_str):\n\n func = self.__class__.format_funcs.get(format_str)\n if func == None:\n func = self._gen_cooked_func(format_str)\n self.__class__.format_funcs[format_str] = func\n\n\ts = func(first,surname,prefix,suffix,patronymic,title,call)\n return ' '.join(s.split())", "title": "" }, { "docid": "ecc960a4d35375b0e941e5fb6a5cf005", "score": "0.50901365", "text": "def asformat(self, format):", "title": "" }, { "docid": "ee580333bcb96b98603a8619cf9711a6", "score": "0.50890243", "text": "def txt_format(raw_strct_mmbr_props):\n rmk_mmbr_props_txt = ''\n \n for prop in raw_strct_mmbr_props.values():\n for entry_val in prop[0].values():\n rmk_mmbr_props_txt += '{}\\t'.format(entry_val)\n \n rmk_mmbr_props_txt += '\\n'\n \n for line in prop[1:]:\n rmk_mmbr_props_txt += '\\t'\n \n for entry_val in line.values():\n rmk_mmbr_props_txt += '{}\\t'.format(entry_val)\n \n rmk_mmbr_props_txt += '\\n'\n \n return rmk_mmbr_props_txt", "title": "" }, { "docid": "7f80630101505f4b428c8d4bb687a62b", "score": "0.5088779", "text": "def preprocess(self):\n pass", "title": "" }, { "docid": "0a5a951d49806269980d72667fc70df5", "score": "0.50879925", "text": "def separate(rw):\n\n if \"_\" in rw['#Uploaded_variation']:\n rw['#CHROM'] = rw['#Uploaded_variation'].split(\"_\")[0]\n rw['POS'] = rw['#Uploaded_variation'].split(\"_\")[1]\n rw['Change'] = rw['#Uploaded_variation'].split(\"_\")[2]\n rw['REF'] = rw['Change'].split(\"/\")[0]\n rw['ALT'] = rw['Change'].split(\"/\")[1]\n else:\n rw['#CHROM'] = str(rw['Location'].split(\":\")[0])\n rw['POS'] = rw['Location'].split(\":\")[1]\n if \"-\" in rw['POS']:\n rw['POS'] = int(rw['POS'].split(\"-\")[0])\n else:\n rw['POS'] = int(rw['POS'])\n rw['REF'] = hg19(str(rw['#CHROM']), rw['POS'], 1)\n rw['ALT'] = rw['Allele']\n return rw", "title": "" }, { "docid": "05b6d5c72e31a93d226617e6eaf228a3", "score": "0.50857824", "text": "def _format(records):\n return list(map(format_record, records))", "title": "" }, { "docid": "5f3d7fdb33d98ebb676b6200bfaced32", "score": "0.5080461", "text": "def getQualifiedFormats():", "title": "" }, { "docid": "77161d317cc4171a0a16544a75e11073", "score": "0.50758016", "text": "def _refactor_arguments(self, header):\n items 
= self.extract_items(item_class=ListItem)\n lines = [':{0}:'.format(header.lower())]\n prefix = None if len(items) == 1 else '-'\n for item in items:\n lines += add_indent(item.to_rst(prefix))\n return lines", "title": "" }, { "docid": "f5de23db34c67010f68b7ca56a3e13ba", "score": "0.50697696", "text": "def transform_spec_data(fname): \n\n with open(fname, 'rb') as input_file: #read from this file \n data = csv.reader(input_file, delimiter=' ', quoting=csv.QUOTE_NONE) #get data from input file\n with open('output.csv', 'w') as output_file: #write to this output file\n mywriter = csv.writer(output_file, escapechar=' ', quoting=csv.QUOTE_NONE) #create writer object\n mywriter.writerow(['Account Number','Read Date','Address','Zipcode','Consumption']) #add header to output\n seen = set() #keep track of duplicates\n for row in data:\n if tuple(row) in seen:\n continue #ignore duplicates \n seen.add(tuple(row)) #turn row into hashable/immutable tuple, so can add to set\n row[0] = row[0].zfill(6) #pad with leading zero's to ensure 6 digit account numbers.\n if len(row[0]) > 6: #make sure account number data is isolated \n fix_account_num(row) #function call to fix format \n row = [x for x in row if x != ''] #remove empty fields \n f_row = [] #create new row to append correct data format \n date = datetime.datetime.strptime(\" \".join(row[1:4]), \"%b %d %Y\").strftime(\"%Y%m%d\") #use datetime to format \n \n if re.search('[a-zA-Z]', row[8]): #regex search for alpha text in next column\n address = (\" \".join(row[4:9])).replace(',', '') #grab rest of address text\n if re.search('[0-9]', address[-1]): #regex search for numbers in text ending \n address = address[:-5] #dont grab the the 5 digit zipcode \n else: \n address = (\" \".join(row[4:7]))\n \n zipcode = re.sub('[^0-9]','', (row[-2].zfill(5))) #regex to limit to numbers only \n consumption = re.sub('[^0-9]','', (row[-1])) #regex used for numbers only \n\n f_row.append(row[0])\n f_row.append(date)\n f_row.append(address)\n f_row.append(zipcode)\n f_row.append(consumption)\n\n mywriter.writerow(f_row) #add data to output file", "title": "" }, { "docid": "feade27fd77dfc1afe706b4964dfa75f", "score": "0.50646234", "text": "def pre_processing(self):\n\t\tpass", "title": "" }, { "docid": "df3c1843c15aca54a3e9f5122a864ac0", "score": "0.50609416", "text": "def __preprocess_hdg_907(self, record):\n for field in record.get_fields('907'):\n new_subfields = []\n for code, val in field.get_subfields(with_codes=True):\n if code == 'b':\n # b Analysis treatment (AA CA DA PA NA) (NR)\n # split\n for extracted_subset_code in self.f907b_regex.findall(val):\n new_subfields.append(code)\n new_subfields.append(extracted_subset_code.upper())\n if re.sub('[\\s:;.,/!+*]', '', self.f907b_regex.sub('', val)):\n # rest --> $α -> Analysis Treatment Note\n new_subfields.append('α')\n new_subfields.append(val)\n elif code == 'c':\n # c Classification/shelving pattern (PER EPER SCN VCN MST N/A) (NR)\n for extracted_subset_code in self.f907c_regex.findall(val):\n new_subfields.append(code)\n new_subfields.append(extracted_subset_code.upper())\n elif code == 'f':\n # f Component parts indicator (aa, aa selected, ca) (NR)\n for extracted_subset_code in self.f907f_regex.findall(val):\n if extracted_subset_code.startswith('aa') and len(extracted_subset_code) > 2:\n extracted_subset_code = 'pa'\n new_subfields.append(code)\n new_subfields.append(extracted_subset_code.lower())\n if re.sub('[\\s:;.,/!+*]', '', self.f907f_regex.sub('', val)):\n # rest --> $π -> Component Parts Note\n 
new_subfields.append('π')\n new_subfields.append(val)\n elif code in 'xy':\n # x Subscription Status (Current, eCurrent, Ceased, On Order, etc.) (NR)\n # y Payment Type (DigiPay, DigiCopay, DigiCoop, DigiFree, PrtPay, PrtFree) (NR)\n f907subf_regex = self.f907x_regex if code == 'x' else self.f907y_regex\n for extracted_subset_code in f907subf_regex.findall(val):\n new_subfields.append(code)\n new_subfields.append(extracted_subset_code.lower())\n if re.sub('[\\s:;.,/!+*]', '', f907subf_regex.sub('', val)):\n # rest --> $ω -> Acquisitions Note\n new_subfields.append('ω')\n new_subfields.append(val)\n else:\n # just pass the rest through\n new_subfields.append(code)\n new_subfields.append(val)\n field.subfields = new_subfields\n return record", "title": "" }, { "docid": "797121b951a46a4a86db14941f667f4c", "score": "0.50600624", "text": "def bug_1534630():", "title": "" }, { "docid": "8557c87e1c7c3dee071ada6464c00fa5", "score": "0.50584435", "text": "def _refactorPrintableRow(self, data_row, is_header=False):\n out_row = [''] * 5\n\n if is_header:\n out_row[0] = data_row[0]\n out_row[1] = data_row[1]\n out_row[2] = data_row[9]\n out_row[3] = data_row[11]\n return out_row\n\n # ID is always a single value\n data_row = data_row.split(',')\n out_row[0] = data_row[0].strip()\n\n # if only a single manning's n value\n if not data_row[1] == ' ':\n out_row[1] = data_row[1].strip()\n\n # otherwise it's multiple mannings n and depth values\n elif not data_row[2] == ' ':\n out_row[1] = '\\\"' + data_row[2].strip() + ',' + data_row[3].strip() + ',' + data_row[4].strip() + ',' + data_row[5].strip() + '\\\"'\n\n # Or finally it could be file name and headers\n else:\n out_row[1] = data_row[6].strip()\n\n if not data_row[7] == ' ':\n out_row[1] += ' | ' + data_row[7].strip()\n\n if not data_row[8] == ' ':\n out_row[1] += ' | ' + data_row[8].strip()\n\n # Infiltration parametres\n out_row[2] = '\\\"' + data_row[9].strip() + ', ' + data_row[10].strip() + '\\\"'\n # Land use hazard\n out_row[3] = data_row[11].strip()\n\n out_row = ','.join(out_row)\n return out_row", "title": "" }, { "docid": "39bf790f28e2b3fee2126a06c5dd30be", "score": "0.50543404", "text": "def reformat_raw_data(emg, ori, acc, gyr):\n o = [[c.x, c.y, c.z] for c in [b[0] for b in [a[1:] for a in ori]]]\n a = [[c.x, c.y, c.z] for c in [b[0] for b in [a[1:] for a in acc]]]\n g = [[c.x, c.y, c.z] for c in [b[0] for b in [a[1:] for a in gyr]]]\n\n length = len(o)\n if any(len(lst) != length for lst in [a, g]):\n length = min(len(o), len(a), len(g))\n o = o[:length]\n a = a[:length]\n g = g[:length]\n # at least one list has a different length - unknown reason\n\n imu = []\n for i in range(len(o)):\n tmp = o[i]\n tmp.extend([x for x in a[i]])\n tmp.extend([x for x in g[i]])\n imu.append(tmp)\n\n emg = [y[0] for y in [x[1:] for x in emg]]\n return emg, imu", "title": "" }, { "docid": "d3ee584be759bcbf5ba1401b97162fab", "score": "0.50536394", "text": "def _adjust_lines(lines):\r\n formatted_lines = []\r\n for l in lines:\r\n #Convert line endings\r\n l = l.replace('\\r\\n', '\\n').replace('\\r', '\\n').strip()\r\n if l.lower().startswith('matrix'):\r\n formatted_lines.append(l)\r\n else:\r\n l = l.replace('\\n', ' ')\r\n if l:\r\n formatted_lines.append(l)\r\n return formatted_lines", "title": "" }, { "docid": "155f723a944338e8311d8de60c31796b", "score": "0.50519925", "text": "def _to_transfac(self):\r\n res=\"XX\\nTY Motif\\n\" #header\r\n try:\r\n res+=\"ID %s\\n\"%self.name\r\n except:\r\n pass\r\n res+=\"BF undef\\nP0\"\r\n for a in 
self.alphabet.letters:\r\n res+=\" %s\"%a\r\n res+=\"\\n\"\r\n if not self.has_counts:\r\n self.make_counts_from_instances()\r\n for i in range(self.length):\r\n if i<9:\r\n res+=\"0%d\"%(i+1)\r\n else:\r\n res+=\"%d\"%(i+1)\r\n for a in self.alphabet.letters:\r\n res+=\" %d\"%self.counts[a][i]\r\n res+=\"\\n\"\r\n res+=\"XX\\n\"\r\n return res", "title": "" }, { "docid": "6d0231c53430ac29e23c9c4d18606a9b", "score": "0.50346434", "text": "def __format__(self, format_spec):", "title": "" }, { "docid": "69edfdcb32176a9e747a4a45b5962174", "score": "0.5031447", "text": "def utils():", "title": "" }, { "docid": "edf9ed601da4616778b30b52e50f1d90", "score": "0.50221884", "text": "def reformat(code, is_diag):\n code = ''.join(code.split('.'))\n if is_diag:\n if code.startswith('E'):\n if len(code) > 4:\n code = code[:4] + '.' + code[4:]\n else:\n if len(code) > 3:\n code = code[:3] + '.' + code[3:]\n else:\n code = code[:2] + '.' + code[2:]\n return code", "title": "" }, { "docid": "171524df0061696bc259882b026b23c7", "score": "0.50213355", "text": "def format_results(self):\n pass", "title": "" }, { "docid": "22eb3f27d46f4f910427eeda086f53b5", "score": "0.5017479", "text": "def doctest_Report_format_source():", "title": "" }, { "docid": "1041c488f12e8ff8a90545062eec7c0c", "score": "0.50072175", "text": "def _format(self, line):\n content, attributes = super()._format(line)\n\n for m in re.finditer(r\"return\", content):\n attributes[m.start(): m.end()] = [((Color.LightGreen, Color.Black), Property.Default)] * len(m.group())\n\n for m in re.finditer(r\"def\\b\", content):\n attributes[m.start(): m.end()] = [((Color.LightRed, Color.Black), Property.Default)] * len(m.group())\n\n return content, attributes", "title": "" }, { "docid": "3cb22b9098b4d1f28effc0a7821fb048", "score": "0.49960956", "text": "def getStartStrings():", "title": "" }, { "docid": "0efa843525262d2cedfc6ee7c6b82821", "score": "0.49889576", "text": "def pretty(self) -> str:\n\n # protocol first\n\n protocol = ''\n\n for entry in self.protocol:\n # pretty fromto\n if entry['from_unixtime']:\n from_time = time.localtime(entry['from_unixtime']) \n from_hour = from_time.tm_hour\n from_minute = from_time.tm_min\n else:\n from_hour = 0\n from_minute = 0\n\n if entry['to_unixtime']:\n to_time = time.localtime(entry['to_unixtime']) \n to_hour = to_time.tm_hour\n to_minute = to_time.tm_min\n else:\n to_time = time.localtime(entry['to_unixtime']) \n to_hour = 0\n to_minute = 0\n\n # add entry\n protocol += '%d.%d %.2fh (%02d:%02d-%02d:%02d): %s\\n' % (\n entry['day'],\n self.month,\n entry['duration'] / 3600,\n from_hour, from_minute,\n to_hour, to_minute,\n entry['description']\n )\n\n decorator = 25 * '*'\n\n # and all together now\n\n return (\n '%s %04d-%02d %s'\n '\\nHolidaysLeftBeginMonth: %dd'\n '\\nHolidaysLeft: %dd'\n '\\nMonthlyTarget: %.1fh'\n '\\nWorkingHoursAccountBeginMonth: %+.1fh (%ds)'\n '\\nWorkingHoursAccount: %.1fh (%ds)'\n '\\nWorkingHours: %.1fh (%ds)'\n '\\nWorkingHoursBalance: %+.1fh (%ds)'\n '\\n%s Protocol %s' \n '\\n%s' % (\n decorator, self.year, self.month, decorator,\n self.holidays_left_begin,\n self.holidays_left,\n self.monthly_target,\n self.working_hours_account_begin / 3600, self.working_hours_account_begin,\n self.working_hours_account / 3600, self.working_hours_account,\n self.working_hours /3600, self.working_hours,\n self.working_hours_balance / 3600 , self.working_hours_balance,\n decorator, decorator,\n protocol\n ) + \n '\\n' + 60 * '~'\n )", "title": "" }, { "docid": 
"88a5c60585035d242bf1534910374ad0", "score": "0.4985208", "text": "def fix_w602(self, result):\n line_index = result['line'] - 1\n line = self.source[line_index]\n\n split_line = line.split(',')\n if len(split_line) > 1 and split_line[1].strip().startswith('('):\n # Give up\n return []\n\n if ' or ' in line or ' and ' in line:\n # Give up\n return []\n\n if (line.endswith('\\\\\\n') or\n line.endswith('\\\\\\r\\n') or\n line.endswith('\\\\\\r')):\n self.source[line_index] = line.rstrip('\\n\\r \\t\\\\')\n self.source[line_index + 1] = \\\n ' ' + self.source[line_index + 1].lstrip()\n return [line_index + 1, line_index + 2] # Line indexed at 1\n\n modified_lines = [1 + line_index] # Line indexed at 1\n\n double = '\"\"\"'\n single = \"'''\"\n if double in line or single in line:\n # Move full multiline string to current line\n if double in line and single in line:\n quotes = (double if line.find(double) < line.find(single)\n else single)\n elif double in line:\n quotes = double\n else:\n quotes = single\n assert quotes in line\n\n # Find last line of multiline string\n end_line_index = line_index\n if line.count(quotes) == 1:\n for i in range(line_index + 1, len(self.source)):\n end_line_index = i\n if quotes in self.source[i]:\n break\n\n # We do not handle anything other than plain multiline strings\n if ('(' in self.source[end_line_index] or\n '\\\\' in self.source[end_line_index]):\n return []\n\n for i in range(line_index + 1, end_line_index + 1):\n line_contents = self.source[i]\n self.source[line_index] += line_contents\n self.source[i] = ''\n modified_lines.append(1 + i) # Line indexed at 1\n line = self.source[line_index]\n\n indent, rest = _split_indentation(line)\n try:\n ast_body = ast.parse(rest).body[0]\n except SyntaxError:\n # Give up\n return []\n\n if len(ast_body._fields) == 3 and ast_body.tback is not None:\n _id = [indent, ]\n for node in ast.iter_child_nodes(ast_body):\n if ast.Str == type(node):\n quote_word = line[node.col_offset]\n if quote_word * 3 == \\\n line[node.col_offset:node.col_offset + 3]:\n quote_word = quote_word * 3\n _id.append(quote_word + node.s + quote_word)\n continue\n if ast.Name == type(node):\n _id.append(node.id)\n continue\n try:\n _id.append(repr(ast.literal_eval(node)))\n except ValueError:\n # Give up\n return []\n\n # find space and comment\n sio = StringIO(line)\n old_tokens = None\n for tokens in tokenize.generate_tokens(sio.readline):\n if tokens[0] is tokenize.COMMENT:\n comment_offset = old_tokens[3][1]\n _id.append(line[comment_offset:])\n break\n elif len(_id) == 4 and tokens[0] is token.NEWLINE:\n _id.append(self.newline)\n break\n old_tokens = tokens\n # Create fixed line and check for correctness\n candidate = \"%sraise %s(%s), None, %s%s\" % tuple(_id)\n pattern = '[)(, ]'\n if (re.sub(pattern, repl='', string=candidate).replace('None', '')\n == re.sub(pattern, repl='', string=line)):\n self.source[result['line'] - 1] = candidate\n return modified_lines\n else:\n return []\n else:\n self.source[line_index] = _fix_basic_raise(line, self.newline)\n\n return modified_lines", "title": "" }, { "docid": "88a5c60585035d242bf1534910374ad0", "score": "0.4985208", "text": "def fix_w602(self, result):\n line_index = result['line'] - 1\n line = self.source[line_index]\n\n split_line = line.split(',')\n if len(split_line) > 1 and split_line[1].strip().startswith('('):\n # Give up\n return []\n\n if ' or ' in line or ' and ' in line:\n # Give up\n return []\n\n if (line.endswith('\\\\\\n') or\n line.endswith('\\\\\\r\\n') or\n 
line.endswith('\\\\\\r')):\n self.source[line_index] = line.rstrip('\\n\\r \\t\\\\')\n self.source[line_index + 1] = \\\n ' ' + self.source[line_index + 1].lstrip()\n return [line_index + 1, line_index + 2] # Line indexed at 1\n\n modified_lines = [1 + line_index] # Line indexed at 1\n\n double = '\"\"\"'\n single = \"'''\"\n if double in line or single in line:\n # Move full multiline string to current line\n if double in line and single in line:\n quotes = (double if line.find(double) < line.find(single)\n else single)\n elif double in line:\n quotes = double\n else:\n quotes = single\n assert quotes in line\n\n # Find last line of multiline string\n end_line_index = line_index\n if line.count(quotes) == 1:\n for i in range(line_index + 1, len(self.source)):\n end_line_index = i\n if quotes in self.source[i]:\n break\n\n # We do not handle anything other than plain multiline strings\n if ('(' in self.source[end_line_index] or\n '\\\\' in self.source[end_line_index]):\n return []\n\n for i in range(line_index + 1, end_line_index + 1):\n line_contents = self.source[i]\n self.source[line_index] += line_contents\n self.source[i] = ''\n modified_lines.append(1 + i) # Line indexed at 1\n line = self.source[line_index]\n\n indent, rest = _split_indentation(line)\n try:\n ast_body = ast.parse(rest).body[0]\n except SyntaxError:\n # Give up\n return []\n\n if len(ast_body._fields) == 3 and ast_body.tback is not None:\n _id = [indent, ]\n for node in ast.iter_child_nodes(ast_body):\n if ast.Str == type(node):\n quote_word = line[node.col_offset]\n if quote_word * 3 == \\\n line[node.col_offset:node.col_offset + 3]:\n quote_word = quote_word * 3\n _id.append(quote_word + node.s + quote_word)\n continue\n if ast.Name == type(node):\n _id.append(node.id)\n continue\n try:\n _id.append(repr(ast.literal_eval(node)))\n except ValueError:\n # Give up\n return []\n\n # find space and comment\n sio = StringIO(line)\n old_tokens = None\n for tokens in tokenize.generate_tokens(sio.readline):\n if tokens[0] is tokenize.COMMENT:\n comment_offset = old_tokens[3][1]\n _id.append(line[comment_offset:])\n break\n elif len(_id) == 4 and tokens[0] is token.NEWLINE:\n _id.append(self.newline)\n break\n old_tokens = tokens\n # Create fixed line and check for correctness\n candidate = \"%sraise %s(%s), None, %s%s\" % tuple(_id)\n pattern = '[)(, ]'\n if (re.sub(pattern, repl='', string=candidate).replace('None', '')\n == re.sub(pattern, repl='', string=line)):\n self.source[result['line'] - 1] = candidate\n return modified_lines\n else:\n return []\n else:\n self.source[line_index] = _fix_basic_raise(line, self.newline)\n\n return modified_lines", "title": "" }, { "docid": "53be2ac4314b938b68ff424714a3c2fc", "score": "0.49800193", "text": "def _format_crd(self,\n ) -> None:\n pass", "title": "" }, { "docid": "4893c1bc570e75c88a8473a3ed18994d", "score": "0.49780336", "text": "def format_element(bfo, kb_name=\"dbcollid2coll\"):\n collection_indicator = bfo.kb(kb_name, bfo.field(\"980__a\"))\n collection_indicator = cgi.escape(collection_indicator)\n subject = bfo.field(\"65017a\")\n subject = cgi.escape(subject)\n subject_2 = bfo.field(\"65027a\")\n subject_2 = cgi.escape(subject_2)\n additional_report_numbers = bfo.fields(\"088__a\")\n\n source_of_aquisition = bfo.field(\"037__a\")\n source_of_aquisition = cgi.escape(source_of_aquisition)\n\n\n if subject:\n subject = \" / \" + subject\n\n if subject_2:\n subject_2 = \" / \" + subject_2\n\n if len(source_of_aquisition) > 0:\n source_of_aquisition = '<td 
align=\"right\"><strong>'+ source_of_aquisition + \"</strong></td>\"\n\n report_numbers_out = ''\n for report_number in additional_report_numbers:\n report_numbers_out += \"<td><small><strong>\" + \\\n cgi.escape(report_number) + \\\n \" </strong></small></td>\"\n\n out = '''\n <table border=\"0\" width=\"100%%\">\n <tr>\n <td>%(collection_indicator)s<small>%(subject)s%(subject_2)s</small></td>\n <td><small><strong>%(report_number)s</strong></small></td>\n %(source_of_aquisition)s\n </tr>\n </table>\n ''' % {'collection_indicator': collection_indicator,\n 'subject': subject,\n 'subject_2': subject_2,\n 'report_number': report_numbers_out,\n 'source_of_aquisition': source_of_aquisition}\n\n if collection_indicator or \\\n subject or \\\n subject_2 or \\\n source_of_aquisition or \\\n report_numbers_out:\n return out\n else:\n return ''", "title": "" }, { "docid": "059fb549346b90d8da4b4d2e31ea508e", "score": "0.4972413", "text": "def _gen_raw_func(self, format_str):\n\n\t# we need the names of each of the variables or methods that are\n\t# called to fill in each format flag.\n d = {\"%t\":\"raw_data[_TITLE]\",\n \"%f\":\"raw_data[_FIRSTNAME]\",\n \"%p\":\"raw_data[_PREFIX]\",\n \"%l\":\"raw_data[_SURNAME]\",\n \"%s\":\"raw_data[_SUFFIX]\",\n \"%y\":\"raw_data[_PATRONYM]\",\n\t \"%c\":\"raw_data[_CALL]\",\n \"%T\":\"raw_data[_TITLE].upper()\",\n \"%F\":\"raw_data[_FIRSTNAME].upper()\",\n \"%P\":\"raw_data[_PREFIX].upper()\",\n \"%L\":\"raw_data[_SURNAME].upper()\",\n \"%S\":\"raw_data[_SUFFIX].upper()\",\n \"%Y\":\"raw_data[_PATRONYM].upper()\",\n \"%C\":\"raw_data[_CALL].upper()\",\n \"%%\":\"'%'\"}\n\n new_fmt = format_str\n\n\t# replace the specific format string flags with a \n\t# flag that works in standard python format strings.\n new_fmt = new_fmt.replace(\"%t\",\"%s\")\n new_fmt = new_fmt.replace(\"%f\",\"%s\")\n new_fmt = new_fmt.replace(\"%p\",\"%s\")\n new_fmt = new_fmt.replace(\"%l\",\"%s\")\n new_fmt = new_fmt.replace(\"%s\",\"%s\")\n new_fmt = new_fmt.replace(\"%y\",\"%s\")\n new_fmt = new_fmt.replace(\"%c\",\"%s\")\n\n new_fmt = new_fmt.replace(\"%T\",\"%s\")\n new_fmt = new_fmt.replace(\"%F\",\"%s\")\n new_fmt = new_fmt.replace(\"%P\",\"%s\")\n new_fmt = new_fmt.replace(\"%L\",\"%s\")\n new_fmt = new_fmt.replace(\"%S\",\"%s\")\n new_fmt = new_fmt.replace(\"%Y\",\"%s\")\n new_fmt = new_fmt.replace(\"%C\",\"%s\")\n new_fmt = new_fmt.replace(\"%%\",'%')\n\n\t# find each format flag in the original format string\n\t# for each one we find the variable name that is needed to \n\t# replace it and add this to a list. 
This list will be used\n\t# generate the replacement tuple.\n pat = re.compile(\"%.\")\n\n param = ()\n mat = pat.search(format_str)\n while mat:\n param = param + (d[mat.group(0)],)\n mat = pat.search(format_str,mat.end())\n\n s = 'def fn(raw_data):\\n'\\\n ' return \"%s\" %% (%s)' % (new_fmt,\",\".join(param))\n\texec(s)\n\n return fn", "title": "" }, { "docid": "aef796f8a9bdfe2ef5583411dd7094d9", "score": "0.4955774", "text": "def reformat(filename):\n filestr = read_file(filename)\n refilestr = reformat_filestr(filestr)\n #\n todos, links, blockcodecodes, linecodes, apendixes, chapters, alltopics = index_file(refilestr)\n content = get_chapters_topics_appendixes_string(apendixes, chapters, alltopics)\n # Write table of content to file:\n retfile = \"\"\n retfile += syntax\n for _ in range(2):\n retfile += \"\\n\"\n retfile += content\n for _ in range(3):\n retfile += \"\\n\"\n retfile += bline + \"\\n\\n\\n\"\n # Rewrite content lines numbers due added conten index to file beginning:\n ri = retfile.split(\"\\n\")\n preambulegth = len(ri)\n newri = []\n nlist = [str(x) for x in range(0, 10)]\n for line in ri:\n snum = \"\"\n for mark in line[-10:]:\n if mark in nlist:\n snum += mark\n if snum:\n snum = int(snum)\n snum += preambulegth\n line = line[:-len(str(snum))]\n line += str(snum)\n newri.append(line)\n # Join file preamble and its content:\n retfile = \"\\n\".join(newri)\n retfile += refilestr\n return retfile", "title": "" }, { "docid": "a2826fadb2d2cbeb990798428e70f20d", "score": "0.49534485", "text": "def _format_meta(self, data):\n\n return data", "title": "" }, { "docid": "ea70f5949af59f0a9f2f58132fad545d", "score": "0.49522576", "text": "def extract_table(self):", "title": "" }, { "docid": "43af63ccb94e26bbad5219a531642661", "score": "0.4951", "text": "def _adjust_output(self, field, value):\r\n # qseq and sseq are stored as SeqRecord, but here we only need the str\r\n if field in ('qseq', 'sseq'):\r\n value = str(value.seq)\r\n\r\n # evalue formatting, adapted from BLAST+ source:\r\n # src/objtools/align_format/align_format_util.cpp#L668\r\n elif field == 'evalue':\r\n if value < 1.0e-180:\r\n value = '0.0'\r\n elif value < 1.0e-99:\r\n value = '%2.0e' % value\r\n elif value < 0.0009:\r\n value = '%3.0e' % value\r\n elif value < 0.1:\r\n value = '%4.3f' % value\r\n elif value < 1.0:\r\n value = '%3.2f' % value\r\n elif value < 10.0:\r\n value = '%2.1f' % value\r\n else:\r\n value = '%5.0f' % value\r\n\r\n # pident and ppos formatting\r\n elif field in ('pident', 'ppos'):\r\n value = '%.2f' % value\r\n\r\n # evalue formatting, adapted from BLAST+ source:\r\n # src/objtools/align_format/align_format_util.cpp#L723\r\n elif field == 'bitscore':\r\n if value > 9999:\r\n value = '%4.3e' % value\r\n elif value > 99.9:\r\n value = '%4.0d' % value\r\n else:\r\n value = '%4.1f' % value\r\n\r\n # coverages have no comma (using floats still ~ a more proper\r\n # representation)\r\n elif field in ('qcovhsp', 'qcovs'):\r\n value = '%.0f' % value\r\n\r\n # list into '<>'-delimited string\r\n elif field == 'salltitles':\r\n value = '<>'.join(value)\r\n\r\n # list into ';'-delimited string\r\n elif field in ('sallseqid', 'sallacc', 'staxids', 'sscinames',\r\n 'scomnames', 'sblastnames', 'sskingdoms'):\r\n value = ';'.join(value)\r\n\r\n # everything else\r\n else:\r\n value = str(value)\r\n\r\n return value", "title": "" }, { "docid": "cdf3fbf0b6949660f47e78647e895668", "score": "0.49442372", "text": "def format_output(self, results):\n pass", "title": "" }, { "docid": 
"7fafef28a28f4b93cae6067a3e5d27ca", "score": "0.49336246", "text": "def make_line_fmt_tuples(level, obj): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "90a23510733ab372d207e48589f11b77", "score": "0.49261582", "text": "def reformat(outprefix, target=True):\n \n f = open('.'.join([outprefix, 'final', 'coords']), 'w')\n ow = csv.DictWriter(f, delimiter=\"\\t\", fieldnames=_output_colnames, extrasaction='ignore')\n ow.writerow(dict(zip(_output_colnames, _output_colnames)))\n\n infile = open('.'.join([PREFIX, 'coords']))\n ireader = csv.DictReader(infile, fieldnames=_input_colnames, delimiter=\"\\t\")\n\n # Resort by target genome, otherwise do nothing\n if target:\n data = sorted(ireader, key=lambda x: (x[_contigcol], min(int(x[_targetstartcol]), int(x[_targetendcol]))))\n else:\n data = ireader\n \n ow.writerows(data)\n\n f.close()", "title": "" }, { "docid": "7c11c40600b014b44bb21542dcf87c5d", "score": "0.4924925", "text": "def format_output(cursor):\r\n #assign all the contents of the cursor to sqlOutput\r\n sqlOutput = cursor.fetchall()\r\n\t#assign rowOutput to a blank value so it can be used with the += function\r\n rowOutput = ''\r\n columnHeaders = ''\r\n\t#format each row and column so that it has a uniform length of 20 - also change the date format that ODBC outputs by default\r\n for row in sqlOutput:\r\n for column in row:\r\n columnBody = str(column)\r\n columnBody = re.sub(\"(?P<year>[0-9]{4,})-(?P<month>[0-9]{1,})-(?P<day>[0-9]{1,}) (?P<hour>[0-9]{1,}):(?P<minute>[0-9]{1,}):(?P<second>[0-9]{1,}).[0-9]{1,6}\", \"\\g<year>-\\g<month>-\\g<day> \\g<hour>:\\g<minute>:\\g<second>\", columnBody)\r\n\r\n if len(columnBody) >= 20:\r\n columnBody = textwrap.shorten(columnBody, width = 20)\r\n\r\n while len(columnBody) < 20:\r\n columnBody += ' '\r\n\r\n rowOutput += columnBody\r\n rowOutput += '\\n'\r\n\r\n #format the column headers in a similar fashion as the rows\r\n for column in cursor.description:\r\n columnHeader = str(column[0])\r\n\r\n while len(columnHeader) < 20:\r\n columnHeader += ' '\r\n columnHeaders += columnHeader\r\n columnHeaders += '\\n'\r\n\r\n for columns in columnHeaders:\r\n columnHeaders += '-'\r\n\r\n columnHeaders = re.sub('_',' ', columnHeaders)\r\n outputMessage = '```TSQL\\n' + columnHeaders + '\\n' + rowOutput + '```'\r\n\r\n return(outputMessage)", "title": "" }, { "docid": "10c6e788d43a23dd8152478554f7bfa2", "score": "0.49150586", "text": "def fix(x):\n s = \"\"\n for i in x.splitlines():\n i = i.strip(\"\\n\\r\")\n if len(i) == 0:\n continue\n\n # If the expected output contains tabs, then use those to split,\n # otherwise space. 
This allows you to have expected output with blank\n # fields (e.g., \"\\t\\t\")\n if \"\\t\" in i:\n i = i.split(\"\\t\")\n else:\n i = i.split()\n\n i = \"\\t\".join(i) + \"\\n\"\n s += i\n return s", "title": "" }, { "docid": "dec05b59819ac258ed70e4e1fb6881da", "score": "0.49099147", "text": "def preparse(self):\n\t\tpass", "title": "" }, { "docid": "33b2352bea4772d7aa2d894cacd1d19a", "score": "0.49077776", "text": "def format_data(self):\n # O-arms report fluoro times in seconds only\n # C-båge report fluoro times in minutes and seconds\n output_data = [[i.Behandlingsnr, i.Opkort_huvudgrupp + ', ' + i.Opkort_undergrupp + ', ' + i.Opkortsbenämning,\n i.Opdatum, i.Opererande_enhet + ', ' + i.Opavdelning, i.Huvudoperatör,\n i.C_båge1, i.C_båge2, i.C_båge3,\n i.O_arm1, i.O_arm2,\n i.Minuter1 * 60 + i.Sekunder1, i.Minuter1, i.Sekunder1,\n i.Minuter2 * 60 + i.Sekunder2, i.Minuter2, i.Sekunder2,\n i.Minuter3 * 60 + i.Sekunder3, i.Minuter3, i.Sekunder3,\n i.Minuter_o1 * 60 + i.Sekunder_o1, int(time.strftime('%M', time.gmtime(i.Sekunder_o1))), int(time.strftime('%S', time.gmtime(i.Sekunder_o1))),\n i.Minuter_o2 * 60 + i.Sekunder_o2, int(time.strftime('%M', time.gmtime(i.Sekunder_o2))), int(time.strftime('%S', time.gmtime(i.Sekunder_o2))),\n i.Total_dos1_mGym2_, i.Total_dos2_mGym2_, i.Total_dos3_mGym2_,\n i.DAP1_mGycm2_, i.DAP2_mGycm2_]\n for i in self.orbit_data.itertuples()]\n column_names = ['examNo', 'examDescId', 'examDate', 'clinicId', 'operatorId',\n 'idModality1', 'idModality2', 'idModality3', 'idModality4', 'idModality5',\n 'fluoroTime1', 'fluoroTime1_m', 'fluoroTime1_s',\n 'fluoroTime2', 'fluoroTime2_m', 'fluoroTime2_s',\n 'fluoroTime3', 'fluoroTime3_m', 'fluoroTime3_s',\n 'fluoroTime4', 'fluoroTime4_m', 'fluoroTime4_s',\n 'fluoroTime5', 'fluoroTime5_m', 'fluoroTime5_s',\n 'dose1', 'dose2', 'dose3', 'dose4', 'dose5']\n self.output_data = pd.DataFrame(data=output_data, columns=column_names)\n\n # None Operator given name 'Ej angivet'\n self.output_data.operatorId.fillna(value='Ej angivet', inplace=True)\n\n # Remove rows with no fluoro time or fluoro dose values\n for m in [1, 2, 3, 4, 5]:\n replace_rows = self.output_data[self.output_data[f'fluoroTime{m}'] == 0].index.tolist()\n if len(replace_rows) > 0:\n self.output_data.ix[replace_rows, f'fluoroTime{m}'] = None\n self.output_data.ix[replace_rows, f'fluoroTime{m}_m'] = None\n self.output_data.ix[replace_rows, f'fluoroTime{m}_s'] = None\n replace_rows = self.output_data[self.output_data[f'dose{m}'] == 0].index.tolist()\n if len(replace_rows) > 0:\n self.output_data.ix[replace_rows, f'dose{m}'] = None\n\n # Remove duplicates\n self.output_data = self.output_data.drop_duplicates(keep='first')", "title": "" }, { "docid": "45a88ab95c3fba76143159a78143dc3f", "score": "0.4906791", "text": "def pretty_print(self):\n pass # implement your code here", "title": "" }, { "docid": "a2897b72d9ea1bffd885ea956867013c", "score": "0.49067605", "text": "def formattable(text):\n # split teext and try to determine separation character and prefix character\n linelist = text.strip(\"\\n\\t \").splitlines()\n itemlist = []\n sep, prefix = '', ''\n length = 0\n for line in linelist:\n if line.strip().startswith(\"%|\"):\n itemlist.append(line[1:].strip(\" |\").split('|'))\n sep, prefix = '|', '%'\n elif line.strip().startswith('|'):\n itemlist.append(line.strip(\" |\").split('|'))\n sep, prefix = '|', ''\n else:\n return 1, text\n length = max(length, len(itemlist[-1]))\n itemlist[-1] = [item.strip() for item in itemlist[-1]]\n lengthlist = [0 for i in 
range(length)]\n # calculate maximum with of each column\n for item in itemlist:\n if item[0].find(\"===\") != -1 or item[0].find(\"---\") != -1:\n continue\n currentlength = [len(column) for column in item]\n for index, length in enumerate(lengthlist):\n lengthlist[index] = max(length, currentlength[index])\n for row, item in enumerate(itemlist):\n if item[0].find(\"===\") != -1:\n itemlist[row] = prefix + \"{0}={1}={0}\".format(sep, \"===\".join([\"=\" * length for length in lengthlist]))\n continue\n elif item[0].find(\"---\") != -1:\n itemlist[row] = prefix + \"{0}-{1}-{0}\".format(sep, \"---\".join([\"-\" * length for length in lengthlist]))\n continue\n for column, content in enumerate(item):\n itemlist[row][column] = \"{{:<{}}}\".format(lengthlist[column]).format(content)\n itemlist[row] = prefix + \"{0} {1} {0}\".format(sep, \" {} \".format(sep).join(itemlist[row]))\n\n return 0, '\\n'.join(itemlist)", "title": "" }, { "docid": "f6011b73d111ffbcbb7c863d7b7ab2fb", "score": "0.49065715", "text": "def _convertData(self, disasm):\n while 'DCD ' in disasm: disasm = disasm.replace('DCD ', '.word ')\n while 'DCW ' in disasm: disasm = disasm.replace('DCW ', '.hword ')\n while 'DCB ' in disasm: disasm = disasm.replace('DCB ', '.byte ')\n # gnu assembler format\n while '.long ' in disasm: disasm = disasm.replace('.long ', '.word ')\n while '.short ' in disasm: disasm = disasm.replace('.short ', '.hword ')\n\n\n return disasm", "title": "" }, { "docid": "5d4e1dad42a40febcdd64c8aad865356", "score": "0.49061328", "text": "def _rewrite_patch_header(self, lines, to_format, from_format = None):\n if not lines:\n raise ValueError(\"empty patch file\")\n\n if from_format is None:\n from_format = self._detect_patch_header_format(lines)\n\n if from_format == to_format:\n return lines\n\n def parse_header(lines, regexs):\n if len(lines) < len(regexs):\n raise ValueError(\"patch files must have at least %s lines\"%len(regexs))\n\n for i,regex in enumerate(regexs):\n if not regex.match(lines[i]):\n raise ValueError(\"Malformatted patch. 
Line `%s` does not match regular expression `%s`.\"%(lines[i],regex.pattern))\n\n message = []\n for i in range(len(regexs),len(lines)):\n if not lines[i].startswith(\"diff -\"):\n message.append(lines[i])\n else: break\n\n return message, lines[i:]\n\n if from_format == \"git\":\n message, diff = parse_header(lines, (GIT_FROM_REGEX, GIT_SUBJECT_REGEX, GIT_DATE_REGEX))\n\n if to_format == \"hg\":\n ret = []\n ret.append('# HG changeset')\n ret.append('# User %s'%GIT_FROM_REGEX.match(lines[0]).groups()[0])\n ret.append('# Date %s 00000'%datetime.strptime(GIT_DATE_REGEX.match(lines[2]).groups()[0], \"%a %b %d %H:%M:%S %Z %Y\").strftime(\"%s\")) # this is not portable and the time zone is wrong\n ret.append('# Node ID 0000000000000000000000000000000000000000')\n ret.append('# Parent 0000000000000000000000000000000000000000')\n ret.append(GIT_SUBJECT_REGEX.match(lines[1]).groups()[0])\n ret.extend(message)\n ret.extend(diff)\n else:\n raise NotImplementedError(to_format)\n elif from_format == \"diff\":\n ret = []\n ret.append('From: \"Unknown User\" <[email protected]>')\n ret.append('Subject: No Subject')\n ret.append('Date: %s'%datetime.today().ctime())\n ret.extend(lines)\n return self._rewrite_patch_header(ret, to_format=to_format, from_format=\"git\")\n elif from_format == \"hg\":\n message, diff = parse_header(lines, (HG_HEADER_REGEX, HG_USER_REGEX, HG_DATE_REGEX, HG_NODE_REGEX, HG_PARENT_REGEX))\n ret = []\n ret.append('From: %s'%HG_USER_REGEX.match(lines[1]).groups()[0])\n ret.append('Subject: %s'%(\"No Subject\" if not message else message[0]))\n ret.append('Date: %s'%(datetime.utcfromtimestamp(int(HG_DATE_REGEX.match(lines[2]).groups()[0])).ctime()))\n ret.extend(message[1:])\n ret.extend(diff)\n return self._rewrite_patch_header(ret, to_format=to_format, from_format=\"git\")\n else:\n raise NotImplementedError(from_format)", "title": "" }, { "docid": "2a3c05e75326335590569b1d83e33f21", "score": "0.49042663", "text": "def reformat_filestr(filestr):\n filelines = filestr.splitlines()\n # get just indexing text:\n formtext = []\n reading = False\n for i in filelines:\n if i.startswith(\"#IB:\"):\n reading = True\n if reading:\n formtext.append(i)\n elif i.startswith(\"#IE:\"):\n reading = False\n\n newfile = []\n for i in formtext:\n if \"todo:\" in i.lower():\n r = i.lower().split(\"todo:\")[1]\n i = \"# TODO:\" + r.lower()\n newfile.append(i)\n filelines = []\n for i in newfile:\n if \"##~ \" in i:\n i = i.upper()\n filelines.append(i)\n newfile = []\n for i in filelines:\n if \"#→ \" in i:\n i = i.title()\n newfile.append(i)\n retfile = []\n empty = \"n\"\n # formating empty lines:\n for i in newfile:\n if i == \"\":\n if empty != \"\":\n retfile.append(i)\n else:\n retfile.append(i)\n empty = i\n rfile = []\n for i in retfile:\n if \"##~\" in i or \"#:<\" in i:\n for _ in range(4):\n rfile.append(\"\")\n elif \"#→\" in i:\n for _ in range(2):\n rfile.append(\"\")\n rfile.append(i)\n return \"\\n\".join(rfile)", "title": "" }, { "docid": "359e60b0b43ccc5ec7622d020c8e47fb", "score": "0.48935723", "text": "def _format_alignment(self):\n # needs to reverse, so the lists are in order\n self._seq_pos_2.reverse()\n self._seq_pos_1.reverse()\n ali_1: list = [] # The Characters for the string are stored here\n ali_2: list = [] # The Characters for the string are stored here\n char_1_1: int = -1\n char_2_1: int = -1\n for char_1 in self._seq_pos_1:\n if char_1 == char_1_1:\n ali_1.append('_') # '_' represents a gap\n else:\n ali_1.append(self.sequence_1[char_1])\n char_1_1 = char_1\n for 
char_2 in self._seq_pos_2:\n if char_2 == char_2_1:\n ali_2.append('_') # '_' represents a gap\n else:\n ali_2.append(self.sequence_2[char_2])\n char_2_1 = char_2\n return ''.join(ali_1) + '\\n' + ''.join(ali_2)", "title": "" }, { "docid": "79f2c5202d6336a86073a0d5503a346d", "score": "0.48930028", "text": "def test_ps_data_formatting():\n data = [\n {'Id': ('9e19b1558bbcba9202c1d3c4e26d8fe6e2c6060faad9a7074487e3b210a2'\n '6a16')},\n {'Id': ('b798acf4382421d231680d28aa62ae9b486b89711733c6acbb4cc85d8bec4'\n '072')},\n ]\n formatted = format_data('ps', data)\n print('\\n')\n print('\\n'.join(formatted))", "title": "" }, { "docid": "56d722dd4f5c30f2352e13ec58e24f89", "score": "0.4885474", "text": "def str_data( self , width , index1 , index2 , fmt):\n data = []\n s = \"\"\n for index in range(index1, index2):\n data.append( self[index] )\n for index in range(len(data)):\n s += fmt % data[ index ]\n if index % width == (width - 1):\n s+= \"\\n\"\n return s", "title": "" }, { "docid": "e7d25826087740069b16cabf0b715617", "score": "0.48816004", "text": "def _parse(self):\n pass", "title": "" }, { "docid": "315b2f4d888e193fe5a065c17c6caaac", "score": "0.48797566", "text": "def _format_for_encoding(self, *Xs):\n return Xs", "title": "" }, { "docid": "b255b43c13f297f565a9ba13c90a34d3", "score": "0.48775813", "text": "def formatBloc(t):\n rows = []\n\n # store units and names\n units = {}\n names = []\n\n for row in t :\n rows.append(ParseResults([ row.name, row.value ]))\n names.append(row.name)\n if row.unit : units[row.name] = row.unit[0]\n\n rows.append( ParseResults([ 'names_', names ]))\n rows.append( ParseResults([ 'unit_', units]))\n\n return rows", "title": "" }, { "docid": "ac9c84f87019cfcd288f0efe7d442112", "score": "0.4877395", "text": "def test():\n\n t1 = '''\n1 But - CC O 0\n2 Dumbledore - NNP O 0 n.person\n3 says - VBZ O 0 v.communication\n4 he - PRP O 0\n5 does - VBZ O 0\n6 n't - RB O 0\n7 care - VB O 0 v.emotion\n8 what - WP O 0\n9 they - PRP O 0\n10 do - VBP O 0 v.change\n11 as - IN B 0\n12 long - JJ I 11\n13 as - IN I 12\n14 they - PRP O 0\n15 do - VBP O 0 v.change\n16 n't - RB O 0\n17 take - VB B 0 v.change\n18 him - PRP o 0\n19 off - RP I 17\n20 the - DT O 0\n21 Chocolate - NNP B 0 n.food\n22 Frog - NNP I 21\n23 cards - NNS O 0 n.artifact\n24 . - . O 0\n\n1 Would - MD O 0 sent2\n2 you - PRP O 0 sent2\n3 care - VB B 0 v.emotion sent2\n4 for - IN I 3 ent2\n5 a - DT O 0 sent2\n6 lemon - NN B 0 n.food sent2\n7 drop - NN I 6 sent2\n8 ? - . 
O 0 sent2\n\n\n1 Harry - NNP O 0 n.person\n2 had - VBD B 0 v.cognition\n3 a - DT b 0\n4 lot - NN i 3\n5 of - IN o 0\n6 trouble - NN I 2\n7 keeping - VBG O 0 v.stative\n8 his - PRP$ O 0\n9 mind - NN O 0 n.cognition\n10 on - IN O 0\n11 his - PRP$ O 0\n12 lessons - NNS O 0 n.cognition\n'''.lstrip().replace(' ','\\t')\n\n for data in readsents(StringIO.StringIO(t1)):\n print(render([w for w,pos in data[\"words\"]], data[\"_\"], data[\"~\"],\n {int(k): v[1] for k,v in data[\"labels\"].items()}))\n print('***')", "title": "" }, { "docid": "1d9d3c970e4228e1b90e3ae839f4d664", "score": "0.48744142", "text": "def _get_formatted_data(self):\n main_str = str()\n # Header\n for line in self._header:\n main_str = main_str + line + '\\n'\n # Data\n for line in self._data:\n main_str = main_str + line + '\\n'\n return main_str", "title": "" }, { "docid": "7f38d71dd265567d43d68500b21165b1", "score": "0.48726946", "text": "def fix_e302(self, result):\r\n add_linenum = 2 - int(result['info'].split()[-1])\r\n cr = '\\n' * add_linenum\r\n self.source[result['line'] - 1] = cr + self.source[result['line'] - 1]", "title": "" }, { "docid": "d649ec580b0c0c1baec33d628f2e8aa7", "score": "0.48627144", "text": "def revise():", "title": "" }, { "docid": "2fa9734175fac34d8431a2e88d4fd41c", "score": "0.4857048", "text": "def doconce2format4docstrings(filestr, format):\n\n c1 = re.compile(r'^\\s*(class|def)\\s+[A-Za-z0-9_,() =]+:\\s+(\"\"\".+?\"\"\")',\n re.DOTALL|re.MULTILINE)\n c2 = re.compile(r\"^\\s*(class|def)\\s+[A-Za-z0-9_,() =]+:\\s+('''.+?''')\",\n re.DOTALL|re.MULTILINE)\n doc_strings = [doc_string for dummy, doc_string in c1.findall(filestr)] + \\\n [doc_string for dummy, doc_string in c2.findall(filestr)]\n lines = filestr.splitlines()\n for i, line in enumerate(lines):\n if not line.lstrip().startswith('#'):\n break\n filestr2 = '\\n'.join(lines[i:])\n c3 = re.compile(r'^\\s*\"\"\".+?\"\"\"', re.DOTALL) # ^ is the very start\n c4 = re.compile(r\"^\\s*'''.+?'''\", re.DOTALL) # ^ is the very start\n doc_strings = c3.findall(filestr2) + c4.findall(filestr2) + doc_strings\n\n # Find and remove indentation\n all = []\n for doc_string in doc_strings:\n lines = doc_string.splitlines()\n if len(lines) > 1:\n indent = 0\n line1 = lines[1]\n while line1[indent] == ' ':\n indent += 1\n for i in range(1, len(lines)):\n lines[i] = lines[i][indent:]\n all.append(('\\n'.join(lines), indent))\n else:\n all.append((doc_string, None))\n\n for doc_string, indent in all:\n new_doc_string = doconce2format(doc_string, format)\n if indent is not None:\n lines = new_doc_string.splitlines()\n for i in range(1, len(lines)):\n lines[i] = ' '*indent + lines[i]\n new_doc_string = '\\n'.join(lines)\n lines = doc_string.splitlines()\n for i in range(1, len(lines)):\n if lines[i].isspace() or lines[i] == '':\n pass # don't indent blank lines\n else:\n lines[i] = ' '*indent + lines[i]\n doc_string = '\\n'.join(lines)\n\n filestr = filestr.replace(doc_string, new_doc_string)\n\n return filestr", "title": "" }, { "docid": "11963c883dcecb66e71b40a7ce34863b", "score": "0.48492151", "text": "def processLine(line):\n #NOTE: The most common time when two format dictionaries are returned occurs when the second operand of the line is \"x\". 
This registers as both formats 2 and 3.\n #Create regular expressions to recognize formats\n # label = re.compile('^(((?P<label>[A-z]([A-z]|[0-9]|_)*):)(\\s)*)|(\\.(\\.|\\n|\\s)*)$')\n label = re.compile('^((((?P<label>[A-z]([A-z]|[0-9]|_)*):)(\\s)*)$)|(^(\\s)*\\.(\\s|\\S)*$)|(^(\\s)*$)')\n format1 = re.compile('^((?P<label>[A-z]([A-z]|[0-9]|_)*):)?(\\s)*(?P<opcode>([A-z]+)|(\\+rsub))(\\s)*$')\n format2 = re.compile('^((?P<label>[A-z]([A-z]|[0-9]|_)*):)?(\\s)*(?P<opcode>[A-z]+)(\\s)+(?P<operand1>(a|x|l|pc|sw|b|s|t|f))(\\s)*,(\\s)*(?P<operand2>(a|x|l|pc|sw|b|s|t|f|((\\+|\\-)?([0-9]|[A-F]|[a-f])+)))(\\s)*$')\n format3 = re.compile('^((?P<label>[A-z]([A-z]|[0-9]|_)*):)?(\\s)*(?P<opcode>[A-z]+)(\\s)+((?P<indirect>@)?|(?P<immediate>#)?)(?P<operand>((\\+|\\-)?(\\w+)|((x|c)\\'(\\S| |\\t)*\\')))(\\s)*(,(\\s)*(?P<extended>x)(\\s)*)?$')\n format4 = re.compile('^((?P<label>[A-z]([A-z]|[0-9]|_)*):)?(\\s)*\\+(?P<opcode>[A-z]+)(\\s)+((?P<indirect>@)?|(?P<immediate>#)?)(?P<operand>(\\+|\\-)?(\\w|\\'|\\\")+)(\\s)*(,(\\s)*(?P<extended>x)(\\s)*)?$')\n\n lmatch = label.match(line)\n f1match = format1.match(line)\n f2match = format2.match(line)\n f3match = format3.match(line)\n f4match = format4.match(line)\n allmatch = [lmatch, f1match, f2match, f3match, f4match]\n matched = []\n for i in range(len(allmatch)):\n if(allmatch[i] != None):\n temp = allmatch[i].groupdict(None)\n temp.update({\"format\" : i})\n matched.append(temp)\n\n return sorted(matched)", "title": "" } ]
35ad6880de89a0177445728f2773a0ba
Starts a new subroutine scope (i.e. erases all names in the previous subroutine's scope.)
[ { "docid": "ad7c5468c895132ec22ad7756b240832", "score": "0.6358833", "text": "def start_subroutine(self):\n\n self._subroutine_table.clear()\n self._kind_counter['argument'] = 0\n self._kind_counter['var'] = 0", "title": "" } ]
[ { "docid": "ce7b3e090f92b78d8e305cac7c33755e", "score": "0.6175013", "text": "def startRoutine(self):\n self.symbolTable[\"subroutine\"] = {}\n self.counts['ARG'] = 0\n self.counts['VAR'] = 0", "title": "" }, { "docid": "0b40c38b32a688e503b0eb805ae43e21", "score": "0.6157766", "text": "def startSubroutine(self):\n self._args.clear()\n self._vars.clear()", "title": "" }, { "docid": "5c0cc50bab44475a5e1a5d10fe86e0db", "score": "0.60035115", "text": "def leave_scope(self):", "title": "" }, { "docid": "6dc47e2e224073b69f932e8eba893ab6", "score": "0.5743008", "text": "def close_scope(cls) -> None:\n # Points to the actual scope (outer) \n cls.current_scope -= 1", "title": "" }, { "docid": "9131ca641c81c386a9b10c35da58f01d", "score": "0.55994153", "text": "def begin_scope(self):\n stack_names = self.symbol_table.lookup('_stack_names')\n self.symbol_table.begin_scope()\n if stack_names:\n self.symbol_table.add_stack_assumptions(stack_names.value)\n\n self.scopes.append(StateScope.copy(self.state))\n self.state = self.scopes[-1]", "title": "" }, { "docid": "0afe516c8ff6e909ede3329197b9f509", "score": "0.55700135", "text": "def scope_enter(self):\n\n # print(\"\\n\\n>>> Entering new scope...\")\n self._symbols.append(dict())", "title": "" }, { "docid": "715952f69f311409c71729dcd72b2998", "score": "0.54539573", "text": "def pop_scope(self, aliases, frame):\r\n for name, alias in aliases.iteritems():\r\n self.writeline('l_%s = %s' % (name, alias))\r\n to_delete = set()\r\n for name in frame.identifiers.declared_locally:\r\n if name not in aliases:\r\n to_delete.add('l_' + name)\r\n if to_delete:\r\n # we cannot use the del statement here because enclosed\r\n # scopes can trigger a SyntaxError:\r\n # a = 42; b = lambda: a; del a\r\n self.writeline(' = '.join(to_delete) + ' = missing')", "title": "" }, { "docid": "17f8aa896773ffe3f2263e9913f6cbdf", "score": "0.5398948", "text": "def sub():\n pass", "title": "" }, { "docid": "f0ec3b555d22450cc58f2d809ed34f4f", "score": "0.52668124", "text": "def open_scope(cls) -> None:\n # Enlarges the list\n if ( cls.current_scope + 1 == len(cls.scopes) ):\n cls.scopes.append(-1)\n # Points-to the actual scope just opened\n cls.current_scope += 1\n # Update its identifier\n cls.scopes[cls.current_scope] += 1", "title": "" }, { "docid": "b15ca299138da362fedd732e14843bef", "score": "0.5237222", "text": "def enter_scope(self):\n # NOTE cyclic imports!\n from vyper.semantics import environment\n\n self._scopes.append(set())\n\n if len(self._scopes) == 1:\n # add mutable vars (`self`) to the initial scope\n self.update(environment.get_mutable_vars())\n\n return self", "title": "" }, { "docid": "482fdb2a95c4b5cf2289631e4a4e2e26", "score": "0.51207346", "text": "def i1():\n def i2():\n pass\n x = 1\n\n def i3():\n i2()\n def i4():\n x\n i3()\n i4()", "title": "" }, { "docid": "b626537be660e0a9a8507846878d5998", "score": "0.51169294", "text": "def scope_exit(self):\n # print(\"\\n<<< Exiting scope...\")\n self._symbols.pop()", "title": "" }, { "docid": "3c8fe6d9f869fe25eb1f8fb7442c12b3", "score": "0.50653523", "text": "def main():\n print('entering closure scope')\n def CreateClosure(value):\n \"\"\"Create closure forcing capture by copy or value.\"\"\"\n context = copy.deepcopy(value)\n def Method():\n return copy.deepcopy(context)\n return Method\n # Create lifetime probe\n print('create lifetime probe')\n probe = LifetimeProbe()\n # Create closure\n print('create closure')\n closure = CreateClosure(probe)\n # Print closure value\n print('context in closure: ', 
closure())\n print('exiting closure scope')\n return closure", "title": "" }, { "docid": "a6e05ca98167bd6a6293b768a3861dc6", "score": "0.506206", "text": "def append_scope(self):\n self.stack.current.append(Scope(self.stack.current.current))", "title": "" }, { "docid": "7b1bbec168cc15b99a882cf8ddc22c13", "score": "0.5050978", "text": "def unoptimize_scope(self, frame):\r\n # XXX: this is not that nice but it has no real overhead. It\r\n # mainly works because python finds the locals before dead code\r\n # is removed. If that breaks we have to add a dummy function\r\n # that just accepts the arguments and does nothing.\r\n if frame.identifiers.declared:\r\n self.writeline('%sdummy(%s)' % (\r\n unoptimize_before_dead_code and 'if 0: ' or '',\r\n ', '.join('l_' + name for name in frame.identifiers.declared)\r\n ))", "title": "" }, { "docid": "b3d3611e14a496ab97771a41c2e344a9", "score": "0.49575308", "text": "def __init__(self, **kwags):\n self.stack = Stack([Frame([Scope(**kwags)])])", "title": "" }, { "docid": "6755f2516dcc02dee05b92163aeaeae5", "score": "0.49471307", "text": "def sub():", "title": "" }, { "docid": "b6c921757b0572a3dd1f64f6f6c06dca", "score": "0.49436694", "text": "def drbrace(self, lexer: MetamathLexer) -> None:\n self.scopes.pop_scope()", "title": "" }, { "docid": "a6238260bf94aba9621a20036f23b951", "score": "0.49343923", "text": "def __init__(self):\n self.globals = set()\n self.nonlocals = set()\n self.args = set()\n self.father = None # point to the nearest function name scope.\n self.w_vars = set() # all qualified + normal names been stored\n self.created = set() # useful for control flow compatibility\n # only valid in control_flow nodes\n # may be remove later.\n self.push_pop_vars = set() # we call push and pop in the vars", "title": "" }, { "docid": "0960ca8f37ba815c102acbcce0524c1c", "score": "0.49304077", "text": "def __call__(self, level=0):\r\n if not isinstance(level, int) or level < 0:\r\n msg = ('The nonlocal scope level must be an int >= 0. 
'\r\n 'Got %r instead.')\r\n raise ValueError(msg % level)\r\n offset = 0\r\n target = self._nls_obj\r\n while target is not None and offset != level:\r\n target = target.parent\r\n offset += 1\r\n if offset != level:\r\n msg = 'Scope level %s is out of range'\r\n raise ValueError(msg % level)\r\n return Nonlocals(target, self._nls_listener)", "title": "" }, { "docid": "c0831f0280ebf5b6b6132c3362151a59", "score": "0.49297702", "text": "def enter_function(self, state):\n # Do not add any frames after we already\n if self.stackframes:\n self.stackframes.append(StackFrame(state.solver.eval(state.regs.sp)))", "title": "" }, { "docid": "bf05d72d442b571402c63f28183d8b94", "score": "0.4884891", "text": "def enterBlockScope(self):\n self.codes.append((\"enterBlockScope\",))", "title": "" }, { "docid": "96a5b2832e9c91b8028c84174328fa20", "score": "0.48792666", "text": "def end_scope(self):\n self.symbol_table.end_scope()\n self.scopes.pop()\n if not self.scopes:\n raise Exception('Popped the topmost scope')\n self.state = self.scopes[-1]", "title": "" }, { "docid": "007ea25e8ef87d1d26cf23549d9c6375", "score": "0.48669708", "text": "def toplevel_function():\n pass", "title": "" }, { "docid": "f3b665e312c325f3115834ab55a67e6f", "score": "0.48601297", "text": "def push_scope(self) -> None:\n self.scopes.append(Scope())", "title": "" }, { "docid": "99b5f5e1de03fe4231e353896b8cecc1", "score": "0.48549852", "text": "def pop_scope(self) -> None:\n scope = self.scopes.pop()\n scope.report_d_vars()", "title": "" }, { "docid": "deb0200d73461989d6764393057c85da", "score": "0.481563", "text": "def outer_scope_function(name):\n\n def inner_scope_function():\n _ = name.capitalize()\n # uncomment the following line and you will have an error. Python does\n # not make its scope dynamically.\n # name = 'some other name'\n return 'Hello ' + name\n\n return inner_scope_function", "title": "" }, { "docid": "d76379491f7f4233ba04b72bfcac7ff7", "score": "0.47864997", "text": "def my_reset(*varnames):\n globals_ = globals()\n to_save = {v: globals_[v] for v in varnames}\n to_save['my_reset'] = my_reset # lets keep this function by default\n del globals_\n get_ipython().magic(\"reset -f\")\n globals().update(to_save)", "title": "" }, { "docid": "12e0809205b3543391bb2db16802cf27", "score": "0.47817945", "text": "def sub_compiler(self, status: str, name: str):\n self.current_sub = name\n if status == \"start\":\n return [\"#Start {}\".format(name)]\n elif name == \"main\":\n # Special case the main subroutine to jump to the end of the script at end\n return [\"#End {}\".format(name),\n \"MLZ -1 -2 0\",\n \"MLZ 0 0 0\"]\n else:\n return [\"#End {}\".format(name)]", "title": "" }, { "docid": "ef340eccbef71c98707d1afcb3e894d2", "score": "0.47768793", "text": "def my_previous_function():\n global level\n level -= 1", "title": "" }, { "docid": "f5cf1b2974d877ba8dbbc477d255ae8a", "score": "0.47748366", "text": "def avoidNameClash(self):\n boundVarSet = self.function.body.boundVar()\n freeVarSet = self.argument.freeVar()\n nameClash = boundVarSet.intersection(freeVarSet)\n if nameClash != {}:\n for var in nameClash:\n self.function.body.rename(var, self.freshVar())", "title": "" }, { "docid": "c8d38235cf01ac7575baa87addad3ad5", "score": "0.47739646", "text": "def startName():", "title": "" }, { "docid": "27487be9c36e5c483b835bea1cb1b84a", "score": "0.47613811", "text": "def init_globals(stor):\n fake_state = State(None, Envr(\"init_globals\", None), stor, None)\n for decl in ls.LinkSearch.global_decl_list:\n logging.debug(\"Global 
%s\", str(decl.name))\n decl_helper(decl, fake_state)\n if decl.init:\n address = fake_state.envr.get_address(decl.name)\n value, _ = get_value(decl.init, fake_state)\n # MARKER\n fake_state.stor.write(address, value)\n funcs = ls.LinkSearch.function_lut\n for func in funcs:\n logging.debug(\"Global function %s\", func)\n f_addr = fake_state.envr.map_new_identifier(func)\n fake_state.stor.allocM(f_addr, [8]) # word size\n func_val = generate_function_definition(funcs[func])\n # MARKER\n fake_state.stor.write(f_addr, func_val)\n\n Envr.set_global(fake_state.envr)", "title": "" }, { "docid": "65d1da631198dc44aa6afbd06e851d28", "score": "0.47593936", "text": "def rem_before_start_func(self, func):\n if func in self._start_funcs:\n self._start_funcs.remove(func)", "title": "" }, { "docid": "a6440995bc86a74d0d8bdc37246b02c9", "score": "0.4750313", "text": "def old_outer_function(name):\n\n def new_inner_function():\n return 'Hello ' + name\n\n return new_inner_function", "title": "" }, { "docid": "8b0b9ef6601f863387f372d85f20bd6a", "score": "0.47358865", "text": "def fresh_start(self):\n self.env.reset()", "title": "" }, { "docid": "7e03e316b7c5540018711b91e529904f", "score": "0.47232127", "text": "def f_globals(self):", "title": "" }, { "docid": "2c644b47e1b8c82d84cab344f485f3c4", "score": "0.47187582", "text": "def push_default(self):\n _name_scope_stack.push(self)", "title": "" }, { "docid": "786b64a6bc44d71c702313fa6874548c", "score": "0.471208", "text": "def pop_caller(self):\r\n\r\n del self.caller_stack[-1]", "title": "" }, { "docid": "1b61b7866aa7564a94ba2d71e6aab739", "score": "0.46838972", "text": "def foo():\n ...", "title": "" }, { "docid": "eb14edf07ffcff3f547843543424eb80", "score": "0.46725318", "text": "def _restore_runtime(func, intersect):\n _globals = func.__globals__\n for elem in list(HYBRID_GLOBALS.keys()):\n _globals.pop(elem)\n for k, v in intersect:\n _globals[k] = v", "title": "" }, { "docid": "f77376281d4d61fbf6c472771426f20d", "score": "0.46636143", "text": "def ctxtStart(ctxt):\r\n ctxt.__enter__()\r\n return ctxt", "title": "" }, { "docid": "09825962b5b6143c988bb55e564ee066", "score": "0.4661358", "text": "def __globals__(self):", "title": "" }, { "docid": "faa9f41a1ab25c91a9c55a4705be9e86", "score": "0.46590608", "text": "def rebind_fun(self):\n self.fun = self.get_bound_fun()", "title": "" }, { "docid": "df35a313fffcc26a7aae5f5a3a356f3f", "score": "0.46570525", "text": "def compile_sub_routine(self):\n # start new subroutine symbol table\n self.symbol_table.start_subroutine()\n # get subroutine type (method/construction/function)\n sub_type = self.tokenizer.key_word()\n\n # advances the token to what the subroutine returns\n self.tokenizer.advance()\n # updates the return type\n if self.tokenizer.token_type() == KEY_WORD:\n return_type = self.tokenizer.key_word()\n else:\n return_type = self.tokenizer.identifier()\n\n # advances the token to <identifier> sub_name <identifier>\n self.tokenizer.advance()\n # update the subroutine name\n subroutine_name = self.tokenizer.identifier()\n self.current_sub_name = subroutine_name\n\n # advance to <symbol> ( <symbol>\n self.tokenizer.advance()\n # if subroutine is a method, add 'this' to the symbol table as argument 0\n if sub_type == METHOD:\n self.symbol_table.define(\"this\", self.class_name, \"ARG\")\n # compiles the parameter list\n self.compile_parameter_list()\n # we are at <symbol> ) <symbol>\n # advance to subroutine body, and compile it\n self.tokenizer.advance()\n self.compile_subroutine_body(sub_type)", 
"title": "" }, { "docid": "8ec25ca7f4ea0cf607bc8caa5c7e09e2", "score": "0.46552938", "text": "def function_scoping(self, node, frame, children=None,\r\n find_special=True):\r\n # we have to iterate twice over it, make sure that works\r\n if children is None:\r\n children = node.iter_child_nodes()\r\n children = list(children)\r\n func_frame = frame.inner()\r\n func_frame.inspect(children, hard_scope=True)\r\n\r\n # variables that are undeclared (accessed before declaration) and\r\n # declared locally *and* part of an outside scope raise a template\r\n # assertion error. Reason: we can't generate reasonable code from\r\n # it without aliasing all the variables.\r\n # this could be fixed in Python 3 where we have the nonlocal\r\n # keyword or if we switch to bytecode generation\r\n overriden_closure_vars = (\r\n func_frame.identifiers.undeclared &\r\n func_frame.identifiers.declared &\r\n (func_frame.identifiers.declared_locally |\r\n func_frame.identifiers.declared_parameter)\r\n )\r\n if overriden_closure_vars:\r\n self.fail('It\\'s not possible to set and access variables '\r\n 'derived from an outer scope! (affects: %s)' %\r\n ', '.join(sorted(overriden_closure_vars)), node.lineno)\r\n\r\n # remove variables from a closure from the frame's undeclared\r\n # identifiers.\r\n func_frame.identifiers.undeclared -= (\r\n func_frame.identifiers.undeclared &\r\n func_frame.identifiers.declared\r\n )\r\n\r\n # no special variables for this scope, abort early\r\n if not find_special:\r\n return func_frame\r\n\r\n func_frame.accesses_kwargs = False\r\n func_frame.accesses_varargs = False\r\n func_frame.accesses_caller = False\r\n func_frame.arguments = args = ['l_' + x.name for x in node.args]\r\n\r\n undeclared = find_undeclared(children, ('caller', 'kwargs', 'varargs'))\r\n\r\n if 'caller' in undeclared:\r\n func_frame.accesses_caller = True\r\n func_frame.identifiers.add_special('caller')\r\n args.append('l_caller')\r\n if 'kwargs' in undeclared:\r\n func_frame.accesses_kwargs = True\r\n func_frame.identifiers.add_special('kwargs')\r\n args.append('l_kwargs')\r\n if 'varargs' in undeclared:\r\n func_frame.accesses_varargs = True\r\n func_frame.identifiers.add_special('varargs')\r\n args.append('l_varargs')\r\n return func_frame", "title": "" }, { "docid": "c0be1f52f677cc166c68c50d310bf268", "score": "0.46270946", "text": "def decrementScope(self):\n\t\tif self.returnDeclared:\n\t\t\tif self.scope_depth-1 < self.MIN_SCOPE:\n\t\t\t\tself.scope_depth -= 1\n\t\t\t\tself.end = True\n\t\t\t\treturn ''\n\t\tif self.scope_depth <= self.MIN_SCOPE:\n\t\t\tif self.returnDeclared:\n#\t\t\t\tself.endCodeGeneration()\n\t\t\t\tself.end = True\n\t\t\t\treturn ''\n\t\t\telse:\n\t\t\t\treturn self.OPTIONS[3]()\n\t\tself.variable_names.pop()#deletes top level\n\t\t#self.makeDebugFile('debugTS', 'PreDecrement Scope: '+str(self.scope_depth)+self.TAB)\n\t\tself.scope_depth -= 1\n\t\t#self.makeDebugFile('debugTS', 'PostDecrement Scope: '+str(self.scope_depth)+self.NEWLINE)\n\t\tself.returnDeclared = False\n\t\treturn ''", "title": "" }, { "docid": "da6c5e537aadf004a2de78813593d03e", "score": "0.46173993", "text": "def startSymbolWriting():\n gg.doSymbolWriting = True\n gg._s = ''\n gg.symbol_list = []\n gg.symbol_currentid = None\n gg.symbol_topid = None\n gg.symbol_s = []", "title": "" }, { "docid": "23bb5eb44f8fa58a38685b99eaae7996", "score": "0.46073323", "text": "def define_scope(func):\n\n name_func = func.__name__\n if name_func == '__init__':\n name_func = func.__class__.__name__\n name_func = 
camel_case(name_func)\n\n @functools.wraps(func)\n def _wrapper(*args, **kwargs):\n # Local, mutable copy of `name`.\n name_to_use = name_func\n\n with tf.name_scope(name_to_use):\n return func(*args, **kwargs)\n\n return _wrapper", "title": "" }, { "docid": "12c7e1ba584383b5a69207cc921d3910", "score": "0.45976022", "text": "def pull_locals(self, frame):\r\n self.undeclared_identifiers.update(frame.identifiers.undeclared)", "title": "" }, { "docid": "bf340770ad2693b802f0d568de79e70c", "score": "0.45896116", "text": "def new(self):\r\n self._exit()\r\n self.__init__()", "title": "" }, { "docid": "0b56c29edf82443babe30a464fc3e621", "score": "0.45876196", "text": "def reset():\n cls = close()\n global flag, closed_files, file_dict\n flag = None\n file_dict = {}\n closed_files = []\n return cls", "title": "" }, { "docid": "c477632c8c11c5f2391e55245e5d2025", "score": "0.458358", "text": "def exec_script(self, *a, **kw):\n for k in [k for k, v in self.get_globals().items()\n if type(v) == type(self) and v == self]:\n self.clear_globals[k] = self\n\n super().exec_script(*a, **kw)", "title": "" }, { "docid": "0f7cef260e786b274571660d00e82d58", "score": "0.45766625", "text": "def local_function():\n pass", "title": "" }, { "docid": "f267fc0a9cca559c4b68f38d223e1564", "score": "0.45582142", "text": "def make_global(self):\n self.__class__.__globalsys = self", "title": "" }, { "docid": "57574e1c51ceb1d6139f81598405c891", "score": "0.4553436", "text": "def __init__(self):\n # Symbol table will be a dictionary which contains a list for each variable, incluid it`s id, type, kind and number depending on it`s kind\n # We will have a global program table, in which every subroutine scope will be contained\n self.symbolTable = {\"class\": {}, \"subroutine\": {}}\n # Symbol table running index\n self.counts = {'STATIC': 0, 'FIELD': 0, 'ARG': 0, 'VAR': 0}", "title": "" }, { "docid": "a06c8ffe0f0755b2451c419a632eac74", "score": "0.45489392", "text": "def pop_scope(self):\n child_scope = self.stack.current.current.copy()\n self.stack.current.pop()\n parent_scope = self.stack.current.current.copy()\n self.stack.current.current = {\n key: child_scope[key] for key in child_scope if key in parent_scope\n }", "title": "" }, { "docid": "ed9d2b57d00fdbf2137efd6bf68fd01a", "score": "0.4548133", "text": "def clopure_def(self, name, expression, local_vars):\n if not isinstance(name, ClopureSymbol):\n raise ClopureRuntimeError(\"%s is not a symbol\" % str(name))\n self.global_vars[name.symbol] = self.evaluate(expression, local_vars=local_vars)", "title": "" }, { "docid": "f52bd1f7f3255e6e439da45596439944", "score": "0.45346045", "text": "def test_undeclare_function_after_declare_variable(self):\n input =Program([FuncDecl(Id(\"foo\"),[],VoidType(),Block([])),FuncDecl(Id(\"main\"),[],VoidType(),Block([VarDecl(\"foo\",IntType()),CallExpr(Id(\"foo\"),[])]))])\n expect = \"Type Mismatch In Expression: CallExpr(Id(foo),[])\"\n self.assertTrue(TestChecker.test(input,expect,467))", "title": "" }, { "docid": "95a418a7ffc1bee85f3602171ae788dd", "score": "0.45161405", "text": "def pop_default(self):\n assert(_name_scope_stack.top == self)\n _name_scope_stack.pop()", "title": "" }, { "docid": "b7ad173b73c75a492960560c15d5ec8e", "score": "0.4499048", "text": "def name_scope(name_or_scope):\n if isinstance(name_or_scope, NameScope):\n scope = name_or_scope\n else:\n scope = current_name_scope().sub_scope(name_or_scope)\n\n scope.push_default()\n yield scope\n scope.pop_default()", "title": "" }, { "docid": 
"2f28026c0becec89b6959f01d86962dc", "score": "0.44987878", "text": "def _hproc_starting(self, (pname, _)):\n self._child=True\n self._reset()", "title": "" }, { "docid": "05a88dab44bd5169ae6bc233f846d88d", "score": "0.4489851", "text": "def test_Redeclare_function(self):\n input = Program([VarDecl(\"a\",IntType()),FuncDecl(Id(\"foo\"),[VarDecl(\"b\",IntType())],IntType(),Block([Return(Id(\"b\"))])),FuncDecl(Id(\"foo\"),[VarDecl(\"c\",IntType())],IntType(),Block([Return(Id(\"c\"))])),FuncDecl(Id(\"main\"),[],VoidType(),Block([Return()]))])\n expect = \"Redeclared Function: foo\"\n self.assertTrue(TestChecker.test(input,expect,401))", "title": "" }, { "docid": "8d627fe8c4081821aa5353c8d06fe626", "score": "0.4472862", "text": "def __start__(self, name):\n print_status(\"start('{0}')\".format(name))", "title": "" }, { "docid": "2bfd145769b3946b2e0469e9c899bd1a", "score": "0.44712824", "text": "def pull_locals(self, frame):\r\n for name in frame.identifiers.undeclared:\r\n self.writeline('l_%s = context.resolve(%r)' % (name, name))", "title": "" }, { "docid": "8f66d573ec817e0b7b5a90ee1223b7b6", "score": "0.44670215", "text": "def test_enclosing_scope(self):\n self.assertEqual(GLOBAL_SCOPE, enclosing_scope(GLOBAL_SCOPE))\n self.assertEqual(GLOBAL_SCOPE, enclosing_scope('scope'))\n self.assertEqual('base', enclosing_scope('base.subscope'))", "title": "" }, { "docid": "d6d3b35cf5980bec56e29166eb4235d3", "score": "0.44636184", "text": "def preloop(self):\n cmd.Cmd.preloop (self) ## sets up command completion\n self._hist = [] ## No history yet\n self._locals = {} ## Initialize execution namespace for user\n self._globals = {}", "title": "" }, { "docid": "2740350dc172c93f7a64e856a636b99c", "score": "0.44379637", "text": "def clopure_defn(self, name, varnames, expression, local_vars):\n if not isinstance(name, ClopureSymbol):\n raise ClopureRuntimeError(\"%s is not a symbol\" % str(name))\n if not isinstance(varnames, list):\n raise ClopureRuntimeError(\"the first argument must be a vector\")\n for arg in varnames:\n if not isinstance(arg, ClopureSymbol):\n raise ClopureRuntimeError(\"%s is not a symbol\" % str(arg))\n self.global_vars[name.symbol] = ClopureFunction(varnames, expression)", "title": "" }, { "docid": "39c0be8246be1b2ca1b08489fd705b8e", "score": "0.4435955", "text": "def my_Previous_function():\n global level\n level -= 2", "title": "" }, { "docid": "6701dbf6097ca0f5f8be0c476f5a08a1", "score": "0.4435912", "text": "def clone(self):\n new = Scope(self.token_dict, self.needs_closed_brace)\n new.scope_open = self.scope_open\n new.current_token = self.current_token\n for s in self.scopes:\n new.scopes.append(s.clone())\n return new", "title": "" }, { "docid": "c50d96979dcac01611af96d3293e58c1", "score": "0.44338983", "text": "def _visit_scope_node(self, node, pre_func, post_func):\n self._reset_name_scope(node)\n self.scope_node_stack.append(node)\n self._current_name_scope().set_father(self._nearest_function_scope())\n if pre_func:\n pre_func()\n self.generic_visit(node)\n if post_func:\n post_func()\n self.scope_node_stack.pop()", "title": "" }, { "docid": "8b3e1ac858d9372d8ebe1e2c96721e0b", "score": "0.44296944", "text": "def __enter__(self) -> \"ContextChange\":\n self.start()\n return self", "title": "" }, { "docid": "0b6d32c85db3d5cffd5911406751d73d", "score": "0.44230267", "text": "def delete_global(name):\n if _name(name) in globals():\n del globals()[_name(name)]", "title": "" }, { "docid": "83b4eaea8f6a3cd74709285a7523ba63", "score": "0.4419563", "text": "def _make_globals(self, 
sample):\n\t\tself._config.globals = sample", "title": "" }, { "docid": "ffa0e02094fddbe4e5bc375c973cad9f", "score": "0.44156894", "text": "def _make_skel_func(code, cell_count, base_globals=None):\n if base_globals is None:\n base_globals = {}\n elif isinstance(base_globals, string_types):\n base_globals_name = base_globals\n try:\n # First try to reuse the globals from the module containing the\n # function. If it is not possible to retrieve it, fallback to an\n # empty dictionary.\n if importlib is not None:\n base_globals = vars(importlib.import_module(base_globals))\n elif sys.modules.get(base_globals, None) is not None:\n base_globals = vars(sys.modules[base_globals])\n else:\n raise ImportError\n except ImportError:\n base_globals = _dynamic_modules_globals.get(\n base_globals_name, None)\n if base_globals is None:\n base_globals = _DynamicModuleFuncGlobals()\n _dynamic_modules_globals[base_globals_name] = base_globals\n\n base_globals['__builtins__'] = __builtins__\n\n closure = (\n tuple(_make_empty_cell() for _ in range(cell_count))\n if cell_count >= 0 else\n None\n )\n return types.FunctionType(code, base_globals, None, None, closure)", "title": "" }, { "docid": "37b9cbf36e4fd58c9227f16a6bf2b100", "score": "0.4401637", "text": "def _a_function2():\n global _a_global\n _a_global = 20", "title": "" }, { "docid": "37b9cbf36e4fd58c9227f16a6bf2b100", "score": "0.4401637", "text": "def _a_function2():\n global _a_global\n _a_global = 20", "title": "" }, { "docid": "c99f4a8df5ce60b84a53a35166a62e1f", "score": "0.43920267", "text": "def local_method(self):\n pass", "title": "" }, { "docid": "546ef35efc5709c400d6234272f40973", "score": "0.43702248", "text": "def callee(self, new_callee: vr.VariableReference) -> None:\n self._callee = new_callee", "title": "" }, { "docid": "b45f21607b6e7e1e3a37e007ae0b6301", "score": "0.43648675", "text": "def clear_all():\n gl = globals().copy()\n for var in gl:\n if var[0] == '_': continue\n if 'func' in str(globals()[var]): continue\n if 'module' in str(globals()[var]): continue\n del globals()[var]", "title": "" }, { "docid": "db1c87e9c222539637ddc015e63d91bf", "score": "0.4358809", "text": "def funcs(self):\n debug('GCS21Commands.funcs: reset')\n super(GCS21Commands, self.__class__).funcs.fdel(self)", "title": "" }, { "docid": "070232d0b0ca4348f02e6392fc9d295a", "score": "0.43565512", "text": "def cls():\n global CLEAR_NAME\n temp = os.system(CLEAR_NAME)", "title": "" }, { "docid": "f5f66060ee1a45e081145a73f6b91e18", "score": "0.43468493", "text": "def initScope():\n global scopeConnected\n if (scope.setup() == False):\n return\n scopeConnected = True\n scope.setPhaseMeas()\n scope.setFreqMeas()\n scope.setVoltMeas()", "title": "" }, { "docid": "b61c1d3f4ec6af20a3c7eb099d364c12", "score": "0.43400484", "text": "def stop_acquisition(self) -> None:\n self.__uhfli.daq.setInt(f'/{self._address}/scopes/0/enable', 0)\n self.__uhfli.scope.finish()", "title": "" }, { "docid": "189bb104f37124cf7f0fd5a12a9309ad", "score": "0.4336614", "text": "def take_a_break():\n a = 42", "title": "" }, { "docid": "636d616029ed1f09b8b5b64cc3ada5ee", "score": "0.43338904", "text": "def starting_over():\n pass", "title": "" }, { "docid": "a2c066fdc3ea1fb2adf5e17cebb9f7d4", "score": "0.43292928", "text": "def _insert_global(self, gv, gty):\n def on_disposal(wr, pop=self._globals.pop):\n # pop() is pre-looked up to avoid a crash late at shutdown on 3.5\n # (https://bugs.python.org/issue25217)\n pop(wr)\n try:\n gv = weakref.ref(gv, on_disposal)\n except TypeError:\n pass\n 
self._globals[gv] = gty", "title": "" }, { "docid": "54000fb24ba61887c0985203b1db1985", "score": "0.43282452", "text": "def dlbrace(self, lexer: MetamathLexer) -> None:\n self.scopes.push_scope()", "title": "" }, { "docid": "e0b85ae26d8246790106f629289e6d24", "score": "0.4326168", "text": "def _remove_global(self, gv):\n try:\n gv = weakref.ref(gv)\n except TypeError:\n pass\n del self._globals[gv]", "title": "" }, { "docid": "72953105c0d4cc6a73f49a40fa2c3780", "score": "0.43257242", "text": "def endName():", "title": "" }, { "docid": "72dbdc3518199a4e40638f46c8907761", "score": "0.43237847", "text": "def _reset(self):\n ob = self.env.reset()\n for _ in range(self.k): self.frames.append(ob)\n return self._observation()", "title": "" }, { "docid": "22e153a7dcaaa218272528382677807b", "score": "0.43196675", "text": "def start_over():\n pass", "title": "" }, { "docid": "4dadca32301907a1a2f8095c89af29d4", "score": "0.43187335", "text": "def foo_bar():", "title": "" }, { "docid": "fcd6a595f95cc0f561672d871e93ca6e", "score": "0.43160367", "text": "def a_function():\n\n def _a_function2():\n \"\"\"Another example function to illustrate global keyword in nested functions\"\"\"\n global _a_global\n _a_global = 20\n \n print(\"Before calling a_function, value of _a_global is \", _a_global)\n\n _a_function2()\n \n print(\"After calling _a_function2, value of _a_global is \", _a_global)", "title": "" }, { "docid": "b3aa81290ea66d8eea4dac368c639596", "score": "0.43149722", "text": "def invalidate_closure(self, name):\r\n \r\n self.invalidate(name, defname=name)", "title": "" }, { "docid": "1c70bb9ef627688c2d2faf3ee45ae565", "score": "0.43090728", "text": "async def loopvar_scope_restore(self, var_names, save_vars):\n for var_name in var_names:\n if var_name in save_vars:\n self.sym_table[var_name] = save_vars[var_name]\n else:\n del self.sym_table[var_name]", "title": "" }, { "docid": "3de95cba29eb748e4c7a9715a7b47636", "score": "0.4303551", "text": "def reconstructor(fn):\n fn.__sa_reconstructor__ = True\n return fn", "title": "" }, { "docid": "7a969d0ae87cc6af674340b77f4e5118", "score": "0.43024576", "text": "def reset(self):\r\n\r\n self.stack = [ ]", "title": "" }, { "docid": "c01ee2323905d4a7be629af961c51a35", "score": "0.43021622", "text": "def run_in_stack(func):\n Stack(func)", "title": "" }, { "docid": "c5a6d9a60faec5aa35965613e2350329", "score": "0.43017957", "text": "def __enter__(self):\n self._file = open(self._name, \"w\")\n self._file.write(\"Begin\\n\")\n return self", "title": "" } ]
0c45ad1ca37f394b9dd2683d932f02b0
Generates list of cal curve, qc, and blank samples to append in sample_queue()
[ { "docid": "3d72f771a08a44bbfed35e1b09d25845", "score": "0.499762", "text": "def cal_curve(state: int, number: int, platform: str, wash_no: int, pool_no: int):\n cal_list = list()\n std_plate_name = \"SP\" + str(ceil(number/7))\n\n row = list('ABCDEFG')[(number-1) % 7]\n if state < 2:\n wells = [0, 3, 6]\n if state == 0 and platform != \"Oxy\":\n wells.append(9)\n elif state == 2:\n wells = [1, 4, 7]\n else:\n wells = [2, 5, 8]\n\n for well in wells:\n inj_name = \"{}Std{}\".format(platform, str(well))\n well_name = row + str(well + 1)\n cal_list.append(tuple([inj_name, well_name, std_plate_name, 'QC']))\n cal_list.append(_wash(wash_no))\n if state < 2:\n cal_list.append(add_pool(pool_no))\n\n return cal_list", "title": "" } ]
[ { "docid": "9f2cb6765d7c16b8e03dd0016209394a", "score": "0.60471046", "text": "def fill_batch_queue(self):\r\n while True:\r\n\r\n # print 'hi'\r\n if self._hps.mode != 'decode' and self._hps.mode != 'calc_features':\r\n # Get bucketing_cache_size-many batches of Examples into a list, then sort\r\n inputs = []\r\n for _ in range(self._hps.batch_size * self._bucketing_cache_size):\r\n inputs.append(self._example_queue.get())\r\n inputs = sorted(inputs, key=lambda inp: inp.enc_len) # sort by length of encoder sequence\r\n\r\n # Group the sorted Examples into batches, optionally shuffle the batches, and place in the batch queue.\r\n batches = []\r\n for i in range(0, len(inputs), self._hps.batch_size):\r\n batches.append(inputs[i:i + self._hps.batch_size])\r\n if not self._single_pass:\r\n shuffle(batches)\r\n for b in batches:\t# each b is a list of Example objects\r\n self._batch_queue.put(Batch(b, self._hps, self._vocab))\r\n\r\n elif self._hps.mode == 'decode': # beam search decode mode\r\n ex = self._example_queue.get()\r\n batch = preprocess_batch(ex, self._hps.batch_size, self._hps, self._vocab)\r\n self._batch_queue.put(batch)\r\n else: # calc features mode\r\n inputs = []\r\n for _ in range(self._hps.batch_size * self._bucketing_cache_size):\r\n inputs.append(self._example_queue.get())\r\n # print \"_ %d\"%_\r\n # print \"inputs len%d\"%len(inputs)\r\n # Group the sorted Examples into batches, and place in the batch queue.\r\n batches = []\r\n for i in range(0, len(inputs), self._hps.batch_size):\r\n # print i\r\n batches.append(inputs[i:i + self._hps.batch_size])\r\n\r\n # if not self._single_pass:\r\n # shuffle(batches)\r\n for b in batches:\t# each b is a list of Example objects\r\n self._batch_queue.put(Batch(b, self._hps, self._vocab))", "title": "" }, { "docid": "84f52487f293baecc35910a2b6894c67", "score": "0.5943262", "text": "def consume_sample(self, producer_lock, queue, port, authkey):\n self.nextID = 0\n self.samples_cache = {}\n logwidth = np.log(1.0 - np.exp(-1.0 / float(self.Nlive)))\n if self.new_run==True:\n \"\"\"\n generate samples from the prior\n \"\"\"\n for i in xrange(self.Nlive):\n while True:\n while not(self.nextID in self.samples_cache):\n ID,acceptance,jumps,_internalvalues,values,logP,logL = queue.get()\n self.samples_cache[ID] = acceptance,jumps,_internalvalues,values,logP,logL\n acceptance,jumps,_internalvalues,values,logP,logL = self.samples_cache.pop(self.nextID)\n self.nextID +=1\n self.params[i].values = np.copy(values)\n self.params[i]._internalvalues = np.copy(_internalvalues)\n self.params[i].logP = np.copy(logP)\n self.params[i].logL = np.copy(logL)\n if self.params[i].logP!=-np.inf or self.params[i].logL!=-np.inf: break\n if self.verbose: sys.stderr.write(\"sampling the prior --> %.3f %% complete\\r\"%(100.0*float(i+1)/float(self.Nlive)))\n if self.verbose: sys.stderr.write(\"\\n\")\n\n self.condition = np.inf\n logL_array = np.array([p.logL for p in self.params])\n self.worst = logL_array.argmin()\n self.logLmin.value = np.min(logL_array)\n logLmin = np.float128(self.logLmin.value)\n self.logLmax = np.max(logL_array)\n logWt = logLmin+logwidth;\n logZnew = np.logaddexp(self.logZ, logWt)\n self.information = np.exp(logWt - logZnew) * self.params[self.worst].logL + np.exp(self.logZ - logZnew) * (self.information + self.logZ) - logZnew\n self.logZ = logZnew\n self.condition = np.logaddexp(self.logZ,self.logLmax-self.iteration/(float(self.Nlive))-self.logZ)\n line = \"\"\n for n in self.params[self.worst].par_names:\n 
line+='%.30e\\t'%self.params[self.worst].values[n]\n line+='%30e\\n'%self.params[self.worst].logL\n self.output.write(line)\n self.active_index =self._select_live()\n self.copy_params(self.params[self.active_index],self.params[self.worst])\n while self.condition > self.tolerance:\n while not(self.nextID in self.samples_cache):\n ID,acceptance,jumps,_internalvalues,values,logP,logL = queue.get()\n self.samples_cache[ID] = acceptance,jumps,_internalvalues,values,logP,logL\n acceptance,jumps,_internalvalues,values,logP,logL = self.samples_cache.pop(self.nextID)\n self.rejected+=1\n self.nextID += 1\n self.params[self.worst].values = np.copy(values)\n self.params[self.worst]._internalvalues = np.copy(_internalvalues)\n self.params[self.worst].logP = np.copy(logP)\n self.params[self.worst].logL = np.copy(logL)\n if self.params[self.worst].logL>self.logLmin.value:\n logL_array = np.array([p.logL for p in self.params])\n self.worst = logL_array.argmin()\n self.logLmin.value = np.min(logL_array)\n logLmin = np.float128(self.logLmin.value)\n self.logLmax = np.max(logL_array)\n logWt = logLmin+logwidth;\n logZnew = np.logaddexp(self.logZ, logWt)\n self.information = np.exp(logWt - logZnew) * self.params[self.worst].logL + np.exp(self.logZ - logZnew) * (self.information + self.logZ) - logZnew\n self.logZ = logZnew\n self.condition = np.logaddexp(self.logZ,self.logLmax-self.iteration/(float(self.Nlive))-self.logZ)\n line = \"\"\n for n in self.params[self.worst].par_names:\n line+='%.30e\\t'%self.params[self.worst].values[n]\n line+='%30e\\n'%self.params[self.worst].logL\n self.output.write(line)\n self.active_index=self._select_live()\n self.copy_params(self.params[self.active_index],self.params[self.worst])\n if self.verbose: sys.stderr.write(\"%d: n:%4d acc:%.3f H: %.2f logL %.5f --> %.5f dZ: %.3f logZ: %.3f logLmax: %.5f logLinj: %.5f cache: %d\\n\"%(self.iteration,jumps,acceptance,self.information,logLmin,self.params[self.worst].logL,self.condition,self.logZ,self.logLmax,self.logLinj,len(self.samples_cache)))\n logwidth-=1.0/float(self.Nlive)\n self.iteration+=1\n# work_queue.put((self.jobID,self.logLmin))\n# self.jobID += 1\n# if work_queue.empty():\n # put as many None as sampler processes\n# for _ in xrange(NUMBER_OF_PRODUCER_PROCESSES): work_queue.put(\"pill\")\n self.logLmin.value = 999\n sys.stderr.write(\"\\n\")\n # final adjustments\n i = 0\n logL_array = [p.logL for p in self.params]\n logL_array = np.array(logL_array)\n idx = logL_array.argsort()\n logL_array = logL_array[idx]\n for i in idx:\n line = \"\"\n for n in self.params[i].par_names:\n line+='%.30e\\t'%self.params[i].values[n]\n line+='%30e\\n'%self.params[i].logL\n self.output.write(line)\n i+=1\n self.output.close()\n self.evidence_out.write('%.5f %.5f %.5f\\n'%(self.logZ,self.logLmax,self.logLinj))\n self.evidence_out.close()\n# manager.shutdown()\n sys.stderr.write(\"process %s, exiting\\n\"%os.getpid())\n return 0", "title": "" }, { "docid": "48d26e46dc713fcbe9f736b8055d2ded", "score": "0.5869864", "text": "def Acquisition_Waveform( oscilloscope, necessarySamples, path, samples=100, rnd_sample=1_000, min_peaks=2, min_separation=10 ):\n\n trigger_value, trigger_slope, y_scale = check_parameters(oscilloscope=oscilloscope)\n\n '''Retrieve the conversion parameters from the oscilloscope'''\n converter = units_conversion_parameters(oscilloscope=oscilloscope)\n\n '''Convert slope to a number and trigger to scope_units'''\n trigger_slope_number = trigger_slope_value(trigger_slope)\n trigger_in_units = 
convert_y_to_units(value_in_volts=trigger_value/1000, converter_df=converter)\n \n acquired_samples = 0 # Total amount of samples collected\n saved_csv = 1 # Total of saved csv files \n files = [] # File names\n\n while acquired_samples < necessarySamples:\n\n myprint(f'Try number {saved_csv}. {round(100*acquired_samples/necessarySamples,2)}% ({acquired_samples}/{necessarySamples}).')\n\n waveforms = run_acquisition(\n oscilloscope=oscilloscope,\n samples=samples,\n rnd_sample=rnd_sample,\n min_peaks=min_peaks,\n min_separation=min_separation,\n trigger=trigger_in_units,\n trigger_slope=trigger_slope_number,\n )\n\n file = f'{path}/file_{saved_csv}.csv'\n waveforms.to_csv(file) # save the partial waveforms DataFrame\n files.append(file) # add the name to the list\n\n acquired_samples += waveforms.shape[0]\n saved_csv += 1\n\n print_scope_config(trigger_value, trigger_slope, y_scale)", "title": "" }, { "docid": "fdee848520b370d1c8af3cc65310cc64", "score": "0.5799954", "text": "def run_acquisition( oscilloscope, trigger, trigger_slope=-1, samples=100, rnd_sample=1000, min_peaks=2, min_separation=10 ):\n\n sleep(1)\n\n waveforms_storage = pd.DataFrame()\n time_storage = []\n counter = 1\n\n while waveforms_storage.shape[1] < samples:\n \n total_events = waveforms_storage.shape[1]\n temp_df = pd.DataFrame()\n temp_time = [] \n myprint(f' Run {counter}. {round(100*total_events/samples , 1)}%. ({total_events}/{samples}).')\n\n temp_df, temp_time = get_rnd_sample(\n oscilloscope=oscilloscope, \n rnd_sample=rnd_sample,\n )\n\n waveforms_analyzed, time_analyzed = analyze_rnd_sample(\n df_data=temp_df, \n time_data=temp_time, \n counter=counter, \n min_peaks=min_peaks, \n min_separation=min_separation, \n trigger=trigger, \n trigger_slope=trigger_slope\n )\n \n waveforms_storage = pd.concat( [waveforms_storage,waveforms_analyzed], axis=1 )\n time_storage.extend(time_analyzed)\n\n '''If not finished yet, try again'''\n counter += 1\n\n '''Add correct label to the columns and add the time column'''\n waveforms_storage.columns = [ ('event_'+str(i)) for i in range(waveforms_storage.shape[1]) ]\n # waveformList.index = [i for i in range(waveformList.shape[0])]\n df = waveforms_storage.T #This command is only to add the time_storage as a line in an easy way. This is undone later\n df.insert( 0, 'time_epoch', np.array(time_storage) )\n\n return(df)", "title": "" }, { "docid": "5c942d4d15f51c0a3a5cf44f0de19ce8", "score": "0.56957054", "text": "def fill_example_queue(self):\r\n\r\n if self._example_generator is None:\r\n input_gen = self.text_generator(\r\n data.example_generator(self._data_path, self._single_pass, self._cnn_500_dm_500, is_original=('with_coref' not in self._data_path)))\r\n else:\r\n input_gen = self.text_generator(self._example_generator)\r\n if self._hps.pg_mmr and self._hps.ssi_data_path != '': # if use pg_mmr and bert\r\n print (util.bcolors.OKGREEN + \"Loading SSI from BERT at %s\" % os.path.join(self._hps.ssi_data_path, 'ssi.pkl') + util.bcolors.ENDC)\r\n with open(os.path.join(self._hps.ssi_data_path, 'ssi.pkl')) as f:\r\n ssi_triple_list = pickle.load(f)\r\n # ssi_list = [ssi_triple[1] for ssi_triple in ssi_triple_list]\r\n else:\r\n ssi_triple_list = None\r\n counter = 0\r\n while True:\r\n try:\r\n (article,\r\n abstracts, doc_indices_str, raw_article_sents, ssi, article_lcs_paths_list) = next(input_gen) # read the next example from file. 
article and abstract are both strings.\r\n except StopIteration: # if there are no more examples:\r\n logging.info(\"The example generator for this example queue filling thread has exhausted data.\")\r\n if self._single_pass:\r\n logging.info(\r\n \"single_pass mode is on, so we've finished reading dataset. This thread is stopping.\")\r\n self._finished_reading = True\r\n if ssi_triple_list is not None and counter < len(ssi_triple_list):\r\n raise Exception('Len of ssi list (%d) is greater than number of examples (%d)' % (len(ssi_triple_list), counter))\r\n break\r\n else:\r\n raise Exception(\"single_pass mode is off but the example generator is out of data; error.\")\r\n if ssi_triple_list is not None:\r\n if counter >= len(ssi_triple_list):\r\n raise Exception('Len of ssi list (%d) is less than number of examples (>=%d)' % (len(ssi_triple_list), counter))\r\n ssi_length_extractive = ssi_triple_list[counter][2]\r\n ssi = ssi_triple_list[counter][1]\r\n ssi = ssi[:ssi_length_extractive]\r\n\r\n article = article\r\n abstracts = [abstract for abstract in abstracts]\r\n if type(doc_indices_str) != str:\r\n doc_indices_str = doc_indices_str\r\n raw_article_sents = [sent for sent in raw_article_sents]\r\n\r\n all_abstract_sentences = [[sent.strip() for sent in data.abstract2sents(\r\n abstract)] for abstract in abstracts]\r\n if len(all_abstract_sentences) != 0:\r\n abstract_sentences = all_abstract_sentences[0]\r\n else:\r\n abstract_sentences = []\r\n doc_indices = [int(idx) for idx in doc_indices_str.strip().split()]\r\n # join_separator = ' [SEP] ' if self._hps.sep else ' '\r\n if self._hps.by_instance: # if we are running iteratively on only instances (a singleton/pair + a summary sentence), not the whole article\r\n for abs_idx, abstract_sentence in enumerate(abstract_sentences):\r\n inst_ssi = ssi[abs_idx]\r\n if len(inst_ssi) == 0:\r\n continue\r\n inst_abstract_sentences = abstract_sentence\r\n inst_raw_article_sents = util.reorder(raw_article_sents, inst_ssi)\r\n inst_article = ' '.join([' '.join(util.process_sent(sent, whitespace=True)) for sent in inst_raw_article_sents])\r\n inst_doc_indices = [0] * len(inst_article.split())\r\n inst_article_lcs_paths_list = article_lcs_paths_list[abs_idx]\r\n\r\n if len(inst_article) == 0: # See https://github.com/abisee/pointer-generator/issues/1\r\n logging.warning(\r\n 'Found an example with empty article text. 
Skipping it.\\n*********************************************')\r\n elif len(inst_article.strip().split()) < 3 and self._hps.skip_with_less_than_3:\r\n print(\r\n 'Article has less than 3 tokens, so skipping\\n*********************************************')\r\n elif len(inst_abstract_sentences.strip().split()) < 3 and self._hps.skip_with_less_than_3:\r\n print(\r\n 'Abstract has less than 3 tokens, so skipping\\n*********************************************')\r\n else:\r\n inst_example = Example(None, [inst_abstract_sentences], all_abstract_sentences, None, inst_raw_article_sents, None, [inst_article_lcs_paths_list], self._vocab, self._hps)\r\n self._example_queue.put(inst_example)\r\n else:\r\n example = Example(None, abstract_sentences, all_abstract_sentences, None, raw_article_sents, ssi, article_lcs_paths_list, self._vocab, self._hps) # Process into an Example.\r\n self._example_queue.put(example) # place the Example in the example queue.\r\n\r\n # print \"example num\", counter\r\n counter += 1", "title": "" }, { "docid": "a938a7a1c506eba9e2c05c3f0fbc9b6b", "score": "0.5571372", "text": "def gen(self):\n self.queue = []\n print_list_obj = menu_c.PrintList(self.values)\n self.resized_result = print_list_obj.result\n self.queue.append(print_list_obj)\n if self.kwargs:\n self.process_kwargs()\n temp = []\n for obj in self.disp_order:\n for elem in self.queue:\n if isinstance(elem, obj):\n temp.append(elem)\n self.queue = temp\n # Input:\n self.prompt_obj = menu_c.Prompt(self.text)", "title": "" }, { "docid": "a396242f5e43a75ea64603efb9c6b632", "score": "0.554498", "text": "def dynamic_spectroscopy(G, tau, R, dt, startprint, simultime, fo1, fo2, fo3, k_m1, k_m2, k_m3, A1, A2, A3, printstep = 1, Ge = 0.0, Q1=100, Q2=200, Q3=300, H=2.0e-19, z_step = 1):\n if z_step == 1:\n z_step = A1*0.05 #default value is 5% of the free oscillation amplitude\n zeq = []\n peakF = []\n maxdepth = []\n amp = []\n phase = []\n Ediss = []\n Virial = []\n \n tip_a = []\n Fts_a = []\n xb_a = []\n zb = A1*1.1\n \n while zb > 0.0:\n t, tip, Fts, xb = GenMaxwell_jit(G, tau, R, dt, startprint, simultime, fo1, fo2, fo3, k_m1, k_m2,k_m3, A1, A2, A3, zb, printstep, Ge, Q1, Q2, Q3, H)\n A,phi = amp_phase(t, tip, fo1)\n Ets = e_diss(tip, Fts, dt, fo1)\n fts_peak = Fts[np.argmax(Fts)]\n tip_depth = xb[np.argmax(tip)] -xb[np.argmin(tip)]\n Vts = v_ts(tip-zb, Fts, dt)\n \n #Attaching single values to lists\n zeq.append(zb)\n peakF.append(fts_peak)\n maxdepth.append(tip_depth)\n amp.append(A)\n phase.append(phi)\n Ediss.append(Ets)\n Virial.append(Vts)\n \n #attaching 1D arrays to lists\n tip_a.append(tip)\n Fts_a.append(Fts)\n xb_a.append(xb)\n \n zb -= z_step\n return np.array(amp), np.array(phase), np.array(zeq), np.array(Ediss), np.array(Virial), np.array(peakF), np.array(maxdepth), t, np.array(tip_a), np.array(Fts_a), np.array(xb_a)", "title": "" }, { "docid": "38ffe2a1bc0a3cbbcbd48e801ab5bf03", "score": "0.551103", "text": "def gen_qa(self):\n\n # Loop on slits to generate stats on chi^2\n med_chis = []\n std_chis = []\n for slitidx in range(self.slits.nslits):\n _, med, std = self.calc_chi_slit(slitidx)\n med_chis.append(med)\n std_chis.append(std)\n # Save\n self.med_chis = np.array(med_chis)\n self.std_chis = np.array(std_chis)", "title": "" }, { "docid": "604a97ea0583ca9ce3ebe99a19958909", "score": "0.5498277", "text": "def __init__(self, opt):\n self.num_sample_per_q = int(num_per_q * TRAIN_TEST_SPLIT)\n self.batch_size = opt.batch_size\n self.num_q = 22\n self.SCALE = SCALE\n\n self.input_func = TPCH_GET_INPUT\n 
fnames = [fname for fname in os.listdir(opt.data_dir) if 'csv' in fname]\n fnames = sorted(fnames,\n key=lambda fname: int(fname.split('temp')[1][:-4]))\n\n data = []\n all_groups, all_groups_test = [], []\n\n self.grp_idxes = []\n self.num_grps = [0] * self.num_q\n for i, fname in enumerate(fnames):\n temp_data = self.get_all_plans(opt.data_dir + '/' + fname)\n\n ##### this is for all samples for this query template #####\n enum, num_grp = self.grouping(temp_data)\n groups = [[] for _ in range(num_grp)]\n for j, grp_idx in enumerate(enum):\n groups[grp_idx].append(temp_data[j])\n all_groups += groups\n\n ##### this is for train #####\n self.grp_idxes += enum[:self.num_sample_per_q]\n self.num_grps[i] = num_grp\n data += temp_data[:self.num_sample_per_q]\n\n ##### this is for test #####\n test_groups = [[] for _ in range(num_grp)]\n for j, grp_idx in enumerate(enum[self.num_sample_per_q:]):\n test_groups[grp_idx].append(temp_data[self.num_sample_per_q+j])\n all_groups_test += test_groups\n\n self.dataset = data\n self.datasize = len(self.dataset)\n print(\"Number of groups per query: \", self.num_grps)\n\n if not opt.test_time:\n self.mean_range_dict = self.normalize()\n\n with open('mean_range_dict.pickle', 'wb') as f:\n pickle.dump(self.mean_range_dict, f)\n else:\n with open(opt.mean_range_dict, 'rb') as f:\n self.mean_range_dict = pickle.load(f)\n\n print(self.mean_range_dict)\n\n self.test_dataset = [self.get_input(grp) for grp in all_groups_test]\n self.all_dataset = [self.get_input(grp) for grp in all_groups]", "title": "" }, { "docid": "c7641c01d869999e3c87f6f86fe75fdf", "score": "0.54866886", "text": "def populate_test_queue():\n from mscanner.core.Storage import Storage\n pmids = list(iofuncs.read_pmids(rc.corpora / \"Test\" / \"gdsmall.txt\"))\n task = Storage(\n allfeatures = True,\n captcha = \"orange\",\n dataset = \"gd_wmqia_valid\",\n hidden = False,\n limit = 500,\n mindate = 19700101,\n minscore = 0.0, \n numnegs = 1000, \n operation = \"validate\", \n submitted = time.time())\n write_descriptor(rc.queue_path/task.dataset, pmids, task)\n task.operation = \"retrieval\"\n task.dataset = \"gd_wmqia_query\"\n task.submitted += 5\n write_descriptor(rc.queue_path/task.dataset, pmids, task)", "title": "" }, { "docid": "1d4b5f58920641eb508acae69328e26e", "score": "0.5466867", "text": "def __init__(self): \n self.values = []\n self.test_qwk = -1", "title": "" }, { "docid": "77238a370e12e537c8d4e9262b45d3c8", "score": "0.5464624", "text": "def get_data(self):\n # Get list of traces in form \"name, S21, name, S32,...\".\n formats_s_params = yield self.query('CALC:PAR:CAT?')\n formats_s_params = formats_s_params.strip('\"').split(',')\n\n # yield self.write('FORM REAL,64') # Do we need to add ',64'?\n yield self.write('FORM ASC,0')\n\n avg_mode = yield self.average_mode()\n if avg_mode:\n avgCount = yield self.average_points()\n yield self.restart_averaging()\n yield self.write('SENS:SWE:GRO:COUN %i' %avgCount)\n yield self.write('ABORT;SENS:SWE:MODE GRO')\n else:\n # Stop the current sweep and immediately send a trigger.\n yield self.write('ABORT;SENS:SWE:MODE SING')\n\n # Wait for the measurement to finish.\n # yield self.query('*OPC?', timeout=24*units.h) <-- old way, blocked GPIB chain entirely\n measurement_finished = False \n yield self.write('*OPC') # will trigger bit in ESR when measurement finished\n while measurement_finished == False:\n yield sleep(0.05) # polling rate = 20Hz \n opc_bit = yield self.query('*ESR?') # poll ESR for measurement completion\n opc_bit = 
int(opc_bit) & 0x1\n if (opc_bit == 1):\n measurement_finished = True\n \n # Pull the data.\n data = ()\n pair = ()\n unit_multipliers = {'R': 1, 'I': 1,\n 'M': units.dB, 'P': units.deg}\n # The data will come in with a header in the form\n # '#[single char][number of data points][data]'.\n for idx, meas in enumerate(formats_s_params[::2]):\n yield self.write('CALC:PAR:SEL \"%s\"' %meas)\n data_string = yield self.query('CALC:DATA? FDATA')\n d = np.array(data_string.split(','), dtype=float)\n pair += (d * unit_multipliers[meas[0]]),\n if idx % 2:\n data += (pair),\n pair = ()\n returnValue(data)", "title": "" }, { "docid": "a7a842af3790839561b3dcfd4e597a60", "score": "0.54497504", "text": "def _generate_waveforms(self):\n # find out if CZ pulses are used, if so pre-calc envelope to save time\n pulses_cz = set()\n # find set of all CZ pulses in use\n for step in self.sequences:\n for qubit, gate in enumerate(step.gates):\n pulse = self._get_pulse_for_gate(qubit, gate)\n if pulse is not None and pulse.shape == PulseShape.CZ:\n pulses_cz.add(pulse)\n # once we've gone through all pulses, pre-calculate the waveforms\n for pulse in pulses_cz:\n pulse.calculate_cz_waveform()\n\n for step in self.sequences:\n for qubit, gate in enumerate(step.gates):\n pulse = self._get_pulse_for_gate(qubit, gate)\n if pulse is None:\n continue\n if pulse.pulse_type == PulseType.Z:\n waveform = self._wave_z[qubit]\n delay = self.wave_z_delays[qubit]\n elif pulse.pulse_type == PulseType.XY:\n waveform = self._wave_xy[qubit]\n delay = self.wave_xy_delays[qubit]\n elif pulse.pulse_type == PulseType.READOUT:\n waveform = self.readout_iq\n delay = 0\n\n # get the range of indices in use\n if (pulse.pulse_type == PulseType.READOUT and not\n self.readout_match_main_size):\n # special case for readout if not matching main wave size\n start = 0.0\n middle = self._round(step.t_middle - step.t_start)\n end = self._round(step.t_end - step.t_start)\n else:\n start = self._round(step.t_start + delay)\n middle = self._round(step.t_middle + delay)\n end = self._round(step.t_end + delay)\n\n indices = np.arange(\n max(np.floor(start * self.sample_rate), 0),\n min(np.ceil(end * self.sample_rate), len(waveform)),\n dtype=int\n )\n\n # return directly if no indices\n if len(indices) == 0:\n continue\n\n # calculate time values for the pulse indices\n t = indices / self.sample_rate\n max_duration = end - start\n if step.align == 'center':\n t0 = middle\n elif step.align == 'left':\n t0 = middle - (max_duration - pulse.total_duration()) / 2\n elif step.align == 'right':\n t0 = middle + (max_duration - pulse.total_duration()) / 2\n # calculate the pulse waveform for the selected indices\n waveform[indices] += gate.get_waveform(pulse, t0, t)", "title": "" }, { "docid": "b1bc4c55154ed2f1adbf472220202110", "score": "0.5418461", "text": "def __init__(self, q0, q0dot, qf, qfdot, tf, rate):\n self.q0 = q0\n self.qf = qf\n self.tf = tf\n self.q0dot = q0dot\n self.qfdot = qfdot\n self.rate = rate\n self.A = []\n self.q = []\n self.qdot = []\n self.qddot = []", "title": "" }, { "docid": "7a7eb8e62070c6c79ef56ab5fcfd785b", "score": "0.5373222", "text": "def buildGeneratorHeatCurve(self):\n self.genpoints, Q = [], 0\n # 0, pre-inlet\n self.genpoints.append((Q,self.T_gen_pre))\n # 1, Generator preheat\n Q += self.m_concentrate * self.h_gen_inlet \\\n - self.m_concentrate * self.h_gen_pre\n self.genpoints.append((Q,self.T_gen_inlet)) \n # 2, Generator proper\n Q += self.m_concentrate * self.h_gen_outlet \\\n + self.m_refrig * 
self.h_gen_vapor_outlet \\\n - self.m_pump * self.h_gen_inlet\n self.genpoints.append((Q,self.T_gen_outlet))", "title": "" }, { "docid": "f5f9f85211be63cc2951c3e1707320da", "score": "0.53575164", "text": "def getQ(self):\n val = [0 for i in range(9)]\n\n val[0],val[1],val[2],val[3],val[4],val[5],val[6],val[7],val[8] = self.getValues()\n \n self.now = time() * 1000000.0\n self.now = self.now - self.startTime\n self.sampleFreq = 1.0 / ((self.now - self.lastUpdate) / 1000000.0)\n self.lastUpdate = self.now\n \n #gyro values are expressed in deg/sec, the * math.pi/180 will convert it to radians/sec\n self.AHRSupdate(val[3] * math.pi/180, val[4] * math.pi/180, val[5] * math.pi/180, val[0], val[1], val[2], val[6], val[7], val[8])\n\n q = [0 for i in range(4)]\n\n q[0] = self.q0\n q[1] = self.q1\n q[2] = self.q2\n q[3] = self.q3\n\n return q[0],q[1],q[2],q[3]", "title": "" }, { "docid": "c36180010e0ad0fd5590892d62c5a906", "score": "0.5326652", "text": "def _update_good_samples(self):\n logger = self.logger\n\n for cond in range(self.M):\n # Sample costs estimate.\n if self._hyperparams['algo_hyperparams']['bad_costs']:\n cs = np.zeros_like(self.cur[cond].cs)\n for bc in self._hyperparams['algo_hyperparams']['bad_costs']:\n for ss in range(cs.shape[0]): # Over samples\n cs[ss, :] += self.cur[cond].cost_compo[ss][bc]\n # If this specific cost is zero, then use the total cost\n if np.sum(cs) == 0:\n cs = self.cur[cond].cs\n else:\n cs = self.cur[cond].cs\n\n sample_list = self.cur[cond].sample_list\n\n # Get index of sample with best Return\n n_good = self._hyperparams['algo_hyperparams']['n_good_samples']\n if n_good == cs.shape[0]:\n best_indeces = range(n_good)\n else:\n best_indeces = get_smaller_idx(np.sum(cs, axis=1), n_good)\n\n # TODO: Maybe it is better to put this step directly in exp_buffer\n samples_to_add = [sample_list[good_index] for good_index in best_indeces]\n costs_to_add = [cs[good_index] for good_index in best_indeces]\n\n # Get the experience buffer\n exp_buffer = self._good_duality_info[cond].experience_buffer\n\n # Add to buffer\n exp_buffer.add(samples_to_add, costs_to_add)\n\n # TODO: CHeck if it is better to fit to the whole buffer\n # Get the desired number of elements to fit the traj\n trajs, costs = exp_buffer.get_trajs_and_costs(n_good)\n\n # TODO: Find a better way than create always SampleList\n self._good_duality_info[cond].sample_list = SampleList(trajs)\n self._good_duality_info[cond].samples_cost = costs", "title": "" }, { "docid": "3ed910555487f2e3d70eacf85d26b344", "score": "0.53170747", "text": "def from_fastqc_data(self,fastqc_data):\n for line in FastqcData(fastqc_data).data('Per base sequence quality'):\n if line.startswith('#'):\n continue\n i,mean,median,q25,q75,p10,p90 = line.strip().split('\\t')\n self.mean.append(self._convert_value(mean))\n self.median.append(self._convert_value(median))\n self.q25.append(self._convert_value(q25))\n self.q75.append(self._convert_value(q75))\n self.p10.append(self._convert_value(p10))\n self.p90.append(self._convert_value(p90))", "title": "" }, { "docid": "f0dc0d9a341fca71e585a2e860b6cfc5", "score": "0.5316957", "text": "def gen(self):\n self.queue = []\n if self.kwargs:\n self.process_kwargs()\n # items\n for item in self.val.items():\n (key, val) = item\n if key[-1] == 'S':\n self.substitute[key[:-1]] = val\n else:\n self.prod[key] = val\n if self.substitute:\n self.comp()\n else:\n for item in self.prod.items():\n # modify display\n (key, val) = item\n g = self.colors['green']\n end = self.colors['endc']\n txt = 
f\"{end}{g}{key} :{end} {val}{end}\"\n self.queue.append(menu_c.PrintLine(txt))\n # Input:\n self.prompt_obj = menu_c.Prompt(self.text)", "title": "" }, { "docid": "5b6a0fd82c54d3d1b54ab1f5eb8acef7", "score": "0.5289355", "text": "def __gen__(ext='*.epf'):\n import matplotlib.pyplot as plt\n import numpy as np\n import glob\n files = glob.glob(ext)\n masterdata = []\n for f in files:\n datasets, max_khi = __read__(f)\n # need to trim up until max_khi though ...\n for i in range(len(datasets)):\n if len(datasets[i])!=19:\n print('The resolution along chi axis is not 5degree.')\n print('There is %i elements along the chi axis.'%len(datasets))\n input('>>>')\n raise IOError\n else: dkhi = 5.\n tiny = 0.001\n ang = np.arange(0, 90+tiny, dkhi)\n\n # maxi khi trim the datasets[i]\n\n avg = __avg__(datasets[i])\n DefocusCurve = __NormReci__(avg) #avg: a list type variable\n plt.plot(ang, DefocusCurve)\n ax=plt.gca(); ax.set_xlabel(r'$\\chi$')\n #print DefocusCurve\n pass", "title": "" }, { "docid": "930b7c5d7de7bb382a560ab6431f8f92", "score": "0.527773", "text": "def process(self):\n boxnm = self.processor.currbox / 10.0\n self.apllist.append(boxnm[0]*boxnm[1]/float(self.nlipid/2))\n self.vpllist.append((boxnm[0]*boxnm[1]*boxnm[2] -\n self.watvol*self.nwat)/float(self.nlipid))\n zpos = self.phosphorsel.positions[:,2] - self.lipidsel.positions[:,2].mean()\n for lipdig in np.digitize(zpos,self.edges) :\n self.density[lipdig] += 1\n self.sumcoords += self.phosphorsel.positions[:,:2]\n self.sumcoords2 += self.phosphorsel.positions[:,:2]*self.phosphorsel.positions[:,:2]\n self.records.append(MDRecord(self.processor.currtime,[self.apllist[-1],self.vpllist[-1],self._calc_dhh(),self._calc_rmsf()]))\n\n if self.gridout is not None :\n mid = self.phosphorsel.center_of_geometry()\n sel_low = self.phosphorsel.positions[:,2] < mid[2]\n sel_upp = np.logical_not(sel_low)\n coords_upp = self.phosphorsel.positions[sel_upp,:]\n coords_low = self.phosphorsel.positions[sel_low,:]\n self.grid_low.accumulate(coords_low-mid,\n self._calc_zdist(coords_low, coords_upp))\n self.grid_upp.accumulate(coords_upp-mid,\n self._calc_zdist(coords_upp, coords_low))\n if self.protsel is not None :\n self.grid_prot.accumulate(self.protsel.positions-mid, self.protone)", "title": "" }, { "docid": "7f1e3bc2ff92742fa7796d67579e8140", "score": "0.52594763", "text": "def test_multi_composite_curve_analysis(self):\n analyses = []\n\n group_names = [\"group_A\", \"group_B\"]\n setups = [\"setup_A\", \"setup_B\"]\n for group_name, setup in zip(group_names, setups):\n analysis = CurveAnalysis(\n models=[\n ExpressionModel(\n expr=\"amp * cos(2 * pi * freq * x) + b\",\n name=\"m1\",\n ),\n ExpressionModel(\n expr=\"amp * sin(2 * pi * freq * x) + b\",\n name=\"m2\",\n ),\n ],\n name=group_name,\n )\n analysis.set_options(\n filter_data={\"setup\": setup},\n data_subfit_map={\n \"m1\": {\"type\": \"cos\"},\n \"m2\": {\"type\": \"sin\"},\n },\n result_parameters=[\"amp\"],\n data_processor=DataProcessor(input_key=\"counts\", data_actions=[Probability(\"1\")]),\n )\n analyses.append(analysis)\n\n group_analysis = CompositeCurveAnalysis(analyses)\n group_analysis.analyses(\"group_A\").set_options(p0={\"amp\": 0.3, \"freq\": 2.1, \"b\": 0.5})\n group_analysis.analyses(\"group_B\").set_options(p0={\"amp\": 0.5, \"freq\": 3.2, \"b\": 0.5})\n group_analysis.set_options(plot=False)\n\n amp1 = 0.2\n amp2 = 0.4\n b1 = 0.5\n b2 = 0.5\n freq1 = 2.1\n freq2 = 3.2\n\n x = np.linspace(0, 1, 100)\n y1a = amp1 * np.cos(2 * np.pi * freq1 * x) + b1\n y2a = amp1 * 
np.sin(2 * np.pi * freq1 * x) + b1\n y1b = amp2 * np.cos(2 * np.pi * freq2 * x) + b2\n y2b = amp2 * np.sin(2 * np.pi * freq2 * x) + b2\n\n # metadata must contain key for filtering, specified in filter_data option.\n test_data1a = self.single_sampler(x, y1a, type=\"cos\", setup=\"setup_A\")\n test_data2a = self.single_sampler(x, y2a, type=\"sin\", setup=\"setup_A\")\n test_data1b = self.single_sampler(x, y1b, type=\"cos\", setup=\"setup_B\")\n test_data2b = self.single_sampler(x, y2b, type=\"sin\", setup=\"setup_B\")\n\n expdata = ExperimentData(experiment=FakeExperiment())\n expdata.add_data(test_data1a.data())\n expdata.add_data(test_data2a.data())\n expdata.add_data(test_data1b.data())\n expdata.add_data(test_data2b.data())\n expdata.metadata[\"meas_level\"] = MeasLevel.CLASSIFIED\n\n result = group_analysis.run(expdata).block_for_results()\n amps = result.analysis_results(\"amp\")\n\n # two entries are generated for group A and group B\n self.assertEqual(len(amps), 2)\n self.assertEqual(amps[0].extra[\"group\"], \"group_A\")\n self.assertEqual(amps[1].extra[\"group\"], \"group_B\")\n self.assertAlmostEqual(amps[0].value.n, 0.2, delta=0.1)\n self.assertAlmostEqual(amps[1].value.n, 0.4, delta=0.1)", "title": "" }, { "docid": "4582f0a7272acb38c828d8498433f1f4", "score": "0.5256397", "text": "def build_samples(graphs, resources, opts):\n # count slot number\n slot_num_dict = defaultdict(list)\n for r in resources:\n slot_num_dict[r.slot_num].append(r)\n sample_data_list = []\n baselines = []\n for baseline in opts.baselines:\n if baseline == 'storm':\n baselines.append(StormHeuristic(opts))\n elif baseline == 'flink':\n baselines.append(FlinkHeuristicNew(opts))\n elif baseline == 'random':\n baselines.append(RandomStrategy(opts))\n else:\n print(f'please specify correct baseline name: {baseline}')\n return\n\n for g in graphs:\n provide_resources = []\n # dsp.max_parallelism <= res.slot_num <= dsp.max+parallelism + ?\n max_slot_num_greater_than_max_parall = opts.max_slot_num_greater_than_max_parall\n for i in range(g.max_parallelism, g.max_parallelism + max_slot_num_greater_than_max_parall + 1):\n provide_resources.extend(slot_num_dict[i])\n sample_num = min(len(provide_resources), opts.resources_per_dag)\n selected_resources = random.sample(provide_resources, sample_num)\n for r in selected_resources:\n bl_throughputs = []\n bl_delays = []\n for baseline in baselines:\n bl_place = baseline.place(g, r)\n if bl_place is not None:\n throughput, delay = get_qos(g, r, bl_place)\n assert throughput != -1 and delay != -1\n if throughput == -1 or delay == -1:\n print('error! 
hx debug')\n bl_throughputs.append(throughput)\n bl_delays.append(delay)\n sample_data_list.append(SampleData(g, r, bl_throughputs, bl_delays))\n return sample_data_list", "title": "" }, { "docid": "84fc81aac2f1ebf64b91954668bd32e9", "score": "0.5251785", "text": "def _create_samples(self):\n return [SignalAlignSample(working_folder=self.working_folder, **s) for s in self.args.samples]", "title": "" }, { "docid": "fd4ddd0f21f0b172f7daf970656e4968", "score": "0.52499443", "text": "def create_fastq_list(self):\n short_names = self.ffastq.keys()\n\n for sample in short_names:\n fq_name = os.path.join(self.param['outdir'], sample+'/'+sample+'_fqlist.txt')\n if not self.param['newanalysis']:\n if file_exists(fq_name):\n print 'Removing pre-existing fqlist file {0}'.format(fq_name)\n os.remove(fq_name)\n with open(fq_name, 'a') as fqh:\n for i in range(0, self.ffastq[sample]['nfiles']):\n if self.param['paired']:\n fqh.write('{0}\\t{1}\\n'.format(self.ffastq[sample]['file1'][i], self.ffastq[sample]['file2'][i]))\n else:\n fqh.write('{0}\\n'.format(self.ffastq[sample]['file1'][i]))\n\n print 'Created sample fastqlists'\n pass", "title": "" }, { "docid": "c68ebaf9ec56695e5f917f058d402f13", "score": "0.5246451", "text": "def createSpikes(self, freerates = True, inputtau=10e-3):\n r = np.random.rand(self.nchannels,self.duration)/self.dt\n self.spikes = []\n inputtau_ms=int(inputtau/self.dt)\n if self.noiseset == True:\n bsp=self.rates+self.noise>r\n else:\n bsp=self.rates>r\n for ch in xrange(self.nchannels):\n chlen=len(bsp[ch,:])\n\n ##refractory period\n #for i,sp in enumerate(bsp[ch,:]):\n #if sp==1 and i<chlen-1:\n #period=min(inputtau_ms-1,chlen-i-1)\n #bsp[ch,i+1:i+1+period]=0\n\n chspikes=bsp[ch,:].nonzero()[0]*self.dt\n #refractory period\n #fchspikes=[]\n #lastsp=0.\n #for sp in chspikes:\n #if sp-lastsp>inputtau or lastsp<inputtau:\n #fchspikes.append(sp)\n #lastsp=sp\n\n self.spikes.append(chspikes)\n #TO DO: add jitter on top & add refrectoriness factor\n\n if freerates:\n self.rates = None\n if self.noiseset == True:\n self.noise = None", "title": "" }, { "docid": "c4fface219102dd0b76c621ca3753e98", "score": "0.52454627", "text": "def generator_data():\n while True:\n data_set = []\n target_set = []\n for loop in range(150):\n\n number = np.int(np.random.uniform(0, len(all_events)))\n _event, _probe, _number_of_events = all_events[number]\n new_data = pd.read_pickle(\n f'{_event.year}_{_event.month}_{_event.day}_{_event.hour}_{_event.minute}_{_event.second}_{_probe}.pkl')\n\n # probe_data = get_probe_data(_probe, _event.strftime('%d/%m/%Y'),\n # start_hour=(_event - timedelta(hours=2)).hour, duration=4)\n #\n # probe_data.create_processed_column('b_magnitude')\n # probe_data.create_processed_column('vp_magnitude')\n #\n # # probe_data.data.dropna(inplace=True)\n # new_data = probe_data.data.resample('40S').bfill()\n # new_data.fillna(method='ffill', inplace=True)\n # new_data.dropna(inplace=True)\n time_before = np.random.uniform(240, 6960) # 6*40, 7200-240\n data = new_data[_event - timedelta(seconds=time_before): _event + timedelta(seconds=7200 - time_before)]\n vec_b, vec_v = data['b_magnitude'], data['vp_magnitude']\n vec_b_x, vec_v_x = data['Bx'], data['vp_x']\n vec_b_y, vec_v_y = data['By'], data['vp_y']\n vec_b_z, vec_v_z = data['Bz'], data['vp_z']\n _b_v_array = [vec_b / np.max(vec_b),\n vec_v / np.max(vec_v),\n vec_b_x / np.max(vec_b_x),\n vec_v_x / np.max(vec_v_x),\n vec_b_y / np.max(vec_b_y),\n vec_v_y / np.max(vec_v_y),\n vec_b_z / np.max(vec_b_z),\n vec_v_z / 
np.max(vec_v_z)]\n if len(_b_v_array[0]) != 178:\n change = 178 - len(_b_v_array[0])\n if np.sign(change) > 0:\n for i in range(len(_b_v_array)):\n _b_v_array[i] = list(_b_v_array[i]) + [0 for _ in range(change)]\n else:\n for i in range(len(_b_v_array)):\n _b_v_array[i] = list(_b_v_array[i])[:change]\n _b_v_array = np.array(_b_v_array).transpose((1, 0))\n target = 1 if _number_of_events else 0\n data_set.append(_b_v_array)\n target_set.append(target)\n yield (np.array(data_set), np.array(target_set))", "title": "" }, { "docid": "db09a6abcfd18354f6444f2b5cadcfe4", "score": "0.5226188", "text": "def _init_waveforms(self):\n # To keep the first pulse delay, use the smallest delay as reference.\n min_delay = np.min([self.wave_xy_delays[:self.n_qubit],\n self.wave_z_delays[:self.n_qubit]])\n\n # commented this part out to allow for negative pulse delay\n # self.wave_xy_delays -= min_delay\n # self.wave_z_delays -= min_delay\n\n max_delay = np.max([self.wave_xy_delays[:self.n_qubit],\n self.wave_z_delays[:self.n_qubit]])\n\n # find the end of the sequence\n # only include readout in size estimate if all waveforms have same size\n if self.readout_match_main_size:\n end = np.max([s.t_end for s in self.sequences]) + max_delay\n else:\n end = np.max([s.t_end for s in self.sequences[0:-1]]) + max_delay\n\n # create empty waveforms of the correct size\n if self.trim_to_sequence:\n self.n_pts = int(np.ceil(end * self.sample_rate)) + 1\n if self.n_pts % 2 == 1:\n # Odd n_pts give spectral leakage in FFT\n self.n_pts += 1\n for n in range(self.n_qubit):\n self._wave_xy[n] = np.zeros(self.n_pts, dtype=np.complex)\n self._wave_z[n] = np.zeros(self.n_pts, dtype=float)\n self._wave_gate[n] = np.zeros(self.n_pts, dtype=float)\n\n # Waveform time vector\n self.t = np.arange(self.n_pts) / self.sample_rate\n\n # readout trig and i/q waveforms\n if self.readout_match_main_size:\n # same number of points for readout and main waveform\n self.n_pts_readout = self.n_pts\n else:\n # different number of points for readout and main waveform\n self.n_pts_readout = 1 + int(\n np.ceil(self.sample_rate *\n (self.sequences[-1].t_end - self.sequences[-1].t_start)))\n if self.n_pts_readout % 2 == 1:\n # Odd n_pts give spectral leakage in FFT\n self.n_pts_readout += 1\n\n self.readout_trig = np.zeros(self.n_pts_readout, dtype=float)\n self.readout_iq = np.zeros(self.n_pts_readout, dtype=np.complex)", "title": "" }, { "docid": "a60994662e3ac23c80d94e132b4b1985", "score": "0.5211222", "text": "def _get_data(self, instr, sources, use_pbar=False, decompose_dch=True):\n\n encoding_table = {\n WaveType.MATH: (\"FPBinary\", 16, \"d\"),\n WaveType.DIGITAL: (\"RIBinary\", 16, \"h\"),\n WaveType.ANALOG: (\"RIBinary\", 16, \"h\"),\n }\n channel_table = {\n WaveType.MATH: lambda x: x,\n WaveType.DIGITAL: lambda x: \"_\".join([x.split(\"_\")[0], \"DALL\"]),\n WaveType.ANALOG: lambda x: x,\n }\n\n # remember the state of the acquisition system and then stop acquiring waveforms\n acq_state = instr.query(\"ACQuire:STATE?\").strip()\n instr.write(\"ACQuire:STATE STOP\")\n\n # keep track of the sources so that we only download each digital channel only once\n downloaded_sources = []\n\n # if tqdm is installed, display a progress bar\n if use_pbar and (\"tqdm\" in globals()):\n pbar = tqdm\n else:\n pbar = _disabled_pbar\n\n # Process one signal source at a time\n for source in pbar(sources, desc=\"Downloading\", unit=\"Wfm\"):\n\n # Only download super channels and math waveforms once\n # Digital supper channels will produce 8 sources each\n if 
source.split(\"_\")[0] not in downloaded_sources:\n\n # Keep track of each super channel and math source that has been handled\n downloaded_sources.append(source.split(\"_\")[0])\n\n # Determine the type of waveform and set key interface parameters\n wave_type = self._classify_waveform(source)\n channel = channel_table[wave_type](source)\n encoding, bit_nr, datatype = encoding_table[wave_type]\n\n # Switch to the source and setup the data encoding\n instr.write(\"data:source {}\".format(channel))\n instr.write(\"data:encdg {}\".format(encoding))\n instr.write(\"WFMOUTPRE:BIT_NR {}\".format(bit_nr))\n\n # Horizontal scale information\n x_scale = self._get_xscale(instr)\n if x_scale is not None:\n\n # Issue the curve query command\n instr.write(\"curv?\")\n\n # Read the waveform data sent by the instrument\n source_data = instr.read_binary_values(\n datatype=datatype, is_big_endian=True, expect_termination=True\n )\n\n # Normal analog channels must have the vertical scale and offset applied\n if wave_type is WaveType.ANALOG:\n offset = float(instr.query(\"WFMOutpre:YZEro?\"))\n scale = float(instr.query(\"WFMOutpre:YMUlt?\"))\n source_data = [scale * i + offset for i in source_data]\n\n # Format and return the result\n if wave_type is WaveType.DIGITAL:\n\n # Digital channel to be decomposed into separate bits\n if decompose_dch:\n for bit in range(8):\n bit_channel = \"{}_D{}\".format(source.split(\"_\")[0], bit)\n\n # if the bit channel is available, decompose the data\n if bit_channel in sources:\n bit_data = [\n (i >> (2 * bit)) & 1 for i in source_data\n ]\n yield (bit_channel, bit_data, x_scale, None)\n\n # Digital channel to be converted into an 8-bit word\n else:\n digital = []\n for i in source_data:\n a = (\n (i & 0x4000) >> 7\n | (i & 0x1000) >> 6\n | (i & 0x400) >> 5\n | (i & 0x100) >> 4\n | (i & 0x40) >> 3\n | (i & 0x10) >> 2\n | (i & 0x4) >> 1\n | i & 0x1\n )\n digital.append(a)\n yield (source.split(\"_\")[0], digital, x_scale, None)\n\n elif wave_type is WaveType.ANALOG:\n # Include y-scale information with analog channel waveforms\n y_scale = self._get_yscale(instr, source)\n yield (source, source_data, x_scale, y_scale)\n\n elif wave_type is WaveType.MATH:\n # Y-scale information for MATH channels is not supported at this time\n yield (source, source_data, x_scale, None)\n\n else:\n raise Exception(\n \"It should have been impossible to execute this code\"\n )\n\n # Restore the acquisition state\n instr.write(\"ACQuire:STATE {}\".format(acq_state))", "title": "" }, { "docid": "0ccac9f9d255b7622f758eb785c8f653", "score": "0.5196919", "text": "def get_data(self):\n avg_mode = yield self.average_mode()\n if avg_mode:\n yield self.write('TRIG:AVER 1')\n \n # Start the measurement.\n yield self.write('INIT1:CONT 0')\n yield self.write('ABOR')\n yield self.write('INIT1')\n\n # Wait for the measurement to finish.\n sweep_time = yield self.get_sweep_time()\n number_of_averages = yield self.average_points()\n yield sleep(sweep_time * number_of_averages)\n\n # Wait for the measurement to finish.\n yield self.query('*OPC?', timeout=24*units.h)\n print(self.query('*OPC?', timeout=24*units.h))\n\n # Pull the data.\n yield self.write('FORM:DATA ASC')\n data = ()\n pair = ()\n unit_multipliers = {'REAL': 1, 'IMAG': 1,\n 'MLOG': units.dB, 'PHAS': units.deg}\n num_params = yield self.query('CALC1:PAR:COUNT?')\n for k in range(int(num_params)):\n yield self.write('CALC1:PAR%d:SEL' %(k + 1)) \n format = (yield self.query('CALC1:FORM?'))\n data_string = (yield self.query('CALC1:DATA:FDAT?'))\n 
d = np.array(data_string.split(','), dtype=float)\n # Select only every other element.\n d = d[::2]\n pair += (d * unit_multipliers[format]),\n if k % 2:\n data += (pair),\n pair = ()\n returnValue(data)", "title": "" }, { "docid": "a232f9546c1266e43f858ee21a048183", "score": "0.5173059", "text": "def precalculateAll(self):\n \n # ok, compute the FFT\n ff = abs(fft.fft(self.fulldata))\n lbnd = max(ff) * 10e-12\n self.fft = ff = where(ff < lbnd, 10e-12, ff) \n \n hff = ff[:len(ff)/2]\n \n self.lfft = lff = 10*log10(ff)\n \n self.m1 = m1 = argmax(hff); \n self.m2 = m2 = argmax(hstack([hff[:m1-1], array([0, 0, 0]), hff[m1+2:]]))\n \n self.tow1 = tow1 = 2*pi*self.rate*float(m1)/self.nsamples\n self.tow2 = tow2 = 2*pi*self.rate*float(m2)/self.nsamples\n \n (self.w1, self.a1, self.b1), (self.w2, self.a2, self.b2), self.c0 = \\\n Sinefit.doubleSinefit4matrix(self.fulldata, self.rate**-1, tow1, tow2)\n \n self.amplitude1 = hypot(self.a1, self.b1)\n self.amplitude2 = hypot(self.a2, self.b2)\n self.phase1 = arctan2(self.b1, self.a1)\n self.phase2 = arctan2(self.b2, self.a2)\n \n # cut right now\n N1 = floor(0.5 + 2*pi*self.rate/self.w1)\n N2 = floor(0.5 + 2*pi*self.rate/self.w2)\n N0 = lcm(N1, N2)\n \n N = self.nsamples -(self.nsamples%N0)\n \n if N0 % 2:\n print N0, 'is odd'\n while ((N/N1 -N/N2) % 2) == 1:\n print (N/N1 -N/N2), 'is odd'\n N -= N0\n else:\n print N0, 'is even'\n \n ff = abs(fft.fft(self.fulldata))\n lbnd = max(ff) * 10e-12\n self.fft = ff = where(ff < lbnd, 10e-12, ff) \n \n hff = ff[:len(ff)/2]\n \n self.lfft = lff = 10*log10(ff)\n \n self.m1 = m1 = argmax(hff); \n self.m2 = m2 = argmax(hstack([hff[:m1-1], array([0, 0, 0]), hff[m1+2:]]))\n \n self.tow1 = tow1 = 2*pi*self.rate*float(m1)/self.nsamples\n self.tow2 = tow2 = 2*pi*self.rate*float(m2)/self.nsamples\n \n (self.w1, self.a1, self.b1), (self.w2, self.a2, self.b2), self.c0 = \\\n Sinefit.doubleSinefit4matrix(self.fulldata, self.rate**-1, tow1, tow2)\n \n self.amplitude1 = hypot(self.a1, self.b1)\n self.amplitude2 = hypot(self.a2, self.b2)\n self.phase1 = arctan2(self.b1, self.a1)\n self.phase2 = arctan2(self.b2, self.a2)\n \n delta = abs(self.w1 - self.w2)\n \n i1 = min([self.w1, self.w2]) - delta\n i2 = max([self.w1, self.w2]) + delta\n \n temp1, temp2 = self.toi(self.w1), self.toi(self.w2)\n fw1, fw2 = max(self.fft[temp1-1:temp1+2]), max(self.fft[temp2-1:temp2+2])\n \n temp1, temp2 = self.toi(i1), self.toi(i2)\n fi1, fi2 = max(self.fft[temp1-1:temp1+2]), max(self.fft[temp2-1:temp2+2])\n \n print fw1, fw2\n print fi1, fi2\n meaningful = hypot(fw1, fw2)\n interferences = hypot(fi1, fi2)\n \n self.imd = 10*log10(meaningful/interferences)", "title": "" }, { "docid": "09d0a87e4422484b81466a580be74bcc", "score": "0.5171079", "text": "def sample():\n with tf.variable_scope(\"scheduled_sampling\", reuse=tf.AUTO_REUSE):\n output_items = []\n for item_gt, item_gen in zip(groundtruth_items, generated_items):\n output_items.append(scheduled_sampling_func(item_gt, item_gen))\n return output_items", "title": "" }, { "docid": "759701728916653a65423380b883d7b0", "score": "0.51482266", "text": "def generate_thread(settings, captions, queue):\n\tsynth = vocalsynth(cachedir = settings.cachedir)\n\tfor caption in captions:\n\t\t# generate audio\n\t\tcaption.audiofile = synth.generate(caption)\n\t\tqueue.put(caption)\n\n\tprint \"*** all audio processing done ***\"", "title": "" }, { "docid": "bc1717052be998f1b4771c61c7464c5e", "score": "0.51393455", "text": "def compile_qc(path, application=\"seqcap\", **kw):\n output_data = 
{'stdout':StringIO(), 'stderr':StringIO()}\n ### find_samples excrutiatingly slow for multi-sample projects where we can have > 100k files...\n flist = find_samples(path, **kw)\n srm_l = []\n for f in flist:\n LOG.debug(\"Opening config file {}\".format(f))\n with open(f) as fh:\n runinfo_yaml = yaml.load(fh)\n for info in runinfo_yaml['details']:\n if info.get(\"multiplex\", None):\n for mp in info.get(\"multiplex\"):\n sample_kw = dict(path=os.path.dirname(f), flowcell=runinfo_yaml.get(\"fc_name\", None), date=runinfo_yaml.get(\"fc_date\", None), lane=info.get(\"lane\", None), barcode_name=mp.get(\"name\", None), sample_prj=kw.get(\"project\"), barcode_id=mp.get('barcode_id', None), sequence=mp.get('sequence', None))\n obj = SampleRunMetrics(**sample_kw)\n srm_l.append(obj)\n else:\n sample_kw = dict(path=os.path.dirname(f), flowcell=runinfo_yaml.get(\"fc_name\", None), date=runinfo_yaml.get(\"fc_date\", None), lane=info.get(\"lane\", None), barcode_name=info.get(\"description\", None), sample_prj=kw.get(\"project\"), barcode_id=None, sequence=None)\n obj = SampleRunMetrics(**sample_kw)\n obj.read_picard_metrics()\n srm_l.append(obj)\n qcdata = []\n output_data = _qc_info_header(kw.get(\"project\"), application, output_data)\n for s in srm_l:\n qcdata.append(_srm_to_qc(s))\n for v in qcdata:\n y = [str(x) for x in assess_qc(v, application)]\n output_data[\"stdout\"].write(\"\".join(y) + \"\\n\")\n return output_data", "title": "" }, { "docid": "2ee12f3eeb86472f8be480b9abb893cd", "score": "0.5130843", "text": "def __init__(self):\n self.q = []\n self.min_vals = []", "title": "" }, { "docid": "3b77f477be323cb00473848eaa5234f6", "score": "0.51279444", "text": "def exam2():\n q1 = QFactory.ball(pos=(-10.,0.,0.),mass=10.*qmag.m,charge=+10.*qmag.u,radius=0.3)\n q2 = QFactory.ball(pos=(+10.,0.,0.),mass=10.*qmag.m,charge=+10.*qmag.u,radius=0.3)\n q3 = QFactory.ball(pos=(0.,-10.,0.),mass=10.*qmag.m,charge=-10.*qmag.u,radius=0.3)\n q4 = QFactory.ball(pos=(0.,+10.,0.),mass=10.*qmag.m,charge=-10.*qmag.u,radius=0.3)\n q3.qdin.fix = True; q4.qdin.fix = True;\n q3.qvis.color = vis.color.red; q4.qvis.color = vis.color.red\n f = QFrame()\n map(lambda q: f.add(q),[q1,q2,q3,q4])\n vis.rate(1)\n raw_input('enter key to continue ')\n f.run(10000*f.dt,rate=1000)\n return f", "title": "" }, { "docid": "b7ba04a6cc0b1feef68faf2bebb00cc6", "score": "0.51278895", "text": "def analyseData(self, samples, mean, variance, printName, allSampleMean = [], allCovariance = []):\n \n print('Analysing sampled data...')\n\n helper=numpy.shape(samples)\n dimension=helper[0]\n numberOfSamples=helper[1]\n # For parallelization (number of processors)\n procs = 2\n results = numpy.array([0.0 for i in range(procs)])\n # Analyse the first component!\n dim=0\n # Maximal number of lag_k autocorrelations\n maxS=int((numberOfSamples-1)/3)\n # lag_k autocorrelation\n autocor = [0.0 for i in range(maxS)]\n autocor[0]=variance[dim][dim]\n # sample frequency\n m=1\n # modified sample variance = autocor[0] + 2 sum_{i}(autocor[i])\n msvar=0.0\n # SEM = sqrt( msvar/numberOfSamples )\n sem=0.0\n # ACT = m * msvar / autocor[0]\n act=0.0\n # ESS = m * numberOfSamples / act\n ess=0.0\n \n temp=0.0\n\n flagSEM=True\n flagACT=True\n flagESS=True\n\n # Calculate lag_k for following k's\n evaluation = range( maxS )\n evaluation = evaluation[1:]\n evaluation2 = numpy.arange(numberOfSamples)\n for lag in evaluation:\n\n evaluation2 = evaluation2[:-1]\n # Do this expensive calculation parallel\n output = mp.Queue()\n morsel = 
numpy.array_split(evaluation2, procs)\n processes = []\n for i in range(procs):\n processes.append( mp.Process(target = self.calculateACF, args = (samples[dim], mean[dim], lag, morsel[i], output, ) ) )\n for p in processes:\n p.start()\n for p in processes:\n p.join()\n results = [output.get() for p in processes]\n tmp = numpy.sum(results)\n autocor[lag] = (numberOfSamples-lag)**-1 * tmp\n # noise affects autocorrelation -> stop when near zero\n if (autocor[lag-1]+autocor[lag])<=0.0001:\n maxS = lag\n break\n percentage = format(100*lag/maxS, '.2f')\n print('Processing: {0}%'.format(percentage), end='\\r')\n \n # Calculate the modified sample variance\n evaluation = range( maxS-1 )\n evaluation = evaluation[1:] \n msvar += autocor[0]\n # Plot Standard error of mean\n allSem = [0.0 for i in range(maxS-1)]\n allSem[0]= 1.0\n for lag in evaluation:\n msvar += 2*autocor[lag]\n # Calculate the autocovariance function by dividing by variance and multiplying a factor\n autocor[lag] = autocor[lag]/autocor[0]\n # Sample standard error of the Mean\n allSem[lag] = ( math.sqrt( abs(msvar)/ lag ) )\n # Standard Error of the Mean\n sem = math.sqrt(abs(msvar)/numberOfSamples)\n # AutoCorrelation Time\n act = m*msvar/autocor[0]\n # Effective Sample Size\n ess = m*numberOfSamples/act\n # Normalizing autocor[0] for plots\n autocor[0] = 1.0\n\n print('Modified sample variance: {0}'.format(msvar)) \n print('Standard Error of the Mean: {0}'.format(sem)) \n print('AutoCorrelation Time: {0}'.format(act)) \n print('Effective Sample Size: {0}'.format(ess)) \n\n #Print some results\n if True:\n if True:\n iterations=range(numberOfSamples)\n pylab.plot(iterations, allSampleMean, label='Sample mean')\n pylab.ylabel('Sample mean', fontsize=10)\n pylab.xlabel('Iterations', fontsize=10)\n pylab.ylim([-3.0, 3.0])\n pylab.grid(True)\n newPrintName = printName.replace(\".png\", \"_mean.png\")\n pylab.savefig(newPrintName)\n pylab.clf() \n\n pylab.plot(iterations, allCovariance, label='Sample covariance')\n pylab.ylabel('Sample covariance', fontsize=10)\n pylab.xlabel('Iterations', fontsize=10)\n pylab.ylim([2.0, 8.0])\n pylab.grid(True)\n newPrintName = printName.replace(\".png\", \"_cov.png\")\n pylab.savefig(newPrintName)\n pylab.clf() \n\n lag=range(maxS-1)\n pylab.plot(lag, autocor[:maxS-1], 'r', label='Autocorrelation')\n pylab.ylabel('ACF', fontsize=10)\n pylab.xlabel('Lag', fontsize=10)\n pylab.grid(True)\n newPrintName = printName.replace(\".png\", \"_1.png\")\n pylab.savefig(newPrintName)\n pylab.clf()\n\n pylab.plot(lag, allSem[:maxS-1], 'r', label='sem')\n pylab.ylabel('Standard error of the mean', fontsize=10)\n pylab.xlabel('Lag', fontsize=10)\n pylab.grid(True)\n newPrintName = printName.replace(\".png\", \"_sem.png\")\n pylab.savefig(newPrintName)\n pylab.clf()\n\n iterations=range(numberOfSamples)\n pylab.plot(iterations, samples[dim], label='First dimension of samples')\n pylab.ylabel('First dim of samples', fontsize=10)\n pylab.xlabel('Iterations', fontsize=10)\n #pylab.ylim([-6.5, 6.5])\n pylab.grid(True)\n newPrintName = printName.replace(\".png\", \"_2.png\")\n pylab.savefig(newPrintName)\n pylab.clf()\n\n num_bins=100\n n, bins, patches=pylab.hist(samples[dim], num_bins, normed=1, facecolor='green', alpha=0.5, label='Histogram of the first dimension')\n #------------- CHOOSE APPROPRIATE FIRST DIMENSION DENSITY ---------------------------\n # add a 'best fit' line\n #y = 1.0 * mlab.normpdf(bins, 0.0, 1) + 0.0 * mlab.normpdf(bins, 3.0, 1)\n #y = 1.0 * mlab.normpdf(bins, 0.0, 1)\n y = 0.5 * 
mlab.normpdf(bins, -2.0, 1.0) + 0.5 * mlab.normpdf(bins, 2.0, 1.0)\n #-----------------------------------------------------------------------------------\n plt.plot(bins, y, 'r--')\n pylab.xlabel('First dimension of samples', fontsize=10)\n pylab.ylabel('Relative frequency', fontsize=10)\n #pylab.xlim([-6.0, 6.0])\n pylab.grid(True)\n newPrintName = printName.replace(\".png\", \"_3.png\")\n pylab.savefig(newPrintName)\n pylab.clf()\n # Do the scatter plot and histogram\n if True:\n newPrintName = printName.replace(\".png\", \"Plot.png\")\n self.scatterPlot3D(samples, newPrintName)\n newPrintName = printName.replace(\".png\", \"Histo.png\")\n self.Histogram3D(samples, newPrintName)\n newPrintName = printName.replace(\".png\", \"_short.png\")\n iterations=range(1000)\n pylab.plot(iterations, samples[dim][:1000], label='First dimension of samples')\n pylab.ylabel('First dim of samples', fontsize=10)\n #pylab.ylim([-6.5, 6.5])\n pylab.grid(True)\n pylab.savefig(newPrintName)\n pylab.close('all')\n # For the function calculateSEM: first: False, second: True.\n if True:\n return act\n if False:\n return sem", "title": "" }, { "docid": "3e82e0b6b22b890cc515c58134d3ae85", "score": "0.51272666", "text": "def gen_samples(self, *args, **kwargs):\n raise MethodImplementationError(self,'gen_samples')", "title": "" }, { "docid": "dde580e176a7e9ec17cbee7c387cfd14", "score": "0.5123873", "text": "def gen_q_list(q_bands):\n lis =[]\n for i in range(-q_bands,q_bands+1):\n lis.append(i)\n return lis #the q numbers in a list ranging from [-q,,..,0,..,q]", "title": "" }, { "docid": "336339ed5bc0af948eea9e60ee8f4fe1", "score": "0.5120881", "text": "def _calc_features_on_thread(self,first_seq,last_seq,queue):\n\n out = []\n for i in range(first_seq,last_seq):\n tmp = []\n for f in self._feature_functions:\n tmp.append(f(self._sequences[i]))\n out.append(np.concatenate(tmp))\n\n queue.put((first_seq,last_seq,out))", "title": "" }, { "docid": "328457c741845c59d94d5b143984d1b6", "score": "0.51182187", "text": "def buildCalcTuples(self, **kwargs):\n\n # TODO Refactor this, by getting ct's first.\n # A little tricky to test throughly:\n # is always used, even for pools of 1 with the same name as the sample.\n # refGenome (matching all, none, or some of default vaules)\n # Sample Sets: SubNum 1 Fcid, SubNum multFC, custom sample set,\n # default pool, custom pool\n # Homogenous/Heterogeneous submissions\n # 30 cases\n self.calcTuples = []\n doPooling = False\n if 'pool' in kwargs:\n if isinstance(kwargs['pool'], dict):\n self.buildCalcTupleForArbitraryPooling(**kwargs) # Case III\n return\n elif kwargs['pool'] is False:\n kwargs.pop('pool')\n else:\n doPooling = True\n assert kwargs['pool'] is True, \\\n \"allowed vals for pool: True (False) or dct\"\n elif 'sampList' in kwargs:\n self.buildCalcTupleForSampList(**kwargs)\n return\n if 'refGenome' in kwargs:\n assert 'subNum' in kwargs or 'sampList' in kwargs, \\\n \"must specify single subNum with refG\"\n #refGenome = kwargs.pop('refGenome')\n # else:\n #refGenome = None\n # TODO fcidOnly or MultFC? two descriptions of same thing\n ''' if not ('multFC' in kwargs and kwargs['multFC']):\n kwargs['fcid'] = self.fcid\n kwargs['multFC'] = False '''\n\n subNumToSamps = self.getSubNumToSampsDict(**kwargs)\n # TODO this shouldn't be here. 
find When samples are first imported\n # from stemcell and lower them there.\n for samp in self.db.tables['Samp']:\n self.db.tables['Samp'][samp]['genome'] = self.db.tables[\n 'Samp'][samp]['genome'].lower()\n\n for subNum in subNumToSamps:\n if 'subNum' in kwargs:\n kwargs.pop('subNum')\n for genome in set([self.db.getAttrFromSamp('genome', el)\n for el in subNumToSamps[subNum]]):\n subSampList = sorted([el for el in subNumToSamps[subNum] if\n self.db.getAttrFromSamp('genome', el)\n == genome])\n prj = self.db.getAttrFromSamp('project_name', subSampList[0])\n pool = {}\n if doPooling:\n pool = self.getPoolFromSubSamps(subSampList)\n\n ct = calc_tuple.CalcTuple(\n db=self.db,\n node='Collate',\n subNum=subNum,\n Sample=subSampList,\n pool=pool,\n poolId=sorted(pool.keys()),\n fcid=self.fcid,\n **kwargs)\n\n self.calcTuples.append(ct)\n try:\n subNumStr = \"{0:04d}\".format(int(subNum))\n except BaseException:\n subNumStr = subNum\n\n _outdir = p_join(self.dirPref,\n \"Sub_{0}_{1}_{2}__{3}\".format(\n subNumStr,\n prj,\n ct.getRefGenome(),\n ct.hsh[:config.ODIR_HSH_LEN]))\n if self.tryToLoadFinishedCalc(ct):\n continue\n self.buildCalcInfoWithErrAndWrn(ct, _outdir)\n ct.putMetadata(Sample=subSampList)\n subprocess.call(['mkdir', '-p', '-m', '777',\n ct.getMetadata('outdir')])", "title": "" }, { "docid": "b0b89b34f0d8627adba9300e04f5d548", "score": "0.5115811", "text": "def collect(self):\n result = []\n for func, arg in self.func_queue:\n result = func(arg)\n \n \n return result", "title": "" }, { "docid": "7945ab28a9891e0db9427bfc833bb590", "score": "0.51088536", "text": "def get_data(self, queue, end_ts=MAXSIZE):\n # pylint: disable=too-many-branches\n queue_name = queue['name']\n data_queue = queue['data']\n queue_type = queue['type']\n self.logger.trace(\"TopicManager starting queue %s size is: %i\" %(queue_name, len(data_queue)))\n if self.collect_wind_across_loops:\n collector = self.collector\n else:\n collector = CollectData(self.collected_fields, self.collected_units)\n\n if self.collect_observations:\n observation_collector = CollectData(None, self.collected_units)\n\n while data_queue:\n if data_queue[0]['data']['dateTime'] > end_ts:\n self.logger.trace(\"TopicManager leaving queue: %s size: %i content: %s\" %(queue_name, len(data_queue), data_queue[0]))\n break\n payload = data_queue.popleft()\n if queue_type == 'collector':\n fieldname = payload['fieldname']\n self.logger.trace(\"TopicManager processing wind data %s %s: %s.\"\n %(fieldname, weeutil.weeutil.timestamp_to_string(payload['data']['dateTime']), to_sorted_string(payload)))\n data = collector.add_data(fieldname, payload['data'])\n elif self.collect_observations:\n data = observation_collector.add_dict(payload['data'])\n else:\n data = payload['data']\n\n if data:\n self.logger.debug(\"TopicManager data-> outgoing %s: %s\"\n %(queue_name, to_sorted_string(data)))\n yield data\n\n if not self.collect_wind_across_loops:\n data = collector.get_data()\n if data:\n self.logger.debug(\"TopicManager data-> outgoing wind %s: %s\"\n % (queue_name, to_sorted_string(data)))\n yield data\n\n if self.collect_observations:\n data = observation_collector.get_data()\n if data:\n self.logger.debug(\"TopicManager data-> outgoing collected %s: %s\"\n % (queue_name, to_sorted_string(data)))\n yield data", "title": "" }, { "docid": "f74c2e74c21ad7895d6a45927d3dd524", "score": "0.5108168", "text": "def _generate(self, samples):\n\n def gen(f, samples):\n f.write(','.join(['f' + str(i) for i in range(10)]) + ',' + \"label\\n\")\n\n # 
candidate_data_10 = [\n # ['0', '1', '1', '1', '0', '0', '1', '1', '0', '0']]\n # candidate_data_11 = [\n # ['1', '0', '1', '0', '1', '0', '1', '1', '0', '0']]\n\n # candidate_data_12 = [\n # ['1', '1', '0', '0', '1', '1', '1', '1', '0', '0']]\n\n # candidate_data_00 = [\n # ['0', '0', '0', '0', '0', '0', '1', '1', '0', '0']]\n\n # candidate_data_01 = [\n # ['1', '1', '1', '1', '1', '1', '1', '1', '0', '0']]\n\n # candidate_data_02 = [\n # ['0', '1', '0', '1', '0', '1', '1', '1', '0', '0']]\n\n # candidate_data_20 = [\n # ['0', '0', '0', '0', '0', '0', '1', '1', '0', '0']]\n # candidate_data_21 = [\n # ['0', '0', '0', '0', '0', '0', '1', '1', '0', '0']]\n\n # candidate_data_22 = [\n # ['0', '0', '0', '0', '0', '0', '1', '1', '0', '0']]\n\n candidate_data_1 = [\n ['0', '1', '1', '1', '0', '0', '1', '1', '0', '0']]\n\n candidate_data_0 = [['0', '1', '0', '0', '0', '0', '1', '1', '0', '0'],\n ['0', '0', '1', '0', '0', '0', '1', '1', '0', '0'],\n ['0', '0', '0', '1', '0', '0', '1', '1', '0', '0'],\n ['0', '1', '1', '0', '0', '0', '1', '1', '0', '0'],\n ['0', '1', '0', '1', '0', '0', '1', '1', '0', '0'],\n ['0', '0', '1', '1', '0', '0', '1', '1', '0', '0'],\n ['0', '0', '0', '0', '0', '0', '1', '1', '0', '0'],\n ['0', '1', '0', '0', '0', '0', '1', '1', '0', '0'],\n ['0', '0', '1', '0', '0', '0', '1', '1', '0', '0'],\n ['0', '0', '0', '1', '0', '0', '1', '1', '0', '0'],\n ['0', '1', '1', '0', '0', '0', '1', '1', '0', '0'],\n ['0', '0', '1', '1', '0', '0', '1', '1', '0', '0'],\n ['0', '1', '0', '1', '0', '0', '1', '1', '0', '0']]\n\n for i in range(samples):\n candidate_label = str(random.randrange(0, 2))\n if candidate_label == '0':\n f.write(','.join(\n candidate_data_0[random.randint(0, len(candidate_data_0)) - 1]) + \",\" + candidate_label + \"\\n\")\n elif candidate_label == '1':\n f.write(','.join(\n candidate_data_1[random.randint(0, len(candidate_data_1)) - 1]) + \",\" + candidate_label + \"\\n\")\n\n # if candidate_label == '0':\n # if can == 0:\n # f.write(','.join(\n # candidate_data_00[random.randint(0, len(candidate_data_00)) - 1]) + \",\" + candidate_label + \"\\n\")\n\n # elif can == 1:\n # f.write(','.join(\n # candidate_data_01[random.randint(0, len(candidate_data_01)) - 1]) + \",\" + candidate_label + \"\\n\")\n\n # elif can == 2:\n # f.write(','.join(\n # candidate_data_02[random.randint(0, len(candidate_data_02)) - 1]) + \",\" + candidate_label + \"\\n\")\n\n # elif candidate_label == '1':\n # if can == 0:\n # f.write(','.join(candidate_data_10[random.randint(0, len(candidate_data_10)) - 1]) + \",\" + candidate_label + \"\\n\")\n # elif can == 1:\n # f.write(','.join(candidate_data_11[random.randint(\n # 0, len(candidate_data_11)) - 1]) + \",\" + candidate_label + \"\\n\")\n # elif can == 2:\n # f.write(','.join(candidate_data_12[random.randint(\n # 0, len(candidate_data_12)) - 1]) + \",\" + candidate_label + \"\\n\")\n\n # elif candidate_label == '2':\n # if can == 0:\n # f.write(','.join(candidate_data_20[random.randint(\n # 0, len(candidate_data_20)) - 1]) + \",\" + candidate_label + \"\\n\")\n # elif can == 1:\n # f.write(','.join(candidate_data_21[random.randint(\n # 0, len(candidate_data_21)) - 1]) + \",\" + candidate_label + \"\\n\")\n # elif can == 2:\n # f.write(','.join(candidate_data_12[random.randint(\n # 0, len(candidate_data_22)) - 1]) + \",\" + candidate_label + \"\\n\")\n\n with open(os.path.join(NTE_MODULE_PATH, 'data', 'blipv3', 'train.csv'), 'w') as f:\n print(int(samples * 0.8))\n gen(f, int(samples * 0.8))\n\n with open(os.path.join(NTE_MODULE_PATH, 
'data', 'blipv3', 'test.csv'), 'w') as f:\n print(samples - int(samples * 0.8))\n gen(f, samples - int(samples * 0.8))\n\n self.create_meta([os.path.join(NTE_MODULE_PATH, 'data', 'blipv3', 'train.csv'),\n os.path.join(NTE_MODULE_PATH, 'data', 'blipv3', 'test.csv')])", "title": "" }, { "docid": "55979973ad98659930c11234031745ca", "score": "0.50907093", "text": "def __init__(self, alg):\n self._alg = alg\n self._fittedQ = list()", "title": "" }, { "docid": "e9d0dd220819d6496d0b2d098d130b15", "score": "0.5076742", "text": "def rawDataRead(self):\n\n ## The functions name is used as index in when connecting a function to \n ## it's queue \n current_function_ptr = self.rawDataRead\n try:\n ## The queue contains objects to be read\n connected_q:LimitedQueue.LimitedQueue = self.__connect_func_queue_pair[current_function_ptr]\n \n ## Getting data from queue\n while(not connected_q.empty()): \n try:\n if(self.__math_processor == None): \n raise MissingMathProcessor\n\n ## Get one token from input \n raw_data_token = connected_q.get_nowait()\n self.__log_list.append(raw_data_token)\n ## Launch a process with the input token\n \n math_proc = self.__math_processor(raw_data_token , True, \n self.__math_processor_args_dict)\n math_proc.start_process()\n ## put the token into output queue\n self.__output_processes_queue.put(math_proc)\n \n except: \n # raise\n pass \n except: \n raise", "title": "" }, { "docid": "03adb2ad7a2972322cac02c65274b512", "score": "0.5075132", "text": "def Drawmodels(Drawqueue,dirstring):\r\n print(\"DRAW QUEUE Starting!\")\r\n Roundfig, (ax_multipl1, ax_multipl2) = plt.subplots(2, 1, figsize=(15, 15))\r\n ax_multipl1.set_xlim([0,3500])\r\n ax_multipl1.set_ylim([0.5, 0.95])\r\n\r\n ax_multipl2.set_xlim([0,3500])\r\n ax_multipl2.set_ylim([0.05, 0.2])\r\n while(1):\r\n #Get new Data! 
The Header tells what to do\r\n Header, Data1, Data2 = Drawqueue.get()\r\n if Header == 1: #Plot Data; Data1=x; Data2=y\r\n ax_multipl1.plot(Data1, color='0.1')\r\n ax_multipl2.plot(Data2, color='0.1')\r\n elif Header == 2: #Data2=y; Data1=Data1\r\n ax_multipl1.hlines(y=Data1, xmin=Data2 * gatetimes + 50, xmax=(Data2 + 1) * gatetimes + 49)\r\n elif Header == 3:\r\n ax_multipl1.plot(Data1, color='orange')\r\n ax_multipl2.plot(Data2, color='0.1')\r\n elif Header == 4:\r\n Roundfig.savefig(dirstring + \"Evalresults/\" + str(Data1) + \".png\", dpi=200)\r\n import tikzplotlib\r\n\r\n tikzplotlib.get_tikz_code(figure=Roundfig,filepath=dirstring + \"Evalresults/\" + str(Data1) + \".tex\")\r\n\r\n ax_multipl1.cla()\r\n ax_multipl2.cla()\r\n ax_multipl1.set_xlim([0, 3500])\r\n ax_multipl1.set_ylim([0.5, 0.95])\r\n\r\n ax_multipl2.set_xlim([0, 3500])\r\n ax_multipl2.set_ylim([0.05, 0.2])", "title": "" }, { "docid": "caae99f3840068285030cfec31f840ec", "score": "0.5063502", "text": "def run(self):\n\t\tif self.debug:\n\t\t\timport pdb\n\t\t\tpdb.set_trace()\n\t\t\n\t\tdb = self.db_250k\n\t\tsession = db.session\n\t\tQC_method_id = 0 \t#just for QC_250k.get_call_info_id2fname()\n\t\tcall_data = QC_250k.get_call_info_id2fname(db, QC_method_id, self.call_method_id, filter_calls_QCed=0, \\\n\t\t\t\t\t\t\t\t\t\t\t\tmax_call_info_mismatch_rate=self.max_array_mismatch_rate, input_dir=self.input_dir,\\\n\t\t\t\t\t\t\t\t\t\t\t\ttake_unique_ecotype=self.take_unique_ecotype)\n\t\t#snps_with_best_QC_ls = self.get_snps_with_best_QC_ls(db, self.call_method_id)\n\t\tif self.max_snp_mismatch_rate<1 or self.max_snp_NA_rate<1:\t#2008-05-18 only do this when it's necessary\n\t\t\tsnps_name_set = self.get_snps_name_set_given_criteria(db, self.call_method_id, self.max_snp_mismatch_rate, self.max_snp_NA_rate)\n\t\telse:\n\t\t\tsnps_name_set = None\n\t\tdb_id2chr_pos = db.getSNPID2ChrPos()\n\t\tif len(call_data.call_info_id2fname)>0:\n\t\t\tdb_id2index = self.getSNPID2index(call_data.call_info_id2fname.values()[0][1], db_id2chr_pos)\n\t\t\tpdata = QC_250k.read_call_matrix(call_data.call_info_id2fname, self.min_probability, snps_name_set, \\\n\t\t\t\t\t\t\t\t\t\t\tdb_id2chr_pos=db_id2chr_pos, db_id2index=db_id2index)\t#2008-05-20 read_call_matrix returns PassingData object\n\t\t\tstrain_acc_list, category_list = pdata.ecotype_id_ls, pdata.array_id_ls\n\t\t\twrite_data_matrix(pdata.data_matrix, self.outputFname, pdata.header, strain_acc_list, category_list)", "title": "" }, { "docid": "044ca1db8ae5800d6045527f92a22a75", "score": "0.50591147", "text": "def __init__(self):\n self.q1=[]\n self.q2=[]\n self.flag=0", "title": "" }, { "docid": "64aabcd436dc802467c3c612ea641d4b", "score": "0.504184", "text": "def fastqc():\n\n mkdir(FASTQC_DIR)\n\n printp(\"\"\"\\n#\\n# run FastQC on initial data\\n#\"\"\")\n printp(\"\"\"\\n# drmr:label fastqc\"\"\")\n printp(\"\"\"\\n# drmr:job time_limit=2h working_directory={}\"\"\".format(FASTQC_DIR))\n\n for library, info in DATA.items():\n for readgroup, fastqs in info['readgroups'].items():\n for fastq in fastqs:\n printp(\"\"\"fastqc -o {} {}\"\"\".format(FASTQC_DIR, fastq), timed=True, ioniced=True)\n\n printp(\"\"\"\\n# drmr:wait\"\"\")", "title": "" }, { "docid": "03343e9fb3ed572ae6cbe8f9c79123b3", "score": "0.5040388", "text": "def result(self, num=128): \n #If signal samples are needed for te < t_start, then samples are taken\n #from the end of the calculated signal.\n \n signal = self.signal.usignal(self.up)\n out = empty((num, self.numchannels))\n # shortcuts and intial values\n m = 
self.mics\n t = self.start*ones(m.num_mics)\n i = 0\n epslim = 0.1/self.up/self.sample_freq\n c0 = self.env.c\n tr = self.trajectory\n n = self.numsamples\n while n:\n n -= 1\n eps = ones(m.num_mics)\n te = t.copy() # init emission time = receiving time\n j = 0\n # Newton-Rhapson iteration\n while abs(eps).max()>epslim and j<100:\n loc = array(tr.location(te))\n rm = loc-m.mpos# distance vectors to microphones\n rm = sqrt((rm*rm).sum(0))# absolute distance\n loc /= sqrt((loc*loc).sum(0))# distance unit vector\n der = array(tr.location(te, der=1))\n Mr = (der*loc).sum(0)/c0# radial Mach number\n eps = (te + rm/c0 - t)/(1+Mr)# discrepancy in time \n te -= eps\n j += 1 #iteration count\n t += 1./self.sample_freq\n # emission time relative to start time\n ind = (te-self.start_t+self.start)*self.sample_freq\n if self.conv_amp: rm *= (1-Mr)**2\n try:\n out[i] = signal[array(0.5+ind*self.up, dtype=int64)]/rm\n i += 1\n if i == num:\n yield out\n i = 0\n except IndexError: #if no more samples available from the source \n break\n if i > 0: # if there are still samples to yield\n yield out[:i]", "title": "" }, { "docid": "ff34bd3625a69a691e61d40877b04fb5", "score": "0.50353944", "text": "def get_samples(self, manager, cache):", "title": "" }, { "docid": "e26fa8ce36917792883460e69a502332", "score": "0.50189567", "text": "def plot_maker(sorter, we,unit_list):\n \n for unit_id in unit_list:\n fig = plt.figure(figsize=(25, 13))\n gs = GridSpec(nrows=3, ncols=6)\n fig.suptitle(f'{unit_id} (Total spike {sorter.get_total_num_spikes()[unit_id]})',)\n ax0 = fig.add_subplot(gs[0, 0:3])\n ax1 = fig.add_subplot(gs[0, 3:7])\n ax1.set_title('Mean firing rate during a trial')\n ax2 = fig.add_subplot(gs[1, :])\n ax2.set_title('Waveform of the unit')\n ax3 = fig.add_subplot(gs[2, 0])\n ax4 = fig.add_subplot(gs[2, 1], sharey = ax3)\n ax5 = fig.add_subplot(gs[2, 2], sharey = ax3)\n ax6 = fig.add_subplot(gs[2, 3:6])\n sw.plot_autocorrelograms(sorter, unit_ids=[unit_id], axes=ax0, bin_ms=1, window_ms=200)\n ax0.set_title('Autocorrelogram')\n current_spike_train = sorter.get_unit_spike_train(unit_id)/sorter.get_sampling_frequency()\n current_spike_train_list = []\n while len(current_spike_train) > 0: #this loop is to split the spike train into trials with correct duration in seconds\n # Find indices of elements under 9 (9 sec being the duration of the trial)\n indices = np.where(current_spike_train < 9)[0]\n if len(indices)>0:\n # Append elements to the result list\n current_spike_train_list.append(SpikeTrain(current_spike_train[indices]*s, t_stop=9))\n # Remove the appended elements from the array\n current_spike_train = np.delete(current_spike_train, indices)\n # Subtract 9 from all remaining elements\n current_spike_train -= 9\n bin_size = 100\n histogram = time_histogram(current_spike_train_list, bin_size=bin_size*ms, output='mean')\n histogram = histogram*(1000/bin_size)\n ax1.axvspan(0, 0.5, color='green', alpha=0.3)\n ax1.axvspan(1.5, 2, color='green', alpha=0.3)\n ax6.axvspan(0, 0.5, color='green', alpha=0.3)\n ax6.axvspan(1.5, 2, color='green', alpha=0.3)\n plot_time_histogram(histogram, units='s', axes=ax1)\n sw.plot_unit_waveforms_density_map(we, unit_ids=[unit_id], ax=ax2)\n template = we.get_template(unit_id=unit_id).copy()\n \n for curent_ax in [ax3, ax4, ax5]:\n max_channel = np.argmax(np.abs(template))%template.shape[1]\n template[:,max_channel] = 0\n mean_residual = np.mean(np.abs((we.get_waveforms(unit_id=unit_id)[:,:,max_channel] - we.get_template(unit_id=unit_id)[:,max_channel])), axis=0)\n 
curent_ax.plot(mean_residual)\n curent_ax.plot(we.get_template(unit_id=unit_id)[:,max_channel])\n curent_ax.set_title('Mean residual of the waveform for channel '+str(max_channel))\n plt.tight_layout()\n rasterplot_rates(current_spike_train_list, ax=ax6, histscale=0.1)", "title": "" }, { "docid": "bf925900164c2451a1e56c1b1c29f299", "score": "0.50185037", "text": "def __init__(self,params,samples,rev_x,pos_test):\n self.params = params\n self.samples = samples\n self.rev_x = rev_x\n self.pos_test = pos_test\n\n def ad_ks_test(parnames,inn_samps,mcmc_samps,cnt):\n \"\"\"\n Record and print ks and AD test statistics\n \"\"\"\n \n ks_mcmc_arr = []\n ks_inn_arr = []\n ad_mcmc_arr = []\n ad_inn_arr = []\n cur_max = self.params['n_samples']\n mcmc = []\n c=vici = []\n for i in range(inn_samps.shape[0]):\n # remove samples outside of the prior mass distribution\n mask = [(inn_samps[0,:] >= inn_samps[2,:]) & (inn_samps[3,:] >= 0.0) & (inn_samps[3,:] <= 1.0) & (inn_samps[1,:] >= 0.0) & (inn_samps[1,:] <= 1.0) & (inn_samps[0,:] >= 0.0) & (inn_samps[0,:] <= 1.0) & (inn_samps[2,:] <= 1.0) & (inn_samps[2,:] >= 0.0)]\n mask = np.argwhere(mask[0])\n new_rev = inn_samps[i,mask]\n new_rev = new_rev.reshape(new_rev.shape[0])\n new_samples = mcmc_samps[mask,i]\n new_samples = new_samples.reshape(new_samples.shape[0])\n tmp_max = new_rev.shape[0]\n if tmp_max < cur_max: cur_max = tmp_max\n vici.append(new_rev[:cur_max])\n mcmc.append(new_samples[:cur_max])\n\n mcmc = np.array(mcmc)\n vici = np.array(vici)\n\n # iterate through each parameter\n for i in range(inn_samps.shape[0]):\n ks_mcmc_samps = []\n ks_inn_samps = []\n ad_mcmc_samps = []\n ad_inn_samps = []\n n_samps = self.params['n_samples']\n n_pars = self.params['ndim_x']\n\n # iterate over number of randomized sample slices\n for j in range(self.params['n_kl_samp']):\n # get ideal bayesian number. We want the 2 tailed p value from the KS test FYI\n ks_mcmc_result = ks_2samp(np.random.choice(mcmc[i,:],size=int(mcmc.shape[1]/2.0)), np.random.choice(mcmc[i,:],size=int(mcmc.shape[1]/2.0)))\n ad_mcmc_result = anderson_ksamp([np.random.choice(mcmc[i,:],size=int(mcmc.shape[1]/2.0)), np.random.choice(mcmc[i,:],size=int(mcmc.shape[1]/2.0))])\n \n\n # get predicted vs. 
true number\n ks_inn_result = ks_2samp(np.random.choice(vici[i,:],size=int(mcmc.shape[1]/2.0)),np.random.choice(mcmc[i,:],size=int(mcmc.shape[1]/2.0)))\n ad_inn_result = anderson_ksamp([np.random.choice(vici[i,:],size=int(mcmc.shape[1]/2.0)),np.random.choice(mcmc[i,:],size=int(mcmc.shape[1]/2.0))])\n\n # store result stats\n ks_mcmc_samps.append(ks_mcmc_result[1])\n ks_inn_samps.append(ks_inn_result[1])\n ad_mcmc_samps.append(ad_mcmc_result[0])\n ad_inn_samps.append(ad_inn_result[0])\n print('Test Case %d, Parameter(%s) k-s result: [Ideal(%.6f), Predicted(%.6f)]' % (int(cnt),parnames[i],np.array(ks_mcmc_result[1]),np.array(ks_inn_result[1])))\n print('Test Case %d, Parameter(%s) A-D result: [Ideal(%.6f), Predicted(%.6f)]' % (int(cnt),parnames[i],np.array(ad_mcmc_result[0]),np.array(ad_inn_result[0])))\n\n # store result stats\n ks_mcmc_arr.append(ks_mcmc_samps)\n ks_inn_arr.append(ks_inn_samps)\n ad_mcmc_arr.append(ad_mcmc_samps)\n ad_inn_arr.append(ad_inn_samps)\n\n return ks_mcmc_arr, ks_inn_arr, ad_mcmc_arr, ad_inn_arr, 0, 0\n\n def load_test_set(model,sig_test,par_test,normscales,sampler='dynesty1'):\n \"\"\"\n load requested test set\n \"\"\"\n\n if sampler=='vitamin1' or sampler=='vitamin2':\n # The trained inverse model weights can then be used to infer a probability density of solutions given new measurements\n _, _, x, _, timet = model.run(self.params, sig_test, np.shape(par_test)[1], \"inverse_model_dir_%s/inverse_model.ckpt\" % self.params['run_label'])\n\n # Convert XS back to unnormalized version\n if self.params['do_normscale']:\n for m in range(self.params['ndim_x']):\n x[:,m,:] = x[:,m,:]*normscales[m]\n return x, [timet,timet,timet]\n\n # Define variables\n pos_test = []\n samples = np.zeros((params['r']*params['r'],params['n_samples'],params['ndim_x']+1))\n cnt=0\n test_set_dir = params['kl_set_dir'] + '_' + sampler\n\n # Load test set\n timet=[]\n default_n_samps = params['n_samples']\n for i in range(params['r']):\n for j in range(params['r']):\n # TODO: remove this bandaged phase file calc\n f = h5py.File('%s/test_samp_%d.h5py' % (test_set_dir,cnt), 'r+')\n\n # select samples from posterior randomly\n phase = (f['phase_post'][:] - (params['prior_min'][1])) / (params['prior_max'][1] - params['prior_min'][1])\n\n if params['do_mc_eta_conversion']:\n m1 = f['mass_1_post'][:]\n m2 = f['mass_2_post'][:]\n eta = (m1*m2)/(m1+m2)**2\n mc = np.sum([m1,m2], axis=0)*eta**(3.0/5.0)\n else:\n m1 = (f['mass_1_post'][:] - (params['prior_min'][0])) / (params['prior_max'][0] - params['prior_min'][0])\n m2 = (f['mass_2_post'][:] - (params['prior_min'][3])) / (params['prior_max'][3] - params['prior_min'][3])\n t0 = (f['geocent_time_post'][:] - (params['prior_min'][2])) / (params['prior_max'][2] - params['prior_min'][2])\n dist=(f['luminosity_distance_post'][:] - (params['prior_min'][4])) / (params['prior_max'][4] - params['prior_min'][4])\n #theta_jn=f['theta_jn_post'][:][shuffling]\n timet.append(np.array(f['runtime']))\n if params['do_mc_eta_conversion']:\n f_new=np.array([mc,phase,t0,eta]).T\n else:\n f_new=np.array([m1,phase,t0,m2,dist]).T\n f_new=f_new[:params['n_samples'],:]\n \n # resize array if less than 5000 samples\n if f_new.shape[0] < default_n_samps:\n default_n_samps = f_new.shape[0]\n samples = np.delete(samples,np.arange(default_n_samps,samples.shape[1]),1) \n \n samples[cnt,:default_n_samps,:]=f_new[:default_n_samps,:]\n\n # get true scalar parameters\n if params['do_mc_eta_conversion']:\n m1 = np.array(f['mass_1'])\n m2 = np.array(f['mass_2'])\n eta = 
(m1*m2)/(m1+m2)**2\n mc = np.sum([m1,m2])*eta**(3.0/5.0)\n pos_test.append([mc,np.array(f['phase']),(np.array(f['geocent_time']) - (params['prior_min'][2])) / (params['prior_max'][2] - params['prior_min'][2]),eta])\n else:\n m1 = (np.array(f['mass_1']) - (params['prior_min'][0])) / (params['prior_max'][0] - params['prior_min'][0])\n m2 = (np.array(f['mass_2']) - (params['prior_min'][3])) / (params['prior_max'][3] - params['prior_min'][3])\n t0 = (np.array(f['geocent_time']) - (params['prior_min'][2])) / (params['prior_max'][2] - params['prior_min'][2])\n dist = (np.array(f['luminosity_distance']) - (params['prior_min'][4])) / (params['prior_max'][4] - params['prior_min'][4])\n phase = (np.array(f['phase']) - (params['prior_min'][1])) / (params['prior_max'][1] - params['prior_min'][1])\n pos_test.append([m1,phase,t0,m2,dist])\n cnt += 1\n f.close()\n\n pos_test = np.array(pos_test)\n # save time per sample\n timet = np.array(timet)\n timet = np.array([np.min(timet),np.max(timet),np.median(timet)])\n\n # rescale all samples to be from 0 to 1\n samples\n\n pos_test = pos_test[:,[0,2,3,4]]\n samples = samples[:,:,[0,2,3,4]]\n new_samples = []\n for i in range(samples.shape[0]):\n new_samples.append(samples[i].T)\n #samples = samples.reshape(samples.shape[0],samples.shape[2],samples.shape[1])\n samples = np.array(new_samples)\n\n return samples, timet\n\n def confidence_bd(samp_array):\n \"\"\"\n compute confidence bounds for a given array\n \"\"\"\n cf_bd_sum_lidx = 0\n cf_bd_sum_ridx = 0\n cf_bd_sum_left = 0\n cf_bd_sum_right = 0\n cf_perc = 0.05\n\n cf_bd_sum_lidx = np.sort(samp_array)[int(len(samp_array)*cf_perc)]\n cf_bd_sum_ridx = np.sort(samp_array)[int(len(samp_array)*(1.0-cf_perc))]\n\n return [cf_bd_sum_lidx, cf_bd_sum_ridx]\n\n def make_contour_plot(ax,x,y,dataset,parnames,prior_min=0,prior_max=1,color='red',load_plot_data=False,contours=None):\n \"\"\" Module used to make contour plots in pe scatter plots.\n\n Parameters\n ----------\n ax: matplotlib figure\n a matplotlib figure instance\n x: 1D numpy array\n pe sample parameters for x-axis\n y: 1D numpy array\n pe sample parameters for y-axis\n dataset: 2D numpy array\n array containing both parameter estimates\n color:\n color of contours in plot\n Returns\n -------\n kernel: scipy kernel\n gaussian kde of the input dataset\n \"\"\"\n \n def get_contours(x,y,prior_min=[0,0],prior_max=[1,1],mass_flag=False):\n\n #idx = np.argwhere((x>=0.0)*(x<=1.0)*(y>=0.0)*(y<=1.0)).flatten()\n #x = x[idx]\n #y = y[idx]\n N = len(x)\n\n values = np.vstack([x,y])\n kernel = gaussian_kde(values)\n f = lambda b, a: kernel(np.vstack([a,b]))\n if mass_flag:\n R = dblquad(f, prior_min[0], prior_max[0], lambda x: prior_min[1], lambda x: x)[0]\n dist = lambda a, b: f(b,a)/R*(b>=prior_min[1])*(b<=prior_max[1])*(a>=prior_min[0])*(a<=prior_max[0])*(a>=b)\n else:\n R = dblquad(f, prior_min[0], prior_max[0], lambda x: prior_min[1], lambda x: prior_max[1])[0]\n dist = lambda a, b: f(b,a)/R*(b>=prior_min[1])*(b<=prior_max[1])*(a>=prior_min[0])*(a<=prior_max[0])\n #R = dblquad(f, 0, 1, lambda x: 1, lambda x: 1)\n #dist = lambda a, b: f(b,a)/R*(b>=0)*(b<=1)*(a>=0)*(a<=1)\n\n Z = dist(x,y)\n Lidx = np.argsort(Z)\n Z68 = Z[Lidx[int((1.0-0.68)*N)]]\n Z90 = Z[Lidx[int((1.0-0.90)*N)]]\n Z95 = Z[Lidx[int((1.0-0.95)*N)]]\n\n x_range = np.max(x) - np.min(x)\n y_range = np.max(y) - np.min(y)\n xv = np.arange(np.min(x)-0.1*x_range, np.max(x)+0.1*x_range, 1.2*x_range/50.0)\n yv = np.arange(np.min(y)-0.1*y_range, np.max(y)+0.1*y_range, 1.2*y_range/50.0)\n X, Y = 
np.meshgrid(xv, yv)\n Q = dist(X.flatten(),Y.flatten()).reshape(X.shape)\n\n return Q,X,Y,[Z95,Z90,Z68,np.max(Q)]\n\n do_unnorm_contours = False\n\n if do_unnorm_contours: \n # Make a 2d normed histogram\n H,xedges,yedges=np.histogram2d(x,y,bins=20,normed=True)\n\n norm=H.sum() # Find the norm of the sum\n # Set contour levels\n contour1=0.95\n contour2=0.90\n contour3=0.68\n\n # Set target levels as percentage of norm\n target1 = norm*contour1\n target2 = norm*contour2\n target3 = norm*contour3\n\n # Take histogram bin membership as proportional to Likelihood\n # This is true when data comes from a Markovian process\n def objective(limit, target):\n w = np.where(H>limit)\n count = H[w]\n return count.sum() - target\n\n # Find levels by summing histogram to objective\n level1= scipy.optimize.bisect(objective, H.min(), H.max(), args=(target1,))\n level2= scipy.optimize.bisect(objective, H.min(), H.max(), args=(target2,))\n level3= scipy.optimize.bisect(objective, H.min(), H.max(), args=(target3,))\n\n # For nice contour shading with seaborn, define top level\n level4=H.max()\n levels=[level1,level2,level3,level4]\n\n # Pass levels to normed kde plot\n X, Y = np.mgrid[np.min(x):np.max(x):100j, np.min(y):np.max(y):100j]\n positions = np.vstack([X.ravel(), Y.ravel()])\n kernel = gaussian_kde(dataset)\n Z = np.reshape(kernel(positions).T, X.shape)\n\n if color == 'blue':\n ax.contour(X,Y,Z,levels=levels,alpha=0.5,colors=color)\n elif color == 'red':\n ax.contourf(X,Y,Z,levels=levels,alpha=1.0,colors=['#e61a0b','#f75448','#ff7a70'])\n\n else:\n# if load_plot_data==False:\n if (parnames[0] == 'm_{1} (M_\\odot)' and parnames[1]=='m_{2} (M_\\odot)') or (parnames[0]=='m_{2} (M_\\odot)' and parnames[1]=='m_{1} (M_\\odot)'):\n mass_flag=True\n else:\n mass_flag=False\n # Get contours for plotting\n Q,X,Y,L = get_contours(x,y,prior_min=prior_min,prior_max=prior_max,mass_flag=mass_flag)\n# else:\n# Q = contours[0]\n# X = contours[1]\n# Y = contours[2]\n# L = contours[3]\n \n if color == 'blue':\n ax.contour(X,Y,Q,levels=L,alpha=0.5,colors=color, origin='lower')\n elif color == 'red':\n ax.contourf(X,Y,Q,levels=L,alpha=1.0,colors=['#e61a0b','#f75448','#ff7a70'], origin='lower')\n ax.set_xlim(np.min(X),np.max(X))\n ax.set_ylim(np.min(Y),np.max(Y))\n return [Q,X,Y,L]\n\n # Store above declared functions to be used later\n self.ad_ks_test = ad_ks_test\n self.load_test_set = load_test_set\n self.confidence_bd = confidence_bd\n self.make_contour_plot = make_contour_plot", "title": "" }, { "docid": "39e263422ea447dd4890eb0e279e56d7", "score": "0.50178", "text": "def result(self, num=128):\n #If signal samples are needed for te < t_start, then samples are taken\n #from the end of the calculated signal.\n\n mpos = self.mics.mpos\n \n # direction vector from tuple\n direc = array(self.direction, dtype = float) \n # normed direction vector\n direc_n = direc/norm(direc)\n c = self.env.c\n \n # distance between monopoles in the line \n dist = self.length / self.num_sources \n \n #blocwise output\n out = zeros((num, self.numchannels))\n \n # distance from line start position to microphones \n loc = array(self.loc, dtype = float).reshape((3, 1)) \n \n # distances from monopoles in the line to microphones\n rms = empty(( self.numchannels,self.num_sources))\n inds = empty((self.numchannels,self.num_sources))\n signals = empty((self.num_sources, len(self.signal.usignal(self.up))))\n #for every source - distances\n for s in range(self.num_sources):\n rms[:,s] = self.env._r((loc.T+direc_n*dist*s).T, mpos)\n inds[:,s] = 
(-rms[:,s] / c - self.start_t + self.start) * self.sample_freq \n #new seed for every source\n if self.coherence == 'incoherent':\n self.signal.seed = s + abs(int(hash(self.digest)//10e12))\n self.signal.rms = self.signal.rms * self.source_strength[s]\n signals[s] = self.signal.usignal(self.up)\n i = 0\n n = self.numsamples \n while n:\n n -= 1\n try:\n for s in range(self.num_sources):\n # sum sources\n out[i] += (signals[s,array(0.5 + inds[:,s].T * self.up, dtype=int64)] / rms[:,s])\n \n inds += 1.\n i += 1\n if i == num:\n yield out\n out = zeros((num, self.numchannels))\n i = 0\n except IndexError:\n break\n \n yield out[:i]", "title": "" }, { "docid": "b92cf7f0d310ca9b76c8a4ec05293a4a", "score": "0.501682", "text": "def __init__(self):\n self.q1 = list()", "title": "" }, { "docid": "de32f66d62605f756116dc4923658b72", "score": "0.50064564", "text": "def empty(self, starttime, timestep, i, msg, ucf):\n\n logging.info(msg)\n\n starttime = starttime + timestep*i\n n = util.time2sample(timestep, self.sampling_rate) + 1\n max_coa = max_coa_n = np.full(n, 0)\n coord = np.full((n, 3), 0)\n\n self.append(starttime, max_coa, max_coa_n, coord, ucf)", "title": "" }, { "docid": "334f8e1c95f4af24e8323834cfa6a5af", "score": "0.50063705", "text": "def pre_start_capture(self) -> None:\n alazar = self._get_alazar()\n acq_s_p_r = self.samples_per_record.get()\n inst_s_p_r = alazar.samples_per_record.get()\n sample_rate = alazar.get_sample_rate()\n if acq_s_p_r != inst_s_p_r:\n raise Exception('acq controller samples per record {} does not match'\n ' instrument value {}, most likely need '\n 'to set and check int_time and int_delay'.format(acq_s_p_r, inst_s_p_r))\n\n samples_per_record = inst_s_p_r\n records_per_buffer = alazar.records_per_buffer.get()\n buffers_per_acquisition = alazar.buffers_per_acquisition.get()\n max_samples = self.board_info['max_samples']\n samples_per_buffer = records_per_buffer * samples_per_record\n if samples_per_buffer > max_samples:\n raise RuntimeError(\"Trying to acquire {} samples in one buffer maximum\"\n \" supported is {}\".format(samples_per_buffer, max_samples))\n\n\n # We currently enforce the shape to be identical for all channels\n # so it's safe to take the first\n if self.shape_info['average_buffers']:\n self.buffer = np.zeros(samples_per_record *\n records_per_buffer *\n self.number_of_channels)\n else:\n self.buffer = np.zeros((buffers_per_acquisition,\n samples_per_record *\n records_per_buffer *\n self.number_of_channels))\n self.demodulators = []\n\n for channel in self.active_channels_nested:\n if channel['ndemods'] > 0:\n self.demodulators.append(Demodulator(buffers_per_acquisition,\n records_per_buffer,\n samples_per_record,\n sample_rate,\n self.filter_settings,\n channel['demod_freqs'],\n self.shape_info['average_buffers'],\n self.shape_info['average_records'],\n self.shape_info['integrate_samples']\n ))\n else:\n self.demodulators.append(None)", "title": "" }, { "docid": "10154aa97b64de6a72daa2fe861a9aec", "score": "0.50045246", "text": "def run(self):\r\n self.patchVoltOutChan = self.configs.patchVoltOutChannel\r\n self.patchCurOutChan = self.configs.patchCurOutChannel\r\n self.patchVoltInChan = self.configs.patchVoltInChannel\r\n \r\n #DAQ\r\n with nidaqmx.Task() as writeTask, nidaqmx.Task() as readTask: \r\n writeTask.ao_channels.add_ao_voltage_chan(self.patchVoltInChan)\r\n readTask.ai_channels.add_ai_voltage_chan(self.patchVoltOutChan)\r\n readTask.ai_channels.add_ai_voltage_chan(self.patchCurOutChan)\r\n \r\n self.setTiming(writeTask, 
readTask)\r\n \r\n reader = AnalogMultiChannelReader(readTask.in_stream)\r\n writer = AnalogSingleChannelWriter(writeTask.out_stream)\r\n \r\n writer.write_many_sample(self.wave)\r\n \r\n \"\"\"Reading data from the buffer in a loop. \r\n The idea is to let the task read more than could be loaded in the buffer for each iteration.\r\n This way the task will have to wait slightly longer for incoming samples. And leaves the buffer\r\n entirely clean. This way we always know the correct numpy size and are always left with an empty\r\n buffer (and the buffer will not slowly fill up).\"\"\"\r\n output = np.zeros([2, self.readNumber])\r\n writeTask.start() #Will wait for the readtask to start so it can use its clock\r\n readTask.start()\r\n while not self.isInterruptionRequested():\r\n reader.read_many_sample(data = output, \r\n number_of_samples_per_channel = self.readNumber)\r\n \r\n #Emiting the data just received as a signal\r\n #output = np.around(output, 7) #Round all values\r\n self.measurement.emit(output[0,:], output[1,:])", "title": "" }, { "docid": "f6f0af46b1b740083b26f1f012820e5b", "score": "0.49898362", "text": "def newSample(self,bn,seq):\n self._sample = {}\n LL=0\n for node_id in seq:\n (nod,par)=self.caching_nameAndParents(bn,node_id)\n (self._sample[nod],p) = CSVGenerator.draw(self.caching_probas(bn,node_id,nod,par))\n LL+=math.log(p,2)\n\n return (self._sample,LL)", "title": "" }, { "docid": "33917c9c93564e45bb593ddc33b46bd1", "score": "0.49881154", "text": "def compute_gsm_splines(self):\n\n print \"computing %s for Pol %s\" % (self._name.upper(), self.pol)\n drift_data, drift_lsts, drift_freqs = self.data, self.lsts, self.freqs\n \n # Extend to full 24 hours then form interpolation spline\n nd = np.zeros((13, 145))\n nd[:, :144] = drift_data\n nd[:, 144] = drift_data[:, 0]\n drift_lsts = np.append(drift_lsts, drift_lsts[0]+24)\n drift_lsts[0] = 0.0\n drift_data = nd\n \n fits = [ [] for ii in range(self.npol + 1)]\n #print fits\n for ii in range(len(drift_lsts)):\n\n fit = self.curve_fit(self.freqs, drift_data[:, ii])\n #if not ii%10:\n #print fit\n \n for jj in range(len(fit)):\n fits[jj].append(fit[jj])\n \n self.gsm_pols = np.array(fits)\n \n self.gsm_spline = []\n for kk in range(self.gsm_pols.shape[0]):\n self.gsm_spline.append(interpolate.interp1d(drift_lsts, self.gsm_pols[kk, :], kind='cubic'))", "title": "" }, { "docid": "700e9cf7c9a2c1a3eb05b68ba1e8bae3", "score": "0.49830276", "text": "def Preprc(raw_data: object, flag: object = 0) -> object:\n # process recieved arrays (data_arr1=data, data_arr2=time,seq)\n if not list(raw_data):\n return []\n\n data_arr1, data_arr2, err_pkts = process_raw_PPG(raw_data)\n seq = np.copy(data_arr2[:, 1])\n # make Sq no. 
ordered\n d = np.diff(seq)\n idx1 = np.where(d < -(1023 - 50))[0]\n idx1 = np.append(idx1, len(seq) - 1)\n for i in range(len(idx1) - 1):\n seq[idx1[i] + 1:idx1[i + 1] + 1] = seq[idx1[i] + 1:idx1[i + 1] + 1] - (i + 1) * d[idx1[i]]\n seq = (seq - seq[0]).astype(int).reshape((len(seq)))\n # print(seq)\n seq_max = max(seq) # just some heuristic to make ECG seq value 4 times\n\n arr1 = np.concatenate([seq.reshape((len(seq), 1)), data_arr1], axis=1)\n\n if raw_data.all != None:\n df1 = pd.DataFrame(arr1, columns=['Seq', 'AccX', 'AccY', 'AccZ', 'GyroX',\n 'GyroY', 'GyroZ', 'LED1', 'LED2', 'LED3'])\n else:\n return []\n\n df1.drop_duplicates(subset=['Seq'], inplace=True)\n\n df2 = pd.DataFrame(np.array(range(seq_max + 1)), columns=['Seq'])\n\n itime = data_arr2[0, 0];\n ftime = data_arr2[-1, 0]\n df3 = df2.merge(df1, how='left', on=['Seq'])\n df3['time'] = pd.to_datetime(np.linspace(itime, ftime, len(df2)), unit='ms')\n df3.set_index('time', inplace=True)\n df3.interpolate(method='time', axis=0, inplace=True) # filling missing data\n df3.dropna(inplace=True)\n df3['time_stamps'] = np.linspace(itime, ftime, len(df2))\n return df3", "title": "" }, { "docid": "47707c69bf6d1fa581bad74ffdd041d4", "score": "0.49797022", "text": "def prallel_producer( self ):\n while True:\n yield [ RisingEdge(self.dut.peaks[i]) for i in range(self.NSAMP) ]\n yield ReadOnly()\n detected_pulse = p.pulse( orbit=self.orb_cnt, bx=self.bx_cnt, amplitude=int(self.dut.local_maximum), position=int(math.log(int(self.dut.peaks),2)), tot=int(self.dut.time_over_threshold) )\n self.pulses.append( detected_pulse )\n trig = True\n while trig:\n yield RisingEdge( self.dut.bunch_clk )\n yield ReadOnly()\n if int(self.dut.peaks) > 0:\n # self.dut._log.info( pfu.string_color(\"CONSECUTIVE\", \"yellow\") )\n self.consec_cnt += 1\n detected_pulse = p.pulse( orbit=self.orb_cnt, bx=self.bx_cnt, amplitude=int(self.dut.local_maximum), position=int(math.log(int(self.dut.peaks),2)), tot=int(self.dut.time_over_threshold) )\n self.pulses.append( detected_pulse )\n trig = True\n else:\n trig = False", "title": "" }, { "docid": "7c7ef095a3c66a7bbfb5adb0561b91ec", "score": "0.4972983", "text": "def prepare_config_file(self):\n\n jobinfo = defaultdict()\n short_names = self.ffastq.keys()\n sampleinfo = defaultdict()\n\n for sample in short_names:\n conf = self.conf\n\n sample_config = os.path.join(self.param['outdir'], 'job_sample_logs/'+sample+'.conf')\n sample_sge_err = os.path.join(self.param['outdir'], 'job_sample_logs/'+sample+'.sge.err')\n sample_dir = os.path.join(self.param['outdir'], sample)\n sample_log = os.path.join(self.param['outdir'], 'job_sample_logs/'+sample+'.log')\n\n # Paired samples\n # --------------\n sample_paired = 'no;;'\n if self.param['paired_samples']:\n if sample in [self.paired_samples[item]['tumor'] for item in self.paired_samples.keys()]:\n sample_type = 'tumor'\n matching_sample = [self.paired_samples[item]['normal'] for\n item in self.paired_samples.keys() if self.paired_samples[item]['tumor'] == sample][0]\n else:\n sample_type = 'normal'\n matching_sample = [self.paired_samples[item]['tumor'] for\n item in self.paired_samples.keys() if self.paired_samples[item]['normal'] == sample][0]\n\n sample_paired = 'yes;{0};{1}'.format(sample_type, matching_sample)\n\n # Compressed fastq\n # -----------------\n inputfile_compressed = 'no'\n if self.param['input_filetype'] == 'compressed': inputfile_compressed = 'yes'\n\n # Fastq location\n sampleloc = 'local'\n if self.param['sample_location'] == 'ftp': sampleloc = 'ftp'\n\n 
fq_name = os.path.join(self.param['outdir'], sample+'/'+sample+'_fqlist.txt')\n\n jobinfo = {'sample_name': sample, 'sample_dir': sample_dir, 'sample_log': sample_log,\n 'sample_fastq': fq_name, 'sample_sge_err': sample_sge_err, 'sample_config': sample_config,\n 'sample_size': self.ffastq[sample]['filesize'], 'sample_paired': sample_paired,\n 'sample_location': sampleloc,\n 'sample_filecompressed': inputfile_compressed,\n 'sample_library_stranded': self.param['library_stranded'],\n 'sample_library_strand': self.param['strand'],\n 'sample_library_type': self.conf['Series_Info']['library_type']}\n\n if self.opthead:\n # Make list of unique parameters\n exparamlist = []\n for item in self.otherparam[sample]:\n jobinfo.update({'sample_'+item.lower(): self.otherparam[sample][item]})\n if item not in exparamlist: exparamlist.append(item.lower())\n conf['Series_Info'].update({'opt_param': ';'.join(exparamlist)})\n\n sampleinfo[sample] = jobinfo\n\n conf['Job_Info'] = jobinfo\n conf.filename = sample_config\n\n # Check file sizes\n if self.param['modify_cpu']:\n if float(self.ffastq[sample]['filesize']) > 100000000 and conf['Global_Parameters']['NUMTHREADS'] == '2':\n print 'Setting NUMTHREADS to 4; MAXMEM to 24 GB'\n conf['Global_Parameters']['NUMTHREADS'] = '4'\n conf['Global_Parameters']['MAXMEM'] = '24g'\n # if float(self.ffastq[sample]['filesize']) > 100000000 and conf['Global_Parameters']['NUMTHREADS'] == '2':\n # print 'Setting NUMTHREADS to 4; MAXMEM to 32 GB'\n # conf['Global_Parameters']['NUMTHREADS'] = '4'\n # conf['Global_Parameters']['MAXMEM'] = '32g'\n\n # Check for library strandedness\n if self.param['library_stranded']:\n if 'app_tophat' in conf.keys():\n conf['app_tophat']['library-type'] = 'fr-firststrand'\n if self.param['strand'] == 'second':\n conf['app_tophat']['library-type'] = 'fr-secondstrand'\n if 'app_tophatfusion' in conf.keys():\n conf['app_tophatfusion']['library-type'] = 'fr-firststrand'\n if self.param['strand'] == 'second':\n conf['app_tophatfusion']['library-type'] = 'fr-secondstrand'\n if 'app_cufflinks' in conf.keys():\n conf['app_cufflinks']['-library-type'] = 'fr-firststrand'\n if self.param['strand'] == 'second':\n conf['app_cufflinks']['-library-type'] = 'fr-secondstrand'\n if 'app_htseq' in conf.keys():\n conf['app_htseq']['s'] = 'yes'\n if 'app_star' in conf.keys():\n conf['app_star']['outSAMstrandField'] = 'omit'\n conf['app_star']['outFilterIntronMotifs'] = 'RemoveNoncanonical'\n if 'app_featurecount' in conf.keys():\n conf['app_featurecount']['s'] = '1'\n if self.param['strand'] == 'second':\n conf['app_featurecount']['s'] = '2'\n if 'app_rsem_calculate_expression' in conf.keys():\n #conf['app_rsem_calculate_expression']['strand-specific'] = 'keep'\n conf['app_rsem_calculate_expression']['forward-prob'] = '1'\n if self.param['strand'] == 'second':\n conf['app_rsem_calculate_expression']['forward-prob'] = '0'\n if 'app_qualimap_rnaseq' in conf.keys():\n conf['app_qualimap_rnaseq']['p'] = 'strand-specific-forward'\n if self.param['strand'] == 'second':\n conf['app_qualimap_rnaseq']['p'] = 'strand-specific-reverse'\n if 'app_qualimap_bamqc' in conf.keys():\n conf['app_qualimap_bamqc']['p'] = 'strand-specific-forward'\n if self.param['strand'] == 'second':\n conf['app_qualimap_bamqc']['p'] = 'strand-specific-reverse'\n\n if self.param['paired']:\n if 'app_featurecount' in conf.keys():\n conf['app_featurecount']['p'] = 'keep'\n else:\n if 'app_rnaseqc' in conf.keys():\n conf['app_rnaseqc']['singleEnd'] = 'keep'\n\n conf.write()\n\n self.sampleinfo = 
sampleinfo\n print 'Created sample config files'\n\n pass", "title": "" }, { "docid": "4c86ae3671f311e332e5cad9b259bb51", "score": "0.4970712", "text": "def qc_sample(self):\n raise Exception(\"%s.qc_sample method no longer supported\"\n % self.__class__)", "title": "" }, { "docid": "59b0ed05f1c67beaa8d8f2eac8a29807", "score": "0.49613336", "text": "def __init__(\n self,\n Dq: float = 4000.0,\n B: float = 400.0,\n C: float = 3600.0,\n nroots: int = 100,\n d_count: int = 5,\n slater: bool = False,\n ) -> None:\n self.Dq = Dq\n self.B = B\n self.C = C\n\n if slater:\n\n self.B, self.C = tools.racah(B, C)\n self.nroot = nroots\n energy = np.linspace(0.0, self.Dq, nroots)\n\n self.d_count = d_count\n if self.d_count in {4, 5, 6}:\n self._size = 42\n if self.d_count in {3, 7}:\n self._size = 19\n if self.d_count in {2, 8}:\n self._size = 10\n self.result = np.zeros((self._size + 1, nroots))\n\n self.df = pd.DataFrame(\n {\"Energy\": energy, \"delta_B\": energy / self.B, \"10Dq\": energy * 10.0}\n )\n self.title_TS = (\n f\"TS-diagram_d{self.d_count}_10Dq_{int(self.Dq * 10.0)}_\"\n f\"B_{int(self.B)}_C_{int(self.C)}\"\n )\n self.title_DD = (\n f\"DD-energies_d{self.d_count}_10Dq_{int(self.Dq * 10.0)}_\"\n f\"B_{int(self.B)}_C_{int(self.C)}\"\n )", "title": "" }, { "docid": "2a0693e1307d6e2bf0d397c378b7daf7", "score": "0.49608985", "text": "def __process(self):\n\n # sort this list based on the recorded time\n new_list = sorted(self._time_and_values, key=lambda x: x[0])\n\n # create the sample path\n for item in new_list:\n self._samplePath.record_increment(item[0], item[1])\n\n # proceed\n self._ifProcessed = True", "title": "" }, { "docid": "cbff0d1867440be963f01f80a08ba4d7", "score": "0.49577743", "text": "def __init__(self, settings = {'Q':10 , 'R':10 , 'P0':10, 'rate':10}):\n #\n #\n self.dt = 1. 
/ float(settings['rate'])\n #\n self.A = array([[2, 0, 0] , [0, 2, 0], [0, 0, 2]], dtype=float)\n self.B = array([[-1, 0, 0] , [0, -1, 0], [0, 0, -1]], dtype=float)\n self.H = array([[1, 0, 0] , [0, 1, 0], [0, 0, 1]], dtype=float)\n #\n # settings = {'Q':10 , 'R':10 , 'P0':10}\n #\n self.Q = array([[settings['Q'], 0, 0] , [0, settings['Q'], 0], [0, 0, settings['Q']]], dtype=float)\n self.R = array([[settings['R'], 0, 0] , [0, settings['R'], 0], [0, 0, settings['R']]], dtype=float)\n self.P0 = array([[settings['P0'], 0, 0] , [0, settings['P0'], 0], [0, 0, settings['P0']]], dtype=float)\n #\n #self.Xk = array([[0], [0], [0]], dtype=float)\n #self.Xkp = array([[0], [0] , [0]], dtype=float)\n self.Xkm1 = array([[0] , [0], [0]], dtype=float)\n #\n self.Y = array([[0], [0], [0]], dtype=float) # measure\n #\n self.U = array([[0], [0], [0]], dtype=float) # measure\n #\n #self.Pk = array([[settings['P0'], 0, 0] , [0, settings['P0'], 0], [0, 0, settings['P0']]], dtype=float)\n #self.Pkp = array([[settings['P0'], 0, 0] , [0, settings['P0'], 0], [0, 0, settings['P0']]], dtype=float)\n self.Pkm1 = array([[settings['P0'], 0, 0] , [0, settings['P0'], 0], [0, 0, settings['P0']]], dtype=float)\n #\n self.times = 0\n #", "title": "" }, { "docid": "8320f74531ffc1e0d42bb785451c1d46", "score": "0.4951755", "text": "def curveList():\n return [curvei(i) for i in range(MaxCV())]", "title": "" }, { "docid": "375331a802570b518f0f235ceb75714e", "score": "0.49516723", "text": "def test_data_extraction(self):\n x = np.linspace(0, 1, 10)\n y1 = 0.1 * x + 0.3\n y2 = 0.2 * x + 0.4\n expdata1 = self.single_sampler(x, y1, shots=1000000, series=1)\n expdata2 = self.single_sampler(x, y2, shots=1000000, series=2)\n\n analysis = CurveAnalysis(\n models=[\n ExpressionModel(\n expr=\"par0 * x + par1\",\n name=\"s1\",\n ),\n ExpressionModel(\n expr=\"par2 * x + par3\",\n name=\"s2\",\n ),\n ]\n )\n analysis.set_options(\n data_processor=DataProcessor(\"counts\", [Probability(\"1\")]),\n data_subfit_map={\n \"s1\": {\"series\": 1},\n \"s2\": {\"series\": 2},\n },\n )\n\n curve_data = analysis._run_data_processing(\n raw_data=expdata1.data() + expdata2.data(),\n models=analysis._models,\n )\n self.assertListEqual(curve_data.labels, [\"s1\", \"s2\"])\n\n # check data of series1\n sub1 = curve_data.get_subset_of(\"s1\")\n self.assertListEqual(sub1.labels, [\"s1\"])\n np.testing.assert_array_equal(sub1.x, x)\n np.testing.assert_array_almost_equal(sub1.y, y1, decimal=3)\n np.testing.assert_array_equal(sub1.data_allocation, np.full(x.size, 0))\n\n # check data of series2\n sub2 = curve_data.get_subset_of(\"s2\")\n self.assertListEqual(sub2.labels, [\"s2\"])\n np.testing.assert_array_equal(sub2.x, x)\n np.testing.assert_array_almost_equal(sub2.y, y2, decimal=3)\n np.testing.assert_array_equal(sub2.data_allocation, np.full(x.size, 1))", "title": "" }, { "docid": "e88a5ab1fd0430d77ffc49a505495f03", "score": "0.49503836", "text": "def sample_data(self):\n # dataset: all queries used in training\n samp = np.random.choice(np.arange(self.datasize), self.batch_size, replace=False)\n\n samp_group = [[[] for j in range(self.num_grps[i])]\n for i in range(self.num_q)]\n for idx in samp:\n grp_idx = self.grp_idxes[idx]\n samp_group[idx // self.num_sample_per_q][grp_idx].append(self.dataset[idx])\n\n parsed_input = []\n for i, temp in enumerate(samp_group):\n for grp in temp:\n if len(grp) != 0:\n parsed_input.append(self.get_input(grp))\n\n return parsed_input", "title": "" }, { "docid": "125e2520416608f634649113096cb287", "score": "0.4949805", 
"text": "def __init__(self):\n self.que = []\n self.rec = [float('inf')]", "title": "" }, { "docid": "277505ddc8eafaed8ab5c005d9245666", "score": "0.49479443", "text": "def process(self, signal, sampling_rate):", "title": "" }, { "docid": "bbec12f97f4132f4ccee484c6657adcb", "score": "0.494175", "text": "def run(self):\n # Setup qPCRs\n # self.jobq.dump()\n self.idler(100000)\n self.trp.e.waitpgm() # May still need to wait for TC to complete before able to do final jobs\n self.idler(100000)\n\n if self.jobq.len() > 0:\n logging.error(\"Blocked jobs remain on queue:\")\n self.jobq.dump()\n assert False\n\n tgt1 = Sample(\"Barcoded.Mixdown1\", trplayout.EPPENDORFS)\n for i in range(1, len(self.dilProds)):\n self.e.transfer(12.5 * 1.2 * 2 / 1.5, self.dilProds[i], tgt1, mix=(False, False))\n\n # worklist.comment('Starting qPCR setup')\n # for p in self.allprimers():\n # # Build list of relevant entries\n # ind=[ i for i in range(len(self.dilProds)) if p in self.primers[i]]\n # self.trp.runQPCR(src=[self.dilProds[i] for i in ind],vol=self.volume,primers=[p],nreplicates=[self.nreplicates[i] for i in ind])", "title": "" }, { "docid": "b297f614230a461853ac73ddf539626f", "score": "0.49366197", "text": "def gen_qpb_spectrum(xmmsim, tsim, area_spec):\n\n rmf_file = get_data_file_path('rmfs/%s.rmf' % (xmmsim.instrument))\n\n frmf = fits.open(rmf_file)\n ebounds = frmf['EBOUNDS'].data\n frmf.close()\n\n fwc_file = fits.open(xmmsim.ccfpath + xmmsim.fwc_file)\n\n evts_fwc = fwc_file[1].data\n exp_fwc = fwc_file[2].data\n\n okflag = np.where(evts_fwc['FLAG'] == 0)\n\n nevt_tot = len(okflag[0])\n\n if xmmsim.instrument == 'PN':\n area_tot = area_in_pn + area_out_pn\n\n sum_expo = np.sum(exp_fwc['EXPOSURE']) / 12. # PN has 12 chips\n\n else:\n area_tot = area_in_m2 + area_out_m2\n\n sum_expo = np.sum(exp_fwc['EXPOSURE']) / 7. 
# MOS has 7 chips\n\n nevt_rat = nevt_tot * tsim / sum_expo * area_spec / area_tot\n\n nevt_sim = np.random.poisson(nevt_rat)\n\n rand_evt = np.random.rand(nevt_sim) * nevt_sim\n\n sel_evt = (rand_evt.astype(int))\n\n evt_okflag = evts_fwc[okflag]\n\n evtlist_sel = evt_okflag[sel_evt]\n\n emin, emax = ebounds['E_MIN'], ebounds['E_MAX']\n\n nchan = len(emin)\n\n spec_bkg = np.empty(nchan)\n\n for i in range(nchan):\n sel_chan = np.where(np.logical_and(evtlist_sel['PI'] >= emin[i] * 1000, evtlist_sel['PI'] < emax[i] * 1000))\n\n spec_bkg[i] = len(sel_chan[0])\n\n spec_bkg = spec_bkg.astype(int)\n\n return spec_bkg", "title": "" }, { "docid": "f7b77accadc1eacdb6dea8473c57dd03", "score": "0.493654", "text": "def gen_sample(**_: Any) -> None:", "title": "" }, { "docid": "3ad146a92d92e28ddde1ca6ad6201aa3", "score": "0.4935561", "text": "def make_data_products(survey):\n if survey.emcee:\n if survey.verbose and survey.progress:\n survey.pbar.update(1)\n if survey.n == survey.iter:\n if survey.verbose:\n print(\" - %d MC steps completed\"%(int(survey.n)))\n print(\" - algorithm took %d seconds to run\"%(int(survey.ranking_time)))\n else:\n if survey.verbose:\n print(\" - algorithm took %d seconds to run\"%(int(survey.ranking_time)))\n\n if survey.save:\n if not survey.emcee:\n survey = make_directory(survey)\n else:\n if survey.n == 1:\n survey = make_directory(survey)\n if not os.path.exists('%s/%d/'%(survey.path_save,survey.n)):\n os.makedirs('%s/%d/'%(survey.path_save,survey.n))\n else:\n return\n survey = make_final_sample(survey)\n survey = make_ranking_steps(survey)\n survey = assign_priorities(survey)\n# survey = final_costs(survey)\n survey = program_overlap(survey)\n get_stats(survey)", "title": "" }, { "docid": "db403367d877d1919833d2a16be419d5", "score": "0.49251574", "text": "def put(self, query_samples, receipt_time):\r\n num_samples = len(query_samples)\r\n if num_samples == 1:\r\n item = InputItem([query_samples[0].id], [query_samples[0].index], receipt_time=receipt_time)\r\n self.in_queue.put(item)\r\n\r\n else:\r\n #idx = [q.index for q in query_samples]\r\n #query_id = [q.id for q in query_samples]\r\n\r\n num_batches = num_samples // self.batch_size\r\n remainder = num_samples % self.batch_size\r\n batch = 0\r\n bidx = 0\r\n bs = self.batch_size\r\n while batch < num_batches:\r\n j = 0\r\n ids = []\r\n indexes = []\r\n while j < bs:\r\n ids.append(query_samples[bidx].id)\r\n indexes.append(query_samples[bidx].index)\r\n bidx += 1\r\n j += 1\r\n\r\n item = InputItem(ids, indexes, receipt_time=receipt_time)\r\n self.in_queue.put( item )\r\n batch += 1\r\n\r\n ids = []\r\n indexes = []\r\n while bidx < num_samples:\r\n ids.append(query_samples[bidx].id)\r\n indexes.append(query_samples[bidx].index)\r\n bidx += 1\r\n\r\n item = InputItem(ids, indexes, receipt_time=receipt_time)\r\n self.in_queue.put( item )", "title": "" }, { "docid": "f90da6a452eece109dda3019a2feaf60", "score": "0.49239555", "text": "def create_sampletimes(self, params={}, default={'samp':'evenspacing','numpts':1}):\n self.sampletimes=dict.fromkeys(self.phases.keys())\n self.weights={fxnmode:dict.fromkeys(rate) for fxnmode,rate in self.rates.items()}\n self.sampparams={}\n for phase, times in self.phases.items():\n possible_phasetimes = list(np.arange(times[0], times[1], self.tstep))\n for fxnmode in self.rates:\n param = params.get((fxnmode,phase), default)\n self.sampparams[fxnmode, phase] = param\n if param['samp']=='likeliest':\n weights=[]\n if self.rates[fxnmode][phase] == 
max(list(self.rates[fxnmode].values())):\n phasetimes = [round(np.quantile(possible_phasetimes, 0.5)/self.tstep)*self.tstep]\n else: phasetimes = []\n else: \n pts, weights = self.select_points(param, [pt for pt, t in enumerate(possible_phasetimes)])\n phasetimes = [possible_phasetimes[pt] for pt in pts]\n self.add_phasetimes(fxnmode, phase, phasetimes, weights=weights)", "title": "" }, { "docid": "9d379936b1dc10f965ca0fc735c9398a", "score": "0.492017", "text": "def get_data(self, clear = False):\n ret_data = []\n for sample in self.data:\n ret_data.append(sample)\n \n if clear:\n self.data[:] = []\n \n return ret_data", "title": "" }, { "docid": "dc38f62b3acedaeda5d9dff4e7f0c7f5", "score": "0.49169424", "text": "def sample():\n msg = \"Starting sampling...\"\n msg = format_msg(msg, \"info\")\n print(msg)\n print(\"==========================================================\")\n global samples\n greenhaslit = False\n redhaslit = False\n lastupdated = 0\n alreadysentsensornotifications = False\n alreadysentoutputnotifications = False\n if 'AVERAGEFREQ' in SETTINGS:\n countcurrent = 0\n counttarget = SETTINGS['AVERAGECOUNT']\n dataset = {}\n while True:\n try:\n curtime = time.time()\n timesincelast = curtime - lastupdated\n sampletime = None\n if timesincelast > (SETTINGS['SAMPLEFREQ'] - 0.01):\n if (timesincelast > (SETTINGS['SAMPLEFREQ'] + 0.02)) and (samples is not 0):\n print(format_msg(\"Can't keep up - requested sample frequency is too fast!\", \"warning\"))\n lastupdated = curtime\n data = []\n # Read the sensors\n failedsensors = []\n sampletime = datetime.datetime.now()\n for sensor in PLUGINSSENSORS:\n datadict = {}\n if sensor == gpsplugininstance:\n datadict = read_gps(sensor)\n else:\n datadict = read_sensor(sensor, PLUGINSSUPPORTS[\"limits\"])\n # TODO: Ensure this is robust\n if (datadict[\"value\"] is None or\n isnan(float(datadict[\"value\"])) or\n datadict[\"value\"] == 0):\n failedsensors.append(sensor.sensorname)\n # Average the data if required\n if (('AVERAGEFREQ' in SETTINGS) and\n (sensor != gpsplugininstance)):\n identifier = datadict['sensor'] + \"-\"\n identifier += datadict['name']\n if identifier not in dataset:\n dataset[identifier] = {}\n temp = datadict.copy()\n temp.pop(\"value\", None)\n for thekey, thevalue in temp.iteritems():\n if thekey not in dataset[identifier]:\n dataset[identifier][thekey] = thevalue\n dataset[identifier]['values'] = []\n dataset[identifier]['values'].append(datadict[\"value\"])\n # Always record raw values for every sensor\n data.append(datadict)\n # Record the outcome of reading sensors\n if 'AVERAGEFREQ' in SETTINGS:\n countcurrent += 1\n if failedsensors:\n if not alreadysentsensornotifications:\n for j in PLUGINSNOTIFICATIONS:\n j.sendnotification(\"alertsensor\")\n alreadysentsensornotifications = True\n msg = \"Failed to obtain data from these sensors: \" + \", \".join(failedsensors)\n msg = format_msg(msg, 'error')\n logthis(\"error\", msg)\n if SETTINGS['PRINTERRORS']:\n print(msg)\n else:\n msg = \"Data successfully obtained from all sensors.\"\n msg = format_msg(msg, 'success')\n logthis(\"info\", msg)\n\n # Output data\n try:\n # Averaging\n if 'AVERAGEFREQ' in SETTINGS:\n if countcurrent == counttarget:\n data = average_dataset(identifier, dataset)\n dataset = {}\n if (('AVERAGEFREQ' in SETTINGS and\n countcurrent == counttarget) or\n ('AVERAGEFREQ' not in SETTINGS)):\n if 'AVERAGEFREQ' in SETTINGS:\n countcurrent = 0\n # Output the data\n outputsworking = True\n for i in PLUGINSOUTPUTS:\n LOGGER.debug(\" Dataset to 
output to \" + str(i) + \":\")\n LOGGER.debug(\" \" + str(data))\n if i.output_data(data, sampletime) == False:\n outputsworking = False\n # Record the outcome of outputting data\n if outputsworking:\n msg = \"Data output in all requested formats.\"\n msg = format_msg(msg, 'success')\n logthis(\"info\", msg)\n if (SETTINGS['GREENPIN'] and\n (SETTINGS['SUCCESSLED'] == \"all\" or\n (SETTINGS['SUCCESSLED'] == \"first\" and\n not greenhaslit))):\n led_on(SETTINGS['GREENPIN'])\n greenhaslit = True\n else:\n if not alreadysentoutputnotifications:\n for j in PLUGINSNOTIFICATIONS:\n j.sendnotification(\"alertoutput\")\n alreadysentoutputnotifications = True\n msg = \"Failed to output in all requested formats.\"\n msg = format_msg(msg, 'error')\n logthis(\"error\", msg)\n if SETTINGS['PRINTERRORS']:\n print(msg)\n if (SETTINGS['REDPIN'] and\n (SETTINGS['FAILLED'] in [\"all\", \"constant\"] or\n (SETTINGS['FAILLED'] == \"first\" and\n not redhaslit))):\n led_on(SETTINGS['REDPIN'])\n redhaslit = True\n\n except KeyboardInterrupt:\n raise\n except Exception as excep:\n msg = \"Exception during output: %s\" % excep\n msg = format_msg(msg, 'error')\n logthis(\"error\", msg)\n else:\n # Delay before turning off LED\n time.sleep(1)\n if SETTINGS['GREENPIN']:\n led_off(SETTINGS['GREENPIN'])\n if (SETTINGS['REDPIN'] and\n SETTINGS['FAILLED'] != \"constant\"):\n led_off(SETTINGS['REDPIN'])\n samples += 1\n if samples == SETTINGS['STOPAFTER']:\n msg = \"Reached requested number of samples - stopping run.\"\n msg = format_msg(msg, 'sys')\n print(msg)\n logthis(\"info\", msg)\n stop_sampling(None, None)\n try:\n time.sleep(SETTINGS['SAMPLEFREQ'] - (time.time() - curtime))\n except KeyboardInterrupt:\n raise\n except Exception:\n pass # fall back on old method...\n except KeyboardInterrupt:\n stop_sampling(None, None)", "title": "" }, { "docid": "105c1c33ba32bf782d7eb7fb253d8df2", "score": "0.49068084", "text": "def data_stream(self):\n data = np.random.random((4, self.numpoints))\n xy = data[:2, :]\n s, c = data[2:, :]\n xy -= 0.5\n xy *= 10\n while True:\n xy += 0.03 * (np.random.random((2, self.numpoints)) - 0.5)\n s += 0.05 * (np.random.random(self.numpoints) - 0.5)\n c += 0.02 * (np.random.random(self.numpoints) - 0.5)\n yield data", "title": "" }, { "docid": "f50707d2a6b2849c8c9956f04a376664", "score": "0.49017933", "text": "def print_plans(self):\n\t\tif self.sanity_checks() == True:\n\t\t\tpass\n\t\telse:\n\t\t\tprint(\"Pre-run check failed. Please clean data before continuing\")\n\t\t\texit()\n\t\t\n\t\tself.create_fresh_dir(self.temp_dir)\n\t\t#print(\"{} created\".format(self.temp_dir))\n\t\t#exit()\n\t\t\n\t\tself.cds = self.raw_cds\n\n\t\t#convert Status into Existing, Not Existing\n\t\tself.cds['Status'] = np.where(self.cds['Status'] == \"Existing\", \"Existing\", \"NotExisting\")\n\t\t\n\t\t# get dates that are relevant to current chosen interval\n\t\tself.get_date_backbone()\n\t\t\n\t\t#filter cds data to date_backbone\n\t\tself.cds = self.cds[self.cds[\"Date\"].isin(self.date_backbone)]\n\n\t\tself.supply = self.raw_cds[self.raw_cds[\"Suit\"].notnull()]\n\t\tself.demand = self.raw_cds[self.raw_cds[\"Consumption\"].notnull()]\n\t\n\t\tde_ids = self.cds.Group.unique()\n\t\t#open pdf file\n\t\tc = canvas.Canvas(self.output_file,pagesize=(841.89,595.27))\n\t\tfor i,de_id in enumerate(de_ids):\n\t\t\t#filter to current demand_id\n\t\t\tcurr_supply = self.supply.loc[self.cds['Group'] == de_id]\n\n\t\t\t#parse data frame. sum to building status. 
filter to demand_id\n\t\t\tcurr_supply = curr_supply[['Date', 'Suit','Status','Capacity']]\n\t\t\t#curr_supply = curr_supply.groupby(['Date', 'Suit','Status'])[\"Capacity\"].apply(lambda x : x.astype(int).sum())\n\n\t\t\t# call each chart and write each chart to temp folder\n\t\t\tsnd_output_file = self.temp_dir+\"snd_{}.png\".format(i)\n\t\t\tsup = snd_chart(curr_supply,snd_output_file,x_ticks=self.date_backbone)\n\t\t\tsup.print_chart()\n\t\t\t\n\t\t\tcap_req_output_file = self.temp_dir+\"cap_req_{}.png\".format(i)\n\t\t\tcap_req = cap_req_chart(self.cds,cap_req_output_file)\n\t\t\tcap_req.print_chart()\n\n\t\t\tsuit_status_output_file = self.temp_dir+\"suit_status_{}.png\".format(i)\n\t\t\tsuit_status = suit_status_chart(self.cds,suit_status_output_file)\n\t\t\tsuit_status.print_chart()\n\n\t\t\tpds_output_file = self.temp_dir+\"pds_{}.png\".format(i)\n\t\t\tpds = pds_chart(self.cds,pds_output_file)\n\t\t\tpds.print_chart()\n\n\t\t\t# assemble charts on page. create next page\n\t\t\tc.translate(inch,inch)\n\t\t\tc.setFillColorRGB(1,0,1)\n\t\t\tc.drawImage(snd_output_file, -40, 100,width=None,height=None)\n\n\t\t\tc.showPage() #ends page. everything after is new page\n\n\t\t\tc.drawImage(\"foo.png\", 0, 0,width=None,height=None)\n\t\t\t#c.drawImage(\"foo.png\", 60, 60, 50, 50)\n\n\t\t\tc.showPage()\n\t\tc.save()\n\t\t\n\n\t\t#save and close pdf file\n\t\tshutil.rmtree(self.temp_dir)", "title": "" }, { "docid": "41cc318f601e181f3265aa160c00ddb5", "score": "0.4895983", "text": "def cb_multi(data, sign='+', samplerate=0.1, rise=[5.0, 3.0, 2.0, 0.5], decay=[30.0, 9.0, 5.0, 1.0],\n matchflag=True, threshold=3.0,\n dispflag=True, lpfilter=0, template_type=1, ntau=5):\n\n clist = [(1, 0, 0, 1), (0, 1, 0, 1), (0, 0, 1, 1), (1, 1, 0, 1), (1, 0, 1, 1), (0, 1, 1, 1),\n (1, 0.5, 0, 5, 1), (0.5, 1, 0.5, 1), (0.5, 0.5, 1, 1), (0, 0, 0, 1)]\n\n nrTests = 1\n ndTests = 1\n if len(rise) > 1:\n nrTests = len(rise)\n if len(decay) > 1:\n ndTests = len(decay)\n nTests = max((nrTests, ndTests))\n if matchflag is False:\n nCand = nrTests * ndTests\n else:\n nCand = nrTests\n icand = np.array([])\n iscamp = np.array([])\n ioff = np.array([])\n peaks = np.array([])\n crit = np.array([])\n itj = np.array([])\n itk = np.array([])\n # datan will be modified during CB if subtractMode is on\n datan = data.copy()\n for k in range(0, nrTests): # use multiple template shapes\n for j in range(0, ndTests):\n if matchflag is True and j != k:\n continue\n (ic, pks, critval, isc, io, template) = ClementsBekkers(datan,\n samplerate=samplerate, rise=rise[k], decay=decay[\n j], threshold=threshold, sign=sign,\n dispFlag=dispflag, subtractMode=True,\n lpfilter=lpfilter, template_type=template_type, ntau=ntau, markercolor=clist[k])\n#\treturns :: (eventlist, pkl, crit, scale, cx)\n if ic is []:\n continue\n icand = np.append(icand, ic)\n peaks = np.append(peaks, pks)\n crit = np.append(crit, critval)\n iscamp = np.append(iscamp, isc)\n ioff = np.append(ioff, io)\n itj = np.append(itj, j * np.ones(len(ic)))\n itk = np.append(itk, k * np.ones(len(ic)))\n\n dist = 10.0 # minimum time bewteen events is set to 5 msec here.\n # pairwise comparision\n if sign is '-':\n ksign = -1\n else:\n ksign = 1\n print(np.shape(icand))\n nt = len(icand)\n if nt is 0:\n return\n # choose the best fit candidate events within dist of each other\n for ic in range(0, nt):\n # compare each candidate template with the others\n for jc in range(ic + 1, nt):\n if icand[jc] is -1 or icand[ic] is -1:\n continue\n if abs(icand[ic] - icand[jc]) < dist or 
abs(peaks[ic] - peaks[jc]) < dist:\n if ksign * crit[ic] > ksign * crit[jc]:\n icand[jc] = -1 # removes an event from the list\n else:\n icand[ic] = -1\n mcand = ma.masked_less(icand, 0)\n selmask = ma.getmask(mcand)\n icand = ma.compressed(mcand)\n crit = ma.compressed(ma.array(crit, mask=selmask))\n peaks = ma.compressed(ma.array(peaks, mask=selmask))\n iscamp = ma.compressed(ma.array(iscamp, mask=selmask))\n ioff = ma.compressed(ma.array(ioff, mask=selmask))\n itj = ma.compressed(ma.array(itj, mask=selmask))\n itk = ma.compressed(ma.array(itk, mask=selmask))\n mpl.figure(2)\n t = samplerate * np.arange(0, len(data))\n mpl.subplot(1, 1, 1)\n mpl.plot(t, data, 'k', zorder=0)\n mpl.hold(True)\n ipts = icand.astype(int).tolist()\n ippts = peaks.astype(int).tolist()\n ijp = itj.astype(int).tolist()\n cols = []\n for p in range(0, len(ippts)):\n cols.append(clist[ijp[p]]) # plots below were t[ipts], data[ipts]\n mpl.scatter(t[ipts], ioff, s=49, c=cols, marker='s', zorder=1)\n mpl.scatter(t[ippts], iscamp, s=49, c=cols, marker='o', zorder=2)\n mpl.show()\n\n return(icand, peaks, crit, iscamp, ioff)", "title": "" }, { "docid": "5e00a74713a55f002f29f510191446e1", "score": "0.4892109", "text": "def __init__(self, queue_size):\n self.qsize = queue_size\n self.pointer = 0\n self.list = []\n for i in range(0, queue_size):\n self.list.append(pt.Frame())\n self.list[i].ppn = i", "title": "" }, { "docid": "30edc009493d0f351a7e6584623aa11b", "score": "0.489079", "text": "def acquisition_poll(self, samples, arm=True,\n acquisition_time=0.010):\n data = {k: [] for k, dummy in enumerate(self._acquisition_nodes)}\n\n # Start acquisition\n if arm:\n self.acquisition_arm()\n\n # Acquire data\n gotem = [False]*len(self._acquisition_nodes)\n accumulated_time = 0\n\n while accumulated_time < self.timeout() and not all(gotem):\n dataset = self.poll(acquisition_time)\n\n # Enable the user to interrupt long (or buggy) acquisitions\n try:\n check_keyboard_interrupt()\n except KeyboardInterrupt as e:\n # Finalize acquisition before raising exception\n self.acquisition_finalize()\n raise e\n\n for n, p in enumerate(self._acquisition_nodes):\n if p in dataset:\n for v in dataset[p]:\n data[n] = np.concatenate((data[n], v['vector']))\n if len(data[n]) >= samples:\n gotem[n] = True\n accumulated_time += acquisition_time\n\n if not all(gotem):\n self.acquisition_finalize()\n for n, _c in enumerate(self._acquisition_nodes):\n if n in data:\n print(\"\\t: Channel {}: Got {} of {} samples\".format(\n n, len(data[n]), samples))\n raise TimeoutError(\"Error: Didn't get all results!\")\n\n return data", "title": "" }, { "docid": "edc34c8fecd6213159f2d5e07c4c95e8", "score": "0.48857364", "text": "def main():\n parser = argparse.ArgumentParser(\n description=\"Calculate calibration constants for foils from calibration data\"\n )\n parser.add_argument(\"-n\", \"--nsamples\", type=int, default=100000,\n help=\"Number of samples to read per channel\")\n parser.add_argument(\"-C\", \"--cooling_threshold\", type=float, default=0.01,\n help=\"Voltage below which the foil is assumed to be cooling\")\n parser.add_argument(\"-H\", \"--heating_threshold\", type=float, default=0.95,\n help=\"Voltage above which the foil is assumed to be heating\")\n parser.add_argument(\"-d\", \"--root_dir\", type=Path, default=\"/dev/acq400/data\",\n help=\"Root directory where the data is stored\")\n parser.add_argument(\"-t\", \"--terse\", action=\"store_true\",\n help=\"Omit headers from output\")\n parser.add_argument(\"-g\", \"--gainpv\", 
choices=list(GAIN_PV),\n help=\"B8:GAIN value used for all channels to calibrate.\")\n parser.add_argument(\"-m\", \"--multithread\", default=DEFAULT_MULTITHREAD, help=\"set to 1 to enable multiprocessing: cool, but unlikely to be faster on Zynq at least\")\n parser.add_argument(\"channels\", type=int, nargs=\"+\",\n help=\"Channels to calibrate\")\n options = parser.parse_args()\n\n if options.multithread:\n with ProcessPoolExecutor() as ex:\n calibrations = list(ex.map(calibrate_single_channel,\n options.channels, repeat(options)))\n else:\n calibrations = []\n for ch in options.channels:\n calibrations.append(calibrate_single_channel(ch, options))\n\n if not options.terse:\n print(\"Channel Sens Tau Ioff Qoff\")\n for channel, (sens, tau, ioff, qoff) in zip(options.channels, calibrations):\n print(f\"{channel: <10d}{sens: <14.5g}{tau: <14.5g}{ioff: <14.8g}{qoff: <14.8g}\")", "title": "" }, { "docid": "fbe1bd368a30f6478490a6eb3045bcc0", "score": "0.48820528", "text": "def read_block(self , \n num_segment = 5,\n \n segmentduration = 15.,\n \n num_recordingpoint = 8,\n num_spiketrainbyrecordingpoint = 3,\n \n trodness = 4,\n num_spike_by_spiketrain = 30,\n \n spike_amplitude = .8,\n sinus_amplitude = 1.,\n randnoise_amplitude = 0.4, \n \n ) :\n \n if num_recordingpoint%trodness != 0:\n num_recordingpoint = (num_recordingpoint/trodness) * trodness\n \n blck = Block()\n blck.name = 'example block'\n blck.datetime = datetime.datetime.now()\n self.props = rand(num_spiketrainbyrecordingpoint, trodness)# this is for spikes\n for i in range(num_segment) :\n # read a segment in the fake file\n # id_segment is just a example it is not taken in account\n seg = self.read_segment(id_segment = i,\n segmentduration = segmentduration,\n num_recordingpoint = num_recordingpoint,\n num_spiketrainbyrecordingpoint = num_spiketrainbyrecordingpoint,\n trodness = trodness,\n num_spike_by_spiketrain = num_spike_by_spiketrain, \n \n spike_amplitude = spike_amplitude,\n sinus_amplitude = sinus_amplitude,\n randnoise_amplitude = randnoise_amplitude, \n )\n seg.name = 'segment %d' % i \n seg.datetime = datetime.datetime.now()\n # Add seg to blck instance\n blck._segments.append( seg )\n \n \n # create recording point\n for i in range(num_recordingpoint):\n rp = RecordingPoint()\n rp.name = 'electrode %i' % i\n rp.group = int(i/trodness)+1\n rp.trodness = trodness\n rp.channel = i\n blck._recordingpoints.append(rp)\n \n # associate analogsignal of same recording point\n for i in range(num_recordingpoint):\n for seg in blck._segments:\n for ana in seg._analogsignals:\n if ana.channel == blck._recordingpoints[i].channel:\n blck._recordingpoints[i]._analogsignals.append( ana)\n \n # associate spiketrain of same recording point : neuron\n for i in range(num_recordingpoint/trodness):\n for j in range(num_spiketrainbyrecordingpoint):\n neu = Neuron(name = 'Neuron %d of recPoint %d' %( j , i*trodness) )\n blck._neurons.append( neu )\n for seg in blck._segments:\n for sptr in seg._spiketrains :\n if sptr.name == neu.name :\n neu._spiketrains.append( sptr)\n blck._recordingpoints[sptr.channel]._spiketrains.append( sptr )\n \n return blck", "title": "" }, { "docid": "3e00181c9c212bdcc875c4a9f3c0fbaf", "score": "0.4880095", "text": "def test_draw_samples(self):\n self.report('Test for drawing samples. 
Probabilistic test, might fail.')\n total_coverage = 0\n num_test_pts = 100\n num_samples = 5 # Draw 5 samples at each point - just for testing.\n mfgp_instances = gen_mf_gp_instances()\n for inst in mfgp_instances:\n Z_test = np.random.random((num_test_pts, inst.fidel_dim))\n X_test = np.random.random((num_test_pts, inst.domain_dim))\n F_test = inst.post_gp.draw_mf_samples(num_samples, Z_test, X_test)\n post_mean, post_std = inst.post_gp.eval_at_fidel(Z_test, X_test, uncert_form='std')\n conf_band_width = 1.96\n ucb = post_mean + conf_band_width * post_std\n lcb = post_mean - conf_band_width * post_std\n below_ucb = F_test <= ucb\n above_lcb = F_test >= lcb\n coverage = (below_ucb * above_lcb).mean()\n total_coverage += coverage\n self.report(('(n, DZ, DX) = (%d, %d, %d)::: Coverage for 0.95 credible interval: ' +\n '%0.4f')%(inst.num_data, inst.fidel_dim, inst.domain_dim, coverage),\n 'test_result')\n avg_coverage = total_coverage / len(mfgp_instances)\n avg_coverage_is_good = avg_coverage > 0.9\n self.report('Avg coverage (%0.3f) is larger than 0.9? %d'%(avg_coverage,\n avg_coverage_is_good), 'test_result')\n assert avg_coverage_is_good", "title": "" }, { "docid": "623da96a2201f23ba0ac436985e4419b", "score": "0.4877376", "text": "def __init__(self, BC1, BC2, BC3, BC4, BC5, BC6, BC7, time=None):\n self.xdata = np.array([1.872,1.692,1.492,1.333,1,.926])\n self.BCdata2 = BC2\n self.BCdata3 = BC3\n self.BCdata4 = BC4\n self.BCdata5 = BC5\n self.BCdata6 = BC6\n self.BCdata7 = BC7\n self.ydata = np.array([BC2,BC3,BC4,BC5,BC6,BC7])\n self.AlphaBrC = None\n self.massBC = None\n self.massBrC = None\n self.time = time\n self.list = None #AlphaBrC,massBC,massBrC\n\n\n\tdef func(self, x, a, MassBC, A):\n\t\treturn A*(x**(-a)) + MassBC\n\n\tdef curvefit(self):\n\t\tfitted = curve_fit(func, self.xdata, self.ydata)\n\t\tself.AlphaBrC = fitted[0][0]\n\t\tself.massBC = fitted[0][1]\n\t\tself.massBrC = fitted[0][2]\n\t\tself.list = [self.AlphaBrC,self.massBC,self.massBrC]", "title": "" }, { "docid": "b52b478cf4b9475e504f395381ac89a7", "score": "0.48736918", "text": "def generate_sequence_for_AWG(self,auto_phase=True,auto_zero=True):\r\n p = self.pl[0]\r\n freq = p.frequency()\r\n phase_tmp = 360.*freq*p.total_width()+p.phase() #this is the phase at the end of the pulse\r\n \r\n delay_list = [p.delay()]\r\n \r\n ptmp = p.copy()\r\n ptmp.delay(0)\r\n ptmp.wait(0)\r\n pulse = ptmp.generate()\r\n plist = [pulse]\r\n ids = [p.id()+'-N0']\r\n \r\n prev_wait= p.wait()\r\n if auto_phase:\r\n for i,p in enumerate( self.pl[1:]):\r\n freq = p.frequency()\r\n #adding pulse after erasing the delay and wait time\r\n ptmp = p.copy()\r\n ids.append(ptmp.id()+'-N{}'.format(i+1))\r\n ptmp.phase(phase_tmp+360*freq*p.delay()+ptmp.phase())\r\n ptmp.delay(0)\r\n ptmp.wait(0)\r\n pulse = ptmp.generate()\r\n plist.append(pulse)\r\n \r\n delay_list.append(p.delay()+prev_wait)\r\n #recalculating phase and storing wait time\r\n phase_tmp += 360.*freq*p.total_width()\r\n prev_wait = p.wait()\r\n \r\n \r\n else:\r\n for i,p in enumerate(self.pl[1:]):\r\n \r\n #adding pulse after erasing the delay and wait time\r\n ptmp = p.copy()\r\n ids(ptmp.id()+'-N{}'.format(i+1))\r\n ptmp.delay(0)\r\n ptmp.wait(0)\r\n \r\n pulse = ptmp.generate()\r\n if auto_zero:\r\n pulse[-1]=0\r\n plist.append(pulse)\r\n \r\n delay_list.append(p.delay()+prev_wait)\r\n #recalculating phase and storing wait time\r\n \r\n prev_wait = p.wait()\r\n \r\n if auto_zero:\r\n last_piece = self.np.zeros(10)\r\n else:\r\n last_piece = []\r\n \r\n plist[-1]= 
self.np.hstack((plist[-1],self.np.zeros(self.np.int(prev_wait*p.sampling())),last_piece ))\r\n \r\n return [ids,delay_list,plist]", "title": "" }, { "docid": "4fedcae432c5ca4a2e552b4e61e5c148", "score": "0.48683333", "text": "def gen_sample(self):\n raise NotImplementedError()", "title": "" }, { "docid": "432b76662036dfbf399f232aec12f3d9", "score": "0.48641828", "text": "def build(self):\n\n catdict = (self.cat_corr)['catalog']\n corrdict = (self.cat_corr)['correction']\n\n cosmo = self.cosmo() # cosmoslogy \n\n # survey redshift limits \n survey_zmin, survey_zmax = self.survey_zlimits() \n\n survey_comdis_min = cosmos.distance.comoving_distance( survey_zmin, **cosmo ) * cosmo['h']\n survey_comdis_max = cosmos.distance.comoving_distance( survey_zmax, **cosmo ) * cosmo['h']\n \n # spline interpolation function hardcoded here \n # to make it faster\n z_arr = np.arange(0.0, 1.01, 0.01)\n dm_arr = cosmos.distance.comoving_distance(z_arr, **cosmo) * cosmo['h']\n comdis2z = sp.interpolate.interp1d(dm_arr, z_arr, kind='cubic') \n \n # upweight corrected galaxy catalog \n fc_cat_corr = {\n 'catalog': (self.cat_corr)['catalog'], \n 'correction': {'name': 'upweight'}\n }\n fc_mock = UpweightCorr(fc_cat_corr, **self.kwargs) \n fc_mock_file = fc_mock.file()\n fc_mock_cols = fc_mock.datacolumns()\n\n fc_data = np.loadtxt(\n fc_mock_file, \n skiprows=1, \n unpack=True, \n usecols=range(len(fc_mock_cols))\n )\n\n for i_col, fc_col in enumerate(fc_mock_cols): \n setattr(fc_mock, fc_col, fc_data[i_col])\n\n fc_mock.upw_index = fc_mock.upw_index.astype(int)\n\n collided = np.where(fc_mock.wfc == 0)[0] # collided\n n_fcpair = len(collided) # number of fiber collision pairs \n\n # D_comov(z_upw), D_comvo(z_coll)\n comdis_upw_gal = cosmos.distance.comoving_distance(\n fc_mock.zupw[collided], **cosmo) * cosmo['h']\n comdis_coll_gal = cosmos.distance.comoving_distance(\n fc_mock.z[collided], **cosmo) * cosmo['h']\n\n dlos_actual = comdis_coll_gal - comdis_upw_gal # line-of-sight displacement\n\n within_peak = np.where(np.abs(dlos_actual) < 3.0 * corrdict['sigma'])\n\n i_peakcorr = collided[within_peak]\n d_samples = dlos_actual[within_peak]\n\n np.random.shuffle(d_samples) # shuffle them up!\n \n fc_mock.wfc[i_peakcorr] += 1.0\n for i_upw in fc_mock.upw_index[i_peakcorr]: \n fc_mock.wfc[i_upw] -= 1.0\n\n #fc_mock.wfc[i_tailcorr] += 1.0\n #fc_mock.wfc[fc_mock.upw_index[i_tailcorr]] -= 1.0\n \n #fc_mock.ra[i_peakcorr] = fc_mock.ra[fc_mock.upw_index[i_peakcorr]]\n #fc_mock.dec[i_peakcorr] = fc_mock.dec[fc_mock.upw_index[i_peakcorr]]\n \n # account for peak correction that places galaxies out of bound. 
\n #outofbounds = np.where( \n # (comdis_upw_gal[within_peak] + d_samples > survey_comdis_max) |\n # (comdis_upw_gal[within_peak] + d_samples < survey_comdis_min)\n # )\n\n #if len(outofbounds[0]) > 0: \n # d_samples[outofbounds] *= -1.0\n \n collided_z = comdis2z(comdis_upw_gal[within_peak] + d_samples)\n fc_mock.z[i_peakcorr] = collided_z\n\n data_cols = self.datacolumns()\n data_fmts = self.datacols_fmt()\n data_hdrs = self.datacols_header()\n\n data_list = [] \n for data_col in data_cols: \n \n new_col = getattr(fc_mock, data_col)\n\n data_list.append(new_col)\n\n # write to corrected data to file \n output_file = self.file()\n np.savetxt(\n output_file, \n (np.vstack(np.array(data_list))).T, \n fmt=data_fmts, \n delimiter='\\t', \n header=data_hdrs\n ) \n \n # write sampled dLOS value to match to actual dLOS peak \n np.savetxt(\n output_file+'.dlos', \n np.c_[d_samples], \n fmt=['%10.5f'], \n delimiter='\\t'\n )", "title": "" }, { "docid": "4b6ea24de26b1fd636a6faa45f8a913a", "score": "0.48628938", "text": "def process_all_timesteps(self):\n \n # Get relevant parameters from user config\n fill_value = self.config['NO_DATA_FILL']\n nneighb = self.dims['nnx'] * self.dims['nny']\n logging.info('Products: '+','.join(self.products))\n logging.info('Nx : '+','.join([str(n) for n in self.neighb_x]))\n logging.info('Ny : '+','.join([str(n) for n in self.neighb_y]))\n \n\n # All 10 min timestamps to process (gauge data)\n all_timesteps = list(self.tasks.keys())\n \n # LUT to get cartesian data at gauge\n lut_cart = get_lookup('station_to_qpegrid')\n \n # Initialize output\n data_10minagg = [] # Contains all 10 min data for all products\n data_cst = [] # for time, sta, nx,ny\n \n if 'CPC.CV' in self.products:\n data_cpccv = [] # separate list for cpccv\n data_cst_cpccv = [] # for time, sta, nx,ny\n include_cpccv = True\n self.products.remove('CPC.CV')\n colnames_cpccv = ['TIMESTAMP','STATION','NX','NY']\n colnames_cpccv.append('CPC.CV')\n \n \n # For motion vectors\n oflow_method = pysteps.motion.get_method(self.ref_config['MV_METHOD'])\n \n colnames = ['TIMESTAMP','STATION','NX','NY']\n colnames.extend(self.products)\n \n for i, tstep in enumerate(all_timesteps):\n logging.info('Processing timestep '+str(tstep))\n \n # retrieve radar data\n tstart = datetime.datetime.utcfromtimestamp(float(tstep))\n tend = tstart + datetime.timedelta(minutes = 5) \n \n tstep_end = tstep + 10 * 60 # 10 min\n \n stations_to_get = self.tasks[tstep]\n \n hour_of_year = datetime.datetime.strftime(tstart,'%Y%m%d%H')\n day_of_year = hour_of_year[0:-2]\n \n if i == 0:\n current_day = day_of_year\n current_hour = hour_of_year\n \n if day_of_year != current_day or i == len(all_timesteps) - 1:\n logging.info('Saving new table for day {:s}'.format(str(current_day)))\n data_10minagg = np.array(data_10minagg)\n data_cst = np.array(data_cst)\n # Concatenate metadata and product data\n all_data = np.hstack((data_cst, data_10minagg))\n dic = OrderedDict()\n \n for c, col in enumerate(colnames):\n data_col = all_data[:,c]\n isin_listcols = [col in c for c in constants.COL_TYPES.keys()]\n if any(isin_listcols):\n idx = np.where(isin_listcols)[0][0]\n coltype = list(constants.COL_TYPES.values())[idx]\n try:\n data_col = data_col.astype(coltype)\n except:# for int\n data_col = data_col.astype(np.float).astype(coltype)\n else:\n data_col = data_col.astype(np.float32)\n dic[col] = data_col\n \n df = pd.DataFrame(dic)\n \n if include_cpccv:\n data_cst_cpccv = np.array(data_cst_cpccv)\n data_cpccv = np.array([data_cpccv]).T\n 
all_data_cpccv = np.hstack((data_cst_cpccv, data_cpccv))\n \n dic = OrderedDict()\n for c, col in enumerate(colnames_cpccv):\n data_col = all_data_cpccv[:,c]\n isin_listcols = [col in c for c \n in constants.COL_TYPES.keys()]\n if any(isin_listcols):\n idx = np.where(isin_listcols)[0][0]\n coltype = list(constants.COL_TYPES.values())[idx]\n try:\n data_col = data_col.astype(coltype)\n except:# for int\n data_col = data_col.astype(np.float).astype(coltype)\n else:\n data_col = data_col.astype(np.float32)\n dic[col] = data_col\n \n \n dfcpc = pd.DataFrame(dic)\n df = pd.merge(df, dfcpc, \n on = ['STATION','TIMESTAMP','NX','NY'],\n how = 'left')\n \n name = self.output_folder + current_day + '.parquet'\n logging.info('Saving file ' + name)\n df.to_parquet(name, compression = 'gzip', index = False)\n \n current_day = day_of_year\n # Reset lists\n \n data_10minagg = [] # separate list for cpccv\n data_cst = [] # for time, sta, nx,ny\n if include_cpccv:\n data_cpccv = [] # separate list for cpccv\n data_cst_cpccv = [] # for time, sta, nx,ny\n \n if include_cpccv:\n if hour_of_year != current_hour:\n current_hour = hour_of_year\n \n data_at_stations = retrieve_CPCCV(tstart, stations_to_get)\n data_at_stations[np.isnan(data_at_stations)] = fill_value\n # Assign CPC.CV values to rows corresponding to nx = ny = 0\n data_cpccv.extend(data_at_stations)\n \n for sta in stations_to_get:\n data_cst_cpccv.append([tstep, sta, 0,0]) # nx = ny = 0\n \n \n # Initialize output\n N,M = len(stations_to_get) * nneighb, self.dims['np']\n data_allprod = np.zeros((N,M), dtype = np.float32) + np.nan\n \n # Get data\n baseproducts = [prod for prod in self.products if 'MV' not in prod]\n allfiles = self.retrieve_cart_files(tstart, tend, baseproducts)\n \n for j, prod in enumerate(self.products):\n logging.info('Retrieving product ' + prod)\n if 'MV' in prod:\n if '_x' in prod:\n idx_slice_mv = 0\n # Motion vector case\n ###################\n # Get product for which to compute MV\n baseprod = prod.strip('MV').split('_')[0]\n # Initialize output\n N = len(stations_to_get) * nneighb\n data_prod = np.zeros((N,), dtype = np.float32) + np.nan\n \n try:\n # For CPC we take only gif\n files = allfiles[baseprod]\n \n R = []\n for f in files:\n R.append(read_cart(f))\n R = np.array(R)\n R[R<0] = np.nan\n mv = oflow_method(R)\n \n # Mask mv where there is no rain\n mask = np.nansum(R, axis = 0) <= 0\n mv[:,mask] = 0\n except:\n # fill with missing values, we don't care about the exact dimension\n mv = np.zeros((2,1000,1000)) + fill_value \n\n elif '_y' in prod: # mv already computed\n idx_slice_mv = 1 \n \n idx_row = 0 # Keeps track of the row\n for sta in stations_to_get: # Loop on stations\n for nx in self.neighb_x:\n for ny in self.neighb_y:\n strnb = '{:d}{:d}'.format(nx,ny)\n # Get idx of Cart pixel in 2D map\n idx = lut_cart[sta][strnb]\n data_prod[idx_row] = mv[idx_slice_mv, idx[0],\n idx[1]]\n idx_row += 1\n\n else:\n # Normal product case\n ###################\n files = allfiles[prod]\n \n # Initialize output\n N,M = len(stations_to_get) * nneighb, len(files)\n data_prod = np.zeros((N,M), dtype = np.float32) + np.nan\n \n \n for k, f in enumerate(files):\n try:\n proddata = read_cart(f)\n except:\n # fill with missing values, we don't care about the exact dimension\n proddata = np.zeros((1000,1000)) + np.nan\n \n # Threshold radar precip product\n if prod == 'RZC' or prod == 'AQC':\n proddata[proddata < constants.MIN_RZC_VALID] = 0\n \n idx_row = 0 # Keeps track of the row\n for sta in stations_to_get: # Loop on 
stations\n for nx in self.neighb_x:\n for ny in self.neighb_y:\n strnb = '{:d}{:d}'.format(nx,ny)\n # Get idx of Cart pixel in 2D map\n idx = lut_cart[sta][strnb]\n data_prod[idx_row,k] = proddata[idx[0],\n idx[1]]\n \n idx_row += 1\n \n data_prod = np.nanmean(data_prod,axis = 1)\n data_prod[np.isnan(data_prod)] = fill_value\n \n data_allprod[:,j] = data_prod\n \n for prod in allfiles.keys():\n for f in allfiles[prod]:\n try:\n os.remove(f)\n except:\n pass\n \n data_10minagg.extend(data_allprod)\n \n # Add constant data\n for sta in stations_to_get:\n for nx in self.neighb_x:\n for ny in self.neighb_y:\n data_cst.append([tstep_end, sta,nx,ny])", "title": "" }, { "docid": "6e3e0e9cd3190e40e8eb970ba9a7e850", "score": "0.48620328", "text": "def generate_samples(self, iterations=1000):\n\n if not self._processed():\n self.parameters = []\n self._identify_strategy()\n self._define_balancing_parameters()\n if self.strategy == 'skip':\n return []\n\n self._move_land_formulas_to_exchange()\n self._move_activity_parameters_to_temp()\n parameters.new_activity_parameters(self.activity_params, self.group)\n parameters.add_exchanges_to_group(self.group, self.act)\n parameters.recalculate()\n pbm = PBM(self.group)\n pbm.load_parameter_data()\n pbm.calculate_stochastic(iterations, update_amounts=True)\n pbm.calculate_matrix_presamples()\n self.matrix_data = pbm.matrix_data\n parameters.remove_from_group(self.group, self.act)\n # This adds activity parameters that are actually not wanted. Remove them.\n self.act['parameters'] = []\n self.act.save()\n self.activity_params = []\n self._restore_activity_parameters()\n self._restore_exchange_formulas()\n return self.matrix_data", "title": "" }, { "docid": "e74d60ae1d1da339e968434e929fa304", "score": "0.48610824", "text": "def generateRawData(self):\n uuids = [uuid.uuid1().hex for _ in xrange(self.num_videos)]\n self.nsq_delete()\n Client.reset(uuids)\n num_low_videos = int(self.num_videos * LOWMSG_RATIO)\n num_low_messages = int(num_low_videos * (LOWMSG_RATIO) * AVG_RATIO * NONTRENDING_VALUE)\n low_range = [1, NONTRENDING_VALUE - 1]\n avg_msg = int((self.num_total_messages * (1 - LOWMSG_RATIO)) / self.num_videos)\n high_range = [avg_msg, avg_msg * 2]\n low_videos = uuids[:num_low_videos]\n high_videos = uuids[num_low_videos:]\n batch_data = self.get_batchData(low_videos, num_low_messages, low_range)\n batch_data += self.get_batchData(high_videos,\n self.num_total_messages - num_low_messages, high_range)\n shuffle(batch_data)\n batch_data = ''.join(batch_data)\n # publish data\n self.publish_batchData(batch_data)\n # log the input when asked, default action for normal run except unittests\n test = [s.strip() for s in batch_data.splitlines()]\n ids = []\n for i in test:\n if i:\n id = json.loads(i).get('uuid')\n if id:\n ids.append(id)\n c = Counter(ids)\n if self.log:\n\n print (\"Setup done, %d messages for %d vidoes queued to Videos(initial)\"\n \" queue\" % (self.num_total_messages, self.num_videos))\n test_file = os.path.join(os.getcwd(), \"test_dir\", \"UnittestSource.txt\")\n with open(test_file, 'w+') as f:\n f.write(json.dumps(c))\n f.write('\\n')\n return c", "title": "" }, { "docid": "2973764df2f5fda3ad93d4fff05af7c4", "score": "0.48582464", "text": "def NewWaveform(self):\n\n WaveformYvals = []\n\n NextWaveform = False\n line = \"blah\"\n self.WaveformFile.seek(self.LastPosition) \n while not NextWaveform and line != \"\":\n line = self.WaveformFile.readline()\n \n splits = line.split(\",\")\n \n if len(splits) == 6:\n adcval = float(splits[1])\n 
NextWaveform = int(splits[4]) # Waveform ends when last number in row==1\n if not NextWaveform:\n WaveformYvals.append(adcval)\n \n self.LastPosition = self.WaveformFile.tell()\n if len(WaveformYvals) > 200: # only take cases with sufficient number of digitizations\n return WaveformYvals\n else:\n return None", "title": "" }, { "docid": "04739d730e6e9ad002b94d1d04313218", "score": "0.48581675", "text": "def generate_data(qubits, training_data, train_labels, test_data, test_labels):\n train_excitations = []\n test_excitations = []\n labels = []\n for data in training_data:\n bit = np.random.randint(0,len(qubits))\n rng = np.average(data)*(np.pi)\n train_excitations.append(cirq.Circuit(cirq.rx(rng)(qubits[bit])))\n for data in test_data:\n bit = np.random.randint(0,len(qubits))\n rng = np.average(data)*(np.pi)\n test_excitations.append(cirq.Circuit(cirq.rx(rng)(qubits[bit])))\n\n return tfq.convert_to_tensor(train_excitations), np.array(train_labels), \\\n tfq.convert_to_tensor(test_excitations), np.array(test_labels)", "title": "" }, { "docid": "6b554e12bac5a8e1a7eae372fb3626c8", "score": "0.48576367", "text": "def read_serial_data(self):\n qdata = list(get_all_from_queue(self.data_q))\n df = self.data_format\n pm = self.plot_mode\n \n # Simple \n if df == FMT_SIMPLE:\n for d in qdata:\n self.data[0].append( ( d[0], typecast(d[1], self.flow_props[0]['unsigned']) ))\n\n else:\n # Complex 1\n if df == FMT_COMPLEX_VT:\n for d in qdata:\n #print (d)\n # cycle flows\n for i,flow in enumerate(d[1]):\n value = join_bytes(flow, self.flow_props[i]['unsigned'], self.flow_value_size)\n self.data[i].append( ( d[0], value ))\n\n # Complex 2\n elif df == FMT_COMPLEX_YX:\n for d in qdata:\n if pm == MD_VECTOR:\n self.data[0] = []\n for i,v in enumerate(d[1]):\n x = i\n y = join_bytes(v, self.flow_props[0]['unsigned'], self.flow_value_size)\n self.data[0].append( ( x, y ))\n elif pm == MD_POSITION:\n for i in range(int(len(d[1])/2)):\n x = join_bytes(d[1][i], self.flow_props[0]['unsigned'], self.flow_value_size)\n y = join_bytes(d[1][i+1], self.flow_props[0]['unsigned'], self.flow_value_size)\n self.data[i].append( ( x, y ))\n\n elif pm == MD_PLOT: \n _timestamp = join_bytes( d[1][0], 1, self.flow_value_size )\n for i,flow in enumerate(d[1][1:]):\n value = join_bytes(flow, self.flow_props[i]['unsigned'], self.flow_value_size)\n self.data[i].append( ( _timestamp, value ))", "title": "" }, { "docid": "d4f270174f5d60f61322c141f0b28b41", "score": "0.48542944", "text": "def ClementsBekkers(data, sign='+', samplerate=0.1, rise=2.0, decay=10.0, threshold=3.5,\n dispFlag=True, subtractMode=False, direction=1,\n lpfilter=2000, template_type=2, ntau=5,\n markercolor=(1, 0, 0, 1)): \n\n \n # fsamp = 1000.0/samplerate; # get sampling frequency\n # fco = 1600.0;\t\t# cutoff frequency in Hz\n # wco = fco/(fsamp/2); # wco of 1 is for half of the sample rate, so set it like this...\n # if(wco < 1) # if wco is > 1 then this is not a filter!\n # [b, a] = butter(8, wco); # fir type filter... 
seems to work best, with highest order min distortion of dv/dt...\n # data = filter(b, a, data); # filter all the traces...\n\n # generate the template\n [template, predelay] = cb_template(funcid=template_type, samplerate=samplerate,\n rise=rise, decay=decay, lpfilter=lpfilter, ntau=ntau)\n N = len(template)\n if template_type is 4: # use data\n Npost = len(template)\n else:\n Npost = int(decay * ntau / samplerate)\n isign = 1\n if sign is '-':\n isign = -1.0\n #\ttemplate = isign*template\n sumD = 0.0\n sumD2 = 0.0\n sumT = np.sum(template) # only need to compute once.\n sumT2 = np.sum(np.multiply(template, template))\n nData = len(data)\n \n # initialize arrays used in the computation\n critwave = np.zeros(nData) # waves for internal reference\n scalewave = np.zeros(nData)\n offsetwave = np.zeros(nData)\n cx = []\n scale = []\n pkl = []\n eventlist = []\n evn = [] # list of events\n isamp = []\n icoff = [] # cutoff\n crit = [] # criteria\n nevent = 0 # number of events\n minspacing = int(25.0 / samplerate) # 2.0 msec minimum dt. Events cannot \n # be closer than this direction determines whether detection is done in \n # forward or reverse time.\n if direction == 1:\n start = 0\n finish = nData - N\n else:\n start = nData - N - 1\n finish = 0\n fN = float(N)\n lasti = start\n resetFlag = False # force reset of running sum calculations\n # subtractMode determines whether we subtract the best fits from the data\n # as we go\n i = start\n for i in range(start, finish, direction):\n iEnd = N + i\n if i == start or resetFlag is True:\n #\t\t\tprint \"resetting i = %d\" % (i)\n sumD = np.sum(data[i:iEnd]) # optimization...\n sumD2 = np.sum(np.multiply(data[i:iEnd], data[i:iEnd]))\n ld = data[iEnd]\n fd = data[i]\n resetFlag = False\n else: # only add or subtract the end points\n if direction == 1:\n sumD = sumD + data[iEnd] - fd\n sumD2 = sumD2 + np.multiply(data[iEnd], data[iEnd]) - (fd * fd)\n fd = data[i]\n if direction == -1:\n sumD = sumD - ld + data[i]\n sumD2 = sumD2 - (ld * ld) + np.multiply(data[i], data[i])\n ld = data[iEnd]\n sumTxD = np.sum(np.multiply(data[i:iEnd], template))\n S = (sumTxD - (sumT * sumD / fN)) / (sumT2 - (sumT * sumT / fN))\n C = (sumD - S * sumT) / fN\n # if S*isign < 0.0: # only work with correct signed matches in scaling.\n # S = 0.0 # added, pbm 7/20/09\n # f = S*template+C\n SSE = sumD2 + (S * S * sumT2) + (fN * C * C) - 2.0 * \\\n (S * sumTxD + C * sumD - S * C * sumT)\n if SSE < 0:\n # needed to prevent round-off errors in above calculation\n CRITERIA = 0.0\n else:\n CRITERIA = S / np.sqrt(SSE / (fN - 1.0))\n critwave[i] = CRITERIA\n scalewave[i] = S\n offsetwave[i] = C\n # best fit to template has the wrong sign, so skip it\n if isign * S < 0.0:\n continue\n # get this peak position\n peak_pos = np.argmax(isign * data[i:iEnd]) + i\n addevent = False\n replaceevent = False\n # criteria must exceed threshold in the right direction\n if isign * CRITERIA > threshold:\n if len(eventlist) == 0: # always add the first event\n addevent = True\n else:\n # and events that are adequately spaced\n if abs(peak_pos - pkl[-1]) > minspacing:\n addevent = True\n else:\n # events are close, but fit is better for this point -\n # replace\n if isign * CRITERIA > isign * crit[-1]:\n replaceevent = True\n if addevent:\n eventlist.append(i)\n jEnd = iEnd\n pkl.append(peak_pos)\n crit.append(CRITERIA)\n scale.append(S)\n cx.append(C)\n\n if replaceevent:\n if subtractMode is True:\n j = eventlist[-1]\n jEnd = j + N\n data[j:jEnd] = data[j:jEnd] + \\\n (scale[-1] * template + 
cx[-1]) # add it back\n # replace last event in the list with the current event\n eventlist[-1] = i\n pkl[-1] = peak_pos\n crit[-1] = CRITERIA\n scale[-1] = S\n cx[-1] = C\n if subtractMode is True and (addevent or replaceevent):\n resetFlag = True\n # and subtract the better one\n data[i:iEnd] = data[i:iEnd] - (S * template + C)\n il = i\n i = jEnd # restart...\n lasti = i\n\n nevent = len(eventlist)\n if nevent == 0:\n print('ClementsBekkers: No Events Detected')\n else:\n print('ClementsBekkers: %d Events Detected' % (nevent))\n if dispFlag is True and nevent > 0:\n mpl.figure(1)\n t = samplerate * np.arange(0, nData)\n mpl.subplot(4, 1, 1)\n mpl.plot(t, data, 'k')\n mpl.hold(True)\n mpl.plot(t[pkl], data[pkl], marker='o',\n markerfacecolor=markercolor, linestyle='')\n mpl.plot(t[eventlist], data[eventlist], marker='s',\n markerfacecolor=markercolor, linestyle='')\n for i in range(0, len(eventlist)):\n tev = t[eventlist[i]: eventlist[i] + len(template)]\n mpl.plot(tev, cx[i] + scale[i] * template, color=markercolor)\n mpl.subplot(4, 1, 2)\n mpl.plot(t, critwave, color=markercolor)\n mpl.hold(True)\n mpl.plot([t[0], t[-1]], [threshold, threshold], 'k-')\n mpl.plot([t[0], t[-1]], [-threshold, -threshold], 'k-')\n mpl.subplot(4, 1, 3)\n mpl.plot(t, scalewave, color=markercolor, linestyle='-')\n mpl.hold(True)\n mpl.plot(t, offsetwave, color=markercolor, linestyle='--')\n tt = samplerate * np.arange(0, len(template))\n mpl.subplot(4, 2, 7)\n mpl.plot(tt, template, color=markercolor)\n mpl.draw()\n return(np.array(eventlist), np.array(pkl), np.array(crit), \n np.array(scale), np.array(cx), np.array(template))", "title": "" } ]
ce4d3a818559285dd4ad0d0459e39396
'send2all' sends a message to all users
[ { "docid": "070aca796895cb047b8bcf76b8687a59", "score": "0.7041352", "text": "def send2all(update, context):\n\n # read all users from StatBot.log\n user = []\n with open('./StatBot.log') as fid:\n for line in fid:\n ele = line.split(' - ')\n user.append(int(ele[4].replace('UserID: ', '')))\n\n # convert to numpy array\n user = np.unique(np.array(user))\n\n # merge them with the user database\n if os.path.exists('./users/users_database.db'):\n user_db = []\n with open('./users/users_database.db', 'r') as fid:\n for line in fid:\n user_db.append(int(line))\n\n user_db = np.unique(np.array(user_db))\n user = np.unique(np.concatenate((user, user_db)))\n np.savetxt('./users/users_database.db', user, fmt=\"%s\")\n else:\n np.savetxt('./users/users_database.db', user, fmt=\"%s\")\n\n # get the message to be sent\n fid = open('./admin_only/message.txt')\n msg = fid.read()\n fid.close()\n\n # send to all user\n cnt_sent = 0\n cnt_not_sent = 0\n for id in user:\n chat_id = int(id)\n # try to send the message\n try:\n context.bot.send_message(chat_id=chat_id,\n text=msg,\n parse_mode=telegram.ParseMode.MARKDOWN, disable_web_page_preview=True)\n cnt_sent += 1\n\n # if the user closed the bot, cacth exception and update cnt_not_sent\n except telegram.error.TelegramError:\n cnt_not_sent += 1\n\n # print on screen\n msg = \"*{} users* notified with the above message.\\n\".format(cnt_sent)\n msg += \"*{} users* not notified (bot is inactive).\".format(cnt_not_sent)\n\n # get admin list\n fid = open('./admin_only/admin_list.txt', 'r')\n ADMIN_LIST = [int(adm) for adm in fid.readline().split()]\n fid.close()\n\n # send to all admins stat about message sent\n for id in ADMIN_LIST:\n chat_id = int(id)\n\n # try to send the message\n try:\n context.bot.send_message(chat_id=chat_id,\n text=msg,\n parse_mode=telegram.ParseMode.MARKDOWN, disable_web_page_preview=True)\n\n # if the admin closed the bot don't care about the exception\n except telegram.error.TelegramError:\n pass", "title": "" } ]
[ { "docid": "eb5132fe50e1452e1b243d7e243512af", "score": "0.6550152", "text": "def send_to_all(self, message):\n [self.send(message, addr) for addr in self.clients]", "title": "" }, { "docid": "af6f69d694fb5a50a67dcaf705a9dc1a", "score": "0.6523553", "text": "def send_to_all(cls):\n for connection in cls.online_clients.values():\n connection.send_message()", "title": "" }, { "docid": "b76a825dccb9fd546f5ab8cbb6f6c45d", "score": "0.63770366", "text": "def change_all(bot, update):\n\n chat_id = update.message.chat_id\n\n # it needs to delete all data about user so\n # receive_user_species and receive_task funcs\n # will offer user to change species and task\n delete_reminder(chat_id)\n\n # run /start as for new user now\n start(bot, update)\n logger.info(f'{chat_id} has started conversation with change_all()')", "title": "" }, { "docid": "d05504fc0f4284725e6161dde3b2e5ef", "score": "0.63345844", "text": "def send_to_all(self, kind: str, *args, **kwargs) -> None:\n for chat_id in self.chat_ids:\n self.send(kind, chat_id, *args, **kwargs)", "title": "" }, { "docid": "6ed690ef2ac6e30fa7fb0e604b023325", "score": "0.6287413", "text": "def send_to_all_xmpp_endpoints(self, username, msg, skip=list()):\n msg = \"{}: {}\".format(username, msg)\n for xmpp_normal_jid in self.xmpp_normal_endpoints:\n if xmpp_normal_jid in skip:\n continue\n\n logging.debug(\"<-- Sending a normal chat message to XMPP.\")\n self.main_bridge.xmpp_client.send_message(\n mto=xmpp_normal_jid,\n mbody=msg,\n mtype='chat',\n mnick=username)\n\n for xmpp_muc_jid in self.xmpp_muc_endpoints:\n if xmpp_muc_jid in skip:\n continue\n\n logging.debug(\"<-- Sending a MUC chat message to XMPP.\")\n self.main_bridge.xmpp_client.send_message(\n mto=xmpp_muc_jid,\n mbody=msg,\n mtype='groupchat',\n mnick=username)", "title": "" }, { "docid": "222ef11a27bb6ffd141c615c980bbe88", "score": "0.62152725", "text": "def send_userlist(self, sock):\n # Here you should examine the list of connected peers\n # and determine how many peers is connected.\n # You will need to form the responce according to the protocol.\n # Remenber that the user requesting the list shouldn't be on the list.\n if len(self.names2info) == 1:\n sock.sendall(\"301 ONLY USER\")\n else:\n resp = \"300 INFO \" + str(len(self.names2info))\n for socket in self.socks2names:\n if socket is not sock:\n name = self.socks2names[socket]\n resp = resp + \" \" + name + \" \" + self.names2info[name][1] + \",\"\n sock.sendall(resp[:-1])", "title": "" }, { "docid": "ed3d63eefa472133bec87def7331b393", "score": "0.61738014", "text": "def send_to_connected_users(self, message):\n\n for user in self.connected_users.values():\n\n user.client_com_thread.send.messages_to_send.append(message)", "title": "" }, { "docid": "a8441362c674cf2adf60f8921f66aa52", "score": "0.61460835", "text": "def batch(self):\n\n messages = []\n yield(messages)\n self.send(messages)", "title": "" }, { "docid": "90e0261954c6a2aac0dea6cfc6006fa8", "score": "0.6065352", "text": "def reply_all (self, msg) :\n return self.reply (msg, self.reply_all_format)", "title": "" }, { "docid": "6bfe95e25202cad7734545858403d69a", "score": "0.6049732", "text": "def send_to_others(self, message, except_user):\n\n for user in self.connected_users.values():\n\n if user is not except_user:\n\n user.client_com_thread.send.messages_to_send.append(message)", "title": "" }, { "docid": "9dffee394fa8b6553e583ef4c47a51c4", "score": "0.6040164", "text": "def send_data(self, target, message):\n if (target == \"ALL\"):\n for user in self.users:\n 
self.users[user].write(message)\n else:\n valid_user = target in self.users\n if (valid_user):\n self.users[target].write(message)\n else:\n self.send_error('invalid destination')", "title": "" }, { "docid": "69491630a8e444f9b9d115d4d662aa3d", "score": "0.6039062", "text": "def send2admin(update, context):\n\n # get admin list\n fid = open('./admin_only/admin_list.txt', 'r')\n ADMIN_LIST = [int(adm) for adm in fid.readline().split()]\n fid.close()\n\n # get the message to be sent\n fid = open('./admin_only/message.txt')\n msg = fid.read()\n fid.close()\n\n # send to all admins\n for id in ADMIN_LIST:\n chat_id = int(id)\n # try to send the message\n try:\n context.bot.send_message(chat_id=chat_id,\n text=msg,\n parse_mode=telegram.ParseMode.MARKDOWN, disable_web_page_preview=True)\n\n # if the admin closed the bot don't care about the exception\n except telegram.error.TelegramError:\n pass", "title": "" }, { "docid": "46b8eea64478c7cd7ffeb2541bb728fa", "score": "0.6036264", "text": "def sendall(self, data):\n self.send(data)", "title": "" }, { "docid": "91c20908725f183660d13f479089825f", "score": "0.6024842", "text": "def sub_to_user_messages(self):\n self.handler.user_msg_type = \\\n 'userMessage({})'.format(self.handler.user.id)\n\n router_object = self.handler.ws_objects[\n router.RouterWSC]\n self.register_action_in(\n self.handler.user_msg_type,\n action=router_object.to_local,\n channels={'d'}\n )", "title": "" }, { "docid": "0a381bb814a80a6eaf2f815456429a8d", "score": "0.6010399", "text": "def send_poll_to_users(self, poll_id, users_list):\n poll = self.table.query.filter_by(id=poll_id).first()\n print(poll)\n for user in users_list:\n if user:\n print(\"Sending poll by telegram to chat_id: \" + str(user))\n res = send_poll_telegram(poll_id, poll.poll_question, poll.poll_answers, user)\n print(\"response: \" + str(res.status_code))", "title": "" }, { "docid": "03f435cfc8e7c5cb37caafc0bca716af", "score": "0.59649384", "text": "def send_msg(user_msg):", "title": "" }, { "docid": "0efccf8524dbd22928726af5029cccf6", "score": "0.59573126", "text": "def get(self):\n app_id = app_identity.get_application_id()\n users = User.query(User.last_active < datetime.datetime.utcnow() - datetime.timedelta(0),\n User.last_active > datetime.datetime.utcnow() - datetime.timedelta(1)).fetch()\n for user in users:\n print 'emailing...'\n subject = 'Yahtzee!'\n body = 'Hello {}, we haven\\'t seen you in a while!'.format(user.username)\n # This will send test emails, the arguments to send_mail are:\n # from, to, subject, body\n mail.send_mail('noreply@{}.appspotmail.com'.format(app_id),\n user.email,\n subject,\n body)", "title": "" }, { "docid": "f924596b1ac295533cdb73c337a62887", "score": "0.5944376", "text": "def send_to_all(self, message):\n for client in self.clients:\n client.sock.send(bytes(\"{:04d}\".format(len(message)), config.ENCODING))\n client.sock.send(message)", "title": "" }, { "docid": "88b27bbe466ba8599836a7cde8e9d299", "score": "0.59275544", "text": "def send_messages(self):\n global messages\n # send out\n for msg in messages:\n self.cs.sendto(json.dumps(msg).encode(encoding='ascii'), ('255.255.255.255', 5005))\n print(\"sending:\", msg)", "title": "" }, { "docid": "9b150438d1218b784f9678fad097a9fd", "score": "0.59125596", "text": "async def sendalllinks(self, ctx):\n now = dt.datetime.now(tz=gettz(\"Asia/Kolkata\"))\n v = await self.config.all_users()\n\n for user in v:\n if v[user][\"dm\"]:\n await self.bot.get_user(user).send(\"Due to bot down today, links are sent now\")\n for i in 
[9, 10, 13, 14, 15]:\n n = now.replace(hour=i, minute=25)\n\n a = await self.link(user, None, None, n)\n if a:\n await self.bot.get_user(user).send(embed=a)", "title": "" }, { "docid": "926151c7345691784a520a813fbce0d3", "score": "0.58617055", "text": "def sendMessage_0(self, messages):\n for message in messages:\n self.sendMessage(message)", "title": "" }, { "docid": "849cb65639ab50fee2570b14ed8e4125", "score": "0.5859978", "text": "def get(self):\n app_id = app_identity.get_application_id()\n users = User.query(User.email != None)\n for user in users:\n subject = 'This is a reminder!'\n body = 'Hello {}, Want to play some BlackJack?'.format(user.name)\n mail.send_mail('noreply@{}.appspotmail.com'.format(app_id),\n user.email,\n subject,\n body)", "title": "" }, { "docid": "ae66e7c11997b71eaeb43fbc9e0ef120", "score": "0.58448243", "text": "async def push_room_update():\n for user,ws in index_peers.items():\n await send_rooms(ws)", "title": "" }, { "docid": "2acdea8d5845e9814b1e3a3ceef55319", "score": "0.5844433", "text": "async def dm_user(self, ctx, user: discord.User, *, msg: str):\n await user.send(msg)\n await ctx.send(f\"Sent: \\n{msg}\\nTo: {user.mention}\")", "title": "" }, { "docid": "2cf713aa5224fd75b9109a6b7307a02a", "score": "0.58265305", "text": "async def announcer(self, ctx, *, msg):\n\n server_ids = map(lambda s: s.id, self.bot.servers)\n for server_id in server_ids:\n if server_id in self.settings:\n server = self.bot.get_server(server_id)\n channel = server.get_channel(\n self.settings[server_id]['channel'])\n if channel.permissions_for(server.me).send_messages:\n await self.bot.send_message(channel, msg)", "title": "" }, { "docid": "78a8383cce930d0e11e9c1740ea280f7", "score": "0.582138", "text": "def _send_all_requests(self):\n for addr in self.m_queries:\n domain = self.m_queries[addr][2]\n msgid = self._create_message_id()\n self.m_queries[addr][3] = msgid\n packet = self._create_netlogon_query(domain, msgid)\n self.m_socket.sendto(packet, 0, addr)", "title": "" }, { "docid": "7a37ca600aeb0477e9281002d582abf3", "score": "0.57909715", "text": "def send_to_all(self, data):\n\n for socket in self.server.connections():\n\n send_to_socket(socket, data)\n\n return", "title": "" }, { "docid": "78f3c1987d1c59e3a885a3c7b0517904", "score": "0.5790029", "text": "def send_notifications(message, all_user=False, specific_user=None):\n channel_layer = get_channel_layer()\n group = specific_user\n if all_user and not (specific_user):\n group = \"alluser\"\n\n async_to_sync(channel_layer.group_send)(\n group, {\"type\": \"to.user\", \"event\": f\"{message}\"},\n )", "title": "" }, { "docid": "e9ad4d0858f324aed7295bd25308d29b", "score": "0.578213", "text": "def send_to_user(self, userid, message):\r\n\r\n data = {'payload': message}\r\n users = self.users.get(userid, [])\r\n return self._send_to_users(users, data)", "title": "" }, { "docid": "7cd9d8461b6a899457ca34b5c100e117", "score": "0.5728745", "text": "def notify_all() -> None:\n # Email\n app.logger.debug('Notifying users by email...')\n try:\n users_email = User.query.filter_by(notify_email=True)\n except:\n app.logger.exception(\"There was an error querying users to be notified via email.\")\n users_email = None\n\n if users_email:\n app.logger.debug('Trying to notify %d users via email.', users_email.count())\n with smtplib.SMTP('smtp.gmail.com', 587) as server:\n try:\n server.starttls() # Starts the connection\n server.login(app.config['SMTP_EMAIL'], app.config['SMTP_PASSWORD'])\n \n msg = 'Subject: {}\\n\\n{}'.format(\n 
\"The laundry is ready!\",\n \"The time was: {}.\\n\\n\\n---\\nThis service is kindly provided by your friendly neighbourhood programmer.\".format(datetime.now()))\n for user in users_email:\n try:\n server.sendmail(app.config['SMTP_EMAIL'], user.email, msg)\n user.notify_email = False # Only notify once \n except smtplib.SMTPRecipientsRefused as e:\n app.logger.excpetion(\"Recipient refused. Is the Email of user %s (%s) correct? %s\", user.username, user.name, user.email)\n db.session.commit()\n except smtplib.SMTPHeloError as e:\n app.logger.exception(\"Couldn't start a connection with the SMTP server!\")\n except smtplib.SMTPAuthenticationError as e:\n app.logger.exception(\"SMTP credentials are wrong! %s:%s\", app.config['SMTP_EMAIL'], app.config['SMTP_PASSWORD'])\n except Exception as e:\n app.logger.exception(\"There was an unexpected exception while sending notification emails!\")\n else:\n app.logger.debug('No user wanted to be notified via email.')\n\n # Telegram\n try:\n users_telegram = User.query.filter_by(notify_telegram=True)\n except Exception as e:\n app.logger.exception(\"There was an error querying users to be notified via telegram.\")\n users_telegram = None\n\n if users_telegram:\n app.logger.debug(\"Trying to notify %d users via telegram...\", users_telegram.count())\n for user in users_telegram:\n try:\n tb.updater.bot.send_message(chat_id=user.telegram_chat_id, text=\"The laundry is ready!\")\n user.notify_telegram = False # Only notify once\n except:\n app.logger.exception(\"There was an error sending telegram notifications to %s (%s)!\", user.username, user.name)\n try:\n db.session.commit()\n except:\n app.logger.exception(\"There was an error writing to the database.\")\n else:\n app.logger.debug(\"No user wanted to be notified via telegram.\")", "title": "" }, { "docid": "b16e4990088a1266506fd75c420edeb4", "score": "0.5704858", "text": "def send_accounts(self):\n\n msg = \"account list\\r\\n\"\n msg = msg.encode()\n self.sock.send(msg)", "title": "" }, { "docid": "d217898229fec6f65efab078a70eae17", "score": "0.57017046", "text": "async def send(self, ctx, *, msg):\n\n if len(self.news) <= 0:\n await self.bot.say(\"You can't send a newsletter if no one is registered.\")\n return\n \n for id in self.news:\n if self.news[id]['send']: \n user = self.bot.get_user_info(id)\n message = \"**{} Newsletter!\\n\\n**\".format(self.bot.user.name)\n message += msg\n message += \"\\n\\n*You can always disable newsletter by saying `{}newsletter toggle!`*\".format(ctx.prefix)\n users = discord.utils.get(self.bot.get_all_members(),\n id=id)\n try:\n await self.bot.send_message(users, message)\n except:\n await self.bot.say(\"The message didn't go thru you `Fox News has edited this word out due to censorship, we apologize` owner! 
:angry:\")\n else:\n pass\n else:\n await self.bot.say(\"Newsletter has all been sent out to everyone who wanted it!\")", "title": "" }, { "docid": "660d5a08c1f4f4066db61f4f901f3cfe", "score": "0.5674784", "text": "def test_send_message_ratelimited(self):\n # Try to send 53 messages.\n for i in range(53):\n self.client.post(\n reverse(\"messages.new\", locale=\"en-US\"),\n {\"to\": self.user2.username, \"message\": \"hi there %s\" % i},\n )\n\n # Verify only 50 are sent.\n self.assertEqual(50, OutboxMessage.objects.filter(sender=self.user1).count())", "title": "" }, { "docid": "7178c245622f77111495bc3e4db3052f", "score": "0.5663591", "text": "def send_messages(connection, messages):\n for message in messages:\n connection.send(message)", "title": "" }, { "docid": "464f78f4a9f43c44fd4dbee1cf17414b", "score": "0.5650193", "text": "def main():\r\n\r\n recipients = get_all_recipients()\r\n send_emails(recipients)\r\n\r\n return", "title": "" }, { "docid": "a6cd9a70c96001ac29e2e6d8f845dbf1", "score": "0.565015", "text": "def sendall(self, msg):\n byte_msg = stc.pack('>I', len(msg)) + msg\n super().sendall(byte_msg)", "title": "" }, { "docid": "28798d13512c4de6ae03c60cc763c6f5", "score": "0.5648483", "text": "def send_email(self, request, queryset):\n site = get_current_site(request)\n for obj in queryset.filter(is_sent=False):\n obj.send(site)", "title": "" }, { "docid": "230a83f5d676b94306606c843547163a", "score": "0.5616028", "text": "async def id(ctx, *args):\n if len(args) == 0:\n await bot.say('No such user(s).')\n for username in args:\n for user in ctx.message.server.members:\n if user.name == username:\n await bot.send_message(content=f'{user.name} : {user.id}', destination=ctx.message.author)", "title": "" }, { "docid": "38582648f5a341e4e7ffa7ead53fc141", "score": "0.5612278", "text": "def send_message(self):\n if self.ismessage:\n for person in self.connected_people:\n person.receive_message(self.id, self.message)\n pass\n self.ismessage = False", "title": "" }, { "docid": "735988857ead99e6f4ef1e51b133b6c5", "score": "0.5603951", "text": "def send(users, disruptions):\n # Get slack client\n slack_client = SlackClient(settings.SLACK_TOKEN)\n # Alert the users\n for user in users:\n # Get only the disruptions for the user's route\n route_disruptions = ptv.filter_disruptions(disruptions, user.route_id)\n if route_disruptions:\n # Get the disruptions into one string (saves multiple api calls)\n descriptions = ptv.get_descriptions(disruptions)\n # Send the alert\n message = f'{user.route_name} disruptions: {descriptions}'\n slack_client.api_call(\n 'chat.postMessage', channel=user.slack_name, text=message)", "title": "" }, { "docid": "c54487314fa41c38f6e053dc6665bde9", "score": "0.5587383", "text": "async def users(self, ctx):\r\n message = await Message.EmbedText(title=\"Menghitung user...\", color=0XFF8C00).send(ctx)\r\n # Let's try to do this more efficiently\r\n users = [x for x in self.bot.get_all_members() if not x.bot]\r\n users_online = [x for x in users if x.status != discord.Status.offline]\r\n unique_users = set([x.id for x in users])\r\n bots = [x for x in self.bot.get_all_members() if x.bot]\r\n bots_online = [x for x in bots if x.status != discord.Status.offline]\r\n unique_bots = set([x.id for x in bots])\r\n await Message.Embed(\r\n title=\"Member Stats\",\r\n description=\"*Informasi user saat ini*\",\r\n fields=[\r\n { \"name\" : \"Total Server\", \"value\" : \"└─ {:,}\".format(len(self.bot.guilds)), \"inline\" : False },\r\n { \"name\" : \"Total User\", \"value\" : \"└─ 
{:,}/{:,} online ({:,g}%) - {:,} unique ({:,g}%)\".format(\r\n len(users_online),\r\n len(users),\r\n round((len(users_online)/len(users))*100, 2),\r\n len(unique_users),\r\n round((len(unique_users)/len(users))*100, 2)\r\n ),\"inline\" : False},\r\n { \"name\" : \"Total Bot\", \"value\" : \"└─ {:,}/{:,} online ({:,g}%) - {:,} unique ({:,g}%)\".format(\r\n len(bots_online),\r\n len(bots),\r\n round((len(bots_online)/len(bots))*100, 2),\r\n len(unique_bots),\r\n round(len(unique_bots)/len(bots)*100, 2)\r\n ), \"inline\" : False},\r\n { \"name\" : \"Total Semua\", \"value\" : \"└─ {:,}/{:,} online ({:,g}%)\".format(\r\n len(users_online)+len(bots_online),\r\n len(users)+len(bots),\r\n round(((len(users_online)+len(bots_online))/(len(users)+len(bots)))*100, 2)\r\n ), \"inline\" : False}\r\n ],\r\n color=0XFF8C00).edit(ctx, message)", "title": "" }, { "docid": "4992c2d475168a0629b0c83c2424cf7e", "score": "0.55762106", "text": "def do_list_messages(self, args):\n self.user_manager.fetch_messages()\n messages = self.user_manager.get_messages()\n for message in messages:\n self.__print_message(message)", "title": "" }, { "docid": "e5e2103ba419c3a894273e38918af595", "score": "0.5573281", "text": "def send_message_to_all(self, text: str, *args, **kwargs) -> None:\n log_msg = \"\\\"%s\\\"\" % text\n self.send_to_all('message', text, *args, log_msg=log_msg, **kwargs)", "title": "" }, { "docid": "bd423e49b294b1517ad8c93f2b9190bb", "score": "0.5572392", "text": "async def allmessages(self, ctx):\r\n messages = 0\r\n for guild in self.bot.guilds:\r\n temp = 0 if self.settings.getServerStat(guild, \"TotalMessages\") is None else self.settings.getServerStat(guild, \"TotalMessages\")\r\n messages += int(temp)\r\n messages -= 1\r\n if messages == 1:\r\n msg = 'Sejauh ini aku sudah membaca *{:,} pesan di semua server!*'.format(messages)\r\n em = discord.Embed(color = 0XFF8C00, description = msg)\r\n em.set_footer(text = \"{}\".format(ctx.author), icon_url = \"{}\".format(ctx.author.avatar_url))\r\n await ctx.channel.send(embed = em)\r\n else:\r\n msg = 'Sejauh ini aku sudah membaca *{:,} pesan di semua server!*'.format(messages)\r\n em = discord.Embed(color = 0XFF8C00, description = msg)\r\n em.set_footer(text = \"{}\".format(ctx.author), icon_url = \"{}\".format(ctx.author.avatar_url))\r\n await ctx.channel.send(embed = em)\r\n # Set our message count locally -1\r\n messages = int(self.settings.getServerStat(ctx.message.guild, \"TotalMessages\"))\r\n messages -= 1\r\n self.settings.setServerStat(ctx.message.guild, \"TotalMessages\", messages)", "title": "" }, { "docid": "dae725ba052d0a742aa0f5f785531ca1", "score": "0.5537876", "text": "def send_all():\n print(\"sending a 'Thank You' to all donors!\")\n for donor in donors:\n amount = sum(donors.get(donor))\n file_write(donor, amount)\n #print(compose_email(donor, amount))", "title": "" }, { "docid": "a200114e3098089d176c42e11bdda446", "score": "0.55273217", "text": "def on_fire(self, owner):\n BotApi.say_to_all(self.msg, owner)", "title": "" }, { "docid": "0a28b18a3c3518e167f33f870f41bd06", "score": "0.5509451", "text": "def emit_notices():\n send_all()", "title": "" }, { "docid": "d4242df7236e89bce867d3b695bc7f33", "score": "0.54984146", "text": "def verify_send_to_all(self, message):\n [self.verify_send(message, addr) for addr in self.clients]", "title": "" }, { "docid": "23c395d31cae322a80ad800429e05d96", "score": "0.54954344", "text": "async def message_user(user: object, message: str):\n await user.send(message)", "title": "" }, { "docid": 
"7c6720fbaf3eb0d5b5d43a08938d4940", "score": "0.54938626", "text": "async def _send_all(self):\n log.debug('working on %d jobs...', len(self._queue))\n for job in self._queue:\n try:\n await self.session.post(job['webhook_url'], json=job['payload'])\n await asyncio.sleep(self.config['discord'].get('delay', 0.25))\n except aiohttp.ClientError:\n log.exception('failed to bridge content')\n self._queue.clear()\n self._incoming.clear()", "title": "" }, { "docid": "e28042a76c5c743bb9f5a4c6844e751d", "score": "0.5492296", "text": "async def _await(self, ctx, *users: discord.Member):\n author = ctx.author\n if not users:\n await ctx.send(\"At least one user is required as an argument :no_entry:\")\n return\n userlist = [x for x in users if x.status == discord.Status.offline and x != author]\n if userlist == [] and len(users) > 1:\n await ctx.send(\"All those users are already online :no_entry:\")\n return\n if userlist == [] and len(users) == 1:\n await ctx.send(\"That user is already online :no_entry:\")\n return\n if str(author.id) not in self._status:\n self._status[str(author.id)] = {}\n if \"users\" not in self._status[str(author.id)]:\n self._status[str(author.id)][\"users\"] = {}\n for user in userlist:\n if str(user.id) not in self._status[str(author.id)][\"users\"]:\n self._status[str(author.id)][\"users\"][str(user.id)] = {}\n dataIO.save_json(self._status_file, self._status)\n await ctx.send(\"You will be notified when {} comes online.\".format(\", \".join([\"`\" + str(x) + \"`\" for x in userlist])))", "title": "" }, { "docid": "832ba7bf117f7542b80a3d828101c1cc", "score": "0.54779387", "text": "def listUsers( self, sock, words ):\r\n \r\n users = self.room.getAcl().listUsers()\r\n\r\n users.sort()\r\n\r\n msg = \"Users: \" + string.join( users, \", \" )\r\n self.sendMessage( sock, msg )", "title": "" }, { "docid": "e8d064987f2fea92f4149444d90781a7", "score": "0.5477402", "text": "def update_users(arg):\r\n if isinstance(arg, socket.socket):\r\n # arg is a socket, send join request first\r\n send_msg(arg, join_msg)\r\n return update_users(get_resp_list(arg))\r\n\r\n # arg is a list of response from join request\r\n resp_list = arg\r\n if resp_list[0] != 'M': # Not a response from join request\r\n return\r\n global MSID\r\n i = int(resp_list[1])\r\n if MSID == i: # List unchanged\r\n return\r\n\r\n MSID = i\r\n resp_list = resp_list[2:-2]\r\n user_hids = []\r\n for i in range(0, len(resp_list), 3):\r\n a_usr = resp_list[i:i + 3] # username, address, port no.\r\n hash_id = sdbm_hash(''.join(a_usr))\r\n user_hids.append(hash_id)\r\n if hash_id not in users:\r\n users[hash_id] = {\r\n 'name': a_usr[0], 'ip': a_usr[1],\r\n 'port': int(a_usr[2]), 'sock': None, 'msgid': 0}\r\n\r\n buf = ['\\nusername userIP userPort']\r\n for hid, usr in users.items():\r\n if hid in user_hids:\r\n buf.append('\\n%-16s%-16s%-16d' % (usr['name'], usr['ip'], usr['port']))\r\n else: # The user has leave the chatroom\r\n del users[hid]\r\n\r\n CmdWin.insert(1.0, ''.join(buf))", "title": "" }, { "docid": "d2985fb28153527b4f4c7bde3cbfb0e9", "score": "0.54630196", "text": "def update_all_clients(self, data):\r\n for sck in self.player_sockets:\r\n sck.sendall(data)", "title": "" }, { "docid": "7c542297cbc6f1a78eed07cee1ea0e08", "score": "0.5455905", "text": "def get(self):\n\n app_id = app_identity.get_application_id()\n users = User.query(User.email != None)\n\n for user in users:\n games = (\n Game.query(ancestor=user.key)\n .filter(Game.game_over == False)\n .filter(Game.game_cancelled == False)\n .fetch())\n if 
games:\n email_from = 'noreply@{}.appspotmail.com'.format(app_id)\n email_to = user.email\n email_subject = 'Have you given up on your game of Hangman?'\n email_body = ('Hello {}, it\\'s been awhile since you played'\n ' your game of hangman. why don\\'t you come back'\n ' and play for awhile').format(user.user_name)\n mail.send_mail(email_from, email_to, email_subject, email_body)", "title": "" }, { "docid": "8196df3b04926b674f99d72a9e633c1d", "score": "0.5441555", "text": "def send(self):\n if len(self.draft) <= 0:\n raise Exception(\"Error! No messages have been created\")\n for number, body in self.draft.items():\n Messenger.message(FROM, number, body)", "title": "" }, { "docid": "d9dddd2c2c96dfafa29fed3241522287", "score": "0.5427376", "text": "def send_to_all(self, message, exclude_client_id=None):\n date = datetime.datetime.now()\n res = {\"sent_on\": date, \"message\": message}\n # for each client in channel\n for client_id, client_sock in self.clients_connected.items():\n # if client is not excluded client\n if client_id != exclude_client_id:\n # send message\n self.send(client_sock, res)", "title": "" }, { "docid": "657b2b92024111d16d148833a9505095", "score": "0.54246867", "text": "def _send(self, log):\n for o in self._outlets:\n o(log)", "title": "" }, { "docid": "77faa44ba59802ee09edf9a6648d948e", "score": "0.5421265", "text": "def b_user(sv_socks, cs_socks, sen_name, msg):\n for socks in read_list:\n if socks != serv_socks or socks == cs_socks:\n try:\n socks.send(sen_name + \" >> \" + str(msg))\n except :\n socks.close()\n\n if socks in read_list:\n read_list.remove(socks)", "title": "" }, { "docid": "5c237901afd66842cae45c65373a5b5f", "score": "0.5417391", "text": "def process_send_onboarding_emails(payload):\n logger.info(f\"Sending out onboarding emails\")\n\n with session_scope() as session:\n # first onboarding email\n users = session.query(User).filter(User.is_visible).filter(User.onboarding_emails_sent == 0).all()\n\n for user in users:\n send_onboarding_email(user, email_number=1)\n user.onboarding_emails_sent = 1\n user.last_onboarding_email_sent = now()\n session.commit()\n\n # second onboarding email\n # sent after a week if the user has no profile or their \"about me\" section is less than 20 characters long\n users = (\n session.query(User)\n .filter(User.is_visible)\n .filter(User.onboarding_emails_sent == 1)\n .filter(now() - User.last_onboarding_email_sent > timedelta(days=7))\n .filter(User.has_completed_profile == False)\n .all()\n )\n\n for user in users:\n send_onboarding_email(user, email_number=2)\n user.onboarding_emails_sent = 2\n user.last_onboarding_email_sent = now()\n session.commit()", "title": "" }, { "docid": "fab1b9cbd323719fd79274a74220330f", "score": "0.5406943", "text": "def flush(self, irc, msg, args):\r\n world.flush()\r\n irc.replySuccess()", "title": "" }, { "docid": "43f4e7194268f82a523153c49fa3954d", "score": "0.540143", "text": "def msgSender(conn):\n msglist = ['show run', 'show ip int br', 'show clock', 'show int trunk', 'exit']\n for msg in msglist:\n \n conn.send([msg])\n time.sleep(5)\n conn.close()", "title": "" }, { "docid": "b4d102d36417e16f5f76aa38015b79b3", "score": "0.539705", "text": "def post(self, *args, **kwargs):\r\n\r\n messenger = kwargs['messenger']\r\n user = kwargs['user']\r\n\r\n count = messenger.send_to_user(user, self.request.body)\r\n self.response({\"count\": count})\r\n\r\n logger.debug(\"Message has been sent to %d users.\" % count)", "title": "" }, { "docid": "7be081a305bbb257980ab46a87663902", "score": 
"0.5395326", "text": "async def check_blacklist(self, ctx):\r\n message = \"\"\r\n for user_id in self.blacklist:\r\n message += f\"{user_id}: \"\r\n try:\r\n blacklisted_user = self.get_user(user_id) or await self.fetch_user(user_id)\r\n message += blacklisted_user.name + '#' + blacklisted_user.discriminator\r\n except discord.NotFound:\r\n message += \"User Not Found\"\r\n except discord.HTTPException:\r\n message += \"HTTP Error\"\r\n finally:\r\n message += '\\n'\r\n await smart_send(ctx, message)", "title": "" }, { "docid": "87113354906a3a99a6ed64d89166e2f7", "score": "0.53871554", "text": "def main():\n\n bobson_id = 106829057542664179550\n\n dir_srv = get_service('admin', 'directory_v1', subject)\n\n results = dir_srv.users().list(customer='my_customer', maxResults=500,\n orderBy='email').execute()\n users = results.get('users', [])\n\n for user in users:\n\n if int(user['id']) == bobson_id:\n # print(user)\n print(f\"{user['kind']} {user['id']} {user['name']} {user.get('emails','NA')}\")\n\n print('-------')\n\n gmail_src = get_service('gmail', 'v1', '[email protected]')\n\n results = gmail_src.users().settings().forwardingAddresses().list(userId='me').execute()\n if not results:\n address = {'forwardingEmail': '[email protected]'}\n result = gmail_src.users().settings().forwardingAddresses().create(userId='me', body=address).execute()\n\n results = gmail_src.users().settings().forwardingAddresses().list(userId='me').execute()\n\n print(results)\n\n print('-------')", "title": "" }, { "docid": "56799c2e39bc6ed7823681339ced74b3", "score": "0.53822917", "text": "def send_user(name: str, data: dict[str, typing.Any]):\n if flask.request.context.event_id:\n data['response_to'] = flask.request.context.event_id\n flask.request.has_responded = True\n send_room(name, data, flask.request.sid)", "title": "" }, { "docid": "c60b5b6bedb315e71145484c65d8a445", "score": "0.53722745", "text": "def get(self):\n app_id = app_identity.get_application_id()\n users = User.query(User.email != None)\n for user in users:\n user_games = Game.query(Game.user == user.key)\n send_email = False\n for game in user_games:\n # If there is a game that is not over and is not cancelled, set\n # the user to be reminded\n if game.game_over == False and game.cancelled == False:\n send_email = True\n if send_email == True:\n subject = 'This is a reminder!'\n body = 'Hello {}, go back to your Hangman game!'.format(user.name)\n # This will send test emails, the arguments to send_mail are:\n # from, to, subject, body\n mail.send_mail('noreply@{}.appspotmail.com'.format(app_id),\n user.email,\n subject,\n body)", "title": "" }, { "docid": "f20ea5db223b51d75f624a5a8d51c386", "score": "0.53523034", "text": "def run(self):\r\n self.__data.processUser(self.user)\r\n return self.__textSend()", "title": "" }, { "docid": "a53b45c9c7c9e5af556a447367ba0549", "score": "0.534378", "text": "def notify():\n msg = Thirster.encode_all()\n for sub in SUBSCRIPTIONS:\n sub.put(msg)", "title": "" }, { "docid": "341e8854f8a60d52948d3309805abc65", "score": "0.5343221", "text": "def send_all(self, msg, list = False, servers = []):\r\n threads = []\r\n dest = self.validators\r\n if len(servers) > 0:\r\n dest = servers\r\n for d in dest:\r\n t = threading.Thread(\r\n target=self.send,\r\n args=(msg, d, list,)\r\n )\r\n t.start()\r\n #thread.start_new_thread(self.send, (msg, v, list))\r", "title": "" }, { "docid": "36b93c6fe3bc5deb049a013a9f5e3aa6", "score": "0.53407115", "text": "def send_messages(self, email_messages):\n raise 
NotImplementedError", "title": "" }, { "docid": "ad0523437fb2b37fbecb2b329f178e1f", "score": "0.53396726", "text": "async def gucast(event):\n xx = event.pattern_match.group(1)\n if not xx:\n return await edit_or_reply(xx, \"`Please Give A Message`\")\n tt = event.text\n msg = tt[8:]\n kk = await edit_or_reply(event, \"`Sending Pivate Messages Globally... 📢`\")\n er = 0\n done = 0\n async for x in bot.iter_dialogs():\n if x.is_user and not x.entity.bot:\n chat = x.id\n try:\n done += 1\n await bot.send_message(chat, msg)\n except BaseException:\n er += 1\n await kk.edit(\n f\"**✔️Successfully** Send Message To : `{done}` Users.\\n**❌Fail** Send Message To : `{er}` Users.\"\n )", "title": "" }, { "docid": "bec54a19946ef95619e9c05a4dc4556f", "score": "0.5330911", "text": "def send(self, messages=None, api_key=None, secure=None, test=None,\n **request_args):\n return super(BatchSender, self).send(message=messages, test=test,\n api_key=api_key, secure=secure,\n **request_args)", "title": "" }, { "docid": "9099df291620659f0dd468c304822543", "score": "0.532353", "text": "def flush_all(self):\n for i in list(self.nodes.keys()):\n self.simulator.get_messages_sent_by(i)", "title": "" }, { "docid": "d6fd86f442b05adea0fc95ef28591b9c", "score": "0.5322512", "text": "def change_user(sender, instance, *args, **kwargs):\n users = serializers.serialize('json', User.objects.all())\n channel_layer = get_channel_layer()\n async_to_sync(channel_layer.group_send)(\n \"users\",\n {\n \"type\":\"user.update\",\n\t\t\t\"event\":\"New User\",\n \"data\": users\n }\n )", "title": "" }, { "docid": "a17c197abb76a3661f2b14a6b823896f", "score": "0.53178287", "text": "def send_msg(self, msg, src, username):\n full_msg = 'From ' + username + ' :\\n\\t' + msg\n for user in self.user_list:\n if user[0] != src:\n user[0].send(encoding.encode_msg(full_msg))", "title": "" }, { "docid": "429cc74a29aca27646b13903284ad85d", "score": "0.53173476", "text": "def send_emails(self, messages=()):\n if len(messages) is not 0:\n if messages != self.__mail_list:\n self.__mail_list = messages\n else:\n self.__mail_list = tuple(self.__mail_list)\n server = smtp.EmailBackend(host=self.__smtp_server, port=self.__smtp_port, username=self.__email,\n password=self.__smtp_pass, use_tls=True) # Set up a secure connection.\n #print server\n server.send_messages(self.__mail_list) # Send all emails in one session.\n server.close() #Close the session\n self.__mail_list = []", "title": "" }, { "docid": "6a9e924ca5d72ade217330eeb11feb74", "score": "0.5316925", "text": "def save(self):\n email = self.cleaned_data[self.field_name]\n for user in self.get_users(email):\n context = self.get_mail_context(self.request, user)\n self.send_mail(email, context)", "title": "" }, { "docid": "8220f8b546bc43dacfa561821433544b", "score": "0.5314047", "text": "def send_batch(self, english, swahili=None, luo=None, auto='', send=False, control=False):\n\n if swahili is None:\n swahili = english\n if luo is None:\n luo = english\n text_translations = {'english': english, 'swahili': swahili, 'luo': luo}\n\n original_count = self.count()\n send_to = self.active_users()\n send_count = send_to.count()\n print(\"Sending to {} of {}\".format(send_count, original_count))\n\n counts = collections.Counter()\n for p in send_to.all():\n # Send the correct language message to all participants\n text = text_translations.get(p.language, english)\n text = text.format(**p.message_kwargs())\n\n if send is True:\n msg = p.send_message(\n text=text,\n translation_status='cust',\n 
auto='custom.{}'.format(auto) if auto != '' else 'custom',\n translated_text=english if p.language != english else '',\n control=control,\n is_system=False,\n )\n counts[msg.external_status] += 1\n else:\n print(\"({}) -- {}\".format(p, text[:40]))\n\n if send is True:\n print(\"Send Status:\\n\", \"\\n\\t\".join(\"{} -> {}\".format(key, count) for key, count in counts.most_common()))\n\n return send_count", "title": "" }, { "docid": "5c3f24c153db1c1b05c1715ea907a494", "score": "0.5312167", "text": "def send_updates(self):\n for msg in self.update_commands:\n #print(msg, self.has_returned)\n if str(msg) not in self.has_returned:\n #print(self.has_returned.index(msg[0]))\n self.update_command(msg)\n self.has_returned.append(msg)\n return", "title": "" }, { "docid": "6f0afd188cedff40580ca2dfb261e227", "score": "0.5308912", "text": "async def invites(self):\n ...", "title": "" }, { "docid": "cb3f67c1689efd241e6cd466f21c0513", "score": "0.5307318", "text": "def welcome(bot, update):\n\n message = update.message\n chat_id = message.chat.id\n for member in message.new_chat_members:\n logger.info('%s joined to chat %d (%s)'\n % (escape(member.first_name),\n chat_id,\n escape(message.chat.title)))\n\n # CALL PSYCOPG2 TO UPDATE DATABASE THAT ONE INVITE\n # HAS BEEN CONFIRMED\n\n # Use default message if there's no custom one set\n text = 'Hi $username! Please read the pinned post and feel free to introduce yourself.'\n # Replace placeholders and send message\n text = text.replace('$username',\n member.first_name)\\\n .replace('$title', message.chat.title)\n update.message.reply_text(text)\n\n #update DB that user joined\n team_name = message.chat.title\n conn = psycopg2.connect(DATABASE_URL)\n cur = conn.cursor()\n cur.execute(\"UPDATE leetcode_teams SET claimed = claimed + 1 WHERE team_name = %s\", (team_name, ))\n conn.commit()\n cur.close()\n conn.close()\n\n analytics.identify(member.id, {\n 'first_name': member.first_name,\n 'last_name': member.last_name,\n })\n\n analytics.track(member.id, 'LeetcodeClaimed', {\n 'team_name': team_name\n })\n\n #update.message.reply_text(\"I am a bit of a crazy bot, here to help. 
I am happy now that you're here.\")", "title": "" }, { "docid": "751babc37c44cdf3f5de1360536b038a", "score": "0.52990085", "text": "def send_all_requests(users_list, amount, message):\n\n success_list = []\n failure_list = []\n no_account_list = []\n remaining_list = []\n\n limit_count = 0\n for user in users_list:\n # request limit is reached\n if limit_count >= 50:\n remaining_list.append(user)\n else:\n # no account\n if user.username == \"no-account\":\n no_account_list.append(user)\n print(\"Skipping {} {}...\".format(user.first, user.last))\n else:\n venmo.payment.charge('@' + user.username, amount, message)\n # failed request\n if user.username in logging.error.names_list:\n failure_list.append(user)\n # venmo.payment prints an error message if request fails\n else:\n # successful request\n success_list.append(user)\n limit_count += 1\n # venmo.payment prints a success message if request succeeds\n return success_list, failure_list, no_account_list, remaining_list", "title": "" }, { "docid": "6ab82382b1dafc12949921e88b56210e", "score": "0.52989334", "text": "def chats(bot, update):\n telegram_bot.emit('chats', update)", "title": "" }, { "docid": "5a091f9ab43ecfdadd1b8ab25c53abdb", "score": "0.52944404", "text": "def run(self):\n for message in self.__iter_message():\n channel = message.get('channel', '')\n for client in self.clients.get(channel, set()):\n gevent.spawn(self.send, client, channel, message.get('data'))", "title": "" }, { "docid": "918fb69749715fa8c25502ad6f78d2f2", "score": "0.52901417", "text": "def steady_bots(self):\n ws = self.world_state()\n msg=json.dumps(ws)\n \n for b in self.bots:\n b.send_msg(msg+'\\n')", "title": "" }, { "docid": "303ab761b6b83e476ee1ab3634c7ceb8", "score": "0.52898157", "text": "def transfer_all():\n transfer_tags_in_v1()\n user_ids_to_guids = {}\n transfer_users(user_ids_to_guids)\n transfer_stories(user_ids_to_guids)", "title": "" }, { "docid": "76c629a8aa4d3875e8b5d66498668f2e", "score": "0.52863884", "text": "def broadcast_all(self, message):\n target_class = self.__class__\n targets = [i for i in self.participants if isinstance(i, target_class)]\n self.broadcast(targets, message)", "title": "" }, { "docid": "7531864947d0fc18f07bcadd1112de9a", "score": "0.52842724", "text": "def main():\r\n credentials = get_credentials()\r\n http = credentials.authorize(httplib2.Http())\r\n service = discovery.build('gmail', 'v1', http=http)\r\n\r\n user_id = 'me'\r\n label_ids = [\r\n 'INBOX',\r\n 'CATEGORY_PERSONAL',\r\n ]\r\n results = service.users().messages().list(userId=user_id, labelIds=label_ids).execute()\r\n\r\n for msgId in results['messages'][:5]:\r\n msg_id = msgId['id']\r\n msg = service.users().messages().get(userId=user_id, id=msg_id, format='raw').execute()\r\n msg_from, msg_sbj, msg_bdy = parseRawMsg(msg)\r\n print('from:', msg_from)\r\n print('subject:', msg_sbj)\r\n\r\n parseMsgInfo(msg_from, msg_sbj, msg_bdy)\r\n\r\n # Archive the message TODO: Enable when ready!!!\r\n # archiveMsg(service, user_id, msg_id)\r", "title": "" }, { "docid": "5d8506570ea0b2bcac896e5f18a7c1b9", "score": "0.5281801", "text": "def send(self):", "title": "" }, { "docid": "f4f1ff068bc50c0b27d8b91416c4005e", "score": "0.52804947", "text": "def send(self, client_id, end):\n for k in range(len(self.message_list)):\n if str(self.message_list[k].get_id_receiver()) == str(client_id) and\\\n self.message_list[k].get_status_capsule() != CapsuleStatus.YES:\n self.message_list[k].set_status_capsule(CapsuleStatus.YES)\n end.send_multipart([bytes(client_id, 'utf8'), 
bytes(json.dumps(self.message_list[k].__dict__), 'utf8')])\n self.my_logger.log_broker_send(client_id, self.message_list[k])\n # Broker.logger.debug('Sent to client {} : {}'.format(client_id,json.dumps(self.message_list[k].__dict__)))\n broker_id = str(socket.gethostbyname(socket.gethostname()))+\"-\"+str(self.id_frontend)+\"-\"+str(self.id_backend)\n c = Capsule(broker_id, CapsuleType.END)\n # c.set_type(CapsuleType.END)\n end.send_multipart([bytes(client_id, 'utf8'), bytes(json.dumps(c.__dict__), 'utf8')])", "title": "" }, { "docid": "493c951eb47fcbfd737e20ba0e9a4ce6", "score": "0.527802", "text": "def get_all_users():\n pass", "title": "" }, { "docid": "203b307b30e6fbd2b5443077f3320adb", "score": "0.5277727", "text": "def send(self, irc, msg, args, user, targets, text):\r\n # Let's get the from user.\r\n public = irc.isChannel(msg.args[0])\r\n sent = []\r\n for target in targets:\r\n id = self.db.send(user.id, target.id, public, text)\r\n s = format('note #%i sent to %s', id, target.name)\r\n sent.append(s)\r\n irc.reply(format('%L.', sent).capitalize())", "title": "" }, { "docid": "76358575450f40039be9e02700829579", "score": "0.52755415", "text": "async def send_user_to_main_menu(message: types.Message, state: FSMContext):\n\n logger.info(msg=f\"User {message.from_user.first_name}(@{message.from_user.username}) is in main menu now.\")\n user = session.query(User).get(message.from_user.id)\n if not user:\n logger.error(msg=f\"Can't get user {message.from_user.first_name}(@{message.from_user.username})\")\n await message.answer(text=\"Oops, something went wrong :(\",\n reply_markup=types.ReplyKeyboardRemove())\n await state.finish()\n else:\n stat = user.is_respondent\n if stat in [0, 1, 2]:\n await CommonUserStates.send_actions.set()\n await common_user_send_actions(message)\n elif stat == 3:\n await RespondentStates.send_actions.set()\n await respondent_send_actions(message)", "title": "" }, { "docid": "61625f6be1e4fbaaacdecfa61fefe893", "score": "0.5274316", "text": "def broadcast_message(user_ids, message, entities=None, sleep_between=0.4, parse_mode=None):\n logger.info(f\"Going to send message: '{message}' to {len(user_ids)} users\")\n\n for user_id in user_ids:\n try:\n send_message(user_id=user_id, text=message, entities=entities, parse_mode=parse_mode)\n logger.info(f\"Broadcast message was sent to {user_id}\")\n except Exception as e:\n logger.error(f\"Failed to send message to {user_id}, reason: {e}\" )\n time.sleep(max(sleep_between, 0.1))\n\n logger.info(\"Broadcast finished!\")", "title": "" }, { "docid": "34c873825cb78369d4362a29c7494e97", "score": "0.52733606", "text": "async def on_guild_join(guild):\n print(\"guild join\")\n for channel in guild.text_channels:\n if channel and channel.permissions_for(guild.me).send_messages:\n await channel.send(\"hewwo! 
I'm yeet bot pleasure to meet you!\\n please join our support server: https://discord.gg/PJwQxHR\\ny!help for the \"\n \"commands list\")\n break", "title": "" }, { "docid": "6327c8dbed19079c3ef1d8b26df241a7", "score": "0.52688307", "text": "def send(self, msg, **kwargs):\n if msg.author == self.logged_user:\n print(\"<To %s> %s\" % (msg.author, msg.content))", "title": "" }, { "docid": "732cb83c252001d3f5d07162a035599e", "score": "0.5260988", "text": "def end(self):\n servs = self.server.services\n self.sock.sendall('4'+','+str(servs[1].score)+','+\\\n str(servs[2].score)+','+'\\n')", "title": "" }, { "docid": "9ce6456a0766228309dc738f8f511d45", "score": "0.52584916", "text": "def send_from_private(bot, update):\n\n msg = update.effective_message\n\n for i in settings.secrets[\"chats\"]:\n if settings.secrets[\"chats\"][i][\"messages\"]:\n sent_message = robust_send_message(bot, msg, int(i), None)\n sent_messages[sent_message.message_id] = (msg.chat.id, msg.message_id)", "title": "" }, { "docid": "214503ff1ef4d17dbcb1d462e69be6d2", "score": "0.52573633", "text": "def send_action(self):\n route = 'me/messages'\n payload = {\n 'recipient' : {\n 'id' : self.userID\n },\n 'sender_action' : 'typing_on'\n }\n return self.http_post(route,payload)", "title": "" } ]
b10c12870f2d2ab26405055591433408
A copy activity source for SAP Cloud for Customer source.
[ { "docid": "fd8c2c3d8b508202dd8f01837b771e6b", "score": "0.55718714", "text": "def __init__(__self__, *,\n type: pulumi.Input[str],\n query: Optional[Any] = None,\n source_retry_count: Optional[Any] = None,\n source_retry_wait: Optional[Any] = None):\n pulumi.set(__self__, \"type\", 'SapCloudForCustomerSource')\n if query is not None:\n pulumi.set(__self__, \"query\", query)\n if source_retry_count is not None:\n pulumi.set(__self__, \"source_retry_count\", source_retry_count)\n if source_retry_wait is not None:\n pulumi.set(__self__, \"source_retry_wait\", source_retry_wait)", "title": "" } ]
[ { "docid": "6a291caf413f569ac7787a8509365b3c", "score": "0.56941664", "text": "def on_copy_source(self):\n source = self.data.get(\"source\", None)\n if not source:\n return\n\n project_name = self.dbcon.Session[\"AVALON_PROJECT\"]\n root = RegisteredRoots.registered_root(project_name)\n path = source.format(root=root)\n clipboard = QtWidgets.QApplication.clipboard()\n clipboard.setText(path)", "title": "" }, { "docid": "6a291caf413f569ac7787a8509365b3c", "score": "0.56941664", "text": "def on_copy_source(self):\n source = self.data.get(\"source\", None)\n if not source:\n return\n\n project_name = self.dbcon.Session[\"AVALON_PROJECT\"]\n root = RegisteredRoots.registered_root(project_name)\n path = source.format(root=root)\n clipboard = QtWidgets.QApplication.clipboard()\n clipboard.setText(path)", "title": "" }, { "docid": "52c9d280b34c38f8f0a322acbf9301cd", "score": "0.56204987", "text": "def __init__(__self__, *,\n type: pulumi.Input[str],\n additional_columns: Optional[Any] = None,\n disable_metrics_collection: Optional[Any] = None,\n http_request_timeout: Optional[Any] = None,\n max_concurrent_connections: Optional[Any] = None,\n query: Optional[Any] = None,\n query_timeout: Optional[Any] = None,\n source_retry_count: Optional[Any] = None,\n source_retry_wait: Optional[Any] = None):\n pulumi.set(__self__, \"type\", 'SapCloudForCustomerSource')\n if additional_columns is not None:\n pulumi.set(__self__, \"additional_columns\", additional_columns)\n if disable_metrics_collection is not None:\n pulumi.set(__self__, \"disable_metrics_collection\", disable_metrics_collection)\n if http_request_timeout is not None:\n pulumi.set(__self__, \"http_request_timeout\", http_request_timeout)\n if max_concurrent_connections is not None:\n pulumi.set(__self__, \"max_concurrent_connections\", max_concurrent_connections)\n if query is not None:\n pulumi.set(__self__, \"query\", query)\n if query_timeout is not None:\n pulumi.set(__self__, \"query_timeout\", query_timeout)\n if source_retry_count is not None:\n pulumi.set(__self__, \"source_retry_count\", source_retry_count)\n if source_retry_wait is not None:\n pulumi.set(__self__, \"source_retry_wait\", source_retry_wait)", "title": "" }, { "docid": "962b2f6ae09a49c3402f727761077bda", "score": "0.5587206", "text": "def copy_source(source, data_model, change_name: bool = True):\n kwargs = {}\n if change_name:\n kwargs[\"name\"] = f\"{source.get('name') or data_model['sources'][source['type']]['name']} (copy)\"\n return copy_item(source, **kwargs)", "title": "" }, { "docid": "291763f8f4c340d9c087eef8cb67e427", "score": "0.55594194", "text": "def object_copy(\n self,\n CopySource,\n ExtraArgs=None,\n Callback=None,\n SourceClient=None,\n Config=None,\n):\n return self.meta.client.copy(\n CopySource=CopySource,\n Bucket=self.bucket_name,\n Key=self.key,\n ExtraArgs=ExtraArgs,\n Callback=Callback,\n SourceClient=SourceClient,\n Config=Config,\n )", "title": "" }, { "docid": "ab7651abde2366fa2cb90e2ee2e14287", "score": "0.5385646", "text": "def __init__(__self__, *,\n export_settings: pulumi.Input['SnowflakeExportCopyCommandArgs'],\n type: pulumi.Input[str],\n disable_metrics_collection: Optional[Any] = None,\n max_concurrent_connections: Optional[Any] = None,\n query: Optional[Any] = None,\n source_retry_count: Optional[Any] = None,\n source_retry_wait: Optional[Any] = None):\n pulumi.set(__self__, \"export_settings\", export_settings)\n pulumi.set(__self__, \"type\", 'SnowflakeSource')\n if disable_metrics_collection is not None:\n 
pulumi.set(__self__, \"disable_metrics_collection\", disable_metrics_collection)\n if max_concurrent_connections is not None:\n pulumi.set(__self__, \"max_concurrent_connections\", max_concurrent_connections)\n if query is not None:\n pulumi.set(__self__, \"query\", query)\n if source_retry_count is not None:\n pulumi.set(__self__, \"source_retry_count\", source_retry_count)\n if source_retry_wait is not None:\n pulumi.set(__self__, \"source_retry_wait\", source_retry_wait)", "title": "" }, { "docid": "575c678e92594069be2066d55a59ee40", "score": "0.5368072", "text": "def copy(catalog):", "title": "" }, { "docid": "971aec66c05629b278ac6ed05c75c610", "score": "0.53304446", "text": "def test_copy_campaign_to_applications(self):\n pass", "title": "" }, { "docid": "77ce0f7b2174c270aabd6c63ee526d02", "score": "0.5289707", "text": "def __call__(self, *args):\n return _osg.CopyOp___call__(self, *args)", "title": "" }, { "docid": "094a58f9995ccbc2c104dcaa29cb9330", "score": "0.52192825", "text": "def copy_csdb(self, src):\n dst = os.path.join(self.topdir, 'SOURCES', os.path.basename(src))\n logger.info(\"Copying user supplied CellServDB '{0}' to '{1}'.\".format(src, dst))\n mkdirp(os.path.dirname(dst))\n shutil.copy(src, dst)\n self.downloaded.append(dst)\n return dst", "title": "" }, { "docid": "034bdc46363c899b9541bdf68a45f987", "score": "0.51355714", "text": "def cpr_from_cp(cp):\n cp_fields = [\"name\", \"image\", \"vm-flavor\", \"port_security_enabled\", \"type_yang\"]\n cp_copy_dict = {k: v for k, v in cp.as_dict().items() if k in cp_fields}\n cpr_dict = {}\n cpr_dict.update(cp_copy_dict)\n return VnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_ConnectionPoint.from_dict(cpr_dict)", "title": "" }, { "docid": "2bd4658fec807713ab3df4494be96073", "score": "0.512589", "text": "def test_copy_transaction(self):\n with CatalogTransaction():\n token = self.root.manage_copyObjects(['universe'])\n self.root.manage_pasteObjects(token)\n\n self.assertItemsEqual(\n self.search(fulltext='dark energy'),\n [('/root/universe', 'public'),\n ('/root/copy_of_universe', 'public')])", "title": "" }, { "docid": "41c2886c7f995f91461e90ca5f07f36e", "score": "0.50669616", "text": "def set_copy_complex_contract_source_account(self, account):\n self.switch_to_detail_frame()\n try:\n if account.strip() == \"\" or account is None:\n select = Select(\n self.wait().until(EC.presence_of_element_located(self.source_account_dropdown_locator)))\n select.select_by_index(2)\n else:\n self.select_option(self.source_account_dropdown_locator, account)\n except:\n raise\n finally:\n self.switch_to_default_content()", "title": "" }, { "docid": "671ceb46e7707a47f5d198ffbcc05a73", "score": "0.50053513", "text": "def copy():\n pass", "title": "" }, { "docid": "c34c6cd6dc3beae150558fb16d349275", "score": "0.49974194", "text": "def test_get_customer_activity_report(self):\n pass", "title": "" }, { "docid": "36ed050d11c1a2acedf121313a30dda6", "score": "0.49678954", "text": "def set_copy_complex_contract_source_contract(self, contract):\n self.switch_to_detail_frame()\n try:\n if contract.strip() == \"\" or contract is None:\n select = Select(self.wait().until(EC.presence_of_element_located(self.source_contract_dropdown_locator)))\n select.select_by_index(2)\n else:\n self.select_dropdown_item_from_select_tag(self.source_contract_dropdown_locator, contract)\n except:\n raise\n finally:\n self.switch_to_default_content()", "title": "" }, { "docid": "09d656dec607595c264f56cd4b1a5c17", "score": "0.4965662", "text": "def copy(self):\n \n 
pass", "title": "" }, { "docid": "21ef605ba70a51255eebfdf89e6b1aab", "score": "0.4941272", "text": "def copy(self, **kwargs):\n pass", "title": "" }, { "docid": "0d0f25a9554af12b2a078a7d348b7bd0", "score": "0.49325377", "text": "def slot_copy_row(self, action):\n _, item, _ = action.data()\n self.send_data_to_clipboard(str(item))", "title": "" }, { "docid": "1c7f5b1a043c0da3d6b28f602127ca29", "score": "0.49323624", "text": "def smart_copy():\r\n app = wingapi.gApplication\r\n editor = app.GetActiveEditor()\r\n if editor is None:\r\n return\r\n selection = editor.GetSelection()\r\n if selection[1] - selection[0] > 0:\r\n app.ExecuteCommand('copy')\r\n else:\r\n doc = editor.GetDocument()\r\n lineno = doc.GetLineNumberFromPosition(selection[0])\r\n start = doc.GetLineStart(lineno)\r\n if lineno + 1 < doc.GetLineCount():\r\n end = doc.GetLineStart(lineno + 1)\r\n else:\r\n end = doc.GetLineEnd(lineno)\r\n editor.SetSelection(start, end)\r\n app.ExecuteCommand('copy')\r\n editor.SetSelection(selection[0], selection[1])", "title": "" }, { "docid": "df1344c72a3c3bafff31f4c0fe1e24b7", "score": "0.4923553", "text": "def copy_ti_process(self, source_ti_name, target_ti_name):\r\n source_odata = self.get_ti_process(source_ti_name)\r\n src_ti = TIProcess.get_from_dict(source_odata)\r\n data = src_ti.as_dict()\r\n if self.ti_exists(target_ti_name):\r\n response = self.update_ti_process(target_ti_name, data)\r\n else:\r\n response = self.create_ti_process(target_ti_name, data)\r\n return response", "title": "" }, { "docid": "b65a687570de55abe9142ffc6edc1654", "score": "0.49123198", "text": "def copyFlexor(*args, **kwargs):\n\n pass", "title": "" }, { "docid": "48cda43c9f259fb8bc9127f3435e6b7a", "score": "0.4909941", "text": "def copy(self, copy_type=None):\n if (copy_type is None) or (copy_type == \"full\"):\n return self.full_copy()\n elif copy_type == 'yang':\n return self.yang_copy()\n else:\n return self.empty_copy()", "title": "" }, { "docid": "dd35d3f776ae1e1e53d35223b0a3006f", "score": "0.4898139", "text": "def cp(self, source, target):\n self._add_two_argument_command(\"cp\", source, target)", "title": "" }, { "docid": "7eb795b835237e247e498143cd05a1c7", "score": "0.48956156", "text": "def copy_file(self, source, dest):\r\n source = self.core.expandvars(source, self.product)\r\n dest = self.core.expandvars(dest, self.product)\r\n self.core.api.os.shell.copy_file(source, dest)", "title": "" }, { "docid": "90543b0a5f9b43dfe53bdc7d5886c75d", "score": "0.48856464", "text": "def copy_smoke_test(self):\n self._smoke('copy')", "title": "" }, { "docid": "57939b842d4866854d448e43e8a731ee", "score": "0.4870783", "text": "def copy():", "title": "" }, { "docid": "f00a3c46eb21847f1492718eba8d480e", "score": "0.48701966", "text": "def copy(name):\n try:\n service = _services[name]\n pyperclip.copy(service.now())\n click.echo(f\"{service.name} was copied to your clipboard!\")\n except KeyError:\n click.echo(f\"{service.name} does not exist\")", "title": "" }, { "docid": "3bd25fa084bc5728e03f5a773389a33a", "score": "0.48589304", "text": "def get_copy_statement(table_name: str,\n data_source: str,\n iam_role: str = trim_value(CONFIG['IAM_ROLE']['ARN']),\n json_path: Optional[str] = None,\n region: str = 'us-west-2'):\n json_path = json_path if json_path is not None else 'auto'\n return (f\"COPY {table_name:s}\\n\"\n f\"FROM '{data_source:s}'\\n\"\n f\"IAM_ROLE '{iam_role:s}'\\n\"\n f\"REGION '{region:s}'\\n\"\n f\"FORMAT JSON AS '{json_path:s}'\\n\"\n f\"EMPTYASNULL\\n\"\n f\"BLANKSASNULL;\")", "title": "" }, { 
"docid": "4c7588213837b111673b09e7cde80eb7", "score": "0.48551258", "text": "def create_aws_source(self, koku_host, koku_port):\n source_url = f\"http://{koku_host}:{koku_port}/api/cost-management/v1/sources/\"\n json_info = {\n \"name\": self.source_name,\n \"source_type\": \"AWS-local\",\n \"authentication\": {\"credentials\": {\"role_arn\": \"arn:aws:iam::111111111111:role/LocalAWSSource\"}},\n \"billing_source\": {\"data_source\": {\"bucket\": \"/tmp/local_bucket_1\"}},\n }\n LOG.info(\"Creating a source with the following information:\\n\" f\"\\t{json_info}\")\n source_info = requests.post(source_url, json=json_info)\n source_data = source_info.json()\n return source_data[\"uuid\"]", "title": "" }, { "docid": "91e889562923dd6da8e642206341816f", "score": "0.48202702", "text": "def create_card(self, customer_id, data):\n return stripe.Customer.create_source(customer_id, source=data['card_id'])", "title": "" }, { "docid": "7108fd245cc750f6981756ec86f126be", "score": "0.48186058", "text": "def CopyFrom(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "e677464cc280250dd277e3384f154150", "score": "0.4806522", "text": "def before_save_instance(self , instance , using_transactions , dry_run):\n tmp_list = [Carbon.objects.get( name = cb ).cid for cb in\n re.split( '[;;]' , instance.carbon_source.strip( ) )]\n carbon_source = \";\".join( tmp_list )\n instance.carbon_source = carbon_source", "title": "" }, { "docid": "28dc64e534b11d9ab9801167db556b9d", "score": "0.4802152", "text": "def click_copy_from_existing_complex_contract(self):\n self.switch_to_detail_frame()\n try:\n self.click_element(self.copy_from_existing_complex_contract_locator, legacy_screen_loader=True)\n except:\n raise\n finally:\n self.switch_to_default_content()", "title": "" }, { "docid": "b7af64c3413ea76b001dbaade64f6fc7", "score": "0.4799096", "text": "def __init__(__self__, *,\n type: pulumi.Input[str],\n url: Any,\n annotations: Optional[pulumi.Input[Sequence[Any]]] = None,\n connect_via: Optional[pulumi.Input['IntegrationRuntimeReferenceArgs']] = None,\n description: Optional[pulumi.Input[str]] = None,\n encrypted_credential: Optional[Any] = None,\n parameters: Optional[pulumi.Input[Mapping[str, pulumi.Input['ParameterSpecificationArgs']]]] = None,\n password: Optional[pulumi.Input[Union['AzureKeyVaultSecretReferenceArgs', 'SecureStringArgs']]] = None,\n username: Optional[Any] = None):\n pulumi.set(__self__, \"type\", 'SapCloudForCustomer')\n pulumi.set(__self__, \"url\", url)\n if annotations is not None:\n pulumi.set(__self__, \"annotations\", annotations)\n if connect_via is not None:\n pulumi.set(__self__, \"connect_via\", connect_via)\n if description is not None:\n pulumi.set(__self__, \"description\", description)\n if encrypted_credential is not None:\n pulumi.set(__self__, \"encrypted_credential\", encrypted_credential)\n if parameters is not None:\n pulumi.set(__self__, \"parameters\", parameters)\n if password is not None:\n pulumi.set(__self__, \"password\", password)\n if username is not None:\n pulumi.set(__self__, \"username\", username)", "title": "" }, { "docid": "0053c6cda512932119d310f54fe574a6", "score": "0.47977275", "text": "def copy_rows(self, source):\n pass", "title": "" }, { "docid": "6a451a1bc4806818698409a0e4de2fca", "score": "0.47948673", "text": "def __init__(__self__, *,\n type: pulumi.Input[str],\n url: Any,\n annotations: Optional[pulumi.Input[Sequence[Any]]] = None,\n connect_via: Optional[pulumi.Input['IntegrationRuntimeReferenceArgs']] = None,\n 
description: Optional[pulumi.Input[str]] = None,\n encrypted_credential: Optional[pulumi.Input[str]] = None,\n parameters: Optional[pulumi.Input[Mapping[str, pulumi.Input['ParameterSpecificationArgs']]]] = None,\n password: Optional[pulumi.Input[Union['AzureKeyVaultSecretReferenceArgs', 'SecureStringArgs']]] = None,\n username: Optional[Any] = None):\n pulumi.set(__self__, \"type\", 'SapCloudForCustomer')\n pulumi.set(__self__, \"url\", url)\n if annotations is not None:\n pulumi.set(__self__, \"annotations\", annotations)\n if connect_via is not None:\n pulumi.set(__self__, \"connect_via\", connect_via)\n if description is not None:\n pulumi.set(__self__, \"description\", description)\n if encrypted_credential is not None:\n pulumi.set(__self__, \"encrypted_credential\", encrypted_credential)\n if parameters is not None:\n pulumi.set(__self__, \"parameters\", parameters)\n if password is not None:\n pulumi.set(__self__, \"password\", password)\n if username is not None:\n pulumi.set(__self__, \"username\", username)", "title": "" }, { "docid": "10a15b82f51caae5f788f0f08283a894", "score": "0.4791195", "text": "def __init__(__self__, *,\n type: pulumi.Input[str],\n additional_columns: Optional[Any] = None,\n disable_metrics_collection: Optional[Any] = None,\n max_concurrent_connections: Optional[Any] = None,\n query: Optional[Any] = None,\n query_timeout: Optional[Any] = None,\n source_retry_count: Optional[Any] = None,\n source_retry_wait: Optional[Any] = None):\n pulumi.set(__self__, \"type\", 'MagentoSource')\n if additional_columns is not None:\n pulumi.set(__self__, \"additional_columns\", additional_columns)\n if disable_metrics_collection is not None:\n pulumi.set(__self__, \"disable_metrics_collection\", disable_metrics_collection)\n if max_concurrent_connections is not None:\n pulumi.set(__self__, \"max_concurrent_connections\", max_concurrent_connections)\n if query is not None:\n pulumi.set(__self__, \"query\", query)\n if query_timeout is not None:\n pulumi.set(__self__, \"query_timeout\", query_timeout)\n if source_retry_count is not None:\n pulumi.set(__self__, \"source_retry_count\", source_retry_count)\n if source_retry_wait is not None:\n pulumi.set(__self__, \"source_retry_wait\", source_retry_wait)", "title": "" }, { "docid": "e85a15e3bc174b77374bbd5df1c3069a", "score": "0.47803244", "text": "def copy(self, name=''):\n payload = {\"name\": name or \"Copy - \" + random_title()}\n endpoint = self.json.related['copy']\n page = Page(self.connection, endpoint=endpoint)\n return page.post(payload)", "title": "" }, { "docid": "56cf2a46b67a1250b3859db5f078b49f", "score": "0.47720608", "text": "def OnCopy(self, event):\n self.parent.Copy()", "title": "" }, { "docid": "a5bc1dbeaeea142bc022ae2314540d96", "score": "0.47701758", "text": "def test_copy_action(workspace, measurement, exopy_qtbot):\n task = BreakTask(name='Test')\n measurement.root_task.add_child_task(0, task)\n action = TaskCopyAction(workspace=workspace,\n action_context=dict(copyable=True,\n data=(None, None, task, None)))\n action.triggered = True\n new = CLIPBOARD.instance\n assert isinstance(new, BreakTask)\n assert new.name == 'Test'", "title": "" }, { "docid": "1ee614a2a1b2a8668bdfdf9e8e13e02d", "score": "0.4768771", "text": "def ssh_cp(self, input, deployment, dest='~'):\n deploy_config = self.config.for_deployment(deployment)\n command = ['scp', '-o', 'StrictHostKeyChecking=no']\n command += input\n command += [deploy_config.target + \":{}\".format(dest)]\n self.call_subprocess(command)", "title": "" }, { 
"docid": "3ff1e74cc387121fec05f55c77c3804e", "score": "0.47645432", "text": "def __init__(__self__, *,\n type: pulumi.Input[str],\n additional_columns: Optional[Any] = None,\n disable_metrics_collection: Optional[Any] = None,\n max_concurrent_connections: Optional[Any] = None,\n query: Optional[Any] = None,\n source_retry_count: Optional[Any] = None,\n source_retry_wait: Optional[Any] = None):\n pulumi.set(__self__, \"type\", 'DynamicsCrmSource')\n if additional_columns is not None:\n pulumi.set(__self__, \"additional_columns\", additional_columns)\n if disable_metrics_collection is not None:\n pulumi.set(__self__, \"disable_metrics_collection\", disable_metrics_collection)\n if max_concurrent_connections is not None:\n pulumi.set(__self__, \"max_concurrent_connections\", max_concurrent_connections)\n if query is not None:\n pulumi.set(__self__, \"query\", query)\n if source_retry_count is not None:\n pulumi.set(__self__, \"source_retry_count\", source_retry_count)\n if source_retry_wait is not None:\n pulumi.set(__self__, \"source_retry_wait\", source_retry_wait)", "title": "" }, { "docid": "9b16405ce155351fc5a0b814526b0c11", "score": "0.47581026", "text": "def copy_from(self, src, dest, verbose=True):\n self._raise()", "title": "" }, { "docid": "48218d2adcac1808bd3d80633694095b", "score": "0.4752859", "text": "def copy(src):\n return DataSample(src.getContent(), src.getDatatype(), src.getTimestamp())", "title": "" }, { "docid": "baafcfd523885f0d5389412e858422f7", "score": "0.47496715", "text": "def copy(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "153b9f28c117fc563165f437c726229f", "score": "0.47431093", "text": "def copy(client, name, to_name=None, file_=None, to_file=None):\n data = {\"name\": name}\n if to_name is not None:\n data[\"to_name\"] = to_name\n if file_ is not None:\n data[\"file\"] = file_\n else:\n active_file = client.file_get_active()\n if active_file:\n data[\"file\"] = active_file[\"file\"]\n if to_file is not None:\n data[\"to_file\"] = to_file\n return client._creoson_post(\"note\", \"copy\", data)", "title": "" }, { "docid": "45bdd246c9dd863b2d60df4592fe96aa", "score": "0.47422126", "text": "def __init__(__self__, *,\n type: pulumi.Input[str],\n additional_columns: Optional[Any] = None,\n disable_metrics_collection: Optional[Any] = None,\n max_concurrent_connections: Optional[Any] = None,\n query: Optional[Any] = None,\n read_behavior: Optional[Any] = None,\n source_retry_count: Optional[Any] = None,\n source_retry_wait: Optional[Any] = None):\n pulumi.set(__self__, \"type\", 'SalesforceServiceCloudSource')\n if additional_columns is not None:\n pulumi.set(__self__, \"additional_columns\", additional_columns)\n if disable_metrics_collection is not None:\n pulumi.set(__self__, \"disable_metrics_collection\", disable_metrics_collection)\n if max_concurrent_connections is not None:\n pulumi.set(__self__, \"max_concurrent_connections\", max_concurrent_connections)\n if query is not None:\n pulumi.set(__self__, \"query\", query)\n if read_behavior is not None:\n pulumi.set(__self__, \"read_behavior\", read_behavior)\n if source_retry_count is not None:\n pulumi.set(__self__, \"source_retry_count\", source_retry_count)\n if source_retry_wait is not None:\n pulumi.set(__self__, \"source_retry_wait\", source_retry_wait)", "title": "" }, { "docid": "27352e3dbda0b1be8c2093be709bac56", "score": "0.47394192", "text": "def get_source(self):\n pass", "title": "" }, { "docid": "fe74d2ad52cce577200c992ac3fbe26e", "score": "0.4737517", 
"text": "def copyfile(self, source, outputfile):\n shutil.copyfileobj(source, outputfile)", "title": "" }, { "docid": "fe74d2ad52cce577200c992ac3fbe26e", "score": "0.4737517", "text": "def copyfile(self, source, outputfile):\n shutil.copyfileobj(source, outputfile)", "title": "" }, { "docid": "fe74d2ad52cce577200c992ac3fbe26e", "score": "0.4737517", "text": "def copyfile(self, source, outputfile):\n shutil.copyfileobj(source, outputfile)", "title": "" }, { "docid": "fe74d2ad52cce577200c992ac3fbe26e", "score": "0.4737517", "text": "def copyfile(self, source, outputfile):\n shutil.copyfileobj(source, outputfile)", "title": "" }, { "docid": "e081ac86c7d6096ce4e9af2e5edce40e", "score": "0.4721869", "text": "def test_com_day_cq_wcm_msm_impl_actions_content_copy_action_factory(self):\n pass", "title": "" }, { "docid": "e441261d96d309380e6651d50524fd59", "score": "0.47212785", "text": "def copyfile(self, source, outputfile):\r\n shutil.copyfileobj(source, outputfile)", "title": "" }, { "docid": "d38c63024de4cc3f0f5c9370e6123676", "score": "0.4713284", "text": "def execute(self, context):\n self.log.info(f\"StageToRedshiftOperator-->{self.table_name} - Begin\")\n # connect to Redshift\n redshift_hook = PostgresHook(postgres_conn_id=self.conn_id)\n self.log.info(f\" connected to {self.conn_id}\")\n \n sql_script = f\"\"\"\n COPY {self.table_name} \n FROM 's3://{self.s3_bucket}/{self.s3_path}' \n ACCESS_KEY_ID '{self.aws_key}'\n SECRET_ACCESS_KEY '{self.aws_secret}'\n REGION '{self.region}'\n JSON '{self.copy_json_option}'\n TIMEFORMAT as 'epochmillisecs'\n TRUNCATECOLUMNS \n BLANKSASNULL \n EMPTYASNULL\n \"\"\" \n # execute script (Copy)\n\n self.log.info(f\" Executing: {sql_script}\") \n redshift_hook.run(sql_script)\n self.log.info(f\"StageToRedshiftOperator-->{self.table_name} - End\")", "title": "" }, { "docid": "1a07cc7a0dcfc02ec79630b78c2f5be9", "score": "0.4701526", "text": "def copy_layer(self, source, layer_name):\n pass", "title": "" }, { "docid": "0ba9be4c2e5e7dbbbc3061a89cd472d2", "score": "0.46995074", "text": "def _get_copy_sflow(self):\n return self.__copy_sflow", "title": "" }, { "docid": "052c9305cc02746304e37c3caee9775b", "score": "0.46955204", "text": "def copySource(inProject,outProject,source = \"\") :\n subarray=Subarray.getSubarray()\n [p1,o1,s1,t1] = splitObsblockId(inProject)\n [p2,o2,s2,t2] = splitObsblockId(outProject)\n aiv = carma.observertools.ItemValue(\"project\",p1)\n biv = carma.observertools.ItemValue(\"obsblock\",o1)\n civ = carma.observertools.ItemValue(\"subObsblock\",s1)\n \n proj1 = subarray.queryProject([aiv,biv,civ])\n if(len(proj1) != 1) :\n mythrow(\"No matching project found for %s\" % (inProject))\n aiv = carma.observertools.ItemValue(\"project\",p2)\n biv = carma.observertools.ItemValue(\"obsblock\",o2)\n civ = carma.observertools.ItemValue(\"subObsblock\",s2)\n proj2 = subarray.queryProject([aiv,biv,civ])\n if(len(proj2) != 1) :\n mythrow(\"No matching project found for %s\" % (outProject))\n trial1 = -1\n if(t1 != -1) :\n for trialNo in range(0,len(proj1[0].obsblock[0].subObsblock[0].trial)) :\n if(t1 == proj1[0].obsblock[0].subObsblock[0].trial[trialNo].trialID) :\n trial1 = trialNo\n break\n if(trial1 == -1) :\n mythrow(\"Trial %i not found in %s.%s.%s\" % (t1,p1,o1,s1))\n else :\n trial1 = proj1[0].obsblock[0].subObsblock[0].trial[-1].trialID\n\n sourceID = -1\n if(source != \"\") :\n for srcNo in range(0,len(proj1[0].obsblock[0].subObsblock[0].trial[trial1].source)) :\n if(source == 
proj1[0].obsblock[0].subObsblock[0].trial[trial1].source[srcNo].sourceName) :\n sourceID = srcNo\n break\n if(sourceID == -1) :\n mythrow(\"Source %s not found in %s.%s.%s\" % (source,p1,o1,s1))\n else :\n sourceID = 0\n\n trial2 = 0\n if(t2 != -1) :\n for trialNo in range(0,len(proj2[0].obsblock[0].subObsblock[0].trial)) :\n if(t1 == proj2[0].obsblock[0].subObsblock[0].trial[trialNo].trialID) :\n trial2 = trialNo\n break\n if(trial2 == -1) :\n mythrow(\"Trial %i not found in %s.%s.%s\" % (t2,p2,o2,s2))\n else :\n trial2 = proj2[0].obsblock[0].subObsblock[0].trial[-1].trialID\n \n name = proj1[0].obsblock[0].subObsblock[0].trial[trial1].source[sourceID].sourceName\n ra = proj1[0].obsblock[0].subObsblock[0].trial[trial1].source[sourceID].ra\n dec = proj1[0].obsblock[0].subObsblock[0].trial[trial1].source[sourceID].dec\n ephem = proj1[0].obsblock[0].subObsblock[0].trial[trial1].source[sourceID].ephemeris\n scal = proj1[0].obsblock[0].subObsblock[0].trial[trial1].source[sourceID].isSelfcalibratable\n haLow = proj1[0].obsblock[0].reqLowHourAngleCoverage\n haHi = proj1[0].obsblock[0].reqHiHourAngleCoverage\n ralow = proj1[0].obsblock[0].lowRa\n raHi = proj1[0].obsblock[0].highRa\n\n aiv = carma.observertools.ItemValue(\"source\",name)\n biv = carma.observertools.ItemValue(\"srcRA\",str(ra))\n civ = carma.observertools.ItemValue(\"srcDEC\",str(dec))\n div = carma.observertools.ItemValue(\"ephemeris\",str(ephem))\n eiv = carma.observertools.ItemValue(\"selfcalibratable\",str(scal))\n fiv = carma.observertools.ItemValue(\"requestedHaCoverageLow\",str(haLow))\n giv = carma.observertools.ItemValue(\"requestedHaCoverageHi\",str(haHi))\n hiv = carma.observertools.ItemValue(\"requestedRaCoverageLow\",str(ralow))\n iiv = carma.observertools.ItemValue(\"requestedRaCoverageHi\",str(raHi))\n subarray.projectEdit(p2,o2,s2,t2,[aiv,biv,civ,div,eiv],carma.observertools.ESTATUS_REPLACE)\n subarray.projectEdit(p2,o2,\"none\",-1,[fiv,giv,hiv,iiv],carma.observertools.ESTATUS_EDIT)\n return 1", "title": "" }, { "docid": "b4621e525de6b96b6e1ccf673044732a", "score": "0.46910417", "text": "def copy_in(source, dest, **kwargs):\n\t\t\tdes_name = dest.name + '/'\n\t\t\tsrcs = source.name.split('/')\n\t\t\tdes_name = des_name + srcs[-1]\n\t\t\t# Avoid overwrite existing object\n\t\t\twhile des_name in self:\n\t\t\t\tdes_name += '-copy'\n\t\t\tself.copy(source, des_name, **kwargs)", "title": "" }, { "docid": "2fc7fb2c0044b4eb5bbeb7b3dd52e831", "score": "0.46893805", "text": "def __init__(__self__, *,\n type: pulumi.Input[str],\n additional_columns: Optional[Any] = None,\n disable_metrics_collection: Optional[Any] = None,\n max_concurrent_connections: Optional[Any] = None,\n query: Optional[Any] = None,\n query_timeout: Optional[Any] = None,\n source_retry_count: Optional[Any] = None,\n source_retry_wait: Optional[Any] = None):\n pulumi.set(__self__, \"type\", 'ConcurSource')\n if additional_columns is not None:\n pulumi.set(__self__, \"additional_columns\", additional_columns)\n if disable_metrics_collection is not None:\n pulumi.set(__self__, \"disable_metrics_collection\", disable_metrics_collection)\n if max_concurrent_connections is not None:\n pulumi.set(__self__, \"max_concurrent_connections\", max_concurrent_connections)\n if query is not None:\n pulumi.set(__self__, \"query\", query)\n if query_timeout is not None:\n pulumi.set(__self__, \"query_timeout\", query_timeout)\n if source_retry_count is not None:\n pulumi.set(__self__, \"source_retry_count\", source_retry_count)\n if source_retry_wait is not None:\n 
pulumi.set(__self__, \"source_retry_wait\", source_retry_wait)", "title": "" }, { "docid": "91b4d4da2cfc3a35c7267722e42815ee", "score": "0.46890247", "text": "def get_customer_data(self):\n\t\tcustomer = {}\n\t\tsignals.customer_data_query.send(sender=type(self), instance=self, customer=customer)\n\t\treturn customer", "title": "" }, { "docid": "b64a5101a11031f0d5f5e3ac075691d0", "score": "0.46798572", "text": "def duplicateService(self, context):\n dup_id = context.generateUniqueId(type_name = 'AnalysisService')\n context.invokeFactory(id = dup_id, type_name = 'AnalysisService')\n dup = context[dup_id]\n dup.setTitle('! Copy of %s' % self.Title())\n dup.edit(\n description = self.Description(),\n Instructions = self.getInstructions(),\n ReportDryMatter = self.getReportDryMatter(),\n Unit = self.getUnit(),\n Precision = self.getPrecision(),\n Price = self.getPrice(),\n CorporatePrice = self.getCorporatePrice(),\n VAT = self.getVAT(),\n Keyword = self.getKeyword(),\n Instrument = self.getInstrument(),\n Calculation = self.getCalculation(),\n MaxTimeAllowed = self.getMaxTimeAllowed(),\n DuplicateVariation = self.getDuplicateVariation(),\n AnalysisCategory = self.getAnalysisCategory(),\n Department = self.getDepartment(),\n Accredited = self.getAccredited(),\n Uncertainties = self.getUncertainties(),\n ResultOptions = self.getResultOptions(),\n )\n dup.processForm()\n dup.reindexObject()\n return dup_id", "title": "" }, { "docid": "3e4749b3a211f202fc2720188924b8ca", "score": "0.46753472", "text": "def __init__(__self__, *,\n type: pulumi.Input[str],\n additional_copy_options: Optional[pulumi.Input[Mapping[str, Any]]] = None,\n additional_format_options: Optional[pulumi.Input[Mapping[str, Any]]] = None):\n pulumi.set(__self__, \"type\", 'SnowflakeImportCopyCommand')\n if additional_copy_options is not None:\n pulumi.set(__self__, \"additional_copy_options\", additional_copy_options)\n if additional_format_options is not None:\n pulumi.set(__self__, \"additional_format_options\", additional_format_options)", "title": "" }, { "docid": "56c38c1d35c34d6f6ea3d18e09996ad2", "score": "0.46748066", "text": "def copy(src, dest):\n from xbmcvfs import copy as vfscopy\n log(2, \"Copy file '{src}' to '{dest}'.\", src=src, dest=dest)\n return vfscopy(from_unicode(src), from_unicode(dest))", "title": "" }, { "docid": "87dbc64d2dd6cda3f61bb3928edc02f4", "score": "0.4670875", "text": "def Copy(self, dest, *src, **opts):\n iret = 0\n kargs = {}\n # take values from opts if exist\n kargs['verbose'] = opts.get('verbose', self.verbose)\n kargs['follow_output'] = opts.get('follow_output', False)\n kargs['separated_stderr'] = False\n if opts.get('niverr'):\n niverr = opts['niverr']\n else:\n niverr = '<F>_COPY_ERROR'\n ddest = self.filename2dict(dest)\n if self.IsRemote(dest) \\\n or (ddest['user'] != '' and ddest['user'] != local_user):\n fdest = ddest['user']+'@'+ddest['mach']+':'+ddest['name']\n else:\n fdest = ddest['name']\n self._dbg('source list : %s' % (src,), 'destination : %s' % fdest, stack_id=2)\n\n if len(src) < 1:\n self._mess(_(u'no source file to copy'), '<A>_ALARM')\n\n for f in src:\n # here because we can change 'proto' if necessary\n proto = opts.get('protocol', self.param['remote_copy_protocol'])\n jret = 0\n df = self.filename2dict(f)\n if self.IsRemote(f):\n fsrc = df['user']+'@'+df['mach']+':'+df['name']\n else:\n fsrc = df['name']\n df['user'] = df['mach'] = ''\n cmd = ''\n tail = '.../'+'/'.join(f.split('/')[-2:])\n if not opts.has_key('alt_comment'):\n kargs['alt_comment'] = 
ufmt(_(u\"copying %s...\"), tail)\n else:\n kargs['alt_comment'] = opts['alt_comment']\n if df['mach'] == '' and ddest['mach'] == '':\n cmd = command['copy'] % { \"args\" : fsrc+' '+fdest }\n else:\n if proto == 'RSYNC' and df['mach'] != '' and ddest['mach'] != '':\n proto = 'RCP'\n self._mess(_(u\"copying a remote file to another remote server \" \\\n \"isn't allowed through RSYNC, trying with RCP.\"))\n if proto == 'RCP':\n cmd = 'rcp -r '+fsrc+' '+fdest\n elif proto == 'SCP':\n cmd = 'scp -rBCq -o StrictHostKeyChecking=no '+fsrc+' '+fdest\n elif proto == 'RSYNC':\n if self.IsDir(f) and not self.Exists(dest):\n self.MkDir(dest)\n cmd = 'rsync -rz '+os.path.join(fsrc, '*')+' '+fdest\n else:\n cmd = 'rsync -rz '+fsrc+' '+fdest\n elif proto == 'HTTP':\n str_user = ''\n if not df['user'] in ('', 'anonymous'):\n str_user = df['user']+'@'\n # dest must be local\n if ddest['mach'] == '':\n cmd = 'wget http://'+str_user+df['mach']+df['name']+' -O '+fdest\n else:\n cmd = ''\n self._mess(ufmt(_(u'remote destination not allowed through %s' \\\n ' : %s'), proto, fdest), niverr)\n if cmd != '':\n jret, out = self.local_shell(cmd, **kargs)\n if jret != 0 and niverr != 'SILENT':\n self._mess(ufmt(_(u'error during copying %s to %s'), f, fdest) \\\n + os.linesep + ufmt(_(u'message : %s'), out), niverr)\n else:\n self._mess(_(u'unexpected error or unknown copy protocol : %s') \\\n % proto, niverr)\n iret = max(jret, iret)\n return iret", "title": "" }, { "docid": "e463ed2dba312231fca9527f040f6806", "score": "0.4670229", "text": "def customer(self, customer):\n\n self._customer = customer", "title": "" }, { "docid": "c24e8441f274784c0ac7c6c018fbe57f", "score": "0.46670288", "text": "def __init__(__self__, *,\n type: pulumi.Input[str],\n additional_columns: Optional[Any] = None,\n disable_metrics_collection: Optional[Any] = None,\n max_concurrent_connections: Optional[Any] = None,\n query: Optional[Any] = None,\n source_retry_count: Optional[Any] = None,\n source_retry_wait: Optional[Any] = None):\n pulumi.set(__self__, \"type\", 'CommonDataServiceForAppsSource')\n if additional_columns is not None:\n pulumi.set(__self__, \"additional_columns\", additional_columns)\n if disable_metrics_collection is not None:\n pulumi.set(__self__, \"disable_metrics_collection\", disable_metrics_collection)\n if max_concurrent_connections is not None:\n pulumi.set(__self__, \"max_concurrent_connections\", max_concurrent_connections)\n if query is not None:\n pulumi.set(__self__, \"query\", query)\n if source_retry_count is not None:\n pulumi.set(__self__, \"source_retry_count\", source_retry_count)\n if source_retry_wait is not None:\n pulumi.set(__self__, \"source_retry_wait\", source_retry_wait)", "title": "" }, { "docid": "e2d31bd8413469ec4a9b5ae5e64c6573", "score": "0.46632424", "text": "def __init__(__self__, *,\n type: pulumi.Input[str],\n additional_columns: Optional[Any] = None,\n disable_metrics_collection: Optional[Any] = None,\n extraction_mode: Optional[Any] = None,\n max_concurrent_connections: Optional[Any] = None,\n projection: Optional[Any] = None,\n query_timeout: Optional[Any] = None,\n selection: Optional[Any] = None,\n source_retry_count: Optional[Any] = None,\n source_retry_wait: Optional[Any] = None,\n subscriber_process: Optional[Any] = None):\n pulumi.set(__self__, \"type\", 'SapOdpSource')\n if additional_columns is not None:\n pulumi.set(__self__, \"additional_columns\", additional_columns)\n if disable_metrics_collection is not None:\n pulumi.set(__self__, \"disable_metrics_collection\", 
disable_metrics_collection)\n if extraction_mode is not None:\n pulumi.set(__self__, \"extraction_mode\", extraction_mode)\n if max_concurrent_connections is not None:\n pulumi.set(__self__, \"max_concurrent_connections\", max_concurrent_connections)\n if projection is not None:\n pulumi.set(__self__, \"projection\", projection)\n if query_timeout is not None:\n pulumi.set(__self__, \"query_timeout\", query_timeout)\n if selection is not None:\n pulumi.set(__self__, \"selection\", selection)\n if source_retry_count is not None:\n pulumi.set(__self__, \"source_retry_count\", source_retry_count)\n if source_retry_wait is not None:\n pulumi.set(__self__, \"source_retry_wait\", source_retry_wait)\n if subscriber_process is not None:\n pulumi.set(__self__, \"subscriber_process\", subscriber_process)", "title": "" }, { "docid": "d2c04592f0cfeacfc511054be7301642", "score": "0.46528223", "text": "def _copy(source, destination):\r\n \r\n if not destination.exists():\r\n destination.createResource()\r\n fileObj = source.readData()\r\n destination.writeData(fileObj)", "title": "" }, { "docid": "01135e7e2289219fa670de6ff37eb2af", "score": "0.46514955", "text": "def copy(self, src: str, dst: str, filter: str|List[str]|None = None):\n if util.copy_if_needed(src, dst, filter):\n if self.config.verbose: console(f'copy {src} --> {dst}')", "title": "" }, { "docid": "2288d3b9f08ddb60d2d9f73fc1e99b51", "score": "0.46508646", "text": "def _CopySourceFile(self, source_filename):\n shutil.copy(source_filename, self._rpmbuild_sources_path)", "title": "" }, { "docid": "d7d01ae2ee1455e7c0a5f0f69564a752", "score": "0.46469226", "text": "def copy(self):\n g.clipboard=self", "title": "" }, { "docid": "8eb26a7d565e05942115661f6c0fad11", "score": "0.4642123", "text": "def collect(self):\n self.customer_rows = self.gcc.run()", "title": "" }, { "docid": "5579ba5b39bbe0748bff0b78b75a921e", "score": "0.46390942", "text": "def __init__(__self__, *,\n type: pulumi.Input[str],\n additional_copy_options: Optional[pulumi.Input[Mapping[str, Any]]] = None,\n additional_format_options: Optional[pulumi.Input[Mapping[str, Any]]] = None):\n pulumi.set(__self__, \"type\", 'SnowflakeExportCopyCommand')\n if additional_copy_options is not None:\n pulumi.set(__self__, \"additional_copy_options\", additional_copy_options)\n if additional_format_options is not None:\n pulumi.set(__self__, \"additional_format_options\", additional_format_options)", "title": "" }, { "docid": "66f07f272b1359683af711abf755cf90", "score": "0.4638871", "text": "def __init__(__self__, *,\n type: pulumi.Input[str],\n additional_columns: Optional[Any] = None,\n disable_metrics_collection: Optional[Any] = None,\n http_request_timeout: Optional[Any] = None,\n max_concurrent_connections: Optional[Any] = None,\n query: Optional[Any] = None,\n query_timeout: Optional[Any] = None,\n source_retry_count: Optional[Any] = None,\n source_retry_wait: Optional[Any] = None):\n pulumi.set(__self__, \"type\", 'SapEccSource')\n if additional_columns is not None:\n pulumi.set(__self__, \"additional_columns\", additional_columns)\n if disable_metrics_collection is not None:\n pulumi.set(__self__, \"disable_metrics_collection\", disable_metrics_collection)\n if http_request_timeout is not None:\n pulumi.set(__self__, \"http_request_timeout\", http_request_timeout)\n if max_concurrent_connections is not None:\n pulumi.set(__self__, \"max_concurrent_connections\", max_concurrent_connections)\n if query is not None:\n pulumi.set(__self__, \"query\", query)\n if query_timeout is not 
None:\n pulumi.set(__self__, \"query_timeout\", query_timeout)\n if source_retry_count is not None:\n pulumi.set(__self__, \"source_retry_count\", source_retry_count)\n if source_retry_wait is not None:\n pulumi.set(__self__, \"source_retry_wait\", source_retry_wait)", "title": "" }, { "docid": "1840569bdd988f766fcc472f8e8f055e", "score": "0.46384212", "text": "def collectSource(self):\n pass", "title": "" }, { "docid": "5b8f5718d54d76bdad205abdc4135d48", "score": "0.4632911", "text": "def copy_metric(metric, data_model, change_name: bool = True):\n kwargs: dict[str, Any] = {\n \"sources\": {\n uuid(): copy_source(source, data_model, change_name=False) for source in metric[\"sources\"].values()\n },\n }\n if change_name:\n kwargs[\"name\"] = f\"{metric.get('name') or data_model['metrics'][metric['type']]['name']} (copy)\"\n return copy_item(metric, **kwargs)", "title": "" }, { "docid": "1506c6ca9eb8b00f834f2768ab4db1b8", "score": "0.46278012", "text": "def bucket_copy(\n self,\n CopySource,\n Key,\n ExtraArgs=None,\n Callback=None,\n SourceClient=None,\n Config=None,\n):\n return self.meta.client.copy(\n CopySource=CopySource,\n Bucket=self.name,\n Key=Key,\n ExtraArgs=ExtraArgs,\n Callback=Callback,\n SourceClient=SourceClient,\n Config=Config,\n )", "title": "" }, { "docid": "a91db1bb7e2267c46094bd861f22f8c1", "score": "0.462466", "text": "def __init__(__self__, *,\n type: pulumi.Input[str],\n query: Optional[Any] = None,\n source_retry_count: Optional[Any] = None,\n source_retry_wait: Optional[Any] = None):\n pulumi.set(__self__, \"type\", 'MagentoSource')\n if query is not None:\n pulumi.set(__self__, \"query\", query)\n if source_retry_count is not None:\n pulumi.set(__self__, \"source_retry_count\", source_retry_count)\n if source_retry_wait is not None:\n pulumi.set(__self__, \"source_retry_wait\", source_retry_wait)", "title": "" }, { "docid": "9b0a825687ec27e9cf66f7a14ae27098", "score": "0.46236083", "text": "def repository(context, src_image_name: ImageName, dest_image_name: ImageName):\n\n context.obj[\"src_image_name\"] = src_image_name\n context.obj[\"dest_image_name\"] = dest_image_name\n context.obj[\"imagesource\"] = DeviceMapperRepositoryImageSource(\n dry_run=context.obj[\"dry_run\"]\n )\n copy(context)", "title": "" }, { "docid": "87efbcbe0c0add016d7838ced4c558d0", "score": "0.46168962", "text": "def copy_object(self, source_key, destination_key):\n source_object_info = {\n 'Bucket': OUTPUT_BUCKET_NAME,\n 'Key': source_key\n }\n logger.info(f'Copying {source_key} to {destination_key}')\n return self.__bucket.copy(source_object_info, destination_key)", "title": "" }, { "docid": "97c83b4157f16f7b0814850db46118bd", "score": "0.46135655", "text": "def test_com_day_cq_wcm_msm_impl_actions_version_copy_action_factory(self):\n pass", "title": "" }, { "docid": "7552a96c92127889dabd38357034ea78", "score": "0.4612312", "text": "def copyItem(self):\n # retrieve data from model \n rowData = []\n\n selected = self.selectionModel().selectedRows()\n if not selected:\n return\n\n for index in selected:\n rowData.append(self.model.getData()[index.row()])\n\n if rowData:\n self.trace('Test Param > Picke data: %s' % rowData )\n # pickle data\n mime = QMimeData()\n mime.setData( self.__mime__ , QByteArray(pickle.dumps(rowData)) )\n \n self.trace('Steps to copy> Copying to clipboard')\n # copy to clipboard\n QApplication.clipboard().setMimeData(mime,QClipboard.Clipboard)\n self.trace('Steps to copy > Coppied to clipboard')\n self.pasteAction.setEnabled(True)", "title": "" }, { "docid": 
"dc7ce632c952f8d1ea465551e18bfde0", "score": "0.45988595", "text": "def _specific_copy(self, obj: 'Service') -> None:\n super()._specific_copy(obj)\n self.category_name = obj.category_name\n self.role = obj.role", "title": "" }, { "docid": "d49fc48da2d3f779b3ccbf3fcdad3a2b", "score": "0.45872515", "text": "def copy_to(self, src, dest, verbose=True):\n self._raise()", "title": "" }, { "docid": "39bd7f7d108ba24655b0c35d6b93591d", "score": "0.45828944", "text": "def copy(self, src, dst):\n pass", "title": "" }, { "docid": "9c97c6d182a0a617855997540411b381", "score": "0.45811248", "text": "def slot_copy_column(self, action):\n _, item, model_index = action.data()\n self.send_data_to_clipboard(item.data(model_index.column()))", "title": "" }, { "docid": "a5d10cd8445e68865c65ad262751e5b5", "score": "0.45731315", "text": "def __init__(__self__, *,\n type: pulumi.Input[str],\n query: Optional[Any] = None,\n source_retry_count: Optional[Any] = None,\n source_retry_wait: Optional[Any] = None):\n pulumi.set(__self__, \"type\", 'ConcurSource')\n if query is not None:\n pulumi.set(__self__, \"query\", query)\n if source_retry_count is not None:\n pulumi.set(__self__, \"source_retry_count\", source_retry_count)\n if source_retry_wait is not None:\n pulumi.set(__self__, \"source_retry_wait\", source_retry_wait)", "title": "" }, { "docid": "1f0cbac97f2dff237bb2a48f807e9c8f", "score": "0.45726287", "text": "def copy(\n self,\n CopySource,\n Bucket,\n Key,\n ExtraArgs=None,\n Callback=None,\n SourceClient=None,\n Config=None,\n):\n subscribers = None\n if Callback is not None:\n subscribers = [ProgressCallbackInvoker(Callback)]\n\n config = Config\n if config is None:\n config = TransferConfig()\n\n with create_transfer_manager(self, config) as manager:\n future = manager.copy(\n copy_source=CopySource,\n bucket=Bucket,\n key=Key,\n extra_args=ExtraArgs,\n subscribers=subscribers,\n source_client=SourceClient,\n )\n return future.result()", "title": "" }, { "docid": "b191476ee7bb23dc97edc3c92f3ab3d0", "score": "0.4564881", "text": "def copy(self, cr, uid, ids, default={}, context=None):\n category_name = self.browse(cr, uid, ids, context).name\n \n if category_name:\n default.update({'name': category_name + '(copy)'})\n \n return super(sale_category, self).copy(cr, uid, ids, default, context)", "title": "" }, { "docid": "3ad46edfde5ad3baa5c77e5677e98284", "score": "0.4558506", "text": "def customer(self):\n return self.__customer", "title": "" }, { "docid": "354a4c4bc8aefe74489d125e5754c051", "score": "0.45526385", "text": "def streamClipboard(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "title": "" }, { "docid": "dbe2f4c007260d20161bb2715771dd44", "score": "0.454187", "text": "def __init__(self, source):\n super(Caustics, self).__init__(source)", "title": "" }, { "docid": "2ccecd0656ba88c42c0e3f88686f18de", "score": "0.45333916", "text": "def __init__(__self__, *,\n type: pulumi.Input[str],\n additional_columns: Optional[Any] = None,\n disable_metrics_collection: Optional[Any] = None,\n max_concurrent_connections: Optional[Any] = None,\n query: Optional[Any] = None,\n query_timeout: Optional[Any] = None,\n source_retry_count: Optional[Any] = None,\n source_retry_wait: Optional[Any] = None):\n pulumi.set(__self__, \"type\", 'OracleServiceCloudSource')\n if additional_columns is not None:\n pulumi.set(__self__, \"additional_columns\", additional_columns)\n if 
disable_metrics_collection is not None:\n pulumi.set(__self__, \"disable_metrics_collection\", disable_metrics_collection)\n if max_concurrent_connections is not None:\n pulumi.set(__self__, \"max_concurrent_connections\", max_concurrent_connections)\n if query is not None:\n pulumi.set(__self__, \"query\", query)\n if query_timeout is not None:\n pulumi.set(__self__, \"query_timeout\", query_timeout)\n if source_retry_count is not None:\n pulumi.set(__self__, \"source_retry_count\", source_retry_count)\n if source_retry_wait is not None:\n pulumi.set(__self__, \"source_retry_wait\", source_retry_wait)", "title": "" }, { "docid": "bf5da36b930c23d282425196254e953b", "score": "0.45322627", "text": "def shlwapi_IStream_Copy(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"pstmFrom\", \"pstmTo\", \"cb\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)", "title": "" }, { "docid": "eab4a591d8cb2b3908829d0f739e587e", "score": "0.4531237", "text": "def copy(self) -> CopyRequestBuilder:\n from .copy.copy_request_builder import CopyRequestBuilder\n\n return CopyRequestBuilder(self.request_adapter, self.path_parameters)", "title": "" }, { "docid": "e96ed17fb71d7353cc9a4d4eae980f83", "score": "0.45311207", "text": "def copy_file(ctx, source, dest, overwrite=False):\n try:\n sfa = create_service(ctx, \"com.sun.star.ucb.SimpleFileAccess\")\n if sfa.exists(dest):\n if not overwrite:\n return\n if sfa.exists(source):\n sfa.copy(source, dest)\n except Exception as e:\n if not sfa.exists(dir_url(dest)):\n sfa.createFolder(dir_url(dest))\n if sfa.exists(source):\n sfa.copy(source, dest)", "title": "" }, { "docid": "1af4ad5fdc7e95f973d99e256c4efa85", "score": "0.45310608", "text": "def __init__(self, copied_from=None, course=None, sis_batch=None, user=None, copied_to=None, page_view=None):\r\n self._copied_from = copied_from\r\n self._course = course\r\n self._sis_batch = sis_batch\r\n self._user = user\r\n self._copied_to = copied_to\r\n self._page_view = page_view\r\n\r\n self.logger = logging.getLogger('pycanvas.Courseeventlink')", "title": "" }, { "docid": "7fb535910b7767bc4b44c510ac2e3f4c", "score": "0.45268765", "text": "def __init__(__self__, *,\n type: pulumi.Input[str],\n query: Optional[Any] = None,\n source_retry_count: Optional[Any] = None,\n source_retry_wait: Optional[Any] = None):\n pulumi.set(__self__, \"type\", 'SapEccSource')\n if query is not None:\n pulumi.set(__self__, \"query\", query)\n if source_retry_count is not None:\n pulumi.set(__self__, \"source_retry_count\", source_retry_count)\n if source_retry_wait is not None:\n pulumi.set(__self__, \"source_retry_wait\", source_retry_wait)", "title": "" } ]
d22a81c545031a3b6b5decaa0d244ede
List all UUIDs in the archive Returns
[ { "docid": "d9d04b7c847b2b6a431bfeace34e5ab2", "score": "0.6825332", "text": "def list_uuids():\n # return common_utils.file_list('ephys/')\n if braingeneers.get_default_endpoint().startswith('http'):\n return [s.split('/')[-2] for s in s3wrangler.list_directories('s3://braingeneers' + '/ephys/')]\n else:\n # list file locally\n return os.listdir(braingeneers.get_default_endpoint() + '/ephys/')", "title": "" } ]
[ { "docid": "11ec5dae8e00fc06f688c12de46acdcd", "score": "0.68951184", "text": "def uuids(self, count=None) -> Iterable[str]:\n response = self.session.get(\n urljoin(self.url, '_uuids'), \n params = {'count': count} if count is not None else None\n )\n if not response.ok: raise CouchDBException.auto(response)\n return response.json()['uuids']", "title": "" }, { "docid": "5dbd6daab68a31303aa2bee7352f2eff", "score": "0.66752654", "text": "def uuids(self):\n return self._uuids", "title": "" }, { "docid": "a64cfb69a8e8299168b1f5a8de7061c9", "score": "0.6169834", "text": "def list_archives(self, safe_id):\n\n try:\n res = self.api.storage.c14.safe(safe_id).archive.get()\n except slumber.exceptions.HttpClientError as e:\n res = self.handle_error(e)\n except slumber.exceptions.HttpServerError as e:\n res = self.handle_error(e)\n\n return res", "title": "" }, { "docid": "26447e8e1597bbd9f64b4753313039ef", "score": "0.60473835", "text": "def archive_list_locations(self, safe_id, uuid):\n\n try:\n res = (self.api.storage.c14.safe(safe_id).archive(uuid).location\n .get())\n except slumber.exceptions.HttpClientError as e:\n res = self.handle_error(e)\n except slumber.exceptions.HttpServerError as e:\n res = self.handle_error(e)\n\n return res", "title": "" }, { "docid": "5e137570cef8ed5ba0da24f8390b5925", "score": "0.5970061", "text": "def get_all_uuids():\n res = []\n for country, station_map in _STATIONS_MAP.items():\n res.extend([get_uuid(country, s) for s in station_map.values()])\n return sorted(res)", "title": "" }, { "docid": "a0fa235cd8c3193d561dddd6a6a7bab4", "score": "0.594044", "text": "def list(self, **kwargs):\n return self.http_get('/backups', params=kwargs)", "title": "" }, { "docid": "3e61e75aec8356b483ff3f3e84f9dc4f", "score": "0.5917187", "text": "def volume_get_all_by_instance_uuid(context, instance_uuid):\n return IMPL.volume_get_all_by_instance_uuid(context, instance_uuid)", "title": "" }, { "docid": "b627989b16c50029f178bb37f2107225", "score": "0.58733773", "text": "def print_all_uuids():\n d = os.listdir(configs.POLICY_DIR)\n d = [f.split('-')[0] for f in d]\n d = set(d)\n for uid in d:\n try:\n uid = str(int(uid))\n g = glob.glob(configs.POLICY_DIR + uid + '*')\n g = [f.split('-')[1] for f in g]\n g = [int(f) for f in g]\n print(uid, max(g))\n except ValueError:\n continue", "title": "" }, { "docid": "9fc97060f3a054077bd59283fc685ef7", "score": "0.5835003", "text": "def all_archives(self):\n return Backup.select()", "title": "" }, { "docid": "ce9e095f7681cefb69bd30c38f394245", "score": "0.5731247", "text": "def _remove_all_uuids() :\n mc.warning( 'You are using a risky method, if you don\\'t know what you are doing, please call RnD!' 
)\n for node in mc.ls( long = True ) :\n _remove_uuid( node )", "title": "" }, { "docid": "e5bdd3438bbd5f2f4a47f96e44a688b1", "score": "0.5725424", "text": "def do_backup_list(cs, args):\n backups = cs.backups.list()\n columns = ['ID', 'Volume ID', 'Status', 'Name', 'Size', 'Object Count',\n 'Container']\n utils.print_list(backups, columns)", "title": "" }, { "docid": "d2e13b3bc8e4b76b5c18dce274d41773", "score": "0.5703132", "text": "def torrent_list(self):\n return []", "title": "" }, { "docid": "63369009e7bf537a8a54dd386bc606d0", "score": "0.5678621", "text": "def get_snapshot_list(self):", "title": "" }, { "docid": "a007b510acbe7eae4f730b608316ae48", "score": "0.56314474", "text": "def query_deployments(self):\r\n c = self._db.cursor()\r\n c.execute(\"select uuid from Deployments\")\r\n res = c.fetchall()\r\n return [x[0] for x in res]", "title": "" }, { "docid": "23cd17ec1857d6a5769c98bd03733a40", "score": "0.559948", "text": "def list(self, volume_id):\n return self.http_get('/volumes/%s/backups' % volume_id)", "title": "" }, { "docid": "510977dfdaf50fae85f12be673470cfe", "score": "0.55962455", "text": "def get_users(self, uuid=None):\n return self.request('users', uuid=uuid)", "title": "" }, { "docid": "1027c408d2a0c4dff44745ada354f006", "score": "0.55850315", "text": "def archive_list_jobs(self, safe_id, uuid):\n\n try:\n res = self.api.storage.c14.safe(safe_id).archive(uuid).job.get()\n except slumber.exceptions.HttpClientError as e:\n res = self.handle_error(e)\n except slumber.exceptions.HttpServerError as e:\n res = self.handle_error(e)\n\n return res", "title": "" }, { "docid": "053b6a3df028d38266d75b1556a699f2", "score": "0.55777806", "text": "def test_list_instance_uuids(self):\n host = fake.HostSystem()\n vm_folder = fake.Folder(name='fake-folder')\n fake._create_object('Folder', vm_folder)\n respool = fake.ResourcePool()\n fake._create_object('ResourcePool', respool)\n cluster = fake.ClusterComputeResource()\n cluster._add_root_resource_pool(respool.obj)\n fake._create_object('ClusterComputeResource', cluster)\n\n virtualMachine1 = fake.VirtualMachine(name=\"VM-1\",\n instanceUuid=\"test-uuid-1\",\n runtime_host=host.obj,\n parent=vm_folder.obj,\n resourcePool=respool.obj)\n virtualMachine2 = fake.VirtualMachine(name=\"VM-2\",\n instanceUuid=\"test-uuid-2\",\n runtime_host=host.obj,\n parent=vm_folder.obj,\n resourcePool=respool.obj)\n\n fake_vms = fake.FakeRetrieveResult()\n fake_vms.add_object(virtualMachine1)\n fake_vms.add_object(virtualMachine2)\n fake_folders = fake.FakeRetrieveResult()\n fake_folders.add_object(vm_folder)\n fake_pools = fake.FakeRetrieveResult()\n fake_pools.add_object(respool)\n\n session = fake_session(fake_virtual_machines=fake_vms,\n fake_vm_folder=fake_folders,\n fake_resource_pools=fake_pools)\n driver = fake_driver(session, cluster_ref=cluster.obj)\n instance_uuids = vm_util.list_instance_uuids(driver)\n self.assertIn(\"test-uuid-1\", instance_uuids)\n self.assertIn(\"test-uuid-2\", instance_uuids)\n self.assertEquals(2, len(instance_uuids))", "title": "" }, { "docid": "12d6178d35caa9cd71f25ac75c5007de", "score": "0.5565945", "text": "def list_contents(self):\n return self.zip.namelist()", "title": "" }, { "docid": "c0604c073fe49ffa42c9cb84efd89f1b", "score": "0.5563355", "text": "def get(self, uuid=None):\n \n objs = self.controller.list(uuid=uuid)\n\n objs = {\n str(obj.uuid): obj.to_dict()\n for obj in objs\n }\n self.set_header('Object-Count', len(objs))\n return objs", "title": "" }, { "docid": "379dd9814f41d80a4ab02ad6c62f6a38", "score": 
"0.55500656", "text": "def archive_informations(self, safe_id, uuid):\n\n try:\n res = (self.api.storage.c14.safe(safe_id).archive(uuid).bucket\n .get())\n except slumber.exceptions.HttpClientError as e:\n res = self.handle_error(e)\n except slumber.exceptions.HttpServerError as e:\n res = self.handle_error(e)\n\n return res", "title": "" }, { "docid": "32e35b71b86d212c0793b559b299cb42", "score": "0.5538592", "text": "def get_all(self) -> list:\n\t\tpass", "title": "" }, { "docid": "6b13627b04abe58e2cbb2a2b32b92049", "score": "0.54587704", "text": "async def instances_list(self, cluster_uuid: str) -> list:\n\n _, cluster_id = self._get_namespace_cluster_id(cluster_uuid)\n self.log.debug(\"list releases for cluster {}\".format(cluster_id))\n\n # config filename\n _kube_dir, helm_dir, config_filename, _cluster_dir = self._get_paths(\n cluster_name=cluster_id, create_if_not_exist=True\n )\n\n command = \"{} --kubeconfig={} --home={} list --output yaml\".format(\n self._helm_command, config_filename, helm_dir\n )\n\n output, _rc = await self._local_async_exec(\n command=command, raise_exception_on_error=True\n )\n\n if output and len(output) > 0:\n return yaml.load(output, Loader=yaml.SafeLoader).get(\"Releases\")\n else:\n return []", "title": "" }, { "docid": "cd9891408c26f7ee5d0a3e908c0dce5f", "score": "0.54527044", "text": "def getList(self):", "title": "" }, { "docid": "cd9891408c26f7ee5d0a3e908c0dce5f", "score": "0.54527044", "text": "def getList(self):", "title": "" }, { "docid": "cd9891408c26f7ee5d0a3e908c0dce5f", "score": "0.54527044", "text": "def getList(self):", "title": "" }, { "docid": "cd9891408c26f7ee5d0a3e908c0dce5f", "score": "0.54527044", "text": "def getList(self):", "title": "" }, { "docid": "f232ebf7bfab300e74e0741c51775df6", "score": "0.54369074", "text": "def uuid(ctx, cli_obj, filename):\n for photo in photoscript.PhotosLibrary().selection:\n if filename:\n print(f\"# {photo.filename}\")\n print(photo.uuid)", "title": "" }, { "docid": "095bb4a8828602f3116b284f9a11e02b", "score": "0.5434843", "text": "def get_archive(self):", "title": "" }, { "docid": "1e90d345a8bbc63fa88472d58d24b43f", "score": "0.5432478", "text": "def _ls(self):\n return self._s.list()", "title": "" }, { "docid": "058bf7009e717d378a7322f7626f4255", "score": "0.54191446", "text": "def list(self):\n pass", "title": "" }, { "docid": "c09619b3f2ced1ccddc573bc1b0fccd4", "score": "0.53945243", "text": "def get_file_list(archive_url):\n\n # create response object\n r = requests.get(archive_url)\n\n # create beautiful-soup object\n soup = BeautifulSoup(r.content, \"html.parser\")\n\n # find all files in the directory using the tag <a>\n links = soup.findAll('a')\n logging.info(links)\n\n # filter the link sending with .gz\n file_list = [archive_url + link['href'] for link in links if link['href'].endswith('gz')]\n\n return file_list", "title": "" }, { "docid": "4367595942cefce9a218c0e666e8ebc7", "score": "0.53770113", "text": "def get_archive(self, safe_id, uuid):\n\n try:\n res = self.api.storage.c14.safe(safe_id).archive(uuid).get()\n except slumber.exceptions.HttpClientError as e:\n res = self.handle_error(e)\n except slumber.exceptions.HttpServerError as e:\n res = self.handle_error(e)\n\n return res", "title": "" }, { "docid": "7fd676f81202616cf16a8faf5ee9705e", "score": "0.5371707", "text": "def vos_listaddrs():\n def done(rc, out, err):\n return rc == 0\n\n def retry(rc, out, err):\n if \"server or network not reponding\" in err:\n return True\n if \"no quorum elected\" in err:\n return True\n if 
\"invalid RPC (RX) operation\" in err:\n return True # May occur during server startup.\n if \"Couldn't read/write the database\" in err:\n return True # May occur during server startup.\n return False\n\n vos = lookup_command('vos')\n out = run_command([vos, 'listaddrs', '-noresolve', '-printuuid'],\n done=done, retry=retry)\n servers = []\n uuid = None\n addrs = []\n for line in out.splitlines():\n m = re.match(r'UUID: (\\S+)', line)\n if m:\n uuid = UUID.parse(m.group(1))\n addrs = []\n continue\n m = re.match(r'(\\S+)', line)\n if m:\n addrs.append(m.group(1))\n continue\n m = re.match(r'$', line)\n if m:\n # Records are terminated with a blank line.\n servers.append(dict(uuid=uuid, addrs=addrs))\n uuid = None\n addrs = []\n log.debug(\"servers=%s\", servers)\n return servers", "title": "" }, { "docid": "39547106ee89d2bf4d273a0ffa2bb822", "score": "0.5368455", "text": "def uuid_nodes() :\n unwanted_nodes = config.DEFAULT_NODES + config.UNWANTED_NODES\n for node in mc.ls( long = True ,\n type = config.TRACKABLE_NODE_TYPES ) :\n if node in unwanted_nodes :\n continue\n yield node", "title": "" }, { "docid": "a02d93786a0f3421c630d11f7bd5e1b5", "score": "0.53645146", "text": "def get_archive(self):\n result = requests.get(\"{}/archive/\".format(self.root_url),\n headers={'Content-type': 'application/json'})\n self.assertEqual(result.status_code, 200)\n return result.json()", "title": "" }, { "docid": "d0eb7e9664d1be731d92838d171c0d80", "score": "0.53635716", "text": "def get_archives(self):\n return self.archives", "title": "" }, { "docid": "d1f59e4b83fcfaad15f8f2881853b79c", "score": "0.5353642", "text": "def list_backups(cfg):\n list_available_backups(cfg)", "title": "" }, { "docid": "c7d59346a5049bf6cd92ee17802c7c2e", "score": "0.5352478", "text": "def list_users(self):\n line_count = 1\n user_line_to_uuid = {} #new dict to hold uuid\n for uuid, value in self.all_users.items():\n user_line_to_uuid[str(line_count)] = uuid\n print('{}. {}'.format(line_count, value.user_name))\n line_count += 1\n return user_line_to_uuid # to select_customer\n\n ##Old Code\n # count = 1\n # for user in self.all_users:\n # print(str(count) + '. 
' + user[2])\n # count = count + 1", "title": "" }, { "docid": "fb296d0045c29ea23f5c1c7044094429", "score": "0.53471035", "text": "def get_assets(self):\n return # osid.repository.AssetList", "title": "" }, { "docid": "95f03fb55b38e1f3c2887cb58460dda3", "score": "0.5345863", "text": "def inventory_tag_list(self) -> Sequence['outputs.TagResponse']:\n return pulumi.get(self, \"inventory_tag_list\")", "title": "" }, { "docid": "e4d8aa41242ae4b651fe65c426709368", "score": "0.5340658", "text": "def task_result_get_all_by_uuid(task_uuid):\n return IMPL.task_result_get_all_by_uuid(task_uuid)", "title": "" }, { "docid": "ce53a32e4ff0a2b84ed2fd88133644d8", "score": "0.5334367", "text": "async def list(self, ctx):\n embed = Embed(title='List of Registered Tags')\n for tag in self.db.get_tags_by_guild_id(ctx.guild.id):\n creator = self.bot.get_user(int(tag.creator_id))\n embed = embed.add_field(name='--------------------------------',\n value=f\"[{tag.tag}]({tag.cdn_url})\\nBy: {creator.name}\",\n inline=False)\n await ctx.send(embed=embed)", "title": "" }, { "docid": "30dc645c7f1c52b8d9e369e75d8e6131", "score": "0.5322519", "text": "def do_snapshot_list(cs, args):\n all_tenants = int(os.environ.get(\"ALL_TENANTS\", args.all_tenants))\n search_opts = {\n 'all_tenants': all_tenants,\n 'display_name': args.display_name,\n 'status': args.status,\n 'vsm_id': args.vsm_id,\n }\n\n snapshots = cs.vsm_snapshots.list(search_opts=search_opts)\n _translate_vsm_snapshot_keys(snapshots)\n utils.print_list(snapshots,\n ['ID', 'Volume ID', 'Status', 'Display Name', 'Size'])", "title": "" }, { "docid": "7801443195e6a98ae3d6865650f4df7b", "score": "0.5321124", "text": "def console_get_all_by_instance(context, instance_uuid):\n return IMPL.console_get_all_by_instance(context, instance_uuid)", "title": "" }, { "docid": "15571a98728f411eed1ae9d040e1d61f", "score": "0.53165", "text": "def get_instances(self):\n return [\"<%s prefix:%s (uid:%s)>\" % (self.__class__.__name__,\n i.prefix, self.uid)\n for i in self.instances]", "title": "" }, { "docid": "48c8e255a64a204ed381f548605eb9ca", "score": "0.53146684", "text": "def list(self):", "title": "" }, { "docid": "48c8e255a64a204ed381f548605eb9ca", "score": "0.53146684", "text": "def list(self):", "title": "" }, { "docid": "71d0f5645dfc2dfa021caba22e730b0d", "score": "0.5307477", "text": "def list(self, path):\n pass", "title": "" }, { "docid": "08bdbf4559dd29de464efc1c4d82d3c3", "score": "0.5306467", "text": "def resource_get_all(deployment_uuid, provider_name=None, type=None):\n return IMPL.resource_get_all(deployment_uuid,\n provider_name=provider_name,\n type=type)", "title": "" }, { "docid": "92b976c54215035667666abcde088031", "score": "0.53040695", "text": "def all_ticker(self) -> List[str]:\n return sorted(self.cache.ID.unique().tolist())", "title": "" }, { "docid": "9fade87c6fc6b204392926e23fe39eeb", "score": "0.52855074", "text": "def do_node_list(cs, args):\n nodes = cs.nodes.list()\n columns = ('uuid', 'type', 'image_id')\n utils.print_list(nodes, columns,\n {'versions': magnum_utils.print_list_field('versions')})", "title": "" }, { "docid": "89b743e5125c57eb93968b790d334506", "score": "0.5276971", "text": "def detail_list(self, request, backups):\n return self._list_view(self.detail, request, backups)", "title": "" }, { "docid": "8fef89744e085dea14eddf37c20b5d57", "score": "0.5276142", "text": "def read_all_user_uuids(org_uuid: str, limit: int = 1_000) -> Set[str]:\n\n start = 0\n total = 1\n all_employee_uuids = set()\n while start < total:\n employee_list = 
os2mo.os2mo_get(\n f\"{{BASE}}/o/{org_uuid}/e/?limit={limit}&start={start}\"\n ).json()\n\n batch = set(map(itemgetter(\"uuid\"), employee_list[\"items\"]))\n all_employee_uuids.update(batch)\n start = employee_list[\"offset\"] + limit\n total = employee_list[\"total\"]\n return all_employee_uuids", "title": "" }, { "docid": "99ba73b33f0a0a3051b4b2958e774015", "score": "0.5274784", "text": "def list_entries(location):\n assert location\n abs_location = os.path.abspath(os.path.expanduser(location))\n assert os.path.isfile(abs_location)\n\n # TODO: harden error handling\n with Archive(abs_location) as archive:\n for entry in archive:\n yield entry", "title": "" }, { "docid": "0e92c9c0066b2e3509babff9c741415c", "score": "0.527349", "text": "def get_tag_list(self) :\r\n\t\treturn self.get_api_list(CONST.URI_PREFIXE_API + CONST.API_LIST_TAG)", "title": "" }, { "docid": "c8a2ec323c4a19c286a21d381ba1a625", "score": "0.52668023", "text": "def list_command(args):\n # REVIEW: Can optimize by doing prefix queries of remote files.\n list_remote_files()\n list_file_info(args, options.listing)", "title": "" }, { "docid": "b5efd6a6a4ed906542df043f813beef7", "score": "0.52486616", "text": "def list(self):\n return self.http_get('/volumes')", "title": "" }, { "docid": "9478ac39587c8d5875d0c7ddcf982fa8", "score": "0.5248022", "text": "def download_multiple(\n cls, uuid_list: list, path: str = \".\"\n ) -> Tuple[Response, str]:\n with requests.post(base_url, stream=True, data={\"ids\": uuid_list}) as r:\n d = r.headers[\"content-disposition\"]\n fname = re.findall(\"filename=(.+)\", d)[0]\n local_filename = (\n fname\n if fname\n else f\"gdc_download_{datetime.now().strftime('%Y%m%d%H%M%S')}.tar.gz\"\n )\n total_size = int(r.headers.get(\"content-length\", 0))\n bar = tqdm(total=total_size, unit=\"iB\", unit_scale=True)\n with open(os.path.join(path, local_filename), \"wb\") as f:\n for data in r.iter_content(chunk_size=1024):\n size = f.write(data)\n bar.update(size)\n return r, local_filename", "title": "" }, { "docid": "8a1053a1972b14b4e18647c9a7d5f4bd", "score": "0.5247968", "text": "def list_all(self):\n return self.list_by_allianz(-1)", "title": "" }, { "docid": "60b86f1ddc146c27a910d32db690fa72", "score": "0.5247938", "text": "def snapshot_schedule_template_uuids(self) -> list:\n return self.__snapshot_schedule_template_uuids", "title": "" }, { "docid": "60b86f1ddc146c27a910d32db690fa72", "score": "0.5247938", "text": "def snapshot_schedule_template_uuids(self) -> list:\n return self.__snapshot_schedule_template_uuids", "title": "" }, { "docid": "60b86f1ddc146c27a910d32db690fa72", "score": "0.5247938", "text": "def snapshot_schedule_template_uuids(self) -> list:\n return self.__snapshot_schedule_template_uuids", "title": "" }, { "docid": "65ab0d94886b470300149a839ecc06d5", "score": "0.52388465", "text": "def test_list_instance_uuids_hide_vms(self):\n host = fake.HostSystem()\n vm_folder = fake.Folder(name='fake-folder')\n fake._create_object('Folder', vm_folder)\n ignore_folder = fake.Folder(name='pf9_cinder_volumes')\n fake._create_object('Folder', ignore_folder)\n respool = fake.ResourcePool()\n fake._create_object('ResourcePool', respool)\n cluster = fake.ClusterComputeResource()\n cluster._add_root_resource_pool(respool.obj)\n fake._create_object('ClusterComputeResource', cluster)\n\n virtualMachine1 = fake.VirtualMachine(name=\"VM-1\",\n instanceUuid=\"test-uuid-1\",\n runtime_host=host.obj,\n parent=vm_folder.obj,\n resourcePool=respool.obj)\n virtualMachine2 = 
fake.VirtualMachine(name=\"VM-2\",\n instanceUuid=\"test-uuid-2\",\n runtime_host=host.obj,\n parent=ignore_folder.obj,\n resourcePool=respool.obj)\n\n fake_vms = fake.FakeRetrieveResult()\n fake_vms.add_object(virtualMachine1)\n fake_vms.add_object(virtualMachine2)\n fake_folders = fake.FakeRetrieveResult()\n fake_folders.add_object(vm_folder)\n fake_folders.add_object(ignore_folder)\n fake_pools = fake.FakeRetrieveResult()\n fake_pools.add_object(respool)\n\n session = fake_session(fake_virtual_machines=fake_vms,\n fake_vm_folder=fake_folders,\n fake_resource_pools=fake_pools)\n driver = fake_driver(session, cluster_ref=cluster.obj)\n\n instance_uuids = vm_util.list_instance_uuids(driver)\n self.assertIn(\"test-uuid-1\", instance_uuids)\n self.assertNotIn(\"test-uuid-2\", instance_uuids)\n self.assertEquals(1, len(instance_uuids))", "title": "" }, { "docid": "b62e13b45daa52cc186fd326fd478bc2", "score": "0.5237343", "text": "def do_list(self, args):\n\t\tparts = shlex.split(args)\n\t\tfilters = {}\n\n\t\tids_only = False\n\n\t\twhile len(parts) != 0:\n\t\t\tkey = parts[0]\n\t\t\tif key == \"-l\" or key == \"--list\":\n\t\t\t\tids_only = True\n\t\t\t\tparts = parts[1:]\n\t\t\t\tcontinue\n\n\t\t\tif key.startswith(\"-\"):\n\t\t\t\tkey = key.replace(\"-\", \"\")\n\t\t\t\tif key not in [\"md5\", \"id\", \"_id\", \"length\", \"content_type\", \"contentType\"] and not key.startswith(\"metadata\"):\n\t\t\t\t\tkey = \"metadata.\" + key\n\n\t\t\t\tvalue = parts[1]\n\n\t\t\t\tif re.match(r'^[0-9]+$', value):\n\t\t\t\t\tvalue = int(value)\n\n\t\t\t\t# allow multiple values - gets treated as looking for that key's value\n\t\t\t\t# to be one of the provided values. E.g. id in [\"ID1\", \"ID2\", \"ID3\", ...]\n\t\t\t\tif key in filters:\n\t\t\t\t\tif not isinstance(filters[key], list):\n\t\t\t\t\t\tfilters[key] = [filters[key]]\n\t\t\t\t\tfilters[key].append(value)\n\t\t\t\telse:\n\t\t\t\t\tfilters[key] = value\n\n\t\t\t\tparts = parts[2:]\n\t\t\telse:\n\t\t\t\tparts = parts[1:]\n\n\t\tres = self._talus_client.corpus_list(**filters)\n\n\t\tif ids_only:\n\t\t\tfor cfile in res:\n\t\t\t\tprint(cfile[\"_id\"][\"$oid\"])\n\t\t\treturn\n\n\t\theaders = [\"id\", \"size (bytes)\", \"md5\", \"content-type\", \"upload date\", \"other attrs\"]\n\t\tvalues = []\n\n\t\tprint(\"{} corpus files found\".format(len(res)))\n\t\tfor cfile in res:\n\t\t\t# {\n\t\t\t#\tu'contentType': u'text/plain',\n\t\t\t#\tu'chunkSize': 261120,\n\t\t\t#\tu'metadata': {u'filename': None},\n\t\t\t#\tu'length': 5,\n\t\t\t#\tu'uploadDate': {u'$date': 1439550357245},\n\t\t\t#\tu'_id': {u'$oid': u'55cdcb95dd18da0008caa791'},\n\t\t\t#\tu'md5': u'0d599f0ec05c3bda8c3b8a68c32a1b47'\n\t\t\t#}\n\t\t\tvalues.append([\n\t\t\t\tcfile[\"_id\"][\"$oid\"],\n\t\t\t\tcfile[\"length\"],\n\t\t\t\tcfile[\"md5\"],\n\t\t\t\tcfile[\"contentType\"],\n\t\t\t\tarrow.get(cfile[\"uploadDate\"][\"$date\"]/1000.0).humanize(),\n\t\t\t\t\" \".join(\"{}={}\".format(k,v) for k,v in cfile[\"metadata\"].iteritems())\n\t\t\t])\n\n\t\tprint(tabulate(values, headers=headers))", "title": "" }, { "docid": "1346e4a6fbbd1f6166887d82b1e315c3", "score": "0.5232431", "text": "def list(self):\n return self._subresources(Revision, get(\n self.path(),\n auth = self.auth\n ))", "title": "" }, { "docid": "f474c85d30b06b6abbb0efdb7f6fd318", "score": "0.5224673", "text": "def _get_uuid(self, *args: Any, **kwargs: Any) -> str:", "title": "" }, { "docid": "6edd61a4549fc67c3a5cebe52b0f25b2", "score": "0.52230084", "text": "def __get_all_ids(self, table):\n id_list = []\n for r in 
self.get_all_data(table=table):\n logger.info(\"Id %s found.\", r['id'])\n id_list.append(str(r['id']))\n\n return id_list", "title": "" }, { "docid": "b1ff4d51ab7c797aabb034df696e218f", "score": "0.52210426", "text": "def getBackupList(self):\n cmd = \"ls -d %s/20*\" % os.path.join(self.settings(\"backup-destination\"), \n self.settings('local-hostname'))\n st, response = self.remoteCommand(cmd)\n bu_list = []\n if st == 0:\n bu_list = response.split('\\n')\n bu_list.sort()\n\n return bu_list", "title": "" }, { "docid": "04d3d40ff266cbc708700a6a63a5cf91", "score": "0.52163225", "text": "def get_users(self, uuid=None, **kwargs):\n return self.request('users', uuid=uuid, **kwargs)", "title": "" }, { "docid": "54f32d3bf727fde3ba57efd55a60a03f", "score": "0.5214996", "text": "def get_assets_ids():\n asset_with_id = dao.get_all_listed_assets()\n asset_ids = [a[\"id\"] for a in asset_with_id if \"id\" in a]\n return Response(json.dumps(asset_ids), 200, content_type=\"application/json\")", "title": "" }, { "docid": "5112c2009254ae5420c0f7eeb0116469", "score": "0.5213273", "text": "def manga_list(self):\n return self.session.manga_list(self.username)", "title": "" }, { "docid": "27b4c1a4c4d938b3be9f9e700caf0d34", "score": "0.52128625", "text": "def _get_id_list(self):\n raise NotImplementedError()", "title": "" }, { "docid": "4426f8983d811b3767fb1910d2ac8892", "score": "0.5209409", "text": "def get_partition_uuids(self, node):\n return self._command(node=node,\n method='standby.get_partition_uuids',\n params={},\n wait=True)", "title": "" }, { "docid": "fd9bca57c6bc89a7b23cbc432c04d062", "score": "0.5208815", "text": "def snapshot_get_all_for_volume(context, volume_id):\n return IMPL.snapshot_get_all_for_volume(context, volume_id)", "title": "" }, { "docid": "3826c4ef59d9ab790033fec1376609e4", "score": "0.5207876", "text": "def _get_file_list(self):\n p = custom_popen([SZ_TOOL, '-sccUTF-8', '-scsUTF-8', 'l', self.archive.encode(locale.getpreferredencoding())])\n out = p.communicate()[0]\n m = re.findall(SZ_L, out)\n if not m:\n p = custom_popen([SZ_TOOL, 'l', self.archive.encode(locale.getpreferredencoding())])\n out = p.communicate()[0]\n m = re.findall(SZ_L, out)\n self._list = [FileInfo(*x) for x in m if len(x) == 6]", "title": "" }, { "docid": "da4e1a928f723cc6e0234f26eb6c7b00", "score": "0.51995426", "text": "def getListing(self, directory: ghidra.formats.gfilesystem.GFile) -> List[ghidra.formats.gfilesystem.GFile]:\n ...", "title": "" }, { "docid": "c406735fc459db74a7459090c9627bc3", "score": "0.51766855", "text": "def info(self, ids=None, arguments=None, timeout=None):\r\n if not arguments:\r\n arguments = self.torrent_get_arguments\r\n return self._request('torrent-get', {'fields': arguments}, ids, timeout=timeout)", "title": "" }, { "docid": "a853454b5935dcdb4b27cac3667a397f", "score": "0.5173479", "text": "def list_items(lib, opts, args):\n\n ids = []\n with lib.transaction() as tx:\n sql = 'SELECT id FROM last_import'\n ids = [row['id'] for row in tx.query(sql)]\n for item_id in ids:\n for item in lib.items('id::^{}$'.format(item_id)):\n print_(str(item))", "title": "" }, { "docid": "cc3025b6f4eaa3e0f34f1b7e0ebfdb91", "score": "0.5170614", "text": "def list_all_repr(self):\n from anima.dcc.mayaEnv import Maya\n\n m = Maya()\n v = m.get_version_from_full_path(self.path)\n\n if v is None:\n return []\n\n rep = Representation(version=v)\n return rep.list_all()", "title": "" }, { "docid": "2198a94780b28065bbfed17bbdbd8b30", "score": "0.51611096", "text": "def list_snapshots(self, 
tags=None, last=False, path=None):\n tags = tags or []\n cmd = [\"restic\", \"snapshots\", \"--json\"]\n for tag in tags:\n cmd.extend([\"--tag\", tag])\n\n if path:\n cmd.extend([\"--path\", path])\n if last:\n cmd.append(\"--last\")\n proc = self._run_cmd(cmd)\n return json.loads(proc.stdout)", "title": "" }, { "docid": "9a2ca42e87013a58b6cc3f228a539ae4", "score": "0.51606435", "text": "def list(self):\n\n return self._list(self._path(), 'registry_images')", "title": "" }, { "docid": "897b0598b1316268711c3f1fde1cba8f", "score": "0.5159357", "text": "def list_artifacts(self, path):\n pass", "title": "" }, { "docid": "6cb446bd267bba6a8b0c74b5a7e8e88a", "score": "0.51490015", "text": "def test_id_server_uuids_uuid_get(self):\n response = self.client.open(\n '/id-server/entities_by_uuid/{uuid}'.format(uuid=TEST_UUID_2),\n method='GET')\n print(response.data.decode('utf-8'))\n self.assertStatus(response, 200,\n 'Response body is : ' + response.data.decode('utf-8'))", "title": "" }, { "docid": "11965378088185d108b116c21c038ff9", "score": "0.5130463", "text": "def list(self):\n raise NotImplementedError", "title": "" }, { "docid": "11965378088185d108b116c21c038ff9", "score": "0.5130463", "text": "def list(self):\n raise NotImplementedError", "title": "" }, { "docid": "870677bfc204c6ecedcfd6cc7014efd1", "score": "0.5127465", "text": "def list(self):\n self._send_command('List')", "title": "" }, { "docid": "b6318db2f812714a48cb2710f2fc8455", "score": "0.51202744", "text": "def list(self):\n\n return self.repo.list()", "title": "" }, { "docid": "d48de1e29db037ad723f488cabb8fc48", "score": "0.5119767", "text": "def get_links() -> List[bytes]:\n client = redis.Redis(host,port,0)\n links = client.lrange('links',0,-1)\n return links", "title": "" }, { "docid": "00409ec55babf40e1d3157c94b8b231a", "score": "0.5113705", "text": "def get_all_instruments():\n rm = visa.ResourceManager()\n return rm.list_resources()", "title": "" }, { "docid": "649bf4d88d2693974e33146826e73731", "score": "0.5111102", "text": "def list_versions(self) -> List[str]:\n raise NotImplementedError()", "title": "" }, { "docid": "92f03ec7c3f6cc23b11b1d20cbded23d", "score": "0.5094682", "text": "def _listArchive(self, archivePath):\n\n\t\tprint \"Checking archive path %s \" % archivePath\n\n\t\t# make sure the backup directory exists\n\t\tif not os.path.exists(archivePath):\n\t\t\tprint \"Archive path %s doesn't exist\" % archivePath\n\t\t\treturn\n\t\t\n\t\t# get immediate files and directories\n\t\tdirList = os.listdir(archivePath)\n\n\t\tif not dirList:\n\t\t\t# list is empty\n\t\t\tprint \"Archive dir %s is empty\" % archivePath\n\t\t\treturn\n\n\t\t# sort the results\n\t\tlist.sort(dirList)\n\n\t\tfor name in dirList:\n\t\t\tfullNamePath = os.path.join(archivePath, name)\n\t\t\tif os.path.isfile(fullNamePath):\n\t\t\t\tprint \"(file) %s\" % fullNamePath\n\t\t\telse:\n\t\t\t\tprint \"(dir) %s\" % fullNamePath", "title": "" }, { "docid": "4b1676a69a0a17aa388addd33da96a3d", "score": "0.50876147", "text": "def show_tags(repository: str):\n data = async_task([Repository(repository)], \"get_tags\")\n print(\"Tag list for repository : {}\".format(repository))\n for tag in data[0]:\n print(\" - {}\".format(tag.tag))", "title": "" }, { "docid": "78d5c7e33d8af73aab9e27d5bfa32cfd", "score": "0.50624985", "text": "def vios_uuids(self):\n # Get the hosting UUID\n if self._nl_vios_ids is None:\n nl_vios_wrap = partition.get_mgmt_partition(self.adapter)\n self._nl_vios_ids = [nl_vios_wrap.uuid]\n return self._nl_vios_ids", "title": "" }, { 
"docid": "29e8a658e9ffa1703f329d9ae72ac310", "score": "0.50594723", "text": "def dumpList(self):\n return self.usercommand(120)", "title": "" }, { "docid": "e5840eb25f12410d8e05345dc7ba8e51", "score": "0.50591", "text": "def list_(*args):\n return list(args)", "title": "" }, { "docid": "10d3aa0e5219be23c38d70d2faa7a9e9", "score": "0.50584793", "text": "def get_all(self):\n pass", "title": "" }, { "docid": "5a1f847e1c81dbaf8ac1202668f42b7d", "score": "0.50534654", "text": "def list(self, details=False, new=False):\n list_archives_endpoint = self._archive_endpoint\n logger.debug(\"List archives endpoint: %s\", list_archives_endpoint)\n logger.debug(\"List archives details: %s\", str(details))\n\n archives = self.client.get(list_archives_endpoint)\n logger.debug(\"Found %d archives\", len(archives))\n\n if new:\n archives = set(archives) - set(self.get_command_history(self.name, \"read\"))\n logger.debug(\"New archives: %d\", len(archives))\n\n for archive in archives:\n yield self._details(archive) if details else archive", "title": "" }, { "docid": "a5e88a14716f6f16023045e619f68ddf", "score": "0.5051117", "text": "def _GetAllAssets(self, response):\n try:\n results, registered_portable_set = self._GetDatabaseList()\n results.extend(self._GetPortableGlobesList(registered_portable_set))\n http_io.ResponseWriter.AddJsonBody(\n response, constants.STATUS_SUCCESS, results)\n except exceptions.PublishServeException as e:\n logger.error(e)\n http_io.ResponseWriter.AddJsonFailureBody(response, str(e))\n except psycopg2.Warning as w:\n logger.error(w)\n http_io.ResponseWriter.AddJsonFailureBody(response, str(w))\n except psycopg2.Error as e:\n logger.error(e)\n http_io.ResponseWriter.AddJsonFailureBody(response, str(e))\n except Exception as e:\n logger.error(e)\n http_io.ResponseWriter.AddJsonFailureBody(\n response, \"Server-side Internal Error: {0}\".format(e))", "title": "" }, { "docid": "581e40361035dde37895f1557e8b93ad", "score": "0.50463927", "text": "def getFilesToArchive(*args, **kwargs):\n \n pass", "title": "" } ]
e0d4f32a4abc50f1cee0808579b0aa39
Computes and returns a file's hash digest.
[ { "docid": "ff27398279d04e808916db9a1727ec9e", "score": "0.75774205", "text": "def get_file_hash(filename):\n\t# Use SHA-2 256-bit\n\thash_function = hashlib.sha256()\n\t# Use the storage's default buffer\n\tbuffer_size = io.DEFAULT_BUFFER_SIZE\n\twith open(filename, \"rb\") as handler:\n\t\tbuffer = handler.read(buffer_size)\n\t\t# While the buffer is not empty\n\t\twhile len(buffer) > 0:\n\t\t\t# Update the hash digest\n\t\t\thash_function.update(buffer)\n\t\t\t# Get the next buffer\n\t\t\tbuffer = handler.read(buffer_size)\n\t# Return the digest as uppercase hexadecimal\n\treturn hash_function.hexdigest().upper()", "title": "" } ]
[ { "docid": "d79ccda8a7f00e31c016ed25ae02799b", "score": "0.809099", "text": "def _digest_file(file):\n BUF_SIZE = 65536\n\n m = hashlib.sha256()\n with open(file, 'rb') as f:\n while True:\n buf = f.read(BUF_SIZE)\n if not buf:\n break\n m.update(buf)\n\n return m.hexdigest()", "title": "" }, { "docid": "3aa84e50e8c3e1e96c248b134880d131", "score": "0.79340106", "text": "def computeHashOfFile(f, digests, bufsize=0x1000):\n arr = f.read(bufsize)\n if len(arr)==0:\n return None # empty file, no hash\n while arr:\n for m in digests:\n m.update(arr)\n arr = f.read(bufsize)\n return [m.digest() for m in digests]", "title": "" }, { "docid": "dcba3c89bac1084b5ce36edcac3df7d3", "score": "0.76963395", "text": "def calc_hash(self):\n try:\n m = hashlib.sha1()\n read_fd = open( self.path, \"r\")\n buff = read_fd.read()\n m.update( buff )\n read_fd.close() \n \n return m.hexdigest()\n except Exception, inst:\n iftlog.exception(\"iftfile.calc_hash\", inst)\n return \"\"", "title": "" }, { "docid": "dd2f4f3d2e8ab8079c073d12e5262f47", "score": "0.7668045", "text": "def hash_file(file):\n file_data = file.read()\n file.seek(0) # Resets read pointer\n return sha1(file_data).hexdigest()", "title": "" }, { "docid": "5e260a4655e1c5b82893a2fffccc8f2a", "score": "0.7634614", "text": "def getHashForFile(f):\n hashVal = hashlib.sha1()\n while True:\n r = f.read(1024)\n if not r:\n break\n hashVal.update(r)\n f.seek(0)\n\n return hashVal.hexdigest()", "title": "" }, { "docid": "14786f4bd8bf7dba6068007a0c2a783b", "score": "0.76021516", "text": "def hash_file(f):\n h = sha1()\n h.update(b'file\\n')\n for chunk in BufferedReader(f):\n h.update(chunk)\n return h.hexdigest()", "title": "" }, { "docid": "b36361096885838807dc18df3f93e32c", "score": "0.75935155", "text": "def _CalculateDigestHash(self, file_entry, data_stream_name):\n file_object = file_entry.GetFileObject(data_stream_name=data_stream_name)\n if not file_object:\n return None\n\n file_object.seek(0, os.SEEK_SET)\n\n hasher_object = hashers_manager.HashersManager.GetHasher('sha256')\n\n data = file_object.read(self._READ_BUFFER_SIZE)\n while data:\n hasher_object.Update(data)\n data = file_object.read(self._READ_BUFFER_SIZE)\n\n return hasher_object.GetStringDigest()", "title": "" }, { "docid": "225c48e5223a9e9f410f8d3f2c3697ac", "score": "0.7565348", "text": "def compute_hash(file_path):\n\n h = hashlib.sha1()\n\n with open(file_path, 'rb') as file:\n chunk = 0\n while not chunk:\n chunk = file.read(1024)\n h.update(chunk)\n\n return h.hexdigest()", "title": "" }, { "docid": "4f3c4b254e206e336880bd8e92c37bff", "score": "0.75503606", "text": "def hash_file(filename):\r\n\r\n # make a hash object\r\n h = hashlib.sha1()\r\n\r\n # open file for reading in binary mode\r\n with open(filename,'rb') as file:\r\n\r\n # loop till the end of the file\r\n chunk = 0\r\n while chunk != b'':\r\n # read only 1024 bytes at a time\r\n chunk = file.read(1024)\r\n h.update(chunk)\r\n\r\n # return the hex representation of digest\r\n return h.hexdigest()", "title": "" }, { "docid": "727b07e368283b38a78875138d3340e8", "score": "0.75466853", "text": "def get_hash(file_path):\n read_size = 64 * 1024\n with open(file_path, 'rb') as f:\n data = f.read(read_size)\n f.seek(-read_size, os.SEEK_END)\n data += f.read(read_size)\n return hashlib.md5(data).hexdigest()", "title": "" }, { "docid": "197ca9df949bb817edbeb51dfead5b41", "score": "0.7475144", "text": "def hash_file(filename):\n\n # make a hash object\n h = hashlib.sha1()\n\n # open file for reading in binary mode\n with 
open(filename, 'rb') as file:\n # loop till the end of the file\n chunk = 0\n while chunk != b'':\n # read only 1024 bytes at a time\n chunk = file.read(1024)\n h.update(chunk)\n\n # return the hex representation of digest\n return h.hexdigest()", "title": "" }, { "docid": "d2d79d0699ad6ba28ddf9b41912250af", "score": "0.74567366", "text": "def hash_file(filename):\n\n # make a hash object\n h = hashlib.sha1()\n\n # open file for reading in binary mode\n with open(filename,'rb') as file:\n\n # loop till the end of the file\n chunk = 0\n while chunk != b'':\n # read only 1024 bytes at a time\n chunk = file.read(1024)\n h.update(chunk)\n\n # return the hex representation of digest\n return h.hexdigest()", "title": "" }, { "docid": "ce36db71ccc0d16e61579bbd9153805a", "score": "0.7455763", "text": "def hash_file(f):\n from hashlib import md5\n m = md5()\n while True:\n buf = f.read(65535)\n if not buf:\n return m.hexdigest()\n m.update(buf)", "title": "" }, { "docid": "51acf3e80be21eee16b15a96af9cdeeb", "score": "0.74398243", "text": "def hash_file(filename):\n # make a hash object\n h = hashlib.sha1()\n # open file for reading in binary mode\n with open(filename,'rb') as file:\n # loop till the end of the file\n chunk = 0\n while chunk != b'':\n # read only 1024 bytes at a time\n chunk = file.read(1024)\n h.update(chunk)\n # return the hex representation of digest\n return h.hexdigest()", "title": "" }, { "docid": "506432a09df92a69c843888d072f9619", "score": "0.7438069", "text": "def hash_file(filename):\n\n # make a hash object\n h = hashlib.sha1()\n\n # open file for reading in binary mode\n with open(filename, 'rb') as file:\n\n # loop till the end of the file\n chunk = 0\n while chunk != b'':\n # read only 1024 bytes at a time\n chunk = file.read(1024)\n h.update(chunk)\n\n # return the hex representation of digest\n return h.hexdigest()", "title": "" }, { "docid": "409add8e1b28e2193dda50152a7525e6", "score": "0.7437886", "text": "def get_file_checksum(self, filename):\n with open(filename, 'rb') as f:\n file_hash = hashlib.md5()\n chunk = f.read(8192)\n while chunk:\n file_hash.update(chunk)\n chunk = f.read(8192)\n return file_hash.hexdigest()", "title": "" }, { "docid": "ce3b369c100010e17a7cef92b79aeda7", "score": "0.7427097", "text": "def hash_file(filename):\n h = hashlib.sha1()\n # open file for reading in binary mode\n with open(filename, 'rb') as file:\n # loop till the end of the file\n chunk = 0\n while chunk != b'':\n # read only 1024 bytes at a time\n chunk = file.read(1024)\n h.update(chunk)\n # return the hex representation of digest\n return h.hexdigest()", "title": "" }, { "docid": "b537d4614da357b09361a317b882e337", "score": "0.74266344", "text": "def compute_file_hash(git_hash, path):\n return hashlib.sha1('{}:{}'.format(git_hash, path).encode()).hexdigest()", "title": "" }, { "docid": "344f7f51fec3cdeb7192b175730ce3e6", "score": "0.741107", "text": "def hashfile_sha256(file):", "title": "" }, { "docid": "8568127250eecf967d4d91d41f92f35d", "score": "0.73898983", "text": "def compute_hash(filepath):\n hasher = md5()\n\n with io.open(filepath, 'rb') as stream:\n chunk = stream.read(HASHING_BLOCK_SIZE)\n while len(chunk) > 0:\n hasher.update(chunk)\n chunk = stream.read(HASHING_BLOCK_SIZE)\n\n md5_binary = hasher.digest()\n md5_bytes = b64encode(md5_binary)\n md5_unicode = md5_bytes.decode('utf-8')\n\n return md5_unicode", "title": "" }, { "docid": "ee93bd0c83dfdd3248540364804e7ab6", "score": "0.7388421", "text": "def _getFileChecksum(self, file_name_and_path):\n\n # 
algorithm from http://pythoncentral.io/hashing-files-with-python/\n hasher = hashlib.md5()\n with open(file_name_and_path, 'rb') as afile:\n buf = afile.read()\n hasher.update(buf)\n return hasher.hexdigest()", "title": "" }, { "docid": "e87462a21efbac76fb0405fd334f7915", "score": "0.7382486", "text": "def file_hash(path):\n\n HASH_BLOCKSIZE = 1 << 20\n hash = hashlib.md5()\n\n with open(path, 'rb') as f:\n while True:\n data = f.read(HASH_BLOCKSIZE)\n\n if not data:\n break\n\n hash.update(data)\n\n h = int(hash.hexdigest(), 16)\n\n return h", "title": "" }, { "docid": "f089bb18629af1d14edc13a0b3fbbeff", "score": "0.73699826", "text": "def _calculate_hash(self, file_object: IO) -> str:\n hasher = self.hashlib()\n for chunk in self.iterchunks(file_object):\n hasher.update(chunk)\n return hasher.hexdigest()", "title": "" }, { "docid": "d5e319aa8d7e251f4873a27ec809fac4", "score": "0.73603517", "text": "def file_hash(pth):\n\tshaer = sha256()\n\twith open(pth, 'rb') as fh:\n\t\tdata = ' '\n\t\twhile data:\n\t\t\tdata = fh.read(32768)\n\t\t\tshaer.update(data)\n\treturn b'sha256__' + shaer.digest()", "title": "" }, { "docid": "991eaa9e880d54811da45128cb6ad906", "score": "0.73348856", "text": "def hashof(fn):\n\n h = hashlib.md5()\n\n with open(fn,'rb') as f:\n while True:\n t = f.read(4096)\n if len(t) == 0:\n break\n h.update(t)\n \n return h.hexdigest()", "title": "" }, { "docid": "7c8ab2a34d989a945a7dc803c349c23f", "score": "0.72873354", "text": "def _hash_file(filename):\n BLOCKSIZE = 65536\n hasher = hashlib.sha1()\n with open(filename, \"rb\") as afile:\n buf = afile.read(BLOCKSIZE)\n while len(buf) > 0:\n hasher.update(buf)\n buf = afile.read(BLOCKSIZE)\n\n return hasher.hexdigest()", "title": "" }, { "docid": "1ed3b68581c9798c9029375a839e6926", "score": "0.72869915", "text": "def get_file_hash(filename):\n file = codecs.open(filename, \"r\", encoding='utf-8')\n md5 = hashlib.md5()\n while True:\n data = file.read(128).encode('utf-8')\n if not data:\n break\n md5.update(data)\n\n file.close()\n return md5.digest()", "title": "" }, { "docid": "f19f8afe1ea1ab39146b1ee5f67c9ef6", "score": "0.7283765", "text": "def create_filehash(path):\r\n h = hasher()\r\n with open(path, \"rb\") as f:\r\n buf = f.read(BLOCKSIZE)\r\n while len(buf) > 0:\r\n h.update(buf)\r\n buf = f.read(BLOCKSIZE)\r\n return h.digest().encode(\"base64\").strip().strip(\"=\")", "title": "" }, { "docid": "fe22f414a538bb227dc0bfbd4838ce82", "score": "0.7283721", "text": "def file_hash(fname):\n # Calculate the hash in chunks to avoid overloading the memory\n chunksize = 65536\n hasher = hashlib.sha256()\n with open(fname, \"rb\") as fin:\n buff = fin.read(chunksize)\n while buff:\n hasher.update(buff)\n buff = fin.read(chunksize)\n return hasher.hexdigest()", "title": "" }, { "docid": "fe22f414a538bb227dc0bfbd4838ce82", "score": "0.7283721", "text": "def file_hash(fname):\n # Calculate the hash in chunks to avoid overloading the memory\n chunksize = 65536\n hasher = hashlib.sha256()\n with open(fname, \"rb\") as fin:\n buff = fin.read(chunksize)\n while buff:\n hasher.update(buff)\n buff = fin.read(chunksize)\n return hasher.hexdigest()", "title": "" }, { "docid": "91012ba6ac8269d70f28e10113d88bd1", "score": "0.7271207", "text": "def get_hash( filename ):\n m = hashlib.sha1()\n try:\n file_handle = open(filename)\n while True:\n chunk = file_handle.read( DEFAULT_FILE_CHUNKSIZE )\n if len(chunk) == 0:\n break\n m.update( chunk )\n \n return m.hexdigest()\n except:\n return E_FILE_NOT_FOUND", "title": "" }, { "docid": 
"573c67e828d2f18ef760f18b258373ea", "score": "0.72473085", "text": "def getFileHash(self,fname):\n fo = open(fname)\n file_hash = hashlib.sha256(fo.read()).hexdigest()\n fo.close()\n return(file_hash)", "title": "" }, { "docid": "957c0d81f80c20ac7858b1f2149025f4", "score": "0.72382146", "text": "def file_checksum(filename):\n return sha256(open(filename).read()).hexdigest()", "title": "" }, { "docid": "899e4204c98804b4193e3fe5f1b55c2c", "score": "0.7237199", "text": "def file_hash(fileobj):\n hsh = hashlib.sha256()\n with fileobj.open('rb') as f:\n for chunk in iter(lambda: f.read(8192), ''):\n hsh.update(chunk)\n return hsh", "title": "" }, { "docid": "bd63d10e8704cbbbcbe5b09b3d6565c7", "score": "0.72183406", "text": "def hash_file(filename):\n hasher = hashlib.sha1()\n with open(filename, 'rb') as f:\n buf = f.read(65536)\n while len(buf) > 0:\n hasher.update(buf)\n buf = f.read(65536)\n\n return hasher.hexdigest()", "title": "" }, { "docid": "74aa8ae78c95f0872c9fcb763ea8797c", "score": "0.7214065", "text": "def get_file_hash(buff: IO[bytes]) -> str:\n import hashlib\n\n sha = hashlib.sha224()\n chunk_size = FILE_HASH_CHUNK_SIZE\n init_pos = buff.seek(0, io.SEEK_CUR)\n while True:\n chunk = buff.read(chunk_size)\n if not chunk:\n break\n sha.update(chunk)\n buff.seek(init_pos, io.SEEK_SET)\n return sha.hexdigest()", "title": "" }, { "docid": "a5721fb5aaf7040a50b317768f9a7c7d", "score": "0.716302", "text": "def filehash(filepath):\n import hashlib\n\n BUF_SIZE = 65536\n sha256 = hashlib.sha256()\n\n with open(filepath, \"rb\") as f:\n while True:\n data = f.read(BUF_SIZE)\n if not data:\n break\n sha256.update(data)\n\n return sha256.hexdigest()", "title": "" }, { "docid": "bc0b8409a666d89c26f5252272eb4f6a", "score": "0.71576494", "text": "def hash_file(filename, blocksize=65536):\n hasher = hashlib.sha256()\n with open( filename, \"rb\" ) as fd:\n buf = fd.read(blocksize)\n while len(buf) > 0:\n hasher.update(buf)\n buf = fd.read(blocksize)\n return hasher.digest()", "title": "" }, { "docid": "760e34a1213f5b640be6bb7363f91f3c", "score": "0.7142514", "text": "def _GetSha1Digest(file_path):\n sha = hashlib.sha1()\n with open(file_path, 'rb') as f:\n data = f.read()\n sha.update(data)\n return sha.digest()", "title": "" }, { "docid": "3ba97437728ebe2633c442f50296993f", "score": "0.71362257", "text": "def hash_file(filename):\n md5 = hashlib.md5()\n with open(filename, \"rb\") as f:\n for chunk in iter(lambda: f.read(8192), b\"\"):\n md5.update(chunk)\n return md5.hexdigest()", "title": "" }, { "docid": "1a4a4ee2918e3d8d2b0625ac0d966dc2", "score": "0.7127926", "text": "def hexdigest( self ):\n data = self.__catFiles()\n sha256 = hashlib.sha256( data )\n return sha256.hexdigest()", "title": "" }, { "docid": "8f722741f90805c26a4ea906be7879f6", "score": "0.7121576", "text": "def hash_of_file(path):\r\n with open(path, 'r') as archive:\r\n sha = sha256()\r\n while True:\r\n data = archive.read(2 ** 20)\r\n if not data:\r\n break\r\n sha.update(data)\r\n return encoded_hash(sha)", "title": "" }, { "docid": "6e2521c66639d77c68033fa20680c48c", "score": "0.7117344", "text": "def hash_file(filename, block_size=32768):\n import hashlib\n try:\n with open(filename, 'rb') as f:\n hasher = hashlib.sha256()\n while True:\n buf = f.read(block_size)\n if not buf:\n break\n hasher.update(buf)\n except IOError:\n return None\n return hasher.hexdigest()", "title": "" }, { "docid": "2c371e45661680437694502d6b56d5a5", "score": "0.71116126", "text": "def checksum(path):\n\n hasher = sha256()\n with open(path, 
\"rb\") as fp:\n for block in iter(partial(fp.read, 4096), b\"\"):\n hasher.update(block)\n return hasher.hexdigest()", "title": "" }, { "docid": "8a0ea8e2b441bb9d95c06faee80a26c6", "score": "0.71082693", "text": "def calculate_hash (file):\n sha3 = hashlib.sha3_256()\n buffer_size = 65536\n with open(file, 'rb') as f:\n while True:\n chunk = f.read(buffer_size)\n if chunk:\n sha3.update(chunk)\n else:\n break\n return sha3.hexdigest()", "title": "" }, { "docid": "41fb9aabca44692aba0532f05b1d3165", "score": "0.70960766", "text": "def file_checksum(\n filename: FileLike, hash_type: str = \"md5\", block_size: int = 4096\n) -> HASH:\n try:\n file_hash = getattr(hashlib, hash_type)()\n except AttributeError:\n raise RuntimeError(\"Invalid or unsupported hash type: %s\" % hash_type)\n\n if isinstance(filename, str):\n with open(filename, \"rb\") as file_:\n for chunk in read_in_chunks(file_, block_size=block_size):\n file_hash.update(chunk)\n else:\n for chunk in read_in_chunks(filename, block_size=block_size):\n file_hash.update(chunk)\n # rewind the stream so it can be re-read later\n if filename.seekable():\n filename.seek(0)\n\n return file_hash", "title": "" }, { "docid": "1982eed73e6c870a8cb9c944eef8922a", "score": "0.7092127", "text": "def getFileCheckSum(filename):\n md5_obj = hashlib.md5()\n\n f = None\n try:\n f = open(filename, 'rb')\n data = f.read(1024)\n while data:\n md5_obj.update(data)\n data = f.read(1024)\n f.close()\n except:\n if f:\n f.close()\n f = None\n return None\n return md5_obj.hexdigest()", "title": "" }, { "docid": "ce1f4b983116a9b5a029add45e5de54d", "score": "0.7074979", "text": "def fingerprint_file(file):\n with open(file,'rb') as fd:\n # read contents of the file\n _file_data = fd.read() \n # pipe contents of the file through\n file_fingerprint = md5(_file_data).hexdigest()\n return file_fingerprint", "title": "" }, { "docid": "13014ea12a4ab973ac60b6e652e83439", "score": "0.7064478", "text": "def hashfile(fspec, hasher=hashlib.sha256, blocksize=65536, num_characters=12):\n fh = _resolve_fspec(fspec)\n buf = fh.read(blocksize)\n hasher = hasher() # instantiate this hashing instance\n while len(buf) > 0:\n hasher.update(buf.encode('utf-8'))\n buf = fh.read(blocksize)\n return int(hasher.hexdigest(), 16) % 10 ** num_characters", "title": "" }, { "docid": "1e6012fb8cda3bfb443c613e1f4453d4", "score": "0.70446527", "text": "def filehash(filepath, blocksize=4096):\r\n sha = hashlib.sha256()\r\n with open(filepath, 'rb') as fp:\r\n while 1:\r\n data = fp.read(blocksize)\r\n if data:\r\n sha.update(data)\r\n else:\r\n break\r\n return sha.hexdigest()", "title": "" }, { "docid": "46a1774cd5a74f7d689b0935d614dac8", "score": "0.70257545", "text": "def hash_file(filepath):\n HASH_BLOCKSIZE = 65536\n hasher = hashlib.sha1()\n\n with open(filepath, \"rb\") as f:\n buf = f.read(HASH_BLOCKSIZE)\n while buf:\n hasher.update(buf)\n buf = f.read(HASH_BLOCKSIZE)\n\n return hasher.hexdigest()", "title": "" }, { "docid": "81ca58530ab47e121133069a98d69384", "score": "0.7023153", "text": "def get_hash(filename, first_chunk_only=False, hash=hashlib.sha1):\n hashobj = hash()\n file_object = open(filename, 'rb')\n\n if first_chunk_only:\n hashobj.update(file_object.read(1024))\n else:\n for chunk in chunk_reader(file_object):\n hashobj.update(chunk)\n hashed = hashobj.digest()\n\n file_object.close()\n return hashed", "title": "" }, { "docid": "3a8eb3e85a9c2e11eaaa50793af6fa3f", "score": "0.7022016", "text": "def get_hash(self, file_path):\n with open(file_path, 'rb') as f:\n return 
sha512(f.read()).hexdigest()", "title": "" }, { "docid": "f1a66efab3a417d582fdc10eab077417", "score": "0.7014707", "text": "def hash_file(filename, hasher=None, blocksize=65536):\n return hash_stream(open(filename, 'rb'), hasher, blocksize)", "title": "" }, { "docid": "b0831ddc210d0d0b6e005cb67e130c8a", "score": "0.70020586", "text": "def hash_file(file):\n md5 = hashlib.md5()\n data = file.read(BUF_SIZE)\n file_size = BUF_SIZE\n while data:\n md5.update(data)\n data = file.read(BUF_SIZE)\n file_size += BUF_SIZE\n if file_size >= MAX_SIZE:\n raise MemoryError(\"file size too large\")\n # Reset cursor for file write\n file.seek(0, 0)\n return md5.hexdigest()", "title": "" }, { "docid": "9c86852d8c690febcb0463388bb037de", "score": "0.6991933", "text": "def getFileHash(fname):\r\n curHash = hashlib.md5()\r\n try:\r\n with open(fname,\"rb\") as afile:\r\n buf = afile.read(blkSize)\r\n while len(buf)>0:\r\n curHash.update(buf)\r\n buf = afile.read(blkSize)\r\n except FileNotFoundError:\r\n return None\r\n except PermissionError:\r\n return None\r\n return curHash.hexdigest()", "title": "" }, { "docid": "db25ae36cf9a2dc0c877491cdc97cff6", "score": "0.6990566", "text": "def hashing_function(filename):\n\n md5_hash = hashlib.md5()\n\n with open(filename, \"rb\") as f:\n content = f.read()\n md5_hash.update(content)\n\n return md5_hash.hexdigest()", "title": "" }, { "docid": "9d29aead78803ae033671016f1e76887", "score": "0.6968236", "text": "def get_file_hash(file_with_path):\n with open(file_with_path, \"rb\") as f:\n return zlib.adler32(b\"\".join(f.readlines()))", "title": "" }, { "docid": "a855ae02821042eae930688ec1511c5b", "score": "0.6958615", "text": "def checksum_file(filename):\n sha256 = hashlib.sha256()\n with open(filename, \"rb\") as f:\n for block in iter(lambda: f.read(4096), b\"\"):\n sha256.update(block)\n return sha256.hexdigest()", "title": "" }, { "docid": "62c083e3b4d66952e9d903d3544613d0", "score": "0.69565296", "text": "def hash_file(fine_name, hashtype=\"sha256\", block_size=64 * 1024):\n \n with open(fine_name, 'rb') as file:\n fhash = hashlib.new(hashtype, b\"\")\n while True:\n data = file.read(block_size)\n if not data:\n break\n fhash.update(data)\n return fhash.hexdigest()", "title": "" }, { "docid": "e3cb2d3a0bc38814fc5deb14e2d0b720", "score": "0.6954799", "text": "def compute_hash( filepath, method='md5', dryrun=False, verbose=False, **kwargs):\n if 'sha256' in method:\n myhasher = hashlib.sha256()\n elif 'sha1' in method:\n myhasher = hashlib.sha1()\n elif 'md5' in method:\n myhasher = hashlib.md5()\n else:\n raise RuntimeError(\"Unsupported/invalid hash algorithm: \" + method)\n\n with open( filepath, \"rb\" ) as fb:\n for data in iter( lambda: fb.read(4096), b\"\"):\n myhasher.update( data )\n\n return myhasher.hexdigest()", "title": "" }, { "docid": "aee8c4ade7a4e541b631a253fbbc6121", "score": "0.69487613", "text": "def digest(self):\n retval = SHA256.new(self._h.digest()).digest()\n assert len(retval) == 32\n return retval", "title": "" }, { "docid": "938c9951debd4db6941dddfe29194598", "score": "0.69487226", "text": "def calc_md5(file_name):\n import hashlib\n curr_hash = hashlib.md5()\n with open(file_name, 'rb') as file_in:\n chunk = file_in.read(8192)\n while chunk:\n curr_hash.update(chunk)\n chunk = file_in.read(8192)\n return curr_hash.hexdigest()", "title": "" }, { "docid": "ecbbdc97f713083784598dc965c6640b", "score": "0.694535", "text": "def _checksum(file_path):\n BUF_SIZE = 2**16 # chunk size\n checksum = hashlib.sha256()\n with open(file_path, 'rb') as 
fp:\n while True:\n chunk = fp.read(BUF_SIZE)\n if not chunk:\n break\n checksum.update(chunk)\n\n return checksum.hexdigest()", "title": "" }, { "docid": "3e0f5fa9341d341bdf3762986d286c22", "score": "0.6936609", "text": "def sha_file(sha, f): \n read = None\n while read!='':\n read = f.read(1024*1024)\n sha.update(read)\n \n hash = sha.digest()\n sha.update(sha.digest())\n hash2 = sha.digest()\n return hash, hash2", "title": "" }, { "docid": "79444322071718df0186a4a06228b3da", "score": "0.6930955", "text": "def digest(abs_path, algorithm):\n size = 65536\n if algorithm == 'md5':\n checksum = hashlib.md5()\n elif algorithm == 'sha256':\n checksum = hashlib.sha256()\n else:\n raise ValueError(\"algorithm must be either md5 or sha256\")\n with open(abs_path, 'rb') as f:\n buf = f.read(size)\n while len(buf) > 0:\n checksum.update(buf)\n buf = f.read(size)\n return checksum.hexdigest()", "title": "" }, { "docid": "4092f9b4d39a3bf87a8b0248cf7bce61", "score": "0.6866369", "text": "def get_file_md5(self, file_path):\n hash_md5 = hashlib.md5()\n if os.path.exists(file_path):\n with open(file_path, 'rb') as file_path_f:\n while True:\n data_flow = file_path_f.read(8096)\n if not data_flow:\n break\n hash_md5.update(data_flow)\n return hash_md5.hexdigest()", "title": "" }, { "docid": "7cc361c6dc5605f9d41a80754890cc47", "score": "0.6866038", "text": "def checkCHECKSUM(file):\n f = open(file, \"rb\")\n try:\n fileHash = hashlib.md5(f.read()).hexdigest()\n finally:\n f.close()\n return fileHash", "title": "" }, { "docid": "3ad49c3b7f7e7421417ab6bcfd947714", "score": "0.6847223", "text": "def _hash_file(fpath, chunk_size=65535):\n hasher = hashlib.md5()\n\n with open(fpath, 'rb') as fpath_file:\n for chunk in iter(lambda: fpath_file.read(chunk_size), b''):\n hasher.update(chunk)\n\n return hasher.hexdigest()", "title": "" }, { "docid": "3a4bf058300365ffacb6717e1628a41d", "score": "0.68406403", "text": "def md5_filehash(self):\n return self._md5_filehash", "title": "" }, { "docid": "f7a7e8c654d493a0358e61ab6cf192c1", "score": "0.68404925", "text": "def sha1sum(self, f, block=2**20):\n if(os.path.isfile(f) and os.access(f, os.R_OK)):\n mFile = open(f,'rb');\n sha1 = hashlib.sha1()\n \n while True:\n data = mFile.read(block)\n if not data:\n break\n sha1.update(data)\n \n return sha1.hexdigest()\n \n else:\n raise IOError('File \"'+f+'\" does not exist or is unreadable.')", "title": "" }, { "docid": "4b02bbe0bb67025ead6dc61ccf6578d7", "score": "0.6826805", "text": "def _get_hash(self, file_obj):\n size = 0\n hash_buider = self.hash_builder()\n for piece in self._get_file_iterator(file_obj):\n hash_buider.update(piece)\n size += len(piece)\n\n file_obj.seek(0)\n\n return \"%s_%x\" % (hash_buider.hexdigest(), size)", "title": "" }, { "docid": "cde0cbb3ff5856f338fdb6b50c012bed", "score": "0.682662", "text": "def createChecksum(cls, filename):\n fis = io.open(filename)\n buffer_ = buffer()\n if cls.complete == None:\n cls.complete = md5.md5()\n else:\n cls.complete.reset()\n len = int()\n while True:\n len = fis.read(buffer_)\n if len > 0:\n cls.complete.update(buffer_, 0, len)\n if not ((len != -1)):\n break\n fis.close()\n return cls.complete.digest()", "title": "" }, { "docid": "68378ef327276020a3267086b2c21ee4", "score": "0.6816967", "text": "def hash(fname):\n hash_md5 = md5()\n with open(fname, 'rb') as f:\n for chunk in iter(lambda: f.read(4096), b''):\n hash_md5.update(chunk)\n return hash_md5.hexdigest()", "title": "" }, { "docid": "bb834f862da19cb8cd4d5d432a8ec091", "score": "0.6792177", "text": 
"def file_hash(self, filepath):\n\n try:\n sha256 = hashlib.sha256()\n with open(filepath, 'rb') as f:\n for block in iter(lambda: f.read(4096), b''):\n sha256.update(block)\n return sha256.hexdigest()\n except FileNotFoundError as e:\n logging.error(f\"Couldn't hash the file {filepath}: {e}\")\n return False", "title": "" }, { "docid": "9a732b49ab823b7c57a2df858f4d2f13", "score": "0.6791023", "text": "def hash_md5_file(file_path):\n str_md5 = \"\"\n if os.path.isfile(file_path):\n str_md5 = hashlib.md5(open(file_path, 'rb').read()).hexdigest()\n return str_md5", "title": "" }, { "docid": "a40ec0495f181aa32bfe8d4d0c99eb64", "score": "0.67867017", "text": "def inspect_hash(path):\n m = hashlib.sha256()\n with path.open(\"rb\") as fp:\n while True:\n data = fp.read(HASH_BLOCK_SIZE)\n if not data:\n break\n m.update(data)\n\n return m.hexdigest()", "title": "" }, { "docid": "301cea3dccd707afe3b235922b882996", "score": "0.6774713", "text": "def get_file_digests(filename, digests, block_size=2 ** 20):\r\n with open(filename, 'rb') as f:\r\n while True:\r\n data = f.read(block_size)\r\n if not data:\r\n break\r\n for d in digests:\r\n d.update(data)\r\n return digests", "title": "" }, { "docid": "4ae80079285b39412475bd6da2a0c757", "score": "0.6772481", "text": "def get_hash(self) -> str:\r\n assert self.tarfile_path\r\n if self.calculated_hash:\r\n return self.calculated_hash\r\n sha256 = hashlib.sha256()\r\n archive = tarfile.open(self.tarfile_path, 'r:gz')\r\n for member in archive.getmembers():\r\n if member.isfile():\r\n with archive.extractfile(member) as target:\r\n while True:\r\n data = target.read(HASH_READ_BUF_SIZE)\r\n if not data:\r\n break\r\n sha256.update(data)\r\n archive.close()\r\n self.calculated_hash = sha256.hexdigest()\r\n return sha256.hexdigest()", "title": "" }, { "docid": "07bcf7d4852dc5d8a74709730a70eae9", "score": "0.67711604", "text": "def checksum(fileobj, hashalg):\n if not hashalg:\n return {}\n m = { h:hashlib.new(h) for h in hashalg }\n chunksize = 8192\n while True:\n chunk = fileobj.read(chunksize)\n if not chunk:\n break\n for h in hashalg:\n m[h].update(chunk)\n return { h: m[h].hexdigest() for h in hashalg }", "title": "" }, { "docid": "dc4ac8d4c03816d6106dca35917b131f", "score": "0.6769612", "text": "def digest(self):\n return self.min_hash.digest()", "title": "" }, { "docid": "ebbe77cdf5408c9946d50c72f3c09043", "score": "0.6760986", "text": "def get_file_digests(filename, digests, block_size=2 ** 20):\n with open(filename, 'rb') as f:\n while True:\n data = f.read(block_size)\n if not data:\n break\n for d in digests:\n d.update(data)\n return digests", "title": "" }, { "docid": "03afaa67a6be95b8eda6ed0c789a5172", "score": "0.6726007", "text": "def get_file_crc(file, block_size=1024 * 14):\n h = hashlib.md5()\n # if isinstance(file, basestring):\n # file = open(file, 'rb')\n pos = file.tell()\n file.seek(0)\n block = file.read(block_size)\n while block:\n h.update(block)\n block = file.read(block_size)\n file.seek(pos)\n # TODO: iterate over file.chunks() if (file.multiple_chunks())\n # h.update(file.read()) # for InMemoryUploadedFile\n return h.hexdigest()", "title": "" }, { "docid": "7afc046ecceb8dbe2e58d5bb4b0f7aeb", "score": "0.67209977", "text": "def _calc_hash(self, path, filename, fast=True):\n full_path = os.path.join(path, filename)\n if not fast:\n with open(full_path, 'rb') as f:\n content = f.read()\n f.close()\n hash_data = hashlib.sha1(content).hexdigest()\n else:\n info = os.stat(full_path)\n content = str(info.st_mtime) + 
str(info.st_size)\n hash_data = hashlib.sha1(content.encode('utf-8')).hexdigest()\n logging.debug('Hash of %s is %s', full_path, str(hash_data))\n return hash_data", "title": "" }, { "docid": "436cfb44f23bfab198b5fa0e20a5a8b7", "score": "0.6719924", "text": "def get_file_hash(filePath):\n md5_hash = md5()\n\n try:\n with open(filePath, 'rb') as fileHandle:\n while True:\n chunk = fileHandle.read(2048)\n\n if not chunk:\n break\n\n md5_hash.update(chunk)\n except PermissionError as accessErr:\n print(\"No permission to read file: {}. {}\".format(filePath, accessErr), file=sys.stderr)\n \n return md5_hash.hexdigest()", "title": "" }, { "docid": "013710ac6bc721b47d85d93197326542", "score": "0.67179537", "text": "def sha256_file(pn):\n h = hashlib.sha256()\n with open(pn, 'rb') as f:\n h.update(f.read())\n return h.hexdigest()", "title": "" }, { "docid": "926eea425a09794310821b40ca8b5ae6", "score": "0.6692987", "text": "def _md5sum(filename):\n return hashlib.md5(open(filename, 'rb').read()).hexdigest()", "title": "" }, { "docid": "7b7efc7297d231cec0e50ebb0ff29186", "score": "0.6691657", "text": "def md5hash(fname):\n hash = md5()\n with open(fname, \"rb\") as f:\n for chunk in iter(lambda: f.read(4096), b\"\"):\n hash.update(chunk)\n return hash.hexdigest()", "title": "" }, { "docid": "5c0bbb45991c5b069966332637cd0e2f", "score": "0.669163", "text": "def get_hash_of_binary_file_contents (self, file_path, algorithm='MD5'):\n file_contents = self.read_binary_file(file_path)\n file_hash = self.get_hash_of_string(file_contents, algorithm)\n return file_hash", "title": "" }, { "docid": "eebe9fe34fa6e111b37fb7539536b690", "score": "0.66824645", "text": "def getFileNameHash(filename):\n return hashlib.md5(filename).hexdigest()", "title": "" }, { "docid": "660c2c9665c645136c645eb7ca0a1f56", "score": "0.66664094", "text": "def sha256file(abspath, nbytes=0, chunk_size=DEFAULT_CHUNK_SIZE):\n return get_file_fingerprint(abspath, hashlib.sha256, nbytes=nbytes, chunk_size=chunk_size)", "title": "" }, { "docid": "c4a90f199debb2f0c40d217dfb04616e", "score": "0.6661352", "text": "def index_hash(self):\n file_hashes = []\n # we sort to have the same files every time\n for file_name in sorted(relative_paths_in_dir(self.index_directory)):\n index_file_name = join(self.index_directory, file_name)\n if os.path.isfile(index_file_name):\n f = open(index_file_name, \"rb\")\n file_bytes = f.read()\n m = hashlib.sha256()\n # we encode the file contents\n # and we encode the file path\n m.update(file_bytes)\n m.update(file_name.encode(\"utf-8\"))\n file_hashes.append(m.hexdigest())\n m = hashlib.sha256()\n # TODO: this should be a merkle tree eventually\n # (or maybe an merkel-mountain-range), to reap the benefits\n m.update(\",\".join(file_hashes).encode('utf-8'))\n return m.hexdigest()", "title": "" }, { "docid": "3f3d0fd5b4ffe2fcee4f56f4097ff1b4", "score": "0.6650516", "text": "def md5_sum(file_path):\n with open(file_path, \"rb\") as f:\n return hashlib.md5(f.read()).hexdigest()", "title": "" }, { "docid": "8444dfedc624acbe4142402f37ff2f3e", "score": "0.66472673", "text": "def hash4file(filepath, salt=None):\r\n import hashlib\r\n m = hashlib.sha1() # Perfectionist can use sha224.\r\n\r\n if filepath:\r\n block_size = 2**16 # Perfectionist can tune.\r\n lf = open(filepath,\"r\")\r\n while True:\r\n data = lf.read(block_size)\r\n if not data:\r\n break\r\n m.update(data)\r\n lf.close()\r\n if salt:\r\n m.update(salt)\r\n return m.hexdigest()[:16]", "title": "" }, { "docid": "26068d80a8a4b2e290c510b2a13bba03", "score": 
"0.663665", "text": "def calc_checksum(filename):\n try:\n f = open(filename, \"rb\")\n contents = f.read()\n m = hashlib.md5()\n m.update(contents)\n checksum = m.hexdigest()\n return checksum\n\n except IOError:\n return None", "title": "" }, { "docid": "f6ca09f2443f6520e0a77c164fdd2108", "score": "0.6634604", "text": "def hash_file_formatted(path, hash_alg=None):\n\n hash_alg = hash_alg or DEFAULT_HASH_ALG\n hasher = hashlib.new(hash_alg)\n\n BUF_SIZE = 65536\n\n with open(path, 'rb') as f:\n while True:\n data = f.read(BUF_SIZE)\n if not data:\n break\n hasher.update(data)\n\n return util.format_hash(hash_alg, hasher.hexdigest())", "title": "" }, { "docid": "1ea1c1fa078ba482c5e2caeb672a75a3", "score": "0.663338", "text": "def sha256sum(filename):\n with open(filename, 'rb') as f:\n m = hashlib.sha256()\n while True:\n data = f.read(8192)\n if not data:\n break\n m.update(data)\n return m.hexdigest()", "title": "" }, { "docid": "c445fecacbd2b21a41cb1b455652dbf6", "score": "0.6626729", "text": "def checksum(ctx, filename):\n assert (ctx.checksum in ['SHA256', 'MD5']), 'Invalid checksum type: {0} instead of MD5 or SHA256'.format(ctx.checksum)\n try:\n if ctx.checksum == 'SHA256':\n shell = os.popen(\"sha256sum {0} | awk -F ' ' '{{ print $1 }}'\".format('{0}/{1}'.format(ctx.directory, filename)), 'r')\n elif ctx.checksum == 'MD5':\n shell = os.popen(\"md5sum {0} | awk -F ' ' '{{ print $1 }}'\".format('{0}/{1}'.format(ctx.directory, filename)), 'r')\n return shell.readline()[:-1]\n except:\n raise Exception('Checksum failed for {0}'.format('{0}/{1}'.format(ctx.directory, filename)))", "title": "" }, { "docid": "f76816c635634a0fb3b7140cb238bbc7", "score": "0.66208446", "text": "def get_hash_of_file(fname, algo=\"sha512\"):\n block_size = 65536\n if algo.lower() == \"md5\":\n _hash = hashlib.md5()\n else:\n _hash = hashlib.sha512()\n with open(fname, \"rb\") as f:\n fb = f.read(block_size)\n while fb:\n _hash.update(fb)\n fb = f.read(block_size)\n\n return _hash.hexdigest()", "title": "" }, { "docid": "731623b83e08c2a33572cc3b9196b27b", "score": "0.66079605", "text": "def get_md5_checksum(file_path):\n with open(file_path, 'rb') as fh:\n m = hashlib.md5()\n while True:\n data = fh.read(8192)\n if not data:\n break\n m.update(data)\n return m.hexdigest()", "title": "" } ]
bf03a23c0273b35a0d761f0e33ab7b44
DataBall constructor, connect to the go_fish database, define a cursor, and get the start time of the game. Initialize several other fields used in the database.
[ { "docid": "50c0f8aaed5ffd2608573129f5b91381", "score": "0.626582", "text": "def __init__(self,difficulty=0):\n self.conn = sqlite3.connect('go_fish.db') #Connect to database\n self.curs = self.conn.cursor()\n \n # start time of game\n self.ts = datetime.datetime.now()\n \n # initialize other fields/qualifiers of the game\n self.won = None\n self.cards_per_request = 0\n self.turn_number = 1\n self.difficulty = difficulty\n \n # 'empty guess' tracking variable- an empty guess is a guess for which the \n # player had to 'go fish'\n self.empty_guesses = 0\n self.top_empty_guess_ct = 0", "title": "" } ]
[ { "docid": "caceb3c401ac325d0e26409cca25db8d", "score": "0.62242234", "text": "def __init__(self):\n self.score = 0\n self.assassin_kills = 0\n self.mage_kills = 0\n self.ogre_kills = 0\n self.db = Database()", "title": "" }, { "docid": "cd3453ce1bf460fd52d8eb29dee288b2", "score": "0.61079764", "text": "def __init__(self,jobData):\n self.connected = False\n self.dbName = 'wrfHydroCalib_DB'\n self.db = None\n self.conn = None\n self.dbCursor = None", "title": "" }, { "docid": "4925ca32feda851beedf815a1f6772c2", "score": "0.60836726", "text": "def __init__(self):\n self.db = self.getConnection() #get connection to DB\n self.simulation_numbers = self.db['simulation_numbers'] #table simulation_numbers\n self.simulations = self.db['simulations'] #table simulations\n self.iterations = self.db['iterations'] #table iterations\n self.weater_data = self.db['weater_data'] #table weather data\n self.electricity_prices = self.db['electricity_prices'] #table electrictity prices\n self.addIndexes() # indexes for speed", "title": "" }, { "docid": "f4e0250ff822af34fadeb2844f00a152", "score": "0.6039546", "text": "def __init__(self, time_stamp):\n self.variable_used = []\n self.database = Database()\n self.time_stamp = time_stamp\n self.x = dict(self.database.x)\n self.y = dict(self.database.y)\n self.z = dict(self.database.z)", "title": "" }, { "docid": "c1dc6c29810f24665266853d5e61f380", "score": "0.59123254", "text": "def __init__(self, name):\n self.data_records_current_id = -1\n try:\n self.db = sqlite3.connect(name)\n except sqlite3.OperationalError:\n with open(\"log.txt\", 'a') as f:\n f.write(\"Could not open a database at time {}\".format(time.time()))\n exit()\n self.cur = self.db.cursor()\n self._create_tables((CREATE_TABLE_POWERINVERTER,\n CREATE_TABLE_GPS,\n CREATE_TABLE_DATARECORDS))", "title": "" }, { "docid": "795068383b5966e6544b89652ea915ac", "score": "0.59058416", "text": "def creatingAllData():\n connection = sqlite3.connect('data.db')\n cursor = connection.cursor()\n createData(1, False, \"1347059530000\", str(int(time.time())*1000),\n best_lat, best_lon, best_timezone)\n createData(2, False, \"1349478489000\", str(int(time.time())*1000),\n best_lat, best_lon, best_timezone)\n createData(3, False, \"1353545153000\", str(int(time.time())*1000),\n best_lat, best_lon, best_timezone)\n createData(4, False, \"1353545237000\", str(int(time.time())*1000),\n best_lat, best_lon, best_timezone)\n createData(1, True, \"1335859200000\", str(int(time.time())*1000), nasa_lat,\n nasa_lon, nasa_timezone)\n createData(2, True, \"1335859200000\", str(int(time.time())*1000), nasa_lat,\n nasa_lon, nasa_timezone)\n createData(3, True, \"1335859200000\", str(int(time.time())*1000), nasa_lat,\n nasa_lon, nasa_timezone)\n createData(4, True, \"1335859200000\", str(int(time.time())*1000), nasa_lat,\n nasa_lon, nasa_timezone)\n createData(5, True, \"1335859200000\", str(int(time.time())*1000), nasa_lat,\n nasa_lon, nasa_timezone)\n createData(6, True, \"1335859200000\", str(int(time.time())*1000), nasa_lat,\n nasa_lon, nasa_timezone)\n createData(7, True, \"1335859200000\", str(int(time.time())*1000), nasa_lat,\n nasa_lon, nasa_timezone)\n createData(8, True, \"1335859200000\", str(int(time.time())*1000), nasa_lat,\n nasa_lon, nasa_timezone)\n createData(9, True, \"1335859200000\", str(int(time.time())*1000), nasa_lat,\n nasa_lon, nasa_timezone)\n #Save your changes\n connection.commit()", "title": "" }, { "docid": "1e3942fad38367eb800409365d258457", "score": "0.5885854", "text": "def __init__(self,data):\r\n 
\r\n # store the instantiation of the datalogger class into a variable\r\n self.__dataLogger = data\r\n # Instantiate the Database class to have a connection to the database\r\n self.__database = Database()\r\n\r\n # Hard code the name of the table that needs to be created\r\n self.__table = \"TEMPERATURE_data\"\r\n\r\n # Get the data needed to be insert into the database from the DataLogger class\r\n self.__currentTemperature = str(data.getTemperature())\r\n self.__currentHumidity = str(data.getHumidity())\r\n self.__currentTime = str(data.getTime())\r\n self.__currentDate = str(data.getDate())\r\n \r\n # Create the database table with the hard coded value\r\n self.__database.createDBTable(self.__table)\r\n\r\n # insert the data into the table created\r\n self.__database.insertDBData(self.__currentTime, self.__currentTemperature,self.__currentHumidity,self.__currentDate)\r\n\r\n # inform the database to save changes made to the table\r\n self.__database.saveChanges()", "title": "" }, { "docid": "a6a741089c038db05e8dffbc8eb09948", "score": "0.58817136", "text": "def __init__ (self, db_entry=None, player=None, team=None, price=0, \n name=None, club=None, squad_num=0, instance=0):\n self.db = db_entry\n self.name = name\n self.player_key = name\n self.club = club\n self.current_club = club\n self.pos = \"--\"\n self.manager = \"None\"\n self.total_score = 0\n self.total_missed = 0\n self.week_score = 0\n self.week_missed = 0\n self.price = price\n self.squad_num = squad_num\n self.url = \"\"\n self.in_team = squad_num >= 0 and squad_num < 11\n self.is_sub = squad_num >= 11\n self.fixtures = \"\"\n\n if db_entry == None and player != None:\n # Create a new db structure and save it\n self.db = ffdb.FFDBTeamPlayer()\n self.db.year = player.year\n self.db.manager = team.manager\n self.db.userid = team.userid\n self.db.player_key = player.player_key\n self.db.name = player.name\n self.db.instance = instance\n self.db.club = player.club\n self.db.current_club = player.club\n self.db.pos = player.pos\n self.db.price = price\n self.db.squad_num = squad_num\n self.db.url = player.url\n # Update the instance number if this player has already been in\n # the team previously\n for explayer in team.explayers:\n if player.player_key == explayer.player_key:\n self.db.instance += 1\n # Set the squad number for the current week\n season = ffdb.FFDBSeason.load(team.year)\n self.db.update_squad_num(season.current_week())\n # Save this new item in the data store\n self.db.save()\n\n if db_entry != None:\n self.name = db_entry.name\n self.player_key = db_entry.player_key\n self.club = db_entry.club\n self.current_club = db_entry.current_club\n self.pos = db_entry.pos\n self.manager = db_entry.manager\n self.price = db_entry.price\n self.total_score = db_entry.get_total_points()\n self.total_missed = db_entry.get_total_missed()\n self.week_score = db_entry.get_week_points()\n self.week_missed = db_entry.get_week_missed()\n self.squad_num = db_entry.get_next_squad_num()\n self.url = db_entry.url\n self.in_team = (db_entry.get_current_squad_num() >= 0 and \n db_entry.get_current_squad_num() < 11)\n self.is_sub = db_entry.get_current_squad_num() >= 11\n\n self.safemgr = FFTeam.mgr_to_safemgr(self.manager)\n # Set the status and reason from the FFplayer object or by looking\n # up the entry in the data store. 
This is also used to update the\n # current_club field if necessary.\n if player != None and db_entry != None:\n self.status = player.status\n self.reason = player.reason\n elif db_entry == None:\n # This is a temporary entry. Mark as ineligable\n self.status = ffdb.FFDBPlayer.INELIGABLE\n self.reason = \"Placeholder\"\n else:\n # If we don't have enough informtion to fill in the status and\n # reason then leave them blank for now. They will be filled in\n # later if anyone needs them\n self.status = None\n self.reason = None", "title": "" }, { "docid": "3562d2413cbae99a326d903725c00985", "score": "0.58659214", "text": "def __init__(self, db, shadows=False, **kw):\n self.db = db\n self.shadows = shadows\n\n self.width = 900\n self.height = 500\n self.bottomMargin = 80\n \n self.pmLow = 408\n self.pmHigh = 416#425.5\n self.pixelPerMile = self.width / (self.pmHigh - self.pmLow)\n\n self.positions, self.times, self.speeds = self.plotPoints(**kw)\n \n self.speedRange = (\n 0, #self.speeds.min() - 3,\n 80, #self.speeds.max() + 3\n )", "title": "" }, { "docid": "12879e87f3bd856f610d07c858876ae9", "score": "0.5857365", "text": "def __init__(self, db_file, log=False):\n # self.conn = sqlite3.connect(db_file)\n # self.cursor = self.conn.cursor()\n self.db_file = db_file\n self.autoclose = True\n self.logger = None # Default\n if log:\n logging.basicConfig(format=\"SQL: %(message)s\")\n self.logger = logging.getLogger(\"candle_sql\")\n self.logger.setLevel(logging.DEBUG)", "title": "" }, { "docid": "655d6f6de3471d068089c4a88461e4c9", "score": "0.5825229", "text": "def test_data():\n \n temp = 22.0\n humi = 76.9\n itmp = 21.9\n pres = 1016\n lcur = 0.2\n batv = 13.0\n wspeed = 0.0\n wdir = 180.0\n rain = 0.0\n pcur = 0.51\n\n cnx = open_database()\n \n cursor = cnx.cursor()\n data = (time.strftime(\"%Y-%m-%d\"), temp,humi,itmp,pres,lcur,batv,wspeed,wdir,rain,pcur)\n print(data)\n cursor.execute(INSERT_DEF, data)\n cnx.commit()\n\n query = (\"SELECT ts, temperature FROM GardenLabData\")\n cursor.execute(query)\n for( ts, temperature) in cursor:\n print( ts, temperature)\n \n cursor.close()\n cnx.close()", "title": "" }, { "docid": "0da500df70809571d6b4f234d9468fcf", "score": "0.5806642", "text": "def createData(sens_no, nasa, start, end, lat, lon, timezon):\n connection = sqlite3.connect('data.db')\n cursor = connection.cursor()\n if nasa:\n sensorID = nasa_sensors_dict[sens_no]\n sensorLoc = nasa_sensors_loc[sens_no]\n table = \"nasalight\" + str(sens_no)\n else:\n sensorID = sensors_dict[sens_no]\n sensorLoc = sensors_loc[sens_no]\n table = \"light\" + str(sens_no)\n x = sensorLoc[0]\n y = sensorLoc[1]\n url = \"http://new.openbms.org/backend/api/prev/uuid/\" + sensorID +\\\n \"?&start=\" + start + \"&end=\" + end + \"&limit=100000&\"\n timestamp, reading, unixtime = parse(url)\n # fill in the gaps in the unixtimes\n #timestamp, reading, unixtime = fill_gaps(timestamp, reading, unixtime)\n for count in range(len(reading)):\n time = timestamp[count]\n #print(time)\n sunpos = getSunpos(lat, lon, timezon, time[3], time[2],\n time[1], time[4], time[5], time[6])\n cloud = cursor.execute('SELECT cloudiness FROM cloud WHERE day = ' +\n str(time[1]) + ' AND month = ' + str(time[2]) +\n ' AND year = ' + str(time[3]) + ' AND hour = ' +\n str(time[4]))\n cloudiness = cloud.fetchone()\n if nasa == False:\n if cloudiness is not None:\n to_db = [unixtime[count], time[0], time[1], time[2], time[3],\n time[4], time[5],time[6], reading[count], sunpos[0],\n sunpos[1], str(cloudiness[0]), x, y, float('NaN'),\n 
float('NaN'), float('NaN'), float('NaN'), float('NaN'),\n float('NaN'),float('Nan'),float('Nan')]\n else:\n to_db = [unixtime[count], time[0], time[1], time[2], time[3],\n time[4], time[5],time[6], reading[count], sunpos[0],\n sunpos[1], \"None\", x, y, float('NaN'), float('NaN'),\n float('NaN'), float('NaN'), float('NaN'), float('NaN'),\n float('Nan'),float('Nan')]\n cursor.execute('INSERT OR IGNORE INTO ' + table +\n ' VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)',\n to_db)\n else:\n if cloudiness is not None:\n to_db = [unixtime[count], time[0], time[1], time[2], time[3],\n time[4], time[5],time[6], reading[count], sunpos[0],\n sunpos[1], str(cloudiness[0]), x, y, float('NaN'),\n float('NaN'), float('NaN'), float('NaN'), float('NaN'),\n float('NaN')]\n else:\n to_db = [unixtime[count], time[0], time[1], time[2], time[3],\n time[4], time[5],time[6], reading[count], sunpos[0],\n sunpos[1], \"None\", x, y, float('NaN'), float('NaN'),\n float('NaN'), float('NaN'), float('NaN'), float('NaN')]\n cursor.execute('INSERT OR IGNORE INTO ' + table +\n ' VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)',\n to_db)\n connection.commit()", "title": "" }, { "docid": "0fb210f2ee33cb8d454fcf6e040cc683", "score": "0.57703054", "text": "def __init__(self, dbname):\r\n self.connection = sqlite3.connect(dbname)\r\n self.cur = self.connection.cursor()", "title": "" }, { "docid": "f1f7c83c658719ce037769e7efc0d30c", "score": "0.5762066", "text": "def __init__ (self, db_entry, manager=None):\n self.db_entry = db_entry\n self.year = db_entry.year\n self.player_key = db_entry.player_key\n self.name = db_entry.name\n self.pos = db_entry.pos\n self.club = db_entry.club\n self.last_season = db_entry.last_season\n self.total_score = db_entry.get_total_score()\n self.url = (\"http://www.fantasyleague.com/Pro/Stats/Player/\" +\n \"{}.aspx\".format(db_entry.url))\n self.img_url = (\"http://www.fantasyleague.com/sharedassets/\" + \n \"images/players/{}.jpg\".format(db_entry.url))\n self.status = db_entry.status\n self.reason = db_entry.reason\n self.manager = manager\n self.teamplayers = []", "title": "" }, { "docid": "e625421536643577ecc402a2a409e3aa", "score": "0.5732227", "text": "def __init__(self, db_name):\n self.conn = sqlite3.connect(db_name)\n self.cursor = self.conn.cursor()", "title": "" }, { "docid": "0576818a0dcce27c780e97b6c86f0d4b", "score": "0.5725861", "text": "def __init__(self, countables_initial=None):\n self.t_initial = time.time()\n self.c_initial = countables_initial\n self.records = []", "title": "" }, { "docid": "d56e79c1cd8dc022af9fabd46aca48fd", "score": "0.57225883", "text": "def __init__(self, database, i_face, test=False):\r\n self.po_db = database\r\n self.i_face = i_face\r\n self.test = test\r\n\r\n self.connection = db.connect(database.db_name)\r\n self.layer = None", "title": "" }, { "docid": "a77d9e58761f17c523b72940dbc60203", "score": "0.56958145", "text": "def __init__(self, db):\r\n self.mydb = sqlite3.connect(db)\r\n self.cursor = self.mydb.cursor()", "title": "" }, { "docid": "d32e0cf6507e5c759cb3bad88cd063ef", "score": "0.5689434", "text": "def __init__(self, database=DB_FILE, table=DB_CAT, comment=DB_PZ):\n self.conn = sql.connect(database)\n self.cur = self.conn.cursor()\n self.table = table\n self.comment = comment", "title": "" }, { "docid": "846db9bd5a7b79ee9df83e58a223c7f0", "score": "0.5686574", "text": "def __init__(\n self,\n game_id: str,\n season: int,\n day: int,\n home_team: TeamState,\n away_team: TeamState,\n home_score: int,\n away_score: int,\n inning: 
int,\n half: InningHalf,\n outs: int,\n strikes: int,\n balls: int,\n ) -> None:\n self.game_id = game_id\n self.season = season\n self.day = day\n self.home_team = home_team\n self.away_team = away_team\n self.home_score = home_score\n self.away_score = away_score\n self.inning = inning\n self.half = half\n self.outs = outs\n self.strikes = strikes\n self.balls = balls\n if self.half == InningHalf.TOP:\n self.cur_batting_team = self.away_team\n self.cur_pitching_team = self.home_team\n else:\n self.cur_batting_team = self.home_team\n self.cur_pitching_team = self.away_team\n # initialize per side variables\n self.num_bases = self.cur_batting_team.num_bases\n self.balls_for_walk = self.cur_batting_team.balls_for_walk\n self.strikes_for_out = self.cur_batting_team.strikes_for_out\n self.outs_for_inning = self.cur_batting_team.outs_for_inning\n self.cur_base_runners: Dict[int, str] = {}\n self.is_game_over = False\n self.clf: Dict[Ml, Any] = {}\n self.game_log: List[str] = [\"Play ball.\"]\n self._load_ml_models()\n self.refresh_game_status()", "title": "" }, { "docid": "3af58cbefae7ae21f37f77768fc952a3", "score": "0.5681751", "text": "def __init__(self, dbf, debug=False):\n self.db = DB(dbf, debug=debug)", "title": "" }, { "docid": "ad7f685331e9c847f08223b84d89a3b1", "score": "0.56601745", "text": "def __init__(self):\n # self.id = random.randint(1,101)\n # print('Generated an id of ', self.id)\n # print('Loading database from file')\n pass", "title": "" }, { "docid": "ba4c05037e6c7c34f1c6bde6a1b922a0", "score": "0.5639399", "text": "def init(self):\n self.engine.load_data(self.data_file)\n self.connect_to_db()", "title": "" }, { "docid": "636650853db6bb812782fd2bdb4b2409", "score": "0.563328", "text": "def __init__(self, db_file_path):\n self.db_file_path = db_file_path\n self.connection = sqlite3.connect(db_file_path)\n self.cursor = self.connection.cursor()", "title": "" }, { "docid": "4528bb12227013db288f743712333cd2", "score": "0.55924845", "text": "def __init__(self):\n self.basedb = self\n self.__feature = {} # {\"feature\": VALUE, ...}\n self._tables = {\n \"Citation\": {},\n \"Event\": {},\n \"Family\": {},\n \"Media\": {},\n \"Note\": {},\n \"Person\": {},\n \"Place\": {},\n \"Repository\": {},\n \"Source\": {},\n \"Tag\": {},\n }", "title": "" }, { "docid": "69b519333cf14a4cf6605bd74e4658e5", "score": "0.5579728", "text": "def __init__(self, dbfile):\n self.dbfile = dbfile\n self.cxn = sqlite3.connect(dbfile)\n self.cur = self.cxn.cursor()", "title": "" }, { "docid": "1614e154ebd6996011ebd2731cb4a0ad", "score": "0.5578826", "text": "def __init__(self, database_file):\n self.connection = sqlite3.connect(database_file)\n self.cursor = self.connection.cursor()", "title": "" }, { "docid": "2aaf822f7a2b5036249421cd8f0c5eff", "score": "0.5574275", "text": "def __init__(self, conn):\n self.conn = conn\n self.vehicle = conn.space_center.active_vessel\n self._Flight = self.vehicle.flight()\n self.altitude = conn.add_stream(getattr, self.vehicle.flight(), 'mean_altitude')\n self.apoapsis = conn.add_stream(getattr, self.vehicle.orbit, 'apoapsis_altitude')\n self.initial_longitude = self.vehicle.flight().longitude", "title": "" }, { "docid": "3b4417a2d1e3e8caae12d4d2f2618973", "score": "0.5570422", "text": "def start(self, data):\n # letzter Start\n self.lastStart = time.time()\n\n self.grid = dict()\n config = get_config()['field']\n self.length = config['length']\n self.width = config['width']\n self.goal_width = config['goal-width']\n\n # Cells\n self.xCount = 19 # Amount of cells in 
horizontal direction\n self.yCount = 13 # Amount of cells in vertical direction\n self.cellCount = self.yCount * self.xCount\n\n self.x_half = int(self.xCount / 2)\n self.y_half = int(self.yCount / 2)\n self.initialize_grid()\n\n # Ist -1 wenn der Roboter auf das eigene Tor guckt und 1 wenn er auf das gegnerische Tor guckt\n # Zu Beginn ist der Wert 1, da wir davon ausgehen,\n # dass der Roboter zum Start immer auf das gegnerische Tor guckt\n # Bisher ist jedoch noch keine Loesung dafuer implementiert, um festzustellen auf welches Tor\n # der Roboter schaut.\n self.field_half = 1\n\n # bin_tm und bin_opp speichern die Binaerrepraesentation der Gridworld\n # und werden zunaechst auf einen default-Wert gesetzt bis es Daten gibt,\n # die eingetragen werden koennen. Die DATA_KEYs werden in update mit ihnen verglichen,\n # damit einige Operationen nur durchgefuehrt werden, wenn sich die Daten tatsaechlich\n # geaendert haben.\n self.bin_tm = [999999, 999999, 999999, 999999, 999999, 999999, 999999, 999999]\n self.bin_opp = [999999, 999999, 999999, 999999, 999999, 999999, 999999, 999999]", "title": "" }, { "docid": "8aa4f53d86fc8ae937b56e6bc378b859", "score": "0.556528", "text": "def __init__(self):\n\t\tself.start_time = None\n\t\tself.stop_time = None", "title": "" }, { "docid": "ba3b36a22705040679b8fef4710268f6", "score": "0.5543013", "text": "def __init__(self, timestamp: float, speed: float) -> None:\n self.timestamp = timestamp\n self.speed = speed", "title": "" }, { "docid": "4251ba8a1b72f6536a8a81bad3e8280e", "score": "0.55365825", "text": "def __init__(self):\n self.start_time = None\n self.stop_time = None", "title": "" }, { "docid": "82ca518bd97984cdcb2c185e2712b0a5", "score": "0.5535444", "text": "def __init__(self):\n try:\n self._db_conn = pgconn.connect(\"dbname='ap_pilot' \\\n\t\t\t user='apadmin' host='192.168.3.252' password='dysan100'\")\n self._db_conn.autocommit = True\n except pgconn.Error as pgerror:\n print \"I am unable to connect to the database. 
%s\" % repr(pgerror)\n\n self._db_cur = self._db_conn.cursor()", "title": "" }, { "docid": "6cda4388b189f8500b7fafee4a08bbef", "score": "0.5526462", "text": "def __init__(self, symbol, start_data, end_data):\n self.start_date = self.to_date(start_data)\n self.end_date = self.to_date(end_data)\n self.symbol = symbol\n\n self.data = self.fetch_data()", "title": "" }, { "docid": "e04f9044efc90f54a45920f33dbd878b", "score": "0.550279", "text": "def init(self):\n self.room.db.desc = ROOM_DESC\n\n cabindoor = self.create_object(\n CabinDoor, key=\"door to the cabin\", aliases=[\"door\"])\n cabindoor.db.desc = CABINDOOR_DESC.strip()\n hintberry_plate = self.create_object(\n HintberryPlate, key=\"pie on a plate\",\n aliases=[\"stool\", \"hintberry\", \"hintberry pie\"])\n hintberry_plate.db.desc = HINTBERRY_PLATE_DESC.strip()\n windows = self.create_object(\n Windows, key=\"windows\", aliases=['window'])\n windows.db.desc = WINDOWS_DESC.strip()\n metalworks = self.create_object(\n Metalworks, key=\"metalworks (outside)\", aliases='metalworks')\n metalworks.db.desc = METALWORKS_DESC\n scarecrow = self.create_object(\n Scarecrow, key=\"scarecrow (outside)\", aliases=['scarecrow'])\n scarecrow.db.desc = SCARECROW_DESC.strip()\n rafters = self.create_object(\n Rafters, key='rafters')\n rafters.db.desc = RAFTERS_DESC.strip()\n chimes = self.create_object(\n Chimes, key='chimes with red herrings', aliases=\"chimes\")\n chimes.db.desc = CHIMES_DESC.strip()\n laundry = self.create_object(\n Laundry, key=\"laundry\")\n laundry.db.desc = LAUNDRY_DESC.strip()\n saddle = self.create_object(\n Saddle, key=\"saddle\")\n saddle.db.desc = SADDLE_DESC.strip()\n socks = self.create_object(\n Socks, key=\"socks\")\n socks.db.desc = SOCKS_DESC.strip()\n bathtowel = self.create_object(\n Bathtowel, key=\"bathtowel\", aliases=[\"towel\"])\n bathtowel.db.desc = BATHTOWEL_DESC.strip()\n fireplace = self.create_object(\n Fireplace, key=\"fireplace\", aliases=[\"chimney\"])\n fireplace.db.desc = FIREPLACE_DESC.strip()\n cauldron = self.create_object(\n Cauldron, key=\"cauldron\")\n cauldron.db.desc = CAULDRON_DESC.strip()\n painting = self.create_object(\n Painting, key=\"painting over the fireplace\", aliases=[\"painting\"])\n painting.db.desc = PAINTING_DESC.strip()\n ashes = self.create_object(\n Ashes, key=\"ashes in the fireplace\", aliases=[\"ashes\"])\n ashes.db.desc = ASHES_DESC.strip()\n closet = self.create_object(\n Closet, key=\"closet\")\n closet.db.desc = CLOSET_DESC.strip()\n kitchen = self.create_object(\n Kitchen, key=\"kitchen\")\n kitchen.db.desc = KITCHEN_DESC.strip()\n chair = self.create_object(\n Chair, key=\"chair\")\n chair.db.desc = CHAIR_DESC.strip()\n statue = self.create_object(\n Statue, key='statue', aliases=[\"monkey\"])\n statue.db.desc = STATUE_DESC.strip()\n hair = self.create_object(\n Hairs, key=\"hair\", aliases=['hairs', 'strands of hair'])\n hair.db.desc = HAIR_DESC.strip()\n bed = self.create_object(\n Bed, key=\"bed\")\n bed.db.desc = BED_DESC.strip()\n floor = self.create_object(\n Floor, key=\"floor\", aliases=[\"floor boards\"])\n floor.db.desc = FLOOR_DESC.strip()\n rug = self.create_object(\n Rug, key=\"rug\", aliases=['carpet'])\n rug.db.desc = RUG_DESC.strip()\n table = self.create_object(\n Table, key=\"table\")\n table.db.desc = TABLE_DESC.strip()\n mirror = self.create_object(\n Mirror, key=\"mirror\")\n mirror.db.desc = MIRROR_DESC.strip()\n plant = self.create_object(\n Plant, key=\"plant\")\n plant.db.desc = PLANT_DESC.strip()", "title": "" }, { "docid": 
"5e1bbb28a7da79c30b4819877a91051d", "score": "0.5500568", "text": "def __init__(self,\n db_manual_path=CONFIG.latlon_manual_csv_file,\n db_auto_path=CONFIG.latlon_auto_csv_file,\n extend=True):\n self.db_manual = latlon_db.DB(db_manual_path)\n self.db_auto = latlon_db.DB(db_auto_path)\n self.extend_database = extend", "title": "" }, { "docid": "0a81983c9cfbc83cf945df529a2717f6", "score": "0.5480463", "text": "def __init__(self):\n self.state = 'idle'\n self.time = 0.0\n self.target = 0.0\n self.sense_t = 0.0\n self.sense_b = 0.0\n self.cmd = 0.0\n self.cmd_t = 0.0\n self.cmd_b = 0.0", "title": "" }, { "docid": "f06087fdf75a9e0931547a19845cca4e", "score": "0.5475616", "text": "def __init__(self, _boil_time, _boil_temp, _is_boiling):\n logging.info(\"Thread %s: Start Boiling\", self)\n self._boil_time = TempBoilRecipe.TempBoilRecipe.boil_time\n self._boil_temp = TempBoilRecipe.TempBoilRecipe.boil_temp\n self._stage_date_time = datetime.datetime.now()\n self._stage_duration = datetime.datetime\n self._is_boiling = _is_boiling\n logging.info(\"Thread %s: End Boiling\", self)", "title": "" }, { "docid": "13b59a5d844f3f665dc419572a83aa4b", "score": "0.5459323", "text": "def initialise(self):\n self._current_time = GameTime.get_time()", "title": "" }, { "docid": "ee092ad9d1a751a504e5dcf9c491cdf0", "score": "0.5448792", "text": "def initialize(self, args): \n self.__args = args\n self.connection = MySQLdb.connect(user = args['user'], passwd = args['password'], db = args['db'])\n cursor = self.connection.cursor()\n cursor.execute(\"SHOW TABLES\")\n tables = [i[0] for i in cursor.fetchall()]\n if 'record_times' not in tables:\n cursor.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS `record_times`\n (\n `id` int NOT NULL auto_increment,\n `time` int NOT NULL,\n `userId` int NOT NULL,\n `mapId` int NOT NULL,\n `rank` int NOT NULL DEFAULT '-1',\n `updatedAt` timestamp NOT NULL,\n PRIMARY KEY (`id`),\n UNIQUE KEY `record` (`userId`, `mapId`)\n );\n \"\"\")\n \n self.connection.commit()\n \n self.callMethod((None, 'subscribeEvent'), 'TmConnector', 'PlayerFinish', 'onPlayerFinish')\n self.callMethod((None, 'subscribeEvent'), 'TmConnector', 'EndMap', 'onEndMap')\n self.callMethod((None, 'subscribeEvent'), 'TmConnector', 'BeginMap', 'onBeginMap')\n self.callMethod((None, 'subscribeEvent'), 'Records', 'newRecord', 'onNewRecord')\n self.__retrieveCurrentMapId()\n self.__getCurrentRecords()", "title": "" }, { "docid": "b52812a38e0da8dd795c5774ad544776", "score": "0.54476297", "text": "def __init__(self):\n\n # result object has its own sqlite3 in-memory database\n self.conn = sqlite3.connect(':memory:')\n self._series = {}\n self._dataframes = {}\n self._df = None\n self._dfs = {}\n self._tables = {}", "title": "" }, { "docid": "7cdef5cfc43d3eaeee9baf41ce75a26e", "score": "0.5447248", "text": "def __init__(self):\n self.con = init_db()", "title": "" }, { "docid": "2137a1db453923ec7aa7dd47747c930c", "score": "0.5445318", "text": "def __init__(self, latitude=91*60*1000, longitude=181*60*1000, altitude=0,\n speed=0, heading=0, timestamp=0, satellites=0, fixtype=1,\n PDOP=0, HDOP=0, VDOP=0):\n self.latitude = latitude # 1/1000th minutes\n self.longitude = longitude # 1/1000th minutes\n self.altitude = altitude # metres\n self.speed = speed # knots\n self.heading = heading # degrees\n self.timestamp = timestamp # seconds since 1/1/1970 unix epoch\n self.satellites = satellites\n self.fixtype = fixtype\n self.PDOP = PDOP\n self.HDOP = HDOP\n self.VDOP = VDOP\n self.lat_dec_deg = latitude / 60000\n self.lon_dec_deg = 
longitude / 60000\n self.time_readable = datetime.datetime.utcfromtimestamp(timestamp).strftime('%Y-%m-%d %H:%M:%S')", "title": "" }, { "docid": "f98aa5fbb1542d7b96ff473cc3ca7566", "score": "0.54436785", "text": "def __init__(self, **kwargs):\n print(\"###### PeakDatabase init\")\n server = kwargs[\"server\"]\n self.client = MongoClient('mongodb://%s:27017/'%server)\n dbname = kwargs[\"dbname\"]\n self.db = self.client[dbname]\n self.poster = self.db.posts\n self.clientName = kwargs[\"name\"] #+ datetime.datetime.now().strftime(\"--%Y-%m-%d--%H:%M:%S\")\n dictionary = {self.clientName:{\"parameters\":{\"npxmin\" : kwargs[\"npix_min\"],\n \"npxmax\" : kwargs[\"npix_max\"],\n \"amaxthr\" : kwargs[\"amax_thr\"],\n \"atotthr\" : kwargs[\"atot_thr\"],\n \"sonmin\" : kwargs[\"son_min\"]}}}\n self.theid = self.poster.insert_one(dictionary).inserted_id", "title": "" }, { "docid": "fc8eb6ffb6bae98124c6635679cfaa0e", "score": "0.5431384", "text": "def __init__(self):\n self.startTime = time.time()", "title": "" }, { "docid": "d369c4fd0f4a56c58caf003a73b8c53a", "score": "0.54244465", "text": "def __init__(self, db_handle=None):\n self.set_db_handle(db_handle)\n self.seasons = []", "title": "" }, { "docid": "30d40a0ff34a398145b7990b183d44f5", "score": "0.54190624", "text": "def __init__(self, **kwargs):\n super(TimesList, self).__init__(**kwargs)\n database = load(database_name)\n self.data = [{'text': time + ' ' + scramble} for scramble, time in database.items()]", "title": "" }, { "docid": "ef23951ffb13b9142135f9d4b7c8d388", "score": "0.5383537", "text": "def __init__(self):\n self.database = DbConnector()\n self.cnx = self.database.handle", "title": "" }, { "docid": "b8b7143ac6ba39a9e5b9daefcd6bc1af", "score": "0.53834385", "text": "def init_db(cls):\n if cls._landmarks is None:\n cls._landmarks = lite.connect(ans.Answer.pathToData+'landmarks.db')\n lmcur = cls._landmarks.cursor()\n cls._landmarks.execute('CREATE TABLE IF NOT EXISTS landmarks (landmark_id INTEGER, name VARCHAR(255), lat DECIMAL(9,6), lon DECIMAL(9,6), east DECIMAL(9,6), north DECIMAL(9,6), querylat DECIMAL(9,6), PRIMARY KEY (landmark_id));')\n cls._landmarks.execute('CREATE TABLE IF NOT EXISTS landmark_queries (querylat DECIMAL(9,6));')\n lmcur.close()\n cls._landmarks.commit()\n\n if cls._boundaries is None:\n cls._boundaries = lite.connect(ans.Answer.pathToData+'oa_boundaries.db')", "title": "" }, { "docid": "e669daae9ebff2948190e27f60baacc8", "score": "0.53728855", "text": "def __init__(self, game, data):\r\n self.game = game\r\n self.data = data\r\n self.name = data['name']\r\n \r\n # Create our attributes (need to make new field references)\r\n self.attributes = {}\r\n for key in self.data.get('attributes', {}):\r\n self.attributes[key] = self.data['attributes'][key]\r\n \r\n # Create items\r\n self.items = []\r\n for key in self.data.get('items', {}):\r\n item = rpg_item.Item(self.game, self, self.data['items'][key])\r\n self.items.append(item)\r\n \r\n # Image\r\n self.image = rpg_image.Load(data['image'],\r\n colorkey=data.get('image_color_key',\r\n game.data['game']['color_key']))\r\n \r\n starting_position = data['pos']\r\n self.pos = rpg_base.Position(starting_position[0], starting_position[1])\r\n self.pos_last = self.pos\r\n \r\n # Save any data we need to manipulate\r\n self.money = data.get('money', 0)\r\n \r\n \r\n # More stats\r\n self.fatigued = False\r\n \r\n #Log('Actor data: %s' % self.data)\r", "title": "" }, { "docid": "b05c3b7017e3d40e0d789840b22f48e4", "score": "0.53703654", "text": "def __init__(self, 
db_path: str, table_name: str = 'items'):\n\n # Store the variables\n self.path = db_path\n self.table_name = table_name\n\n # Connect to the database\n self.db = sqlite3.connect(db_path)\n self.cursor = self.db.cursor()\n\n # Initialise the table\n self.__init_table()", "title": "" }, { "docid": "4352c102f4f798fe120b97c6ac560f8e", "score": "0.5364357", "text": "def __init__(self, bs_dict, date):\n self.player_name = bs_dict['name']\n self.made_fg = bs_dict['made_field_goals']\n self.made_threes = bs_dict['made_three_point_field_goals']\n self.made_ft = bs_dict['made_free_throws']\n self.points = self.__calculate_points(made_fg=self.made_fg,\n made_three=self.made_threes,\n made_ft=self.made_ft)\n self.offensive_rebounds = bs_dict['offensive_rebounds']\n self.defensive_rebounds = bs_dict['defensive_rebounds']\n self.rebounds = self.__calculate_rebounds(o_reb=self.offensive_rebounds,\n d_reb=self.defensive_rebounds)\n self.assists = bs_dict['assists']\n self.player_team = bs_dict['team'].name\n self.date = date\n self.location = bs_dict['location'].name\n self.opponent = bs_dict['opponent'].name\n self.outcome = bs_dict['outcome'].name\n self.seconds_played = bs_dict['seconds_played']\n self.attempted_threes = bs_dict['attempted_three_point_field_goals']\n self.attempted_ft = bs_dict['attempted_free_throws']\n self.attempted_fg = bs_dict['attempted_field_goals']\n self.steals = bs_dict['steals']\n self.blocks = bs_dict['blocks']\n self.turnovers = bs_dict['turnovers']\n self.personal_fouls = bs_dict['personal_fouls']\n self.game_score = bs_dict['game_score']", "title": "" }, { "docid": "7b102f26689b9924140ad52be0220f02", "score": "0.5357086", "text": "def __init__(self, dbfile=None, local=False):\n self.dbfile = dbfile\n self.local = local\n self.conn = None\n self.cursor = None", "title": "" }, { "docid": "736eff8eb04c33556a5d79101faa240b", "score": "0.535115", "text": "def __init__(self, dbname='default'):\n # Put import here so testing on systems \n # without all prereqs doesn't fail.\n global database\n import database \n self.dbname = dbname\n self.sp_idcache = {} # cache for SP rating instances\n self.pdm_idcache = {} # cache for PDM rating instances\n\n self.get_id = self.get_pdm_id # for backwards compatability", "title": "" }, { "docid": "2255aa25b0533d4ce6945425d9881fee", "score": "0.5350942", "text": "def __init__(self, root, size_x, size_y, bombs, time, mode, database):\n\t\tself.window = root\n\t\tself.window.configure(background='#BDC3C7')\n\t\tself.canv = tk.Canvas(root, width=24*size_x, height=24*size_y, background='#BDC3C7', highlightbackground=\"green\", highlightcolor=\"green\")\n\t\tself.size_x = size_x\n\t\tself.size_y = size_y\n\t\tself.flag_count = 0\n\t\tself.mode = mode\n\n\t\tself.database = database\n\t\tself.revealed = 0\n\t\t# timer setup\n\t\tself.time = time\n\t\tself.timer = tk.Label(root, text=\"\", background='#BDC3C7')\n\t\tquitButton = tk.Button(root, text=\"Quit\", background='#BDC3C7', command=lambda: self.quit())\n\t\tself.bomb_count = tk.Label(root, text=\"\", background='#BDC3C7')\n\t\tquitButton.grid(row=0, column=1)\n\t\tself.timer.grid(row=0, column=2)\n\t\tself.bomb_count.grid(row=0, column=0)\n\n\t\tself.board = [[Cell(self.canv, x, y) for y in range(size_y)] for x in range(size_x)]\n\t\tself.canv.tag_bind('rec', '<ButtonPress-1>', self.onObjectLeftClick)\n\t\tself.canv.tag_bind('rec', '<ButtonPress-3>', self.onObjectRightClick)\n\t\tself.canv.grid(row=1, columnspan=3)\n\t\tself.bombs = 
list()\n\t\tself.place_bombs(bombs)\n\t\tself.bomb_count.configure(text=\"Bombs: \" + str(len(self.bombs)))\n\t\tself.update_clock()", "title": "" }, { "docid": "bc1de002a7a1e98c73f1c9aeb53e9d9b", "score": "0.53438306", "text": "def __init__(self, db_file_name):\n super(LoadPlayer, self).__init__(db_file_name)", "title": "" }, { "docid": "eae19527faf14fec274accd4e3b6d04c", "score": "0.5341353", "text": "def _initData(self):\n self.alive = True\n\n self.COMMAND_MAP = {\n 'help': self.help,\n 'exit': self.exit,\n 'info': self.info,\n 'user': self.updateUser,\n 'server': self.updateServer,\n 'new game': self.newGame,\n 'hint': self.getHint\n }\n\n self.user = {\n 'first_name': DEFAULT_FIRST_NAME,\n 'last_name': DEFAULT_LAST_NAME,\n 'a_number': DEFAULT_A_NUMBER,\n 'alias': DEFAULT_ALIAS\n }\n\n self.server = {\n 'host': DEFAULT_SERVER_HOST,\n 'port': DEFAULT_SERVER_PORT\n }\n\n self.game = {\n 'id': None,\n 'definition': None,\n 'guess': None\n }", "title": "" }, { "docid": "c7f8b00be2f022703e5b5d8b1a1ba05a", "score": "0.5340799", "text": "def __init__(self, start, end, data_type=None):\n DataTypes.__init__(self)\n self.opener = \"'\"\n self.closer = \"'\"\n self.database_type = data_type if data_type is not None else \"DATE\"\n self._month_file = os.path.join('dbGen', 'nouns', 'months.txt')\n self._start_year, self._start_month, self._start_day = map(int, start.split('-'))\n self._end_year, self._end_month, self._end_day = map(int, end.split('-'))\n self._start_stamp = int(datetime.datetime(self._start_year, self._start_month, self._start_day, 0, 0).timestamp())\n self._end_stamp = int(datetime.datetime(self._end_year, self._end_month, self._end_day, 23, 59).timestamp())", "title": "" }, { "docid": "d1b3eebd7d0ed4087a97bf39a853bae6", "score": "0.5320153", "text": "def _read_db(self, fps_layout='central_park'):\n\n actuators = (targetdb.Actuator.select()\n .order_by(targetdb.Actuator.id)\n .join(targetdb.FPSLayout,\n on=(targetdb.Actuator.fps_layout_pk == targetdb.FPSLayout.pk))\n .where(targetdb.FPSLayout.label == fps_layout))\n\n nactuators = actuators.count()\n\n fibers = (targetdb.Fiber.select(targetdb.Fiber.fiberid,\n targetdb.Spectrograph.label.alias('spectrograph'),\n targetdb.Actuator.id,\n targetdb.FPSLayout.label.alias('fps_layout'))\n .join(targetdb.Spectrograph,\n on=(targetdb.Fiber.spectrograph_pk == targetdb.Spectrograph.pk))\n .join(targetdb.Actuator,\n on=(targetdb.Fiber.actuator_pk == targetdb.Actuator.pk))\n .join(targetdb.FPSLayout,\n on=(targetdb.Actuator.fps_layout_pk == targetdb.FPSLayout.pk))\n .where(targetdb.FPSLayout.label == fps_layout)\n ).dicts()\n\n self.npositioner = nactuators\n self.positionerid = np.zeros(nactuators, dtype=np.int32)\n self.xcen = np.zeros(nactuators, dtype=np.float32)\n self.ycen = np.zeros(nactuators, dtype=np.float32)\n self.boss = np.zeros(nactuators, dtype=np.bool)\n self.apogee = np.zeros(nactuators, dtype=np.bool)\n self.fiducial = np.zeros(nactuators, dtype=np.bool)\n self.indx = dict()\n\n indx = 0\n for indx, actuator in enumerate(actuators):\n self.positionerid[indx] = actuator.id\n self.indx[self.positionerid[indx]] = indx\n self.xcen[indx] = actuator.xcen\n self.ycen[indx] = actuator.ycen\n self.fiducial[indx] = actuator.actuator_type.label == 'Fiducial'\n\n for fiber in fibers:\n if(fiber['spectrograph'] == 'APOGEE'):\n self.apogee[self.indx[fiber['id']]] = True\n if(fiber['spectrograph'] == 'BOSS'):\n self.boss[self.indx[fiber['id']]] = True\n\n return", "title": "" }, { "docid": "c957b0f5fc44a9f46878265fe7223f5f", "score": 
"0.53132814", "text": "def __init__(self, sequence, time, user, database, query):\n self.time = time\n self.database_name = database\n self.query = query\n self.sequence = sequence\n self.username = user", "title": "" }, { "docid": "a491ce9e5f51b82d74c162c460237349", "score": "0.5306943", "text": "def __init__(self):\n\n self.cols = None\n self.rows = None\n self.goal_r = {}\n self.players = {}\n self.goals = {}\n self.grid = []\n self.actions = ['N', 'S', 'E', 'W', 'ST']\n self.commentator = False", "title": "" }, { "docid": "4fa995679c924b02184960db83900ca1", "score": "0.5303601", "text": "def __init__(self, dbname):\n self.db = sqlite.connect(dbname)", "title": "" }, { "docid": "f4d959694a4579d66f6f0557b0a2e6d3", "score": "0.5301556", "text": "def initialize(self):\n\n self._state = STATE_INACTIVE\n self._bricks = []\n self._paddle = self._createpaddle((GAME_WIDTH/2))\n self._ball = None\n self.STARTUP = GLabel(text=STARTUP_TEXT, x=STARTUP_X, y=STARTUP_Y)\n self.view.add(self.STARTUP)\n self._lives = NUM_LIVES\n self._message = GLabel(text='You have '+ `self._lives` + ' lives left\\nClick to start next round',\n x=STARTUP_X, y=STARTUP_Y)\n self._congrats = GLabel(text='Congratulations!\\nYou Won!',\n x=STARTUP_X, y=STARTUP_Y)\n self._youlose = GLabel(text='Sorry, you lost.', x=STARTUP_X, y=STARTUP_Y)\n self._displayLives = GLabel(text='Lives Remaining: ',x = 10, y=10)\n self.view.add(self._displayLives)\n self._displayLivesNum = GLabel(text=`self._lives`,x = 135, y=10)\n self.view.add(self._displayLivesNum)", "title": "" }, { "docid": "0d25e05a085dc8416000a3e41a2c75f9", "score": "0.5295123", "text": "def __init__(self):\n self.follow_table = defaultdict(lambda: defaultdict(bool))\n self.time_line = {}\n self.t = 0", "title": "" }, { "docid": "c6e3b53dd0f7c93817718087be63f761", "score": "0.529344", "text": "def __init__(self, db_name):\n self.con = sqlite.connect(db_name)", "title": "" }, { "docid": "c7291834393b3f6e53ad476964ad95db", "score": "0.5290914", "text": "def __init__(self):\n self.db = os.path.join(os.getenv(\"DBDIR\"), os.getenv(\"DB\"))\n self.dbConn = \"\"\n self.cur = \"\"", "title": "" }, { "docid": "f1f4af95cc78f516af79f1520fb32fd5", "score": "0.52892715", "text": "def __init__(\n self,\n rooms_data,\n persons_data,\n database_name='data_files/amity_database.db'):\n # Initializes all shelve files\n self.rooms_data = rooms_data\n self.persons_data = persons_data\n self.database_name = database_name\n\n # Set database name\n if not self.database_name:\n self.database_name = 'data_files/amity_database.db'\n else:\n self.database_name = database_name\n\n # Set up a connection and cursor\n self.db_conn = sqlite3.connect(self.database_name)\n self.db_cursor = self.db_conn.cursor()", "title": "" }, { "docid": "571be663aea4faec038a2c76a0173087", "score": "0.5284309", "text": "def __init__(self, dbfile):\n self.log = logging.getLogger(__name__)\n self.dbfile = dbfile\n self.dbinstance = None\n self.__open_db()", "title": "" }, { "docid": "86bdcebe78f57a52a4dc4e99cd4f49c6", "score": "0.52806795", "text": "def __init__(self):\n\tself.gravity_marker\t = 'GRAVITY'\n\tself.gravity = '8.5'\n\tself.frequency_marker = 'MEMBRANE_FREQUENCY'\n\tself.membrane_frequency = '1100'\n self.amp_marker = 'AMPLITUDE'\n self.amplitude = '0.33'\n self.a_ball_z_marker = 'A_BALL_HEIGHT'\n self.a_ball_z = '-6.2'\n self.spring_marker = 'SPRING_CONSTANT'\n self.spring_factor = '0.5'\n self.a_mass_marker = 'A_BALL_MASS'\n self.a_mass = '392.01'\n self.iterations_marker = 'ITERATIONS'\n self.iterations = 
'20000'\n self.sheet_radius_m = 'SHEET_RADIUS'\n self.sheet_radius = '88'\n\n self.processes = '8'", "title": "" }, { "docid": "2048ba80ac4f1fd4b770ddcc343e46a5", "score": "0.5275106", "text": "def __init__ (self, year, userid=None, db_entry=None, manager=None, \n name=None, position=0, populate=True, create=False):\n self.db = db_entry\n\n if self.db == None and userid != None:\n # Load this team from the data store\n self.db = ffdb.FFDBTeam.load(year, userid)\n\n if self.db == None and manager != None:\n # Try looking up the team by manager\n query = ffdb.FFDBTeam.query().filter(ndb.AND(\n ffdb.FFDBTeam.year==year,\n ffdb.FFDBTeam.manager==manager))\n teams = query.fetch(1)\n\n if len(teams) == 1:\n self.db = teams[0]\n else:\n # The value passed in may be a 'safemgr' which isn't stored in \n # the DB, and if they differ then the above query will fail.\n # Fall back to querying all teams and looking for a match.\n query = ffdb.FFDBTeam.query().filter(ffdb.FFDBTeam.year==year)\n for team in query:\n if FFTeam.mgr_to_safemgr(team.manager) == manager:\n self.db = team\n break\n\n if self.db == None and create:\n # No entry in the store either. Create a new one and save it\n self.db = ffdb.FFDBTeam(year=year, userid=userid, manager=manager, \n name=name, funds=STARTING_FUNDS)\n if name == None:\n self.db.name = \"{} {}\".format(manager, choice(TEAM_NAMES))\n\n self.db.save()\n elif self.db == None:\n # No entry and not creating one. This isn't allowed.\n raise KeyError(\"No database entry for userid '{}', \"\\\n \"mgr '{}'\".format(userid, manager))\n\n #Properties displayed on the website\n self.userid = self.db.userid\n self.year = year\n self.manager = self.db.manager\n self.safemgr = self.mgr_to_safemgr(self.manager)\n self.name = self.db.name\n self.funds = self.db.funds\n self.week_score = self.db.get_week_score()\n self.total_score = self.db.get_total_score()\n self.position = position\n self.players = []\n self.explayers = []\n\n if populate or create:\n self.populate_players()", "title": "" }, { "docid": "b71f0b10308f4c78239b73c990024d00", "score": "0.52745223", "text": "def __init__(self,DataClass):\r\n \r\n #make link to access data class backwards\r\n self.DataClass = DataClass\r\n self.Type = self.DataClass.Type\r\n \r\n #when the class is barely created it will be empty. 
To avoid conflict...\r\n isReady = False\r\n \r\n #Lines and grid\r\n self.LineOn = True\r\n self.GridOn = True", "title": "" }, { "docid": "39076834b623d2026e372e5be7fba951", "score": "0.52715063", "text": "def __init__(self):\n self.time = [0, 0]", "title": "" }, { "docid": "03ff16ee0f684cfc38e074281c09b0f6", "score": "0.5270158", "text": "def __init__(self, database):\n\t\tself.database = database", "title": "" }, { "docid": "528ef72bd02f40053e317017bc3bbdb2", "score": "0.52685857", "text": "def init_variables(self):\n self.c_speed = 10.0 / 3.6 # current speed [m/s]\n self.c_d = 0.0 # current lateral position [m]\n self.c_d_d = 0.0 # current lateral speed [m/s]\n self.c_d_dd = 0.0 # current lateral acceleration [m/s]\n self.s0 = 0.0 # current course position", "title": "" }, { "docid": "22b936d9219bcd6a4647cb0539c9443d", "score": "0.5265848", "text": "def __init__(self):\n self._pi = self.gen_pi()\n self._date = datetime.now()", "title": "" }, { "docid": "8c41cdcda2a600f1d0dde6aba5c1f237", "score": "0.5261295", "text": "def __init__(self,player,starting_coords):\n self._player = player\n self._location = starting_coords\n self._fences = 10", "title": "" }, { "docid": "3464fa1ca34839d1f02450aedca4ce18", "score": "0.52584183", "text": "def __init__(self, db):\n\t\tif db:\n\t\t\tself.db = db", "title": "" }, { "docid": "dbc9dc5a7275e2e39db13f1db5f5b126", "score": "0.52573633", "text": "def __init__(self):\n self.id = 0\n self.username = \"None\"\n self.mydb = connect.connect()\n self.cursor = self.mydb.cursor()\n self.url = \"https://people.eecs.ku.edu/~b040w377/laserpi.html\"", "title": "" }, { "docid": "3b645be66be62261b3082dcb1364d3b0", "score": "0.52563953", "text": "def __init__(self, datastore_root: str):\n self.session_storage: Dict[str, boto3.Session] = {}\n self.table_access_condition = threading.Condition()\n self.conn: Dict[int, sqlite3.Connection] = {}\n self.db_path = os.path.join(datastore_root, \"mturk.db\")\n self.init_tables()\n self.datastore_root = datastore_root\n self._last_hit_mapping_update_times: Dict[str, float] = defaultdict(\n lambda: time.monotonic()\n )", "title": "" }, { "docid": "05f9b48254a881c97a014e47f429a2ae", "score": "0.52519244", "text": "def __init__(self, db):\r\n if db:\r\n self.db = db\r\n self.fs = gridfs.GridFS(db)\r\n else:\r\n self.db = None\r\n self.fs = None", "title": "" }, { "docid": "28edfbc1cdf4dedf3c1b1e2def87b252", "score": "0.52502865", "text": "def __init__(self, map_id, rows, columns):\n\n Map.__init__(self, map_id, columns, rows, 5*columns)\n self.fishes = []\n self.moving_bridges = []\n self.sub_holes = []\n self.thrones = []\n self.clouds = []\n self.lake = None\n self.stones = []\n self.extras = []\n self.pole = None\n self.lives = []\n self.up_wall = None\n self.down_wall = None\n print(self.length, self.columns, self.rows)\n print(config.MAP_LENGTH, config.COLUMNS, config.ROWS)\n self.create_walls()\n self.create_clouds()\n self.create_pole()\n self.create_lake_fishes()\n self.create_bridges()\n self.create_moving_bridges()\n self.create_holes()\n self.create_coins()\n self.create_enemies()\n self.create_extras()\n self.initial_player_position = {'max_x': 4, 'max_y': self.up_wall.min_y - 1, 'min_x': 3, 'min_y': self.up_wall.min_y - 2}\n self.create_checkpoints()\n self.player_crossed_start = False\n self.player_crossed_lake = False\n self.player_crossed_thrones = False\n\n # self.music_conf = [{}]", "title": "" }, { "docid": "2bdf5de3d21d61c367460c7225341c23", "score": "0.52501166", "text": "def __init__(self):\r\n # Brython : 
fake values\r\n self.st_atime = datetime.datetime.now()\r\n self.st_mtime = self.st_ctime = self.st_atime_ns = \\\r\n self.st_mtime_ns = self.st_ctime_ns = self.st_atime\r\n self.st_uid = self.st_gid = self.st_ino = -1\r\n self.st_mode = 0\r\n self.st_size = 1", "title": "" }, { "docid": "2b84492c501d4bc5ef5507e1bb566e76", "score": "0.5245291", "text": "def init(file_path): # type: (str) -> None\n\n class ThreadDatabase(Thread):\n \"\"\"\n This thread runs in background and performs all operations with the database if needed\n Creates new database if not exists yet\n \"\"\"\n\n def run(self):\n connection = sqlite3.connect(file_path)\n\n # create the database schema if not exists yet\n connection.execute('CREATE TABLE IF NOT EXISTS \"bans\" ('\n '`id` INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,'\n '`time` INTEGER NOT NULL,'\n '`ip` TEXT NOT NULL);')\n connection.execute('CREATE TABLE IF NOT EXISTS \"attacks\" ('\n '`id` INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,'\n '`time` INTEGER NOT NULL,'\n '`ip` TEXT NOT NULL,'\n '`profile` TEXT NOT NULL,'\n '`user` TEXT,'\n '`data` INTEGER NOT NULL);')\n\n while AppRunning.is_running():\n if not Database.queue_in.empty():\n data = Database.queue_in.get() # type: dict\n # the present key determines what time of data this is\n if 'sql' in data: # perform SQL query\n db_respond = list(connection.execute(data['sql'], data['param']))\n elif 'commit' in data: # commit the saved data\n connection.commit()\n db_respond = True\n else: # not sure what to do, just respond None\n db_respond = None\n # return responded object\n Database.queue_out.put(db_respond)\n AppRunning.sleep_while_running(0.1)\n\n # start the background thread with database connection\n ThreadDatabase().start()", "title": "" }, { "docid": "53fbc620d791f02aa0df5b9ab0815bf0", "score": "0.52422607", "text": "def updateData(sens_no, nasa, lat, lon, timezon):\n connection = sqlite3.connect('data.db')\n cursor = connection.cursor()\n if nasa:\n sensorID = nasa_sensors_dict[sens_no]\n sensorLoc = nasa_sensors_loc[sens_no]\n table = \"nasalight\" + str(sens_no)\n else:\n sensorID = sensors_dict[sens_no]\n sensorLoc = sensors_loc[sens_no]\n table = \"light\" + str(sens_no)\n x = sensorLoc[0]\n y = sensorLoc[1]\n cursor.execute('SELECT MAX(unixtime) FROM ' + table)\n start = int(cursor.fetchone()[0])\n end = int(time.time())*1000\n limit = (end - start)/300000\n print(\"limit is:\" + str(limit))\n print(\"Start is:\" + str(start))\n print(\"End is:\" + str(end))\n createData(sens_no, start, end)\n #Save your changes\n connection.commit()", "title": "" }, { "docid": "5e00b1e46e43aa83a5075f28dd9e9006", "score": "0.52386785", "text": "def __init__(self, update_event_id, fps, update_rate, grid, canvas):\n\t\tself.update_event_id = update_event_id\n\t\tself.fps = fps\n\t\tself.update_rate = update_rate\n\t\tself.clock = pygame.time.Clock()\n\t\tself.paused = False\n\t\tself.grid = grid\n\t\tself.canvas = canvas\n\n\t\tself.mouse_drag_mode = \"\"\n\t\t\n\t\tpygame.time.set_timer(self.update_event_id, self.update_rate)", "title": "" }, { "docid": "64d1874b6e8742cf724a4f0e9535d356", "score": "0.5237281", "text": "def __init__(self, db=None):\n self.db = db", "title": "" }, { "docid": "0c7007a839c2d2b70d6cc57f65a54a5b", "score": "0.5234582", "text": "def __init__(self):\n self.conn = sqlite3.connect(ChannelDatabase.locale)\n self.curr = self.conn.cursor()", "title": "" }, { "docid": "ada7ae6beeadcc8d391cede1294cf759", "score": "0.52336127", "text": "def __init__(self):\n\n self.start_game = 
True\n self.launch_partie = False\n self.the_end = False\n self.death = False", "title": "" }, { "docid": "3f57de2d8314d2fc6d474b9fd6a50dda", "score": "0.52280873", "text": "def __init__(self, con, *args, **kwargs):\r\n\t\tself._con = con\r\n\t\tself._args, self._kwargs = args, kwargs\r\n\t\tself._clearsizes()\r\n\t\tself._cursor = con._cursor(*args, **kwargs)", "title": "" }, { "docid": "9fe8ba2d1693d6e56a65029aeedf3343", "score": "0.52249265", "text": "def example_data():\n\n spot1 = \"POINT(40.71911552 -74.00666661)\"\n spot2 = \"POINT(40.71117416 -74.00016545)\"\n spot3 = \"POINT(40.73172428 -74.00674436)\"\n station1 = Station(id=79, name=\"Franklin St & W Broadway\", point=spot1,\n num_bikes_available=0, num_docks_available=0)\n station2 = Station(id=82, name=\"St James Pl & Pearl St\", point=spot2,\n num_bikes_available=0, num_docks_available=0)\n station3 = Station(id=127, name=\"Barrow St & Hudson St\", point=spot3,\n num_bikes_available=0, num_docks_available=0)\n\n db.session.add_all([station1, station2, station3])\n db.session.commit()", "title": "" }, { "docid": "b0256e6786dbfdef3225353668d1675a", "score": "0.52224916", "text": "def __init__(self, path):\n self.con = sql.connect(path)\n self.cur = self.con.cursor()", "title": "" }, { "docid": "15e729f1c92c1a9877816b610c8a1f2e", "score": "0.52210635", "text": "def __init__(self):\n self._paddle=Paddle()\n self._bricks=[]\n self._ball=Ball()\n x=BRICK_SEP_H/2.0+BRICK_WIDTH/2.0\n y=GAME_HEIGHT-BRICK_Y_OFFSET-BRICK_HEIGHT/2.0\n for i in range(0,BRICK_ROWS):\n c=BRICK_COLORS[i/2]\n for j in range(0,BRICKS_IN_ROW):\n brick=Brick(x,y,c)\n self._bricks.append(brick)\n x=x+(BRICK_WIDTH+BRICK_SEP_H)\n x=BRICK_SEP_H/2.0+BRICK_WIDTH/2.0\n y=y-(BRICK_HEIGHT+BRICK_SEP_V)\n self._tries=3", "title": "" }, { "docid": "751773f7164f95b4e15718dbe729127d", "score": "0.5212421", "text": "def __init__(self):\n self.TheCase = {}\n self.TheObj = {}\n self.Model = {}\n self.cur = DataBase()\n print \"OllinTS has been loaded\\n\"", "title": "" }, { "docid": "db9c885fe06bf8e82af214e32c7db788", "score": "0.5211463", "text": "def __init__(self):\r\n date_time('Connecting to local database')\r\n\r\n self.conn = sqlite3.connect(DATABASE_PATH)\r\n self.cursor = self.conn.cursor()\r\n\r\n self.output_path = os.path.dirname(DATABASE_PATH)\r\n self.conn = sqlite3.connect(DATABASE_PATH)\r\n self.cursor = self.conn.cursor()\r\n\r\n # Set up database\r\n self.cursor.execute('PRAGMA synchronous = OFF')\r\n self.cursor.execute('PRAGMA journal_mode = OFF')\r\n self.cursor.execute('PRAGMA locking_mode = EXCLUSIVE')\r\n self.cursor.execute('PRAGMA count_changes = FALSE')\r\n\r\n # Create tables\r\n self.tables = {table: IsbnGraphTable(table, self.conn, self.cursor) for table in GRAPH_TABLES}", "title": "" }, { "docid": "1b5ac98a664c8c35c228389738b12945", "score": "0.5210843", "text": "def readDB(self,verbose):\n try:\n database = Dataset(self.filename)\n except:\n raise IOError(\"Error opening file %s in YamboRTCarriersParser\"%self.filename)\n self.E_bare = HaToeV*np.array(database.variables['RT_carriers_E_bare'])\n self.f_bare = np.array(database.variables['RT_carriers_f_bare'])\n self.kpoints = np.array(database.variables['RT_kpt'][:].T)\n self.bands_kpts = np.array(database.variables['RT_bands_kpts'])\n self.k_weight = np.array(database.variables['RT_k_weight'])\n self.delta_E = HaToeV*np.array(database.variables['RT_carriers_delta_E'])\n self.delta_f = np.array(database.variables['RT_carriers_delta_f'])", "title": "" }, { "docid": 
"7b2d870098290669079a1a75d066af5c", "score": "0.5208695", "text": "def init_stream_gage_db(first_time):\n # Create tables\n Base.metadata.create_all(engine)\n\n # Initial data\n if first_time:\n # Make session\n session = SessionMaker()\n\n\n # Gage 1\n gage1 = StreamGage(latitude=18.976622,\n longitude=-71.28982099634956,\n value=1 name=Presa de Sabaneta)\n\n session.add(gage1)\n\n\n # Gage 2\n gage2 = StreamGage(latitude=19.031064,\n longitude=-71.29954095389662,\n value=2 name=Paso de Lima)\n\n session.add(gage2)\n\n\n # Gage 3\n gage3 = StreamGage(latitude=18.892486,\n longitude=-71.25836565957668,\n value=3 name=Canafistol)\n\n session.add(gage3)\n\n # Gage 4\n gage4 = StreamGage(latitude=18.724674,\n longitude=-71.10897454764346,\n value=4 name=Sabana Alta)\n\n session.add(gage4)\n\n # Gage 5\n gage5 = StreamGage(latitude=18.817731,\n longitude=-71.11786344369251,\n value=5 Name=El Cacheo)\n\n session.add(gage5)\n\n\n\n session.commit()", "title": "" }, { "docid": "4ee3f2ae6ce4028a5b4fac3c91ba0017", "score": "0.52081543", "text": "def __init__(self):\n\n pygame.init()\n pygame.mixer.init()\n\n self.screen = pygame.display.set_mode((s.WIDTH, s.HEIGHT))\n pygame.display.set_caption(s.TITLE)\n\n self.clock = pygame.time.Clock()\n self.font_name = pygame.font.match_font(s.FONT_NAME)\n self.paused = False\n self.running = True\n self.score = 0\n self.load_data()", "title": "" }, { "docid": "213ae93ada8ae64caaef62df9055e48d", "score": "0.5207362", "text": "def __init__(self, deltat):\n\n self.clock = betman.all.clock.Clock(deltat)\n self.dbman = database.DBMaster()\n \n self.load_strategies()\n \n # market ids for all strategies (for updating prices)\n self.marketids = self.stratgroup.get_marketids()\n\n # we store selection objects as a dictionary of dictionaries.\n # This contains the selection objects (e.g. a particular\n # horse), and the selection objects contain the current price,\n # hence the name.\n self.prices = {const.BDAQID: {}, const.BFID: {}}\n\n # orders for both exchanges\n self.orders = {const.BDAQID: {}, const.BFID: {}}\n\n # call the API functions to refresh prices etc.\n self.on_startup()", "title": "" }, { "docid": "004ba6c8fe5989a9c1ea2028287e8173", "score": "0.5205208", "text": "def __init__(self, config, athlete_id):\n self.connection = pymysql.connect(host='localhost', user=config['mysql_user'], password=config['mysql_password'], db=config['mysql_base'], charset='utf8mb4')\n self.cursor = self.connection.cursor(pymysql.cursors.DictCursor)\n self.activities_table = config['mysql_activities_table']\n self.gears_table = config['mysql_bikes_table']\n self.athlete_id = athlete_id", "title": "" }, { "docid": "b0526b0f50538a5f31c3360b8fb7b504", "score": "0.5199727", "text": "def __init__(self, ddbb_conn):\n self.bbdd = ddbb_conn", "title": "" } ]
cf83537c115725c409330215d6b5a422
Represents a list of spdx.document.ExternalDocumentRef as a Python list of dictionaries
[ { "docid": "3aef02b25c3f372a6dd9dc469f800c58", "score": "0.69072384", "text": "def ext_document_references_to_list(cls, ext_doc_refs):\n ext_doc_refs_list = []\n\n for ext_doc_ref in ext_doc_refs:\n ext_doc_ref_dict = OrderedDict([\n ('externalDocumentId', ext_doc_ref.external_document_id),\n ('spdxDocumentNamespace', ext_doc_ref.spdx_document_uri),\n ('checksum', cls.checksum_to_dict(ext_doc_ref.check_sum)),\n ])\n ext_doc_refs_list.append(ext_doc_ref_dict)\n\n return ext_doc_refs_list", "title": "" } ]
[ { "docid": "02f0c1fdf2e1ca3705bb50309444c0c7", "score": "0.6509022", "text": "def to_dict(ret, deref_list=[]):\n retdict = ret.to_mongo().to_dict()\n for ref in deref_list:\n if isinstance(ret._data[ref], list):\n retdict[ref] = [x.to_mongo().to_dict() for x in ret._data[ref]]\n else:\n retdict[ref] = ret._data[ref].to_mongo().to_dict()\n return retdict", "title": "" }, { "docid": "bfa62911a7503c88c22ad467fa034aad", "score": "0.6317307", "text": "def transform_to_dict(cls, xref_list):\n xref_output = defaultdict(list)\n for _record in xref_list:\n # note that the 'xref' field names are from the chembl datasource, not the parser\n if 'xref_src' in _record and _record['xref_src'] == 'PubChem':\n assert _record['xref_name'].startswith('SID: ')\n xref_output['pubchem'].append({'sid': int(_record['xref_id'])})\n elif 'xref_src' in _record and _record['xref_src'] == 'Wikipedia':\n xref_output['wikipedia'].append({'url_stub': _record['xref_id']})\n elif 'xref_src' in _record and _record['xref_src'] == 'TG-GATEs':\n xref_output['tg-gates'].append({'name': _record['xref_name'], 'id': int(_record['xref_id'])})\n elif 'xref_src' in _record and _record['xref_src'] == 'DailyMed':\n xref_output['dailymed'].append({'name': _record['xref_name']})\n elif 'xref_src' in _record and _record['xref_src'] == 'DrugCentral':\n xref_output['drugcentral'].append({'name': _record['xref_name'], 'id': int(_record['xref_id'])})\n return xref_output", "title": "" }, { "docid": "4b812d66b28a15bca28f31ca9346c4b7", "score": "0.61689496", "text": "def ref_dict(self):\n\n return self._objs", "title": "" }, { "docid": "2f1617f867c231eb51a7a4b33e93d8b2", "score": "0.5951094", "text": "def _parse_references(self, doc):\n bib_entries = []\n if ('bib_entries' in doc\n and isinstance(doc['bib_entries'], dict)\n and len(doc['bib_entries']) > 0\n ):\n for bib in doc['bib_entries'].values():\n bib_entries.append({\n 'ref_id': bib.get('ref_id', ''),\n 'title': bib.get('title', ''),\n 'year': bib.get('year', None),\n 'issn': str(bib.get('issn', '')),\n 'doi': str(bib.get('other_ids', {}).get('DOI', \"\")),\n \"text\": str(bib.get('other_ids', {}).get('DOI', \"\"))\n })\n if len(bib_entries) == 0:\n doi = self._parse_doi(doc)\n bib_entries = find_references(doi)\n return bib_entries", "title": "" }, { "docid": "8a0ee5ad77c4e18e89e160cff3df027c", "score": "0.5849119", "text": "def Documents(self):\n return self._documents.values()", "title": "" }, { "docid": "101ca4d90810bb75ba8e35a35732d0cd", "score": "0.56637585", "text": "def get_document_list(self):\n try:\n return self.__dict__['document_list']\n except KeyError:\n obj_list = DocumentSet([\n self._connection.documents.get(i) for i in self.document_ids\n ])\n self.__dict__['document_list'] = obj_list\n return obj_list", "title": "" }, { "docid": "9f4206405314b29d353de9dca6388861", "score": "0.5650747", "text": "def as_dict(self):\n return [link.as_dict() for link in self if link]", "title": "" }, { "docid": "7f62e276e48616e3ad5212b0d30e38da", "score": "0.5595633", "text": "def to_dict(self):\n return self.data.to_dict('list')", "title": "" }, { "docid": "7f62e276e48616e3ad5212b0d30e38da", "score": "0.5595633", "text": "def to_dict(self):\n return self.data.to_dict('list')", "title": "" }, { "docid": "c29c34b094dfd063d743aa853677c894", "score": "0.5592673", "text": "def items(self):\n return zip(self.ref_book.fields, self.value)", "title": "" }, { "docid": "edbaf4b7c9846305d25baae9ed835859", "score": "0.5568751", "text": "def emit_references(self) -> Iterable[model.LSIFEntry]:\n for 
doc_id, ref_ids in self.references.items():\n yield model.Item(\n out_v=self.ref_result_id,\n in_vs=ref_ids,\n document=doc_id,\n property=\"references\",\n )", "title": "" }, { "docid": "72f83f9569a3f4630c7e4076eb191953", "score": "0.5535174", "text": "def to_dict_seq(o_list: Sequence[BaseAPIClass]) -> Sequence[Dict]:\n\treturn [to_dict(o) for o in o_list]", "title": "" }, { "docid": "d0c86ea42deb32d066b3f0807c5e2139", "score": "0.54926324", "text": "def get_doc_dicts(self, doc_ids):\n pass", "title": "" }, { "docid": "c5c4a1357fdef277dec666682efc1a1a", "score": "0.5481872", "text": "def references(self):\n output = []\n for record in self.values():\n output.extend(ref for ref in record.references if ref not in output)\n\n return output", "title": "" }, { "docid": "d2d7a6b05c862784be71faca8d06c5d0", "score": "0.5404749", "text": "def json(self):\n return simplejson.dumps(self._internal_list)", "title": "" }, { "docid": "d463983359dd94957e9150ccd47088d1", "score": "0.53864527", "text": "def references(self):\n return {}", "title": "" }, { "docid": "43f230b5be9e7f8d7bca2ce8ab9a9c59", "score": "0.53785855", "text": "def as_dict(inst):\n d = {}\n from attr import fields\n for field in fields(type(inst)):\n if field.name.endswith(\"Ref\"): continue\n\n value = getattr(inst, field.name)\n\n if field.name == \"audioBlockFormats\":\n value = list(map(as_dict, value))\n if hasattr(value, \"id\"):\n value = value.id\n elif isinstance(value, list):\n value = [item.id if hasattr(item, \"id\") else item for item in value]\n\n d[field.name] = value\n\n return d", "title": "" }, { "docid": "4953e2e0488328bba0dbc5726f60802d", "score": "0.5366276", "text": "def to_list(self):\n return [r.to_dict() for r in self]", "title": "" }, { "docid": "a2602445b458be2055faae826ade7da5", "score": "0.52632177", "text": "def to_dict(self) -> Dict:\n _dict = {}\n if hasattr(self, 'docs') and self.docs is not None:\n _dict['docs'] = [x.to_dict() for x in self.docs]\n if hasattr(self, 'id') and self.id is not None:\n _dict['id'] = self.id\n return _dict", "title": "" }, { "docid": "06cc55f34fdc013043e7299a68f202fc", "score": "0.52307326", "text": "def serialize(self):\n doc_item_dict = dict()\n doc_item_dict[\"id\"] = self.id\n doc_item_dict[\"key\"] = self.key\n doc_item_dict[\"len\"] = self.doc_len\n return doc_item_dict", "title": "" }, { "docid": "db8965d9a7f542c0af9dfbe960c9d782", "score": "0.5229486", "text": "def to_dict(self):\n return {\n 'uid': self.tlist.uid.hex,\n 'list': [{'pri': e.pri, 'title': e.title, 'done': e.done}\n for e in self.tlist.list],\n }", "title": "" }, { "docid": "79ef1c504b9cde5b9fb190431b02d756", "score": "0.5227061", "text": "def to_mongo(self, value):\n from mongoengine import Document\n\n if isinstance(value, basestring):\n return value\n\n if hasattr(value, 'to_mongo'):\n return value.to_mongo()\n\n is_list = False\n if not hasattr(value, 'items'):\n try:\n is_list = True\n value = dict([(k,v) for k,v in enumerate(value)])\n except TypeError: # Not iterable return the value\n return value\n\n if self.field:\n value_dict = dict([(key, self.field.to_mongo(item)) for key, item in value.items()])\n else:\n value_dict = {}\n for k,v in value.items():\n if isinstance(v, Document):\n # We need the id from the saved object to create the DBRef\n if v.pk is None:\n raise ValidationError('You can only reference documents once '\n 'they have been saved to the database')\n\n # If its a document that is not inheritable it won't have\n # _types / _cls data so make it a generic reference allows\n # us 
to dereference\n meta = getattr(v, 'meta', getattr(v, '_meta', {}))\n if meta and not meta['allow_inheritance'] and not self.field:\n from fields import GenericReferenceField\n value_dict[k] = GenericReferenceField().to_mongo(v)\n else:\n collection = v._get_collection_name()\n value_dict[k] = pymongo.dbref.DBRef(collection, v.pk)\n elif hasattr(v, 'to_mongo'):\n value_dict[k] = v.to_mongo()\n else:\n value_dict[k] = self.to_mongo(v)\n\n if is_list: # Convert back to a list\n return [v for k,v in sorted(value_dict.items(), key=operator.itemgetter(0))]\n return value_dict", "title": "" }, { "docid": "6fe8d0d4678dfe367eaf2dd78535fa33", "score": "0.5214337", "text": "def to_python(self, value):\n from mongoengine import Document\n\n if isinstance(value, basestring):\n return value\n\n if hasattr(value, 'to_python'):\n return value.to_python()\n\n is_list = False\n if not hasattr(value, 'items'):\n try:\n is_list = True\n value = dict([(k,v) for k,v in enumerate(value)])\n except TypeError: # Not iterable return the value\n return value\n\n if self.field:\n value_dict = dict([(key, self.field.to_python(item)) for key, item in value.items()])\n else:\n value_dict = {}\n for k,v in value.items():\n if isinstance(v, Document):\n # We need the id from the saved object to create the DBRef\n if v.pk is None:\n raise ValidationError('You can only reference documents once '\n 'they have been saved to the database')\n collection = v._get_collection_name()\n value_dict[k] = pymongo.dbref.DBRef(collection, v.pk)\n elif hasattr(v, 'to_python'):\n value_dict[k] = v.to_python()\n else:\n value_dict[k] = self.to_python(v)\n\n if is_list: # Convert back to a list\n return [v for k,v in sorted(value_dict.items(), key=operator.itemgetter(0))]\n return value_dict", "title": "" }, { "docid": "ae1ed2a51c37a9d6ffb77a4ab9e5d971", "score": "0.5209183", "text": "def to_dict(self) -> Dict:\n _dict = {}\n if hasattr(self, 'docs') and self.docs is not None:\n _dict['docs'] = [x.to_dict() for x in self.docs]\n if hasattr(self, 'new_edits') and self.new_edits is not None:\n _dict['new_edits'] = self.new_edits\n return _dict", "title": "" }, { "docid": "8dab0d44f37e4ca9001366e1ea8bda9a", "score": "0.5206102", "text": "def _sie_referenced(self):\n refs = [self._sie_coll]\n if self._sie_key is not None:\n refs.append(self._sie_key)\n return refs", "title": "" }, { "docid": "34555cd2c6df45a3fbabbe24b6320009", "score": "0.52049446", "text": "def get_list_json(self):\n all_entries = []\n\n for entry_id in self._entries.keys():\n all_entries.append(self._entries[entry_id].create_entry_dictionary())\n\n return all_entries", "title": "" }, { "docid": "3df13c64d3d90d26729d7cbb2b3bba6e", "score": "0.52035296", "text": "def serialize_items(items):\n final_list = []\n for item in items:\n final_list.append(item.__dict__)\n return final_list", "title": "" }, { "docid": "2ec6035c5d738486ef9f041f81ad61e1", "score": "0.5196107", "text": "def materialize(reference_dict: IndexReferences, source: List[Any]) -> Dict[str, Any]:\n materialized_dict = {}\n for field in reference_dict:\n reference = reference_dict[field]\n if isinstance(reference, list):\n materialized_dict[field] = [source[index] for index in reference]\n elif reference is not None:\n materialized_dict[field] = source[reference]\n else:\n materialized_dict[field] = None\n return materialized_dict", "title": "" }, { "docid": "6fd1fa077567e9d00d5223c033141d70", "score": "0.5144623", "text": "def to_dict(self):\n result = {}\n\n for attr, _ in six.iteritems(self.swagger_types):\n 
value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n if issubclass(InlineResponse2005Contacts, dict):\n for key, value in self.items():\n result[key] = value\n\n return result", "title": "" }, { "docid": "2fb17b23365b71ed76f91b9d1cc51207", "score": "0.5139868", "text": "def serialize(self):\n ind_item_dict = dict()\n ind_item_dict[\"id\"] = self.doc_id\n ind_item_dict[\"cnt\"] = self.token_cnt\n return ind_item_dict", "title": "" }, { "docid": "4bed32c31360860c9cebe0cb39a85e83", "score": "0.51370513", "text": "def _lists(self):\n lists = {}\n info_elm = self._xml.find('info')\n for listname in self._listnames():\n lists[listname] = []\n for entry in info_elm.find(listname).iterchildren():\n entry = entry.text\n lists[listname].append(entry)\n return lists", "title": "" }, { "docid": "a51a52ea7269dd1bb34905234b5d7a91", "score": "0.5119096", "text": "def data(self):\n return list(self.formatedList)", "title": "" }, { "docid": "a51a52ea7269dd1bb34905234b5d7a91", "score": "0.5119096", "text": "def data(self):\n return list(self.formatedList)", "title": "" }, { "docid": "3f717b9b3bd518171ee6cbef1526b5eb", "score": "0.5116425", "text": "def getMetadataItems(self, dataRefList, datasetType, nameList):\n valList = []\n for dataRef in dataRefList:\n metadata = dataRef.get(datasetType=datasetType)\n valList.append(dict((name, metadata.get(name)) for name in nameList))\n return valList", "title": "" }, { "docid": "ae499760dc311e937d85203a10210a33", "score": "0.5104178", "text": "def to_dict(cls, doc):\n creators = sorted(doc.creation_info.creators, key=lambda c: c.name)\n return OrderedDict([\n ('id', doc.spdx_id),\n ('specVersion', cls.version_to_dict(doc.version)),\n ('namespace', doc.namespace),\n ('name', doc.name),\n ('comment', doc.comment),\n ('dataLicense', cls.license_to_dict(doc.data_license)),\n ('licenseListVersion', cls.version_to_dict(doc.creation_info.license_list_version)),\n ('creators', [cls.entity_to_dict(creator) for creator in creators]),\n ('created', utils.datetime_iso_format(doc.creation_info.created)),\n ('creatorComment', doc.creation_info.comment),\n ('package', cls.package_to_dict(doc.package)),\n ('externalDocumentRefs', cls.ext_document_references_to_list(sorted(doc.ext_document_references))),\n ('extractedLicenses', cls.extracted_licenses_to_list(sorted(doc.extracted_licenses))),\n ('annotations', cls.annotations_to_list(sorted(doc.annotations))),\n ('reviews', cls.reviews_to_list(sorted(doc.reviews))),\n ('snippets', cls.snippets_to_list(sorted(doc.snippet))),\n ])", "title": "" }, { "docid": "eb483be53cc981a520407d84e097f6b7", "score": "0.508774", "text": "def to_dict(self):\n return {\"Ref_Key\": self.obj_id}", "title": "" }, { "docid": "71a60b533bf48597fbc6a3e512cf4894", "score": "0.5086859", "text": "def items_serializable(self):\n return [item.to_dict() for item in self.items]", "title": "" }, { "docid": "1287395a2907d2dd631f264cfd1adffa", "score": "0.5086333", "text": "def to_dict(self):\n result = {}\n\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else 
x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n if issubclass(IOspfv3VirtualLinks, dict):\n for key, value in self.items():\n result[key] = value\n\n return result", "title": "" }, { "docid": "cea75194b4cedfcdec1c474e43987400", "score": "0.5085404", "text": "def _lookup_schema(self, schema: dict) -> Union[List, Mapping]:\n is_collection = schema.get(\"type\") == TYPE_ARRAY\n\n if is_collection:\n ref = schema[\"items\"][\"$ref\"]\n else:\n ref = schema.get(\"$ref\")\n\n if ref is None:\n raise NotImplementedError(\"Currently only schema refs are supported\")\n\n _schema = self.spec.copy()\n for key in ref.split(\"/\")[1:]:\n _schema = _schema[key]\n return _schema if not is_collection else [_schema]", "title": "" }, { "docid": "a70284b65884ed7f1aa2d9f44221ca06", "score": "0.5084513", "text": "def _sie_referenced(self):\n return [self._sie_obj]", "title": "" }, { "docid": "150c9855b47acc854865b88071b1047f", "score": "0.5081689", "text": "def gather_ref_as(self, src, refs):\n if isinstance(src, GnomeId):\n src = [src,]\n for ob in src:\n if hasattr(ob, '_ref_as'):\n names = ob._ref_as\n if not isinstance(names, list):\n names = [names,]\n for n in names:\n if n in refs:\n if ob not in refs[n]:\n #only add if it doesn't already exist in the list\n refs[n].append(ob)\n else:\n refs[n] = [ob,]", "title": "" }, { "docid": "9cb1a073764fbea7f70a3f32cac364a6", "score": "0.5078996", "text": "def as_dict(self):\n if not self:\n return {}\n\n return {self.name: [x.as_dict() for x in self]}", "title": "" }, { "docid": "0f3c55cd7ae17e357b6ea074c1271c7e", "score": "0.50656724", "text": "def newDpto():\n dpto = {\n \"Artworks\": None}\n\n dpto['Artworks'] = lt.newList('ARRAY_LIST')\n return dpto", "title": "" }, { "docid": "eb49960b01b764f6c9f21f4cc91f2662", "score": "0.5054321", "text": "def queryset_to_dict(self):\n return [x.to_dict(expand=False) for x in self]", "title": "" }, { "docid": "4e54b57378dcfce9a1fedd17c613faf6", "score": "0.50539076", "text": "def list_references(self):\n ref_list = []\n for ref_name in self.__dict__:\n if type(self.__dict__[ref_name])==Reference:\n ref_list.append(ref_name)\n return ref_list", "title": "" }, { "docid": "50841d1838af1d871dad16e031bf5a38", "score": "0.50483227", "text": "def references(self):\n # FIXME: we should also collect additional data from the references such as tags and ids\n references = []\n\n # we track each CPE as a reference for now\n for cpe in self.cpes:\n cpe_url = f\"https://nvd.nist.gov/vuln/search/results?adv_search=true&isCpeNameSearch=true&query={cpe}\"\n references.append(Reference(reference_id=cpe, url=cpe_url))\n\n # FIXME: we also add the CVE proper as a reference, but is this correct?\n references.append(\n Reference(\n url=f\"https://nvd.nist.gov/vuln/detail/{self.cve_id}\",\n reference_id=self.cve_id,\n severities=self.severities,\n )\n )\n\n # clean to remove dupes for the CVE id proper\n ref_urls = [\n ru\n for ru in self.reference_urls\n if ru != f\"https://nvd.nist.gov/vuln/detail/{self.cve_id}\"\n ]\n references.extend([Reference(url=url) for url in ref_urls])\n\n return references", "title": "" }, { "docid": "5b60cb63314d51818f723944281fd053", "score": "0.5047541", "text": "def normalize_reference_list(ref_id, fld):\n return [normalize_reference(ref.id, fld) for ref in ref_id]", "title": 
"" }, { "docid": "cca3d3af5765433f12f3f3b52d7b674e", "score": "0.50395507", "text": "def _sie_referenced(self):\n return [k for k,v in self._sie_entries] + [v for k,v in self._sie_entries]", "title": "" }, { "docid": "0fb364ab10e0bc03caac25627d554dca", "score": "0.5024851", "text": "def to_dict(self):\n result = {}\n\n for attr, _ in iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n else:\n result[attr] = value\n\n return result", "title": "" }, { "docid": "0fb364ab10e0bc03caac25627d554dca", "score": "0.5024851", "text": "def to_dict(self):\n result = {}\n\n for attr, _ in iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n else:\n result[attr] = value\n\n return result", "title": "" }, { "docid": "0fb364ab10e0bc03caac25627d554dca", "score": "0.5024851", "text": "def to_dict(self):\n result = {}\n\n for attr, _ in iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n else:\n result[attr] = value\n\n return result", "title": "" }, { "docid": "0fb364ab10e0bc03caac25627d554dca", "score": "0.5024851", "text": "def to_dict(self):\n result = {}\n\n for attr, _ in iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n else:\n result[attr] = value\n\n return result", "title": "" }, { "docid": "0fb364ab10e0bc03caac25627d554dca", "score": "0.5024851", "text": "def to_dict(self):\n result = {}\n\n for attr, _ in iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n else:\n result[attr] = value\n\n return result", "title": "" }, { "docid": "0fb364ab10e0bc03caac25627d554dca", "score": "0.5024851", "text": "def to_dict(self):\n result = {}\n\n for attr, _ in iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n else:\n result[attr] = value\n\n return result", "title": "" }, { "docid": "1d4853feddc3c55502e2653d030f79b0", "score": "0.5017841", "text": "def record_list(self):\r\n return self.record\r\n # return [self.species_name_inverse,self.record]\r", "title": "" }, { "docid": "a02eb59a0833763f22e627dc720cb862", "score": "0.50173855", "text": "def read_reference(entry):\n # expected input (example):\n # kegg_information['REFERENCE']:\n # [\n # 'REFERENCE PMID:11939774'\n # 'AUTHORS Cheong CG, Bauer CB, Brushaber KR, Escalante-Semerena JC, Rayment I'\n # 'TITLE Three-dimensional structure of the L-threonine-O-3-phosphate\n # decarboxylase (CobD) enzyme from Salmonella enterica.'\n 
# 'JOURNAL Biochemistry 41:4798-808 (2002)'\n # 'DOI:10.1021/bi012111w'\n # ],\n # [\n # 'REFERENCE PMID:11939774',\n # 'AUTHORS Cheong CG, Bauer CB, Brushaber KR, Escalante-Semerena JC, Rayment I'\n # 'TITLE Three-dimensional structure of the L-threonine-O-3-phosphate\n # decarboxylase (CobD) enzyme from Salmonella enterica.'\n # 'JOURNAL Biochemistry 41:4798-808 (2002)'\n # 'DOI:10.1021/bi012111w'\n # ]\n #\n # expected output (example):\n # reference output = [\n # {\n # 'dbxref': 'PMID:11939774',\n # 'authors': ['Cheong CG', 'Bauer CB', 'Brushaber KR', 'Escalante-Semerena JC', 'Rayment I']\n # 'title': 'Three-dimensional structure of the L-threonine-O-3-phosphate decarboxylase (CobD)\n # enzyme from Salmonella enterica.'\n # 'journal': 'Biochemistry 41:4798-808 (2002)'\n # 'DOI': '10.1021/bi012111w'\n # },\n # {\n # 'dbxref': 'PMID:11939774',\n # 'authors': ['Cheong CG', 'Bauer CB', 'Brushaber KR', 'Escalante-Semerena JC', 'Rayment I']\n # 'title': 'Three-dimensional structure of the L-threonine-O-3-phosphate decarboxylase (CobD)\n # enzyme from Salmonella enterica.'\n # 'journal': 'Biochemistry 41:4798-808 (2002)'\n # 'DOI': '10.1021/bi012111w'\n # }\n # ]\n\n reference_output = []\n for lines in entry:\n next_reference = {\"dbxref\": \"\", \"authors\": \"\", \"title\": \"\", \"journal\": \"\",\n \"doi\": \"\"} # Create a new Dictionary with empty values\n for line in lines:\n if line.startswith(\"REFERENCE\"):\n next_reference[\"dbxref\"] = \"\".join(line.strip().split(\" \", )[-1].replace(\"[\", \"\").replace(\"]\", \"\"))\n if line.startswith(\" AUTHORS\"):\n next_reference[\"authors\"] = \" \".join(line.split()[1:])\n if line.startswith(\" TITLE\"):\n next_reference[\"title\"] = \" \".join(line.split()[1:])\n if line.startswith(\" JOURNAL\"):\n next_reference[\"journal\"] = \" \".join(line.split()[1:])\n if line.strip().startswith(\"DOI:\"):\n next_reference[\"DOI\"] = line.split(\":\")[1:]\n reference_output.append(next_reference)\n return reference_output", "title": "" }, { "docid": "7cba864fd39a05e4d2897143b30d0e77", "score": "0.50142133", "text": "def to_dict(seqrecs):\n return dict([(s.id, s) for s in seqrecs])", "title": "" }, { "docid": "b367cabbe08fee7351a500ea3aab4cfd", "score": "0.5009511", "text": "def referenceFromJSON(cls, data):\n source = OrderedDict()\n\n # Before #84516 Wikibase did not implement snaks-order.\n # https://gerrit.wikimedia.org/r/#/c/84516/\n if 'snaks-order' in data:\n prop_list = data['snaks-order']\n else:\n prop_list = data['snaks'].keys()\n\n for prop in prop_list:\n for claimsnak in data['snaks'][prop]:\n claim = cls.fromJSON({'mainsnak': claimsnak,\n 'hash': data['hash']})\n claim.isReference = True\n if claim.getID() not in source:\n source[claim.getID()] = []\n source[claim.getID()].append(claim)\n return source", "title": "" }, { "docid": "831fdf9bfd8f05bd618b233cb73f0370", "score": "0.5004391", "text": "def _dictfetchall(self):\n return [dict(zip([col[0] for col in self.cursor.description], row)) \\\n for row in self.cursor.fetchall()]", "title": "" }, { "docid": "757b9e3b36aa00ba05545b0b4454fb4d", "score": "0.50014186", "text": "def get_documents(self, employee_id):\n doc_obj = self.pool['hr.document']\n cr = self.cr\n uid = self.uid\n doc_ids = doc_obj.search(\n cr, uid, [('emp_id', '=', employee_id)], order='issue_date')\n docs = {}\n for doc in doc_obj.browse(cr, uid, doc_ids):\n doc_type = doc.type_id.name\n if doc_type not in docs:\n docs[doc_type] = False\n docs[doc_type] = doc\n return docs", "title": "" }, { "docid": 
"fe5a80b6195d3db2e91e4c41f056f499", "score": "0.4991506", "text": "def to_dict(self):\n result = {}\n\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n if issubclass(DownloadLinkResponse, dict):\n for key, value in self.items():\n result[key] = value\n\n return result", "title": "" }, { "docid": "57840b3f52923a7da659e6b3a9daad62", "score": "0.49821475", "text": "def read_references(root):\n references = []\n for cit_art in root.findall(\"Seq-entry_seq/Bioseq/Bioseq_descr/Seq-descr/Seqdesc/Seqdesc_pub/Pubdesc/Pubdesc_pub/\"\n \"Pub-equiv/Pub/Pub_article/Cit-art\"):\n author_list = []\n journal = {}\n title = \"\"\n doi = \"\"\n # Find Authors\n for author in cit_art.findall(\"Cit-art_authors/Auth-list/Auth-list_names/Auth-list_names_std/Author\"):\n author_list.append(author.find(\"Author_name/Person-id/Person-id_name/Name-std/Name-std_last\").text + \", \" +\n author.find(\"Author_name/Person-id/Person-id_name/Name-std/Name-std_initials\").text)\n # Find Title\n title = cit_art.find(\"Cit-art_title/Title/Title_E/Title_E_name\").text\n # Find Journal\n journal = {\"name\": cit_art.find(\"Cit-art_from/Cit-art_from_journal/Cit-jour/Cit-jour_title/Title/Title_E/\"\n \"Title_E_iso-jta\").text,\n \"date\": cit_art.find(\"Cit-art_from/Cit-art_from_journal/Cit-jour/Cit-jour_imp/Imprint/Imprint_date/\"\n \"Date/Date_std/Date-std/Date-std_day\").text + \".\" +\n cit_art.find(\"Cit-art_from/Cit-art_from_journal/Cit-jour/Cit-jour_imp/Imprint/Imprint_date/\"\n \"Date/Date_std/Date-std/Date-std_month\").text + \".\" +\n cit_art.find(\"Cit-art_from/Cit-art_from_journal/Cit-jour/Cit-jour_imp/Imprint/Imprint_date/\"\n \"Date/Date_std/Date-std/Date-std_year\").text\n }\n # Find Pubmed DOI\n doi = cit_art.find(\"Cit-art_ids/ArticleIdSet/ArticleId/ArticleId_doi/DOI\").text\n # Put into dictionary\n references.append({\"authors\": author_list,\n \"title\": title,\n \"journal\": journal,\n \"doi\": doi\n })\n return {\"references\": references}", "title": "" }, { "docid": "1e7538debfea4058ea68acd778fc2a7b", "score": "0.49736077", "text": "def to_dict(self):\n result = {}\n\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n if issubclass(Links, dict):\n for key, value in self.items():\n result[key] = value\n\n return result", "title": "" }, { "docid": "112b67af5d37edc9bc3194705e048e52", "score": "0.49639672", "text": "def as_dict(self) -> Mapping[KT, Collection[VT]]:", "title": "" }, { "docid": "790bcc4970da524f72669c4849e1f532", "score": "0.4963081", "text": "def doc_dict(self):\n doc = {\n 'type': self.value_type,\n 'description': self.description,\n 'extended_description': self.details\n }\n return doc", "title": "" }, { "docid": 
"e3b5b575b90604907ab6a59910ef3be3", "score": "0.49608347", "text": "def to_dict(self):\n result = {}\n\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n if issubclass(ListSourceImagesResponse, dict):\n for key, value in self.items():\n result[key] = value\n\n return result", "title": "" }, { "docid": "bbe57ba9074cdd09ad4ce0ce54aec3ab", "score": "0.49607784", "text": "def getDataList(self, dataRefList, datasetType):\n return [dataRef.get(datasetType=datasetType) for dataRef in dataRefList]", "title": "" }, { "docid": "f0ba9a05522a7dc2512ecce711851d23", "score": "0.4951675", "text": "def _to_dict(self):\n _dict = {}\n if hasattr(self, 'model_id') and self.model_id is not None:\n _dict['model_id'] = self.model_id\n if hasattr(self, 'model_version') and self.model_version is not None:\n _dict['model_version'] = self.model_version\n if hasattr(self, 'documents') and self.documents is not None:\n _dict['documents'] = [x._to_dict() for x in self.documents]\n if hasattr(self,\n 'aligned_elements') and self.aligned_elements is not None:\n _dict['aligned_elements'] = [\n x._to_dict() for x in self.aligned_elements\n ]\n if hasattr(\n self,\n 'unaligned_elements') and self.unaligned_elements is not None:\n _dict['unaligned_elements'] = [\n x._to_dict() for x in self.unaligned_elements\n ]\n return _dict", "title": "" }, { "docid": "42eb27fd4a28cf0101984d0b08a883ec", "score": "0.49510846", "text": "def get_documents(self, resp):\n documents = []\n for value in resp['documents']:\n document = self.json_to_document(value)\n documents.append(document)\n return documents", "title": "" }, { "docid": "c2f3fd614f4ba99ba8f304a0d71f4a94", "score": "0.49428764", "text": "def as_list(self):\n return self.parse().asList()", "title": "" }, { "docid": "4bcf6dedac66b72cdf42de835516f596", "score": "0.49401042", "text": "def to_struct(self):\n return [url.to_struct() for url in self.urls]", "title": "" }, { "docid": "5decf21c419a9cc830e391a68311351e", "score": "0.49370453", "text": "def document_list_reference(self, document_id):\n try:\n uri = (API_URI['reference'] % locals())\n payload = {'id': document_id}\n r = requests.get(self.host + uri, headers=self.headers, params=payload)\n log_api('GET', uri, r)\n except requests.exceptions.ConnectionError:\n self.handleError()\n return r", "title": "" }, { "docid": "a18d83147c11ec4980937eafe699893e", "score": "0.49336827", "text": "def as_list(wrapped):\n\n original_converter = wrapped.converter\n\n def list_converter(values):\n if original_converter is not None and values is not None:\n return [original_converter(value) for value in values]\n\n return values\n\n wrapped.converter = list_converter\n wrapped.metadata['paxb.mapper'] = mappers.ListXmlWrapper(wrapped.metadata['paxb.mapper'])\n\n return wrapped", "title": "" }, { "docid": "09ddd4a77df90553e0897275b48fb42c", "score": "0.49274942", "text": "def toList(self):\n thisItem = self.asJSON()\n thisItem[\"parent\"] = \"root\"\n return [thisItem]", "title": "" }, { "docid": "d6fe84e3995857938a828a18dcebbafa", "score": "0.49273616", "text": "def _sie_referenced(self):\n return []", "title": "" }, { 
"docid": "5b47fa2b5169cdaea8b68e40182cf85b", "score": "0.49204466", "text": "def combine_data( self , docs ):\n\n detail_doc = {}\n \n docs_list = []\n\n for i in range( len( docs ) ):\n\n docs_list += docs[i]\n \n detail_doc[ i ] = [ len( docs[i] ) , self.__filenames[i].split(\".\")[0] ] \n\n return docs_list , detail_doc", "title": "" }, { "docid": "a0442e7243c6b736a39e914f01502c2b", "score": "0.49174353", "text": "def to_dict(self):\n return {'linked_elem_name': self.linked_elem_name,\n 'linked_elem_field': self.linked_elem_field,\n 'linked_elem_parent_name': self.linked_elem_parent_name,\n 'linked_elem_parent_option': self.linked_elem_parent_option}", "title": "" }, { "docid": "a54e8a2abff581e193163cfe61e1c95c", "score": "0.49066383", "text": "def domain_arxivdocs():\n\n arxiv_doc_1 = ArxivDocument(\n doc_id=\"url_1\",\n url=\"url_1\",\n title=\"title_1\",\n abstract=\"abstract_1\",\n authors=[\"author1\", \"author2\"],\n publish_date=\"publish_date_1\",\n pdf_url=\"pfg_url1\",\n )\n\n arxiv_doc_2 = ArxivDocument(\n doc_id=\"url_2\",\n url=\"url_2\",\n title=\"title_2\",\n abstract=\"abstract_2\",\n authors=[\"author2\", \"author2\"],\n publish_date=\"publish_date_2\",\n pdf_url=\"pfg_url2\",\n )\n\n arxiv_doc_3 = ArxivDocument(\n doc_id=\"url_3\",\n url=\"url_3\",\n title=\"title_3\",\n abstract=\"abstract_3\",\n authors=[\"author3\", \"author2\"],\n publish_date=\"publish_date_3\",\n pdf_url=\"pfg_url3\",\n )\n\n arxiv_doc_4 = ArxivDocument(\n doc_id=\"url_4\",\n url=\"url_4\",\n title=\"title_4\",\n abstract=\"abstract_4\",\n authors=[\"author4\", \"author2\"],\n publish_date=\"publish_date_4\",\n pdf_url=\"pfg_url4\",\n )\n\n return [arxiv_doc_1, arxiv_doc_2, arxiv_doc_3, arxiv_doc_4]", "title": "" }, { "docid": "9362bf74759451b6721df5de1e518970", "score": "0.4905935", "text": "def _pythonify_result(self, result):\n new_result = []\n for company in result['RESULT']['ROWS']:\n new_company = {}\n for i,item in enumerate(company):\n new_company[result['RESULT']['HEADER'][i]] = item\n new_result.append(new_company)\n return new_result", "title": "" }, { "docid": "2b4a84ee3d964d7f73b4b9ff356f38d7", "score": "0.49055138", "text": "def as_dicts(self) -> list[dict[str, str]]:\n if self._db is None:\n return []\n return self._db.as_dicts()", "title": "" }, { "docid": "a416531e97fab56b3dec1035defa2791", "score": "0.48966783", "text": "def get_all_refs(schema):\n\n all_refs = set()\n\n if type(schema) is dict:\n for key, val in schema.items():\n if key == \"$ref\" and type(val) is str:\n all_refs.add(val)\n\n all_refs.update(get_all_refs(val))\n elif type(schema) is list:\n for item in schema:\n all_refs.update(get_all_refs(item))\n\n return all_refs", "title": "" }, { "docid": "ee62f62aa60ed6dc468963e1be498c14", "score": "0.4890281", "text": "def get_data_references(self):\n refs = []\n qset = self.value_set\n for value in qset.all():\n for r in value.references.all():\n if r not in refs:\n refs.append(r)\n return sorted(refs, key=lambda r: r.author)", "title": "" }, { "docid": "50ef843f13f202661b207d8972ce343f", "score": "0.48866594", "text": "def get_item_documents(self):\n return self.document_assets.all()", "title": "" }, { "docid": "62738bbeed04dddd259fde0d24101af3", "score": "0.4881198", "text": "def get_indicator_publication(indicator: dict[str, Any]):\n publications = []\n for external_reference in indicator.get('external_references', []):\n url = external_reference.get('url', '')\n description = external_reference.get('description', '')\n source_name = 
external_reference.get('source_name', '')\n publications.append({'link': url, 'title': description, 'source': source_name})\n return publications", "title": "" }, { "docid": "0c03820a5021921f121b9c7f1a3711f2", "score": "0.48810115", "text": "def newIDList()->dict:\n idList = {\n 'idList': lt.newList('ARRAY_LIST', compareIds),\n 'size': 0\n }\n return idList", "title": "" }, { "docid": "6cc4aa26f8831b65ba8c800ce7d274ac", "score": "0.4872654", "text": "def records(self):\n return self._record_list", "title": "" }, { "docid": "9d99f3b9d2dc33e76c55cdc8f776541c", "score": "0.48661786", "text": "def _init_refs(self):\n anns, imgs = {}, {}\n for ann in self.instances['annotations']:\n anns[ann['id']] = ann\n for img in self.instances['images']:\n imgs[img['id']] = img\n\n refs, ref_to_ann = {}, {}\n for ref in self.splits:\n # ids\n ref_id = ref['ref_id']\n ann_id = ref['ann_id']\n # add mapping related to ref\n refs[ref_id] = ref\n ref_to_ann[ref_id] = anns[ann_id]\n\n self.refs = refs\n self.ref_to_ann = ref_to_ann", "title": "" }, { "docid": "6c4ebca71e8ebd02702f5f71b7597877", "score": "0.48658895", "text": "def to_dict(self):\n result = {}\n\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n if issubclass(Attachment, dict):\n for key, value in self.items():\n result[key] = value\n\n return result", "title": "" }, { "docid": "0da85449655d4b34607852fc51122b9f", "score": "0.48596942", "text": "def xref(self):\n return self[\"xref\"]", "title": "" }, { "docid": "a081dcb0ea693b4db6f45d5fc356b8c2", "score": "0.48594564", "text": "def rel_to_dict(rels):\n\n res = defaultdict(list)\n for r in rels:\n if not valid_def(r['definition']):\n continue\n res[r['identifier']].append((r['definition'], r['score']))\n return res", "title": "" }, { "docid": "9c8a44b6b068b150f8fe7059913a99a9", "score": "0.48594543", "text": "def parse(collection_path: str) -> Dict[int, List[int]]:\r\n root = cElementTree.parse(collection_path).getroot()\r\n match = re.match(r'{.*}', root.tag)\r\n namespace = match.group() if match else ''\r\n\r\n doc_ids = {}\r\n outlink_titles = {}\r\n for page in root.iter(namespace + 'page'):\r\n id_ = int(page.find(namespace + 'id').text)\r\n title = page.find(namespace + 'title').text\r\n assert id_ is not None and title is not None\r\n # Note this doesn't work on the small index, we aren't using\r\n # the small index anymore in the course\r\n text = page.find(namespace + 'revision').find(namespace + 'text').text\r\n if text is None:\r\n links = []\r\n else:\r\n links = extract_links(text)\r\n\r\n doc_ids[title] = id_\r\n outlink_titles[id_] = links\r\n\r\n outlink_ids = {}\r\n for id_, titles in outlink_titles.items():\r\n outlink_ids[id_] = [doc_ids[title]\r\n for title in titles\r\n if title in doc_ids]\r\n\r\n for id_ in get_isolates(outlink_ids):\r\n outlink_ids.pop(id_)\r\n\r\n return outlink_ids", "title": "" }, { "docid": "8b77afa5fb83a75a2a5d711147da6064", "score": "0.4859084", "text": "def to_dict(self, field_map={}) -> Dict:\n inv_field_map = {v: k for k, v in field_map.items()}\n _doc: Dict[str, str] = {}\n for k, v in self.__dict__.items():\n if k == 
\"content\":\n # Convert pd.DataFrame to list of rows for serialization\n if self.content_type == \"table\" and isinstance(self.content, pd.DataFrame):\n v = [self.content.columns.tolist()] + self.content.values.tolist()\n k = k if k not in inv_field_map else inv_field_map[k]\n _doc[k] = v\n return _doc", "title": "" }, { "docid": "cf14e83e5161538b80264b8116201865", "score": "0.48433688", "text": "def document_to_dict(self, expand=True):\n expand_fields = getattr(self, \"_expand_fields\", []) if expand else []\n return to_dict(self.select_related(), expand_fields)", "title": "" }, { "docid": "88f6362667de4f98124736b916651ccf", "score": "0.48378682", "text": "def array_jsonify(records):\n return [dict(r.items()) for r in records]", "title": "" }, { "docid": "f6e867439a5ae6c8727cb311ed67283c", "score": "0.4836375", "text": "def dict(self):\n res = {}\n for (k, v) in iteritems(self.__dict__):\n if isinstance(v, SolrResult):\n res[k] = v.dict\n else:\n res[k] = v\n return res", "title": "" }, { "docid": "1dbddbaa2d1cc4e9b768619ee0482099", "score": "0.48356593", "text": "def get_list(self):\n return self.record_list", "title": "" }, { "docid": "6ece184b9887933fc126c4f5e71989a5", "score": "0.48238862", "text": "def retrieve(dbxrefs, basics=True, dbsource=True, references=True):\n resolved = dbxref.resolver.resolve(dbxrefs, check_existence=False)\n documents = []\n for entry in resolved:\n xml_url = entry[\"locations\"][\"xml\"][0]\n logger.debug(\"URL: %s\", xml_url)\n gi = requests.get(xml_url)\n logger.debug(\"Content: %s\", gi.text)\n output = {\"id\": entry[\"dbxref\"]}\n try:\n root = ET.fromstring(gi.text)\n if basics:\n try:\n output.update(read_basics(root))\n except KeyError:\n print(\"One ore more of the basic information were not available for given dbxref. \"\n \"Please check the source data.\")\n raise\n if dbsource:\n try:\n output.update(read_dbsource(root))\n except KeyError:\n print(\"Source database information wasn't or wasn't fully available. Please check the source data\")\n raise\n if references:\n try:\n output.update(read_references(root))\n except KeyError:\n print(\"reference information wasn't or wasn't fully available. 
Please check the source data\")\n raise\n except (RuntimeError, ET.ParseError):\n print(\"An error occurred\")\n raise\n documents.append(output)\n return documents", "title": "" }, { "docid": "5cb678683b150f71c4b5ec27f8f862b3", "score": "0.4819621", "text": "def santize_JSON(instance,ext_ob_name=\"extOb\",rel_name=\"Ordered\",append_attr=True,attrs = None): \r\n return lists_to_relations(extract_components(instance,prefix=ext_ob_name,attrs=attrs),rel_name=rel_name,append_attr=append_attr,attrs=attrs)", "title": "" }, { "docid": "38220d14d3537d4d85adc58985d8e5c6", "score": "0.4818021", "text": "def getDict(self):\n returnList = []\n\n for listProjects in self.projects.values():\n for index, proj in enumerate(listProjects):\n d = proj.getDict()\n returnList.append(d)\n\n return returnList", "title": "" }, { "docid": "a5fe1964e29276a7a1dd9532a9860796", "score": "0.4814739", "text": "def getSchema(self):\n\n #JP - Schema hardcoded from flattened dictionary manually referencing first returned result\n #Could easily employ flatten_dict to dynamically generate dictionary\n schema = [\n 'web_url',\n 'snippet',\n 'multimedia',\n 'headline.main',\n 'headline.kicker',\n 'headline.content_kicker',\n 'headline.print_headline',\n 'headline.name',\n 'headline.seo',\n 'headline.sub',\n 'keywords',\n 'document_type',\n 'type_of_material',\n '_id',\n 'word_count',\n 'score'\n ]\n\n return schema", "title": "" } ]
5be360da2c57ecc95fded4d0ffa094a2
Filter your queryset as you want, then return it.
[ { "docid": "034fef724d5ff4b2b0c05a72b18f3891", "score": "0.0", "text": "def get_receivers_queryset(self, receiver_ids):\n return receiver_ids", "title": "" } ]
[ { "docid": "2eab7744c15e7982917bedecf46b5c80", "score": "0.76472706", "text": "def filter(self, qs):\n return qs.all()", "title": "" }, { "docid": "0d55fa6bdebd3337e64ce5e6580d617b", "score": "0.7603844", "text": "def filter_queryset(self, qs, filter_param):\n return qs", "title": "" }, { "docid": "78e1338ac795014c08c60999084c7c28", "score": "0.7531174", "text": "def filter(self, *args, **kwargs) -> 'QuerySet':\n return self.__class__(filter_items(self.data, *args, **kwargs))", "title": "" }, { "docid": "9486632772f351d46db782ddbae46a50", "score": "0.739901", "text": "def get_queryset(self):\n queryset = super().get_queryset()\n queryset = self.filter_queryset(queryset)\n\n return queryset.all()", "title": "" }, { "docid": "19730e055527dbabf79ffc7d8cc2e4cf", "score": "0.7395545", "text": "def filter_queryset(self, queryset):\r\n return filter_products(queryset, self.request.user)", "title": "" }, { "docid": "b08d220246ef83a2ddc89c7e1a991f62", "score": "0.735471", "text": "def queryset(self, request, queryset):\n # Compare the requested value (either '80s' or '90s')\n # to decide how to filter the queryset.\n val = self.value()\n if not val:\n return queryset\n elif val == \"living\":\n return queryset.filter(deleted__isnull=True)\n elif val == \"deleted\":\n return queryset.filter(deleted__isnull=False)\n elif val == \"added_by_log\":\n return queryset.filter(added_by_log=True)\n else:\n return queryset", "title": "" }, { "docid": "24e9ebea3eacdf35ce963102b85ee154", "score": "0.72444576", "text": "def _apply_filtering_to_queryset_for_display(self, queryset):\n return queryset", "title": "" }, { "docid": "56b14d703b9e44154457df2a3532078d", "score": "0.72164387", "text": "def filter_queryset(self, queryset):\n return queryset.filter(owner=self.request.user)", "title": "" }, { "docid": "192ae9843d0d9718a78de54f54a46d17", "score": "0.71308756", "text": "def queryset(self, request, queryset):\n # Compare the requested value (either '80s' or '90s')\n # to decide how to filter the queryset.\n if self.value():\n return queryset.filter(data__type=self.value())\n else:\n return queryset", "title": "" }, { "docid": "bd6d2c5f02728341fe1201a8056eb3f9", "score": "0.71259665", "text": "def filter_queryset(self, queryset):\n for name, value in self.form.cleaned_data.items():\n if value is None:\n continue\n elif isinstance(value,models.Model):\n value = value.pk\n elif name not in self.filters:\n continue\n queryset = self.filters[name].filter(queryset, value)\n assert isinstance(queryset, models.QuerySet), \\\n \"Expected '%s.%s' to return a QuerySet, but got a %s instead.\" \\\n % (type(self).__name__, name, type(queryset).__name__)\n return queryset", "title": "" }, { "docid": "664c357a4e5a484fba60112498936e1f", "score": "0.7028975", "text": "def filter(self,**kwds):\n return self.all(**kwds)", "title": "" }, { "docid": "19ae40a933b7e502d0475eb1752fe3ba", "score": "0.7025862", "text": "def queryset(self, request, queryset):\n # Compare the requested value (either '80s' or '90s')\n # to decide how to filter the queryset.\n if not self.value():\n return queryset\n else:\n return queryset.filter(key_group=self.value())", "title": "" }, { "docid": "12b488ebf4d81f37d3c8b9f95a732b84", "score": "0.6942982", "text": "def queryset(self, request, queryset):\n # Decide how to filter the queryset based on the request\n if self.value() == 'none':\n return queryset.filter(sense__isnull=True)\n if self.value() == 'morethanone':\n return queryset.filter(sense__gte=1)", "title": "" }, { "docid": 
"20df7d5a6683b1b59d6765c88e3c01d6", "score": "0.6924387", "text": "def queryset(self, request, queryset):\n # Compare the requested value (either '80s' or '90s')\n # to decide how to filter the queryset.\n if self.value() == \"yes\":\n return queryset.filter(team__isnull=False)\n if self.value() == \"no\":\n return queryset.filter(team__isnull=True)", "title": "" }, { "docid": "fefe73abfaca48b2648ad63e2e6f29e4", "score": "0.68974674", "text": "def queryset(self, request, queryset):\n # Compare the requested value (either '80s' or '90s')\n # to decide how to filter the queryset.\n # return queryset.filter(birthday__gte=date(1990, 1, 1),)\n if self.value() == 'blank':\n blank = []\n for s in queryset:\n if s.away_wins() == 0 and s.home_wins() == 0:\n blank.append(s.id)\n return ScoreSheet.objects.filter(id__in=blank)\n else:\n return queryset", "title": "" }, { "docid": "0952ef3b5d4488dddef1be2851946367", "score": "0.6867878", "text": "def filter_queryset(self, request, queryset, view):\r\n raise NotImplementedError(\".filter_queryset() must be overridden.\")", "title": "" }, { "docid": "c64ad2f4439ba3c6c728f87b399bf854", "score": "0.68638456", "text": "def filter(self, the_filter):\n return self.__model__.query.filter(the_filter).all()", "title": "" }, { "docid": "58d8f4140cd50ce790d9410907dac728", "score": "0.68631166", "text": "def get_queryset(self):\n objects = super(ListView, self).get_queryset()\n if 'filter' in self.request.GET:\n self.filter = self.request.GET['filter']\n if self.filter:\n objects = objects.filter(author__contains=self.filter)\n return objects", "title": "" }, { "docid": "729eac0cfb3a00c2f60274c063d6f8e5", "score": "0.68393165", "text": "def queryset(self, request, queryset):\n # Compare the requested value (either '80s' or '90s')\n # to decide how to filter the queryset.\n if self.value():\n print self.value()\n return queryset.filter(data__provider__isnull=True) if self.value() == 'not-defined' else queryset.filter(data__provider=self.value())\n else:\n return queryset", "title": "" }, { "docid": "45c29f4a49f9958f5d6fd4b38d7950fc", "score": "0.68325746", "text": "def filter(self, **kwargs):\n # TODO: error handling\n return self.schema(many=True).dump(self.filter_model(**kwargs)).data", "title": "" }, { "docid": "00f2f8acf4f189e929c8d9ae2d5b1f51", "score": "0.6815404", "text": "def filter_queryset_for_instances(self, queryset, instances):", "title": "" }, { "docid": "a32a16db99fa71af0b63bd10c6ff6d92", "score": "0.67885065", "text": "def filter_queryset(self, request, queryset, view):\n self.request = request\n self.view = view\n\n # enable addition of extra filters (i.e., a Q())\n # so custom filters can be added to the queryset without\n # running into https://code.djangoproject.com/ticket/18437\n # which, without this, would mean that filters added to the queryset\n # after this is called may not behave as expected\n extra_filters = self.view.get_extra_filters(request)\n\n disable_prefetches = self.view.is_update()\n\n self.DEBUG = settings.DEBUG\n\n return self._build_queryset(\n queryset=queryset,\n extra_filters=extra_filters,\n disable_prefetches=disable_prefetches,\n )", "title": "" }, { "docid": "385a61a9d2743e75210c67640cc8f6a5", "score": "0.6751936", "text": "def filter_queryset(self, queryset):\r\n for backend in self.get_filter_backends():\r\n queryset = backend().filter_queryset(self.request, queryset, self)\r\n return queryset", "title": "" }, { "docid": "9e3d1db24cb48d50d762bd5c96879548", "score": "0.673579", "text": "def get_queryset(self):\n\n 
user = self.request.user\n return Filter.objects.filter(owner_id=user.id).all()", "title": "" }, { "docid": "c7c70b55b1f2f59878b185852a4d9a6d", "score": "0.6727362", "text": "def filter_queryset(self, search_params, sort_params):\n return (\n self.get_queryset().custom_filter(search_params, sort_params)\n )", "title": "" }, { "docid": "81570be378765e09bdfd5140908653c5", "score": "0.6717536", "text": "def queryset(self, request, queryset):\n # Compare the requested value (either '80s' or 'other')\n # to decide how to filter the queryset.\n if self.value() == 'yes':\n return queryset.filter(Q(email__isnull=False) &\n ~Q(email=''))\n if self.value() == 'no':\n return queryset.filter(Q(email__isnull=True) |\n Q(email=''))", "title": "" }, { "docid": "c05bd7b4d19f8add88c9d41e3fa50af7", "score": "0.6699097", "text": "def queryset(self, request, queryset):\n # Compare the requested value (either '80s' or '90s')\n # to decide how to filter the queryset.\n val = self.value()\n if not val:\n return queryset\n elif val == \"True\":\n return queryset.filter(container_terminated__isnull=True)\n else:\n return queryset.filter(container_terminated__isnull=False)", "title": "" }, { "docid": "251f31b7b4b0432ff6307a78dd1c6662", "score": "0.66955835", "text": "def queryset(self, request, queryset):\n if self.value():\n partner = Service.objects.get(pk=self.value())\n return queryset.filter(partner=partner)\n return queryset", "title": "" }, { "docid": "f29e2822d6c60e7e78932ff77ef5c633", "score": "0.66952074", "text": "def filter_queryset(self, request, queryset, view):\n params = request.query_params\n filtered_ids = self._get_pk_parameter(params)\n if filtered_ids is not None:\n queryset = queryset.filter(**{f\"{self.key}__in\": filtered_ids})\n return queryset", "title": "" }, { "docid": "4dc364744a1dc21305d776b185a83551", "score": "0.6684309", "text": "def queryset(self, request, queryset):\n # Compare the requested value (either '80s' or '90s')\n # to decide how to filter the queryset.\n if self.value() == 'zero':\n return queryset.filter(assessment__module__title__contains = \"0. \")\n if self.value() == 'one':\n return queryset.filter(assessment__module__title__contains = \"1. \")\n if self.value() == 'two':\n return queryset.filter(assessment__module__title__contains = \"2. \")\n if self.value() == 'three':\n return queryset.filter(assessment__module__title__contains = \"3. \")\n if self.value() == 'four':\n return queryset.filter(assessment__module__title__contains = \"4. \")\n if self.value() == 'five':\n return queryset.filter(assessment__module__title__contains = \"5. \")\n if self.value() == 'six':\n return queryset.filter(assessment__module__title__contains = \"6. \")\n if self.value() == 'seven':\n return queryset.filter(assessment__module__title__contains = \"7. \")\n if self.value() == 'eight':\n return queryset.filter(assessment__module__title__contains = \"8. \")\n if self.value() == 'nine':\n return queryset.filter(assessment__module__title__contains = \"9. \")\n if self.value() == 'ten':\n return queryset.filter(assessment__module__title__contains = \"10. 
\")", "title": "" }, { "docid": "ee11514e58bef3608da1c263628ed6e7", "score": "0.6679822", "text": "def filter_queryset(self, queryset: Sequence) -> Sequence:\n filters = get_query_filters(\n self.request.query_params, CamelCaseToUnderscoreTransform()\n )\n for key, value in filters.items():\n queryset = [item for item in queryset if getattr(item, key) == value]\n\n return queryset", "title": "" }, { "docid": "665351e77e5ae1dbdff7008852f862b7", "score": "0.66754365", "text": "def get_queryset(self):\n return self.queryset.filter(user=self.request.user)", "title": "" }, { "docid": "665351e77e5ae1dbdff7008852f862b7", "score": "0.66754365", "text": "def get_queryset(self):\n return self.queryset.filter(user=self.request.user)", "title": "" }, { "docid": "665351e77e5ae1dbdff7008852f862b7", "score": "0.66754365", "text": "def get_queryset(self):\n return self.queryset.filter(user=self.request.user)", "title": "" }, { "docid": "665351e77e5ae1dbdff7008852f862b7", "score": "0.66754365", "text": "def get_queryset(self):\n return self.queryset.filter(user=self.request.user)", "title": "" }, { "docid": "665351e77e5ae1dbdff7008852f862b7", "score": "0.66754365", "text": "def get_queryset(self):\n return self.queryset.filter(user=self.request.user)", "title": "" }, { "docid": "665351e77e5ae1dbdff7008852f862b7", "score": "0.66754365", "text": "def get_queryset(self):\n return self.queryset.filter(user=self.request.user)", "title": "" }, { "docid": "665351e77e5ae1dbdff7008852f862b7", "score": "0.66754365", "text": "def get_queryset(self):\n return self.queryset.filter(user=self.request.user)", "title": "" }, { "docid": "665351e77e5ae1dbdff7008852f862b7", "score": "0.66754365", "text": "def get_queryset(self):\n return self.queryset.filter(user=self.request.user)", "title": "" }, { "docid": "665351e77e5ae1dbdff7008852f862b7", "score": "0.66754365", "text": "def get_queryset(self):\n return self.queryset.filter(user=self.request.user)", "title": "" }, { "docid": "0c3d94282a067e114f8595742df3702c", "score": "0.6654447", "text": "def get_queryset(self):\n return self.model.objects.filter(status=True).order_by('?')[:1]", "title": "" }, { "docid": "2e816a5ef04d213a53ddd03592e7d0dc", "score": "0.6653596", "text": "def get_queryset(self):\n return self.queryset.filter(model_id=self.kwargs['model_id'])", "title": "" }, { "docid": "2e816a5ef04d213a53ddd03592e7d0dc", "score": "0.6653596", "text": "def get_queryset(self):\n return self.queryset.filter(model_id=self.kwargs['model_id'])", "title": "" }, { "docid": "7b31857a4e73833970f3b2734df72370", "score": "0.6646337", "text": "def queryset(self, request, queryset):\n if self.value() is None:\n return queryset\n return queryset.filter(tags__category=Tag.WRITING, tags__slug=self.value())", "title": "" }, { "docid": "7b31857a4e73833970f3b2734df72370", "score": "0.6646337", "text": "def queryset(self, request, queryset):\n if self.value() is None:\n return queryset\n return queryset.filter(tags__category=Tag.WRITING, tags__slug=self.value())", "title": "" }, { "docid": "0d275a00f89215adfbf7154ebf737dfa", "score": "0.66439646", "text": "def filter_queryset(self, queryset):\n for name, filt in self.filters.items():\n val = self.values.get(name, None)\n if name is None:\n continue\n params = filt.filter_params(val)\n if not params:\n continue\n if isinstance(params, dict):\n queryset = queryset.filter(**params)\n if isinstance(params, QNode):\n queryset = queryset.filter(params)\n return queryset", "title": "" }, { "docid": "84ce608f174cfbba1e3e674ac24a1ff8", "score": 
"0.66353875", "text": "def queryset(self, request, queryset):\n # Compare the requested value to decide how to filter the queryset.\n field_key = 'class_day'\n if queryset.model == Registration:\n field_key = 'classoffer__' + field_key\n return queryset.filter(**{field_key: self.value()}) if self.value() else queryset", "title": "" }, { "docid": "71a4e16f5db5906664f3327808bc4857", "score": "0.66158414", "text": "def get_queryset(self):\n queryset = self.queryset\n query_set = queryset.filter(our_company=self.request.user.our_company)\n return query_set", "title": "" }, { "docid": "71a4e16f5db5906664f3327808bc4857", "score": "0.66158414", "text": "def get_queryset(self):\n queryset = self.queryset\n query_set = queryset.filter(our_company=self.request.user.our_company)\n return query_set", "title": "" }, { "docid": "1a5e32cd07cd4fcf90db0468d3d08749", "score": "0.6598393", "text": "def get_queryset(self):\r\n queryset = super().get_queryset()\r\n return queryset.filter(user_id=self.request.user.id)", "title": "" }, { "docid": "351ebc3af1825e68fb2fa2a807ca9c8b", "score": "0.6596728", "text": "def get_queryset(self):\n return self.queryset.all()", "title": "" }, { "docid": "351ebc3af1825e68fb2fa2a807ca9c8b", "score": "0.6596728", "text": "def get_queryset(self):\n return self.queryset.all()", "title": "" }, { "docid": "181e1f0fb02603532f9402a40a564398", "score": "0.65958464", "text": "def get_queryset(self):\n return self.queryset.filter(name=self.kwargs.get('name'))", "title": "" }, { "docid": "90ede286d15ad8e7eb1c43f79aa683a1", "score": "0.65952593", "text": "def filter(cls, *args, **kwargs) -> QuerySet:\n return QuerySet(cls).filter(*args, **kwargs)", "title": "" }, { "docid": "9f55c6e9e4610007f36cee7ce4d87a75", "score": "0.65935546", "text": "def filter_queryset(self, queryset):\n for backend in list(self.filter_backends):\n queryset = backend().filter_queryset(self.request, queryset, view=self)\n return queryset", "title": "" }, { "docid": "cd3b7167a586abd279eb50e38051da09", "score": "0.6585923", "text": "def queryset(self, request, queryset):\n # Compare the requested value (either '80s' or '90s')\n # to decide how to filter the queryset.\n if self.value() == '0 - 0.5':\n return queryset.filter(created__gte=timezone.now() - timedelta(days=6*30))\n if self.value() == '0.5 - 1':\n return queryset.filter(created__gte=timezone.now() - timedelta(days=12*30),\n created__lte=timezone.now() - timedelta(days=6*30))\n if self.value() == '1 - 1.5':\n return queryset.filter(created__gte=timezone.now() - timedelta(days=18*30),\n created__lte=timezone.now() - timedelta(days=12*30))\n if self.value() == '1.5 - 2':\n return queryset.filter(created__gte=timezone.now() - timedelta(days=24*30),\n created__lte=timezone.now() - timedelta(days=18*30))\n if self.value() == '>2':\n return queryset.filter(created__lte=timezone.now() - timedelta(days=24*30))", "title": "" }, { "docid": "ec88017167f292045d349c5e3f564c01", "score": "0.6579629", "text": "def filter_queryset(self, queryset):\n res = queryset\n for fld, val in self.request.query_params.items():\n res = self.filter_queryset_field(res, fld, val)\n return res", "title": "" }, { "docid": "0ebe4761b57df2a72c2455317493ddd1", "score": "0.6579391", "text": "def filter_queryset(self, request, queryset, view):\n params = request.query_params\n filtered_choices = self._get_choice_parameter(params)\n if filtered_choices is not None:\n queryset = queryset.filter(**{f\"{self.field}__in\": filtered_choices})\n return queryset", "title": "" }, { "docid": 
"b47db0b3e7a22dec2e72be53d34bcf09", "score": "0.6576168", "text": "def filter_queryset(self, queryset):\n user = self.request.user\n if user.is_staff:\n return queryset\n\n return queryset.filter(\n Q(children__stockrecords__partner__users__pk=user.pk)\n | Q(stockrecords__partner__users__pk=user.pk)\n ).distinct()", "title": "" }, { "docid": "5f256d5fe52c76f43b9b54489616d0b0", "score": "0.6572804", "text": "def get_queryset(self):\n return super().get_queryset().filter(user=self.request.user)", "title": "" }, { "docid": "5f256d5fe52c76f43b9b54489616d0b0", "score": "0.6572804", "text": "def get_queryset(self):\n return super().get_queryset().filter(user=self.request.user)", "title": "" }, { "docid": "2a5844f4c4cee3a6be76ee3ee02811da", "score": "0.65608215", "text": "def get_queryset(self):\r\n return filter_products(Product.objects.all(), self.request.user)", "title": "" }, { "docid": "2a5844f4c4cee3a6be76ee3ee02811da", "score": "0.65608215", "text": "def get_queryset(self):\r\n return filter_products(Product.objects.all(), self.request.user)", "title": "" }, { "docid": "2c35eabbf8971621e0c52bc83222ba03", "score": "0.6557135", "text": "def filter(cls, **kwargs: dict) -> 'QuerySet<Meta.Model>':\n return cls.Meta.model.objects.filter(**kwargs)", "title": "" }, { "docid": "b13342c3978f863e9fb51cdf1cff157a", "score": "0.65403384", "text": "def filter(self, *args, **kwargs):\n\t\ttry:\n\t\t\tif self.manager is not None:\n\t\t\t\treturn self.manager.filter(*args, **kwargs)\n\t\texcept self.manager.model.DoesNotExist:\n\t\t\tpass\n\t\texcept Exception as e:\n\t\t\tlgr.error('%sService filter exception: %s' % (self.manager.model.__name__, e))\n\t\treturn None", "title": "" }, { "docid": "a5618c3aef83f2dadfbb340f91889b11", "score": "0.65382916", "text": "def get_filter(self, **filter_kwargs):\n\n q_objects = super(ListView, self).get_filter(**filter_kwargs)\n form = self.get_filter_form()\n if form:\n q_objects.extend(form.get_filter())\n\n return q_objects", "title": "" }, { "docid": "cce494e0fa3e928fefd8c9ac9d497d48", "score": "0.6520529", "text": "def get_queryset(self):\n queryset = self.queryset\n query_set = queryset.filter(id=self.request.user.id)\n return query_set", "title": "" }, { "docid": "3969b5973dd3b4065882f1fe6be93563", "score": "0.6518351", "text": "def get_list(self, queryset=None):\n if queryset is None:\n query = self.get_queryset()\n else:\n query = queryset\n return query.filter(**self.cleaned_data)", "title": "" }, { "docid": "87cc022598e4beaa36783b2d0f10af69", "score": "0.6503563", "text": "def queryset(self, request, queryset):\n if self.value():\n return queryset.filter(user__institute__pk=self.value())", "title": "" }, { "docid": "da3f9986152c643716d33cab2c5b3576", "score": "0.6501807", "text": "def filter(self, *args, **kwargs):\n if 'item' in kwargs:\n args = [kwargs.pop('item')] + list(args)\n return Model.filter(self, *args, **kwargs)", "title": "" }, { "docid": "dd686aea0e8bd2d7deeab7c9884a5432", "score": "0.6493883", "text": "def _applyFilters(self, queryset, request):\n if 'id' in request.GET:\n queryset = queryset.filter(source__id=request.GET['id'])\n\n if 'owner' in request.GET:\n queryset = queryset.filter(source__owner__id=request.GET['owner'])\n\n return queryset", "title": "" }, { "docid": "bd012d96d7f0f463247fb781f3b1e68f", "score": "0.6492855", "text": "def queryset(self, request, queryset):\n\n if self.value():\n return queryset.filter(release__project__id__exact=self.value())\n\n else:\n return queryset", "title": "" }, { "docid": 
"6f8b281cfe8d2e8ebdb1d62f3666431e", "score": "0.6484853", "text": "def queryset(self, request, queryset):\n if self.value():\n return super().queryset(request, queryset)\n\n return super().queryset(request, queryset).none()", "title": "" }, { "docid": "035a68dca25ffd8fd28138d7c2c39a70", "score": "0.6479277", "text": "def filter_queryset(self, queryset):\n for backend in list(self.filter_backends):\n queryset = backend().filter_queryset(self.request, queryset, self)\n return queryset", "title": "" }, { "docid": "5681ccd854720dfb667d86ef627cf7e8", "score": "0.64733696", "text": "def get_queryset(self):\n # not not = bool(), but more faster\n assigned_only = not not int(self.request.query_params.get(\"assigned_only\", 0))\n\n queryset = self.queryset\n if assigned_only:\n queryset = queryset.filter(receita__isnull=False)\n return queryset.filter(user=self.request.user).order_by(\"-name\").distinct()", "title": "" }, { "docid": "40d77ca9183563dd46cfa8ca4299249e", "score": "0.64707386", "text": "def filter(self, *Qs, **kwargs):\n #TODO: make me return models, not rows\n #TODO: write a test\n #TODO: make me lazy\n\n q = Q(**kwargs) # make a query with the given kwargs\n with self.cursor as c:\n\n rows = c.execute(self.__select_sql__(*(q, ) + Qs))\n return rows.fetchall()", "title": "" }, { "docid": "1194bf6bfa68317309fc93fe259bcd4b", "score": "0.64667654", "text": "def queryset(self, request, queryset):\n\n if self.value() == 'in':\n return queryset.filter(user_type=1)\n if self.value() == 'out':\n return queryset.filter(user_type=2)", "title": "" }, { "docid": "9199a7edd00ee0c2d9617a6089780074", "score": "0.64409965", "text": "def queryset(self, request, queryset):\n # Compare the requested value (either '80s' or '90s')\n # to decide how to filter the queryset.\n lookup = self.value()\n if lookup:\n queryset = queryset.filter(keywords__contains=[lookup])\n return queryset", "title": "" }, { "docid": "db21bc399034de354feb0249ccd9fef0", "score": "0.64373446", "text": "def filter(self, **kw):\n return ModelQuery(self, self.model, headers=self.headers).filter(**kw)", "title": "" }, { "docid": "7d04f2830a2950d95e7d94cbfe5a46d1", "score": "0.6437188", "text": "def queryset(self, request, queryset):\n # Compare the requested value (either '80s' or 'other')\n # to decide how to filter the queryset.\n if self.value() == 'yes':\n return queryset.filter(Q(devilryuserprofile__full_name__isnull=False) &\n ~Q(devilryuserprofile__full_name__exact=''))\n if self.value() == 'no':\n return queryset.filter(Q(devilryuserprofile__full_name__isnull=True) |\n Q(devilryuserprofile__full_name__exact=''))", "title": "" }, { "docid": "e4d4a49e04191678057c78dfb001d2d2", "score": "0.64327496", "text": "def filter_queryset(self, qs):\n # If a search term, filter the query\n if self.search:\n return qs.filter(\n Q(username__icontains=self.search)\n | Q(first_name__icontains=self.search)\n | Q(last_name__icontains=self.search)\n # | Q(state__icontains=self.search)\n # | Q(year__icontains=self.search)\n )\n return qs", "title": "" }, { "docid": "4178b4c4607b67eff33d45ffe74aef64", "score": "0.64196503", "text": "def augment_queryset(self, state, queryset):\n return queryset", "title": "" }, { "docid": "f28b7f633dfabef3a6531647efe18b44", "score": "0.6412965", "text": "def get_query_set(self):\r\n\r\n return self.get_queryset()", "title": "" }, { "docid": "3ca3d4a662737d232509f6045eb03995", "score": "0.6408798", "text": "def filter(self, *args, **kwargs):\n kwargs = self._check_pk_hash(**kwargs)\n return super(BaseQuerySet, 
self).filter(*args, **kwargs)", "title": "" }, { "docid": "4b843f3a28c3c931d9e6371b651f097b", "score": "0.64004195", "text": "def get_queryset(self):\n kwargs = {'model': self.model, 'using': self._db}\n if hasattr(self, '_hints'):\n kwargs['hints'] = self._hints\n\n return self._queryset_class(**kwargs).filter(is_removed=False)", "title": "" }, { "docid": "d41187263f77a15a8f25e6fbb054c5da", "score": "0.63963914", "text": "def build_queryset(self):\r\n model_class = self.sync_job.target_django_model.model_class()\r\n filters = [(i.predicate, i.value) for i in self.sync_job.filters.all()]\r\n if len(filters) > 0:\r\n return model_class.objects.filter(**dict(filters)).all()\r\n else:\r\n return model_class.objects.all()", "title": "" }, { "docid": "eec2fb2ab4c7071ebbef1ec82a31bc63", "score": "0.63941103", "text": "def get_queryset(self):\n\n queryset = self.queryset.filter(owner=self.request.user)\n return queryset", "title": "" }, { "docid": "3377251802679e6859e3094e42595bed", "score": "0.6392279", "text": "def _apply_rel_filters(self, queryset):\n return self.descriptor.filter_queryset_for_instances(queryset, [self.instance])", "title": "" }, { "docid": "f34ccfd32f44983b97068b64ae8f0f13", "score": "0.6390106", "text": "def get_queryset(self):\n return self.model.objects", "title": "" }, { "docid": "44766fcd2dc6ba4d3cb6ae46607dfd67", "score": "0.63862276", "text": "def apply_filters(self, request, applicable_filters):\r\n return self.get_object_list(request).filter(**applicable_filters)", "title": "" }, { "docid": "ec94ddf3ed887ea13a6a87feb737da94", "score": "0.6382643", "text": "def get_queryset(self): # 限制返回结果时可以重写该方法\n return Question.objects.filter(pub_date__lte=timezone.now())", "title": "" }, { "docid": "1131adcea67e6083da541935510330af", "score": "0.63820934", "text": "def get_queryset(self):\n queryset = self.queryset\n query_set = queryset.filter(name=self.request.user.our_company)\n return query_set", "title": "" }, { "docid": "7e781d9d1c42d13c21370ea46c15534f", "score": "0.63818026", "text": "def filter(cls, *args):\n return cls.query.filter(*args)", "title": "" }, { "docid": "2e52207a2edd65efafa1a053f0ff8ad6", "score": "0.6378699", "text": "def get_queryset(self):\n return Recruit.objects.filter(pub_date__lte=timezone.now())", "title": "" }, { "docid": "9049ba857b32fb81c12fff55cd7bbc09", "score": "0.63758117", "text": "def filter(self, **kwargs ):\n pass", "title": "" }, { "docid": "d0ad70ac62b1a2e690ef24cace69dbdf", "score": "0.6373131", "text": "def get_query_set(self):\n return self.get_queryset()", "title": "" }, { "docid": "8a49d2abfe276728dc2df6ba16b2c883", "score": "0.6370073", "text": "def get_queryset(self):\n qs = super().get_queryset()\n qs = qs.filter(approved=True)\n return qs", "title": "" }, { "docid": "b62474378ac92553ccf9ff52408e45dc", "score": "0.6367566", "text": "def get_queryset(self):\n return self.queryset.filter(tenant=self._get_tenant())", "title": "" }, { "docid": "d46ee39d3a620793e3a9ff3d09bbac8a", "score": "0.63640165", "text": "def _filter(self, query, **kwargs):\n query = self._auto_filter(query, **kwargs)\n return query", "title": "" }, { "docid": "26760af3553bf41676f806d3e9d7a76e", "score": "0.63613987", "text": "def queryset(self, request, queryset):\n # Compare the requested value\n # to decide how to filter the queryset.\n if self.value() in ('N',):\n return queryset.filter(is_placed=self.value()) \n elif self.value() == None:\n return queryset.filter(is_placed='Y')", "title": "" }, { "docid": "18d6c3959db0df0b9407efe466ac46fe", "score": 
"0.6358364", "text": "def index_queryset(self, using=None):\n logger.info(\"%s has been searched\",self.get_model())\n return self.get_model().objects.all()\n # return self.get_model().objects.filter(pub_date__lte=datetime.datetime.now())", "title": "" } ]
eea86eec1e8b1516c6844724dff753de
Checks if a user is a staff member in the academic office group. The academic office group only has the permissions to view and respond to transcript requests, except for the student who made them
[ { "docid": "37eb986ac632d23448faf8d27388334b", "score": "0.7403345", "text": "def academic_office_staff_only(function):\n\n def wrap(request, *args, **kwargs):\n staff = Staff.objects.filter(user=request.user).first()\n group = get_object_or_404(Group, name='Academic Office')\n\n # staff might return None and None has no ID\n if staff:\n # checks if the user belongs to the academic office group\n staff_check = group.staff_set.filter(pk=staff.id).first()\n else:\n staff_check = False\n\n if staff_check:\n return function(request, *args, **kwargs)\n else:\n raise PermissionDenied()\n\n return wrap", "title": "" } ]
[ { "docid": "9ed70dacee95781f7afb5723dc3fb0b1", "score": "0.7718486", "text": "def is_staff(self):\n return self.role == 'AD' or self.role == 'SA'", "title": "" }, { "docid": "ede8d602b0890c5162be4ebab03a37b5", "score": "0.75189376", "text": "def is_staff(self):\n return self.staff", "title": "" }, { "docid": "b3821266b13b5cfab71964e38b90805e", "score": "0.75036347", "text": "def is_staff_check(ctx):\r\n return {\"simsvip staff\"} & {role.name.lower() for role in ctx.message.author.roles}", "title": "" }, { "docid": "bde35b5d4ca4009b2768884c9b170e8e", "score": "0.7488345", "text": "def user_is_student_or_acadoffice_staff(function):\n\n def wrap(request, *args, **kwargs):\n staff = Staff.objects.filter(user=request.user).first()\n group = get_object_or_404(Group, name='Academic Office')\n\n # staff might return None and None has no ID\n if staff:\n # checks if the user belongs to the academic office group\n staff_check = group.staff_set.filter(pk=staff.id).first()\n else:\n staff_check = False\n\n if (request.user.is_student or staff_check):\n return function(request, *args, **kwargs)\n else:\n raise PermissionDenied\n\n return wrap", "title": "" }, { "docid": "4f7112a0ca52cbd3d1a1d5961fa1965c", "score": "0.7432018", "text": "def is_staff(self):\n # Simplest possible answer: All admins are staff\n return self.is_admin", "title": "" }, { "docid": "4f7112a0ca52cbd3d1a1d5961fa1965c", "score": "0.7432018", "text": "def is_staff(self):\n # Simplest possible answer: All admins are staff\n return self.is_admin", "title": "" }, { "docid": "ee64bfcc6db0f5232aa3fcaa6fcfc852", "score": "0.73414034", "text": "def user_is_hiccup_staff(user):\n return user.groups.filter(name=FP_STAFF_GROUP_NAME).exists()", "title": "" }, { "docid": "c0512b89c74c041c9a9970d050fb3c19", "score": "0.7217785", "text": "def is_htk_staff(self):\n is_staff = False\n if self.user.is_staff:\n is_staff = True\n else:\n staff_map = get_htk_staff_id_email_map()\n staff_email = staff_map.get(self.user.id)\n if staff_email:\n is_staff = self.has_email(staff_email)\n return is_staff", "title": "" }, { "docid": "4c3c159c409e7a09b4090d6d0ddbd0d3", "score": "0.7200159", "text": "def is_staff(self):\r\n return self.is_admin", "title": "" }, { "docid": "724035847eb2660048e3b68f6e9a80b4", "score": "0.71442556", "text": "def is_staff(self):\n return self.is_admin", "title": "" }, { "docid": "06e425d4513600ecedfea235121180b7", "score": "0.7090631", "text": "def test_can_access_as_staff_user(self):\n resp = self._get_response(self.staff_user, self.user.id)\n self.assertEquals(resp.status_code, 200)", "title": "" }, { "docid": "06e425d4513600ecedfea235121180b7", "score": "0.7090631", "text": "def test_can_access_as_staff_user(self):\n resp = self._get_response(self.staff_user, self.user.id)\n self.assertEquals(resp.status_code, 200)", "title": "" }, { "docid": "69dc23e25fd38672ab80fed429a0ac9d", "score": "0.70743495", "text": "def check_staff(cls, data):\n user = app.App.mongodb.db.staff.find_one({\"employ_id\":data['employ_id']})\n if user == None:\n return True\n else:\n return False", "title": "" }, { "docid": "3efca6ccfa94b79988045ffac4ba6c59", "score": "0.70637923", "text": "def is_staff(request):\n return request.user is not None and request.user.is_staff and request.user.is_authenticated()", "title": "" }, { "docid": "ff94aa5039f191f0fc5f859509d3e402", "score": "0.7020347", "text": "def test_can_access_as_staff_user(self):\n resp = self._get_response(self.staff_user)\n self.assertEquals(resp.status_code, 200)", "title": "" }, { "docid": 
"4781c0a5b4a5f6f24bb5601d537070b3", "score": "0.68768096", "text": "def ensure_users_are_staff(\n cls,\n errors: Dict[str, List[ValidationError]],\n field: str,\n cleaned_input: dict,\n ):\n users = cleaned_input[field]\n non_staff_users = [user.pk for user in users if not user.is_staff]\n if non_staff_users:\n # add error\n ids = [graphene.Node.to_global_id(\"User\", pk) for pk in non_staff_users]\n error_msg = \"User must be staff member.\"\n code = PermissionGroupErrorCode.ASSIGN_NON_STAFF_MEMBER.value\n params = {\"users\": ids}\n cls.update_errors(errors, error_msg, field, code, params)", "title": "" }, { "docid": "02e3dbe197f3cd5b8b8a7d780ad2eb07", "score": "0.6781806", "text": "def has_permission(self, request, view):\n return user_is_hiccup_staff(request.user)", "title": "" }, { "docid": "168d70b1f0803de263152e983b6cdfb5", "score": "0.6583202", "text": "def is_doc(request):\n for group in request.user.groups.all():\n if group.name == \"Doctor\":\n return True\n return False", "title": "" }, { "docid": "477416b569a04fed97f49526af9a526f", "score": "0.647397", "text": "def if_in_staffgroup(self, staff, mvt):\n\t\tanswer = 'TBD'\n\t\tif self.staffgroup == 'no':\n\t\t\tanswer = 'no'\n\t\telse:\n\t\t\tfor i in range(len(self.staffgroup[mvt])):\n\t\t\t\t#interval = self.grandstaff[mvt][i].split('-')\n\t\t\t\tfor j in range(len(self.staffgroup[mvt][0])):\n\t\t\t\t\tstaff_beg = self.staffgroup[mvt][0][j]\n\t\t\t\t\tstaff_end = self.staffgroup[mvt][1][j]\n\t\t\t\t\tif staff in range(int(staff_beg)-1, int(staff_end) -1 + 1):\n\t\t\t\t\t\tanswer = 'yes'\n\t\t\tif answer == 'TBD':\n\t\t\t\tanswer = 'no'\n\t\treturn answer", "title": "" }, { "docid": "14b23d8487b02cf9115218020a43d40c", "score": "0.6416489", "text": "def set_staff_status(self, is_staff):\n try:\n assert self.user and hasattr(self.user, 'is_staff')\n except AssertionError:\n pass\n else:\n self.user.is_staff = is_staff\n self.user.save()\n return self.user", "title": "" }, { "docid": "f8537c3af250097c493994c94ebf91e7", "score": "0.63761824", "text": "def has_permission(self, request):\n return request.user.is_active and request.user.is_staff", "title": "" }, { "docid": "8a5030fcd65da156abd0bcc72fe53790", "score": "0.6330188", "text": "def staff(self):\n return self._staff", "title": "" }, { "docid": "30775a8f36dbd66fe44cb0f1f4b2ed75", "score": "0.62233347", "text": "def _check_staff(self, course):\n names = ['about_course', 'instructor_dashboard', 'progress']\n urls = self._reverse_urls(names, course)\n urls.extend([\n reverse('book', kwargs={'course_id': str(course.id),\n 'book_index': index})\n for index in range(len(course.textbooks))\n ])\n for url in urls:\n self.assert_request_status_code(200, url)\n\n # The student progress tab is not accessible to a student\n # before launch, so the instructor view-as-student feature\n # should return a 404.\n # TODO (vshnayder): If this is not the behavior we want, will need\n # to make access checking smarter and understand both the effective\n # user (the student), and the requesting user (the prof)\n url = reverse(\n 'student_progress',\n kwargs={\n 'course_id': str(course.id),\n 'student_id': self.enrolled_user.id,\n }\n )\n self.assert_request_status_code(302, url)\n\n # The courseware url should redirect, not 200\n url = self._reverse_urls(['courseware'], course)[0]\n self.assert_request_status_code(302, url)", "title": "" }, { "docid": "9536052bcbdf3355f1ad1463bc9452f0", "score": "0.6200692", "text": "def authenticateAsStaff(self):\n user = 
get_user_model().objects.create_user('teststaff', is_staff=True)\n return self.authenticate(user)", "title": "" }, { "docid": "aa17fd04bec97dd64b705443482bfddb", "score": "0.6196512", "text": "def has_module_perms(self, app_label):\n # Second simplest possible answer: yes, if user is staff\n return self.is_staff", "title": "" }, { "docid": "5faaba4bec18ea5cbfa90a8a17818cdf", "score": "0.61775076", "text": "def staff_required(func):\n\n @login_required\n def staff_required_wrapper(request, *args, **kwds):\n if not request.is_staff:\n return HttpTextResponse('You do not have permission to view this page', status=403)\n return func(request, *args, **kwds)\n return staff_required_wrapper", "title": "" }, { "docid": "c51fffe2bed9492639288a829705bbe2", "score": "0.6160979", "text": "def require_global_staff(func):\n @wraps(func)\n def wrapped(request, *args, **kwargs):\n if GlobalStaff().has_user(request.user):\n return func(request, *args, **kwargs)\n else:\n return HttpResponseForbidden(\n \"Must be {platform_name} staff to perform this action.\".format(\n platform_name=settings.PLATFORM_NAME\n )\n )\n return login_required(wrapped)", "title": "" }, { "docid": "5d128198d656174d000455453580446f", "score": "0.6146045", "text": "def has_permission(self, request, view):\n\n return request.user.is_staff == False", "title": "" }, { "docid": "33b15bdf79f74db77330f757197d9fde", "score": "0.61127585", "text": "def set_user_as_staff(sender, instance, action, **kwargs):\n\n\t# Check the type of the Many-To-Many signal\n\tif action in (\"post_add\", \"post_remove\", \"post_clear\",):\n\t\t# User is staff when he belongs to any of the groups\n\t\tif instance.groups.all().count() > 0:\n\t\t\tUser.objects.filter(pk=instance.pk).update(is_staff=True)\n\t\telse:\n\t\t\tUser.objects.filter(pk=instance.pk).update(is_staff=False)", "title": "" }, { "docid": "c2ac3791c15539a077111170dfe994c6", "score": "0.61075675", "text": "def test_different_programs_staff(self):\n program, _ = create_program()\n staff_user = create_enrolled_profile(program, Staff.ROLE_ID).user\n self.client.force_login(staff_user)\n self.make_http_request(self.client.post, self.url, status.HTTP_403_FORBIDDEN, data=self.request_data)", "title": "" }, { "docid": "4a1e025af09080bc8cd6fb7902e8d85c", "score": "0.6097816", "text": "def test_func(self):\n return (self.request.user.pk == self.kwargs['pk']) or (self.request.user.is_staff)", "title": "" }, { "docid": "7192c448003520438b05d98f2f432135", "score": "0.60961485", "text": "def test_get_staff(self):\n httpretty.register_uri(\n httpretty.GET,\n '{0}staff/{1}'.format(\n self.GRADEBOOK_REGISTER_BASE,\n self.GRADEBOOK_ID\n ),\n body=json.dumps(self.STAFF_BODY)\n )\n self._register_get_gradebook()\n gradebook = GradeBook(self.CERT, self.URLBASE, self.GBUUID)\n staff = gradebook.get_staff(self.GRADEBOOK_ID)\n self.assertEqual(staff, self.STAFF_BODY['data'])\n\n # Check simple style\n staff = gradebook.get_staff(self.GRADEBOOK_ID, simple=True)\n expected_staff = gradebook.unravel_staff(self.STAFF_BODY)\n simple_list = []\n for member in expected_staff.__iter__():\n simple_list.append({\n 'accountEmail': member['accountEmail'],\n 'displayName': member['displayName'],\n 'role': member['role'],\n })\n for member in staff:\n self.assertIn(member, simple_list)", "title": "" }, { "docid": "0864292fe9a8d13cf75c674b93be7a85", "score": "0.6091234", "text": "def staff_required(function=None, login_url=None):\n actual_decorator = user_passes_test(\n lambda u: u.is_authenticated() and 
u.get_profile().in_staff_group(),\n login_url=login_url\n )\n if function:\n return actual_decorator(function)\n return actual_decorator", "title": "" }, { "docid": "062db8f0621699d89a4701f0a134ef58", "score": "0.608307", "text": "def is_doctor(user):\n if user:\n return user.groups.filter(name='Doctor').count() != 0\n return False", "title": "" }, { "docid": "ac0f8c8c83e45fb2305d235e58950713", "score": "0.6061863", "text": "def test_learner_view_not_program_staff(self):\n self.client.force_login(self.staff_user)\n # Get rid of existing recipient program enrollment that staff_user has a Staff role in\n ProgramEnrollment.objects.filter(user=self.recipient_user).delete()\n url = reverse(self.url_name, kwargs={'student_id': self.recipient_user.profile.student_id})\n resp_post = self.client.post(url, data={}, format='json')\n assert resp_post.status_code == status.HTTP_403_FORBIDDEN", "title": "" }, { "docid": "28a0b59afe6bc88c63f431b2c3cd4c1f", "score": "0.6044439", "text": "async def _staff(self, ctx):\r\n await self.bot.say('{0.message.author.mention}, you are staff.'.format(ctx))", "title": "" }, { "docid": "0a88b1d2c9d6809f93191c08218fc2ab", "score": "0.60364133", "text": "def premoderate(user, data):\n return user.is_staff", "title": "" }, { "docid": "196ac4775a62d29813546cf81fe97856", "score": "0.6020321", "text": "def check_moderator_permission(user: User, office_id: str) -> None:\n try:\n office = OfficeBuilding.get(office_id)\n except OfficeBuilding.DoesNotExist:\n return\n\n check_user_is_moderator(office.office_moderator_username, user.username)", "title": "" }, { "docid": "8cc2f896dfedac102cd8711d8b7e55e3", "score": "0.60074157", "text": "def staff_required(function=None, redirect_field_name=REDIRECT_FIELD_NAME):\n actual_decorator = user_passes_test(\n lambda u: u.is_staff,\n redirect_field_name=redirect_field_name\n )\n if function:\n return actual_decorator(function)\n return actual_decorator", "title": "" }, { "docid": "b9d93ce3a9c8d96984d5d5f8a26b1335", "score": "0.60028887", "text": "def form_valid(self, form):\n\n if hasattr(form.get_user(), 'staff') and (form.get_user().staff.user_type == 'staff'):\n return super(InstitutionStaffLoginView, self).form_valid(form)\n else:\n invalidInstitution = 'Not an Valid Staff Credentials'\n # if '__all__' in form.errors:\n # form.errors.update({'__all__': form.errors['__all__'] + [invalidInstitution]})\n # else:\n # form.errors.update({'__all__': [invalidInstitution]})\n form.add_error(None, invalidInstitution)\n return super(InstitutionStaffLoginView, self).form_invalid(form)", "title": "" }, { "docid": "f3711c3d8134295e78d69b8935efa0d1", "score": "0.6002248", "text": "def user_is_staff_superuser(function):\n\n def wrap(request, *args, **kwargs):\n if not request.user.is_staff and not request.user.is_superuser:\n raise PermissionDenied\n return function(request, *args, **kwargs)\n\n wrap.__doc__ = function.__doc__\n wrap.__name__ = function.__name__\n return wrap", "title": "" }, { "docid": "6d7f16c1cd9d424d8d448ea80cc893a5", "score": "0.59897524", "text": "def promote_to_staff(user):\n user.is_staff = True\n user.save()", "title": "" }, { "docid": "7a7cbc78b15cd3c65a8459708c9667f0", "score": "0.59787375", "text": "def view(self, user, calendar_permanent_url, *args):\n if user.is_anonymous:\n return False\n\n if user.is_client:\n return False\n\n if user.is_administrator:\n return False\n\n if user.is_manager:\n return False\n\n if user.is_advisor:\n if calendar_permanent_url.user_id is None:\n return True\n elif user.is_advisor and 
user == calendar_permanent_url.user:\n return True\n\n return self.admin_permission(user, calendar_permanent_url, *args)", "title": "" }, { "docid": "bb2631ead65d09d55e9d89ec3dd7c27c", "score": "0.59714", "text": "def user_can_edit_assessment(user, **kwargs):\n return user.is_staff or user.has_perm('assess.change_assessmentrecord')", "title": "" }, { "docid": "36b673c4ee499d3202e5ff81ba8c2cfb", "score": "0.59589815", "text": "def guild_staff_check(cls, func: Callable) -> Callable:\n\n @functools.wraps(func)\n async def wrapped(\n self: \"EventsCog\",\n context: Context,\n *args,\n **kwargs\n ) -> None:\n \"\"\"\n Inner function.\n\n :param self: EventsCog instance\n :param context: Command context\n :param args: arguments\n :param kwargs: Keyword arguments\n \"\"\"\n guild_id = context.guild.id\n if str(guild_id) not in self.guild_event_logs:\n return\n\n # We can assume that the author is a member since the\n # command group is guild only.\n author: Member = context.author\n guild_event_log: GuildEventLog = self.guild_event_logs[\n str(guild_id)\n ]\n staff_role: Role = guild_event_log.staff_role\n\n if staff_role not in author.roles:\n raise OpheliaCommandError(\"events_not_staff\")\n\n return await func(self, context, *args, **kwargs)\n\n return wrapped", "title": "" }, { "docid": "727dcf4bfd8e9ade5acac6409d2ad096", "score": "0.59397286", "text": "def test_attendance_check_view_for_non_staff(self):\n client = Client()\n group = mixer.blend('edziennik.Group')\n response = self.client.get(\n reverse('edziennik:attendance_check', args=(group.id,)))\n self.assertEqual(response.status_code, 404)", "title": "" }, { "docid": "8837f2051de60822068d204a8fbeb462", "score": "0.59181434", "text": "def test_org_staff_access(self):\n self.login(self.org_staff_user)\n url = reverse('instructor_dashboard', kwargs={'course_id': str(self.course.id)})\n self.assert_request_status_code(200, url)\n\n url = reverse('instructor_dashboard', kwargs={'course_id': str(self.test_course.id)})\n self.assert_request_status_code(200, url)\n\n url = reverse('instructor_dashboard', kwargs={'course_id': str(self.other_org_course.id)})\n self.assert_request_status_code(404, url)", "title": "" }, { "docid": "581a7fe8a0789557c28f7e94e19e2a99", "score": "0.59027314", "text": "def has_permission(self, request, view) -> bool:\n if request.method in SAFE_METHODS:\n return True\n return request.user.is_staff", "title": "" }, { "docid": "af640c03ee3e0e26f6f9b9c76457edb7", "score": "0.5882593", "text": "def has_permission(self, request, view):\n if request.method in permissions.SAFE_METHODS:\n return request.user and request.user.is_authenticated\n return request.user and request.user.is_staff", "title": "" }, { "docid": "49241e08684646f9a8263ba7344b36d6", "score": "0.5872566", "text": "def test_is_not_staff_access(self):\n\n self.user.is_staff = False\n\n can_add_company_permission = Permission.objects.get(name='Can add Firma')\n self.user.user_permissions.add(can_add_company_permission)\n self.user.save()\n\n self.client.force_login(self.user, backend=None)\n\n response = self.client.get('/company/company/add/')\n self.assertEqual(response.status_code, 302)", "title": "" }, { "docid": "7396ac31e93ba616417843452ebf45d4", "score": "0.5860241", "text": "def is_allowed(user):\n allowed_group = set(['admin', 'staff'])\n usr = User.objects.get(username=user)\n groups = [ x.name for x in usr.groups.all()]\n if allowed_group.intersection(set(groups)):\n return True\n return False", "title": "" }, { "docid": 
"1eae65b454f2228c53ea0e14735fea07", "score": "0.5823131", "text": "def test_cannot_access_if_not_staff(self):\n resp = self._get_response(self.user)\n self.assertEquals(resp.status_code, 302)\n self.assertEquals(resp.url, reverse('booking:permission_denied'))", "title": "" }, { "docid": "de66e9d79067efdfdc3508aa33e66fdf", "score": "0.5817334", "text": "def test_cannot_access_if_not_staff(self):\n resp = self._get_response(self.user, self.user.id)\n self.assertEquals(resp.status_code, 302)\n self.assertEquals(resp.url, reverse('booking:permission_denied'))", "title": "" }, { "docid": "de66e9d79067efdfdc3508aa33e66fdf", "score": "0.5817334", "text": "def test_cannot_access_if_not_staff(self):\n resp = self._get_response(self.user, self.user.id)\n self.assertEquals(resp.status_code, 302)\n self.assertEquals(resp.url, reverse('booking:permission_denied'))", "title": "" }, { "docid": "786b7aa061067c6a8eb8606c62fa933a", "score": "0.5809222", "text": "def testStaffProfilePermissions(self):\n c = Client()\n c.login(username=\"Dawg\", password='pass')\n resp1 = c.get('/people/Manager/')\n self.assertEqual(resp1.status_code, 200) # access to created profiles\n resp2 = c.get('/people/Dawg/')\n self.assertEqual(resp2.status_code, 200) # can create his own profile\n resp3 = c.get('/people/Manager/edit/')\n self.assertContains(resp3, 'Sorry') # can't edit other profiles\n resp4 = c.get('/people/BigBoss/')\n self.assertContains(resp4, 'Sorry') # can't add other profiles\n c.logout()\n c.login(username='Dawg2', password='pass')\n resp5 = c.get('/people/Dawg2/edit/')\n self.assertContains(resp5, 'You are editing') # can edit own profile\n c.logout()", "title": "" }, { "docid": "7ea2737a88e1c582becc6cc18311e379", "score": "0.5806007", "text": "def in_fisheriescape_edit_group(user):\n if user:\n if in_fisheriescape_admin_group(user) or user.groups.filter(name='fisheriescape_edit').count() != 0:\n return True", "title": "" }, { "docid": "13db659673f91961e8200c5d30fa74f5", "score": "0.5770119", "text": "def can_edit(user, simulation):\n if simulation.user == user or user.is_superuser:\n return True\n else:\n return False", "title": "" }, { "docid": "a5f6d46d3408314f1549804552643572", "score": "0.5746235", "text": "def staff_or_super_required(view_func):\n @wraps(view_func)\n def _checklogin(request, *args, **kwargs):\n if request.user.is_active and (request.user.is_staff or request.user.is_superuser):\n # The user is valid. 
Continue to the admin page.\n return view_func(request, *args, **kwargs)\n\n return login(request)\n return _checklogin", "title": "" }, { "docid": "f19dd8c6df1f5612ecda995c128357eb", "score": "0.57205385", "text": "def get_staff(self):\n return self._staff", "title": "" }, { "docid": "0d46f19cb97b402d35e65008fbfbc6d2", "score": "0.56956565", "text": "def access_denied_no_staff(self, request):\r\n return HttpResponse(\"No Access\")", "title": "" }, { "docid": "c7ae6e10f0e6189f9966d34fe696ab55", "score": "0.56907463", "text": "def test_staff_view(self):\n response = self.client.get('/', app.config['STAFF_SUBDOMAIN'])\n self.assertEqual(response.status_code, 302)", "title": "" }, { "docid": "b9391305c6c337f088322a0521c90805", "score": "0.56741446", "text": "def has_object_permission(self, request, view, obj):\n if not obj.email_optin:\n return False\n\n sender_user = request.user\n recipient_enrolled_program_ids = obj.user.programenrollment_set.values_list('program', flat=True)\n\n # If the sender is a staff/instructor in any of the recipients enrolled programs, the\n # sender has permission\n if sender_user.role_set.filter(\n role__in=[Staff.ROLE_ID, Instructor.ROLE_ID],\n program__id__in=recipient_enrolled_program_ids\n ).exists():\n return True\n\n # If the sender has paid for any course run in any of the recipient's enrolled programs, the\n # sender has permission\n matching_program_enrollments = (\n sender_user.programenrollment_set\n .filter(program__id__in=recipient_enrolled_program_ids)\n .select_related('program').all()\n )\n edx_user_data = CachedEdxUserData(sender_user)\n for program_enrollment in matching_program_enrollments:\n mmtrack = MMTrack(\n sender_user,\n program_enrollment.program,\n edx_user_data\n )\n if mmtrack.has_paid_for_any_in_program():\n return True\n\n return False", "title": "" }, { "docid": "03ce97d80cf5a508d5506c0eee584044", "score": "0.56729084", "text": "def get_possible_staff(id_current_staff, is_list):\n current_staffer = ild.device_staff.get(id_current_staff)\n staff_type = ild.staffers.get(str(current_staffer)).get('~23')\n return grs.get_other_staffers(staff_type, current_staffer, is_list)", "title": "" }, { "docid": "cada352b1d73855689a31fa6cf7354ba", "score": "0.5663215", "text": "def _coach_for_the_learner_group(user, obj):\n if obj is not None and _assert_type(obj, LearnerGroup):\n classroom = obj.classroom()\n return user.is_facility_admin() or (user in [role.user for role in classroom.coaches()])\n else:\n return user.is_facility_admin()", "title": "" }, { "docid": "8b42ccf5cfce75beae6e2bf7a81aaeff", "score": "0.5657093", "text": "def is_nurse(request):\n for group in request.user.groups.all():\n if group.name == \"Nurse\":\n return True\n return False", "title": "" }, { "docid": "1752aabe17b27905cc4a5256a9e746e5", "score": "0.5653363", "text": "def _get_available_staff(self, staff: List):\n \n return [s for s in staff if s._coincides(self.room)]", "title": "" }, { "docid": "592c68f4cfcf7901a0c5be491269c026", "score": "0.5646757", "text": "def staff(self, department):\n path = '/departments/staff'\n params = {'department': department}\n return self._get(path, params=params)", "title": "" }, { "docid": "0f39302b6c8e255caa842242575fc200", "score": "0.56254417", "text": "def has_object_permission(self, request, view, study):\n user = request.user\n if user in study.researchers.all():\n return True\n else:\n return False", "title": "" }, { "docid": "4e42e29a88138ac456f70acabbd8f44e", "score": "0.56223685", "text": "def is_writable_to(self, user):\n 
if self.user is not None:\n return self.user == user\n elif self.group is not None:\n return self.group in user.groups.all()\n else:\n return True", "title": "" }, { "docid": "d486dc8159fa67a6aff34c3891a69dc5", "score": "0.56027967", "text": "def test_create_super_user(self):\n self.assertTrue(self.super_user.is_staff)", "title": "" }, { "docid": "ea6c9b360f44a3f03d48b5c2fc243a50", "score": "0.5577755", "text": "def user_can_create_assessment(user, **kwargs):\n return user.is_staff or user.has_perm('assess.add_assessmentrecord')", "title": "" }, { "docid": "425f474bdfc7732ea08493cf81df81a0", "score": "0.5558828", "text": "def is_admin(user):\n if user:\n return user.groups.filter(name='Admin').count() != 0\n return False", "title": "" }, { "docid": "4beff1d1ffcfadc263f3aef7a30ad2a5", "score": "0.5551983", "text": "def get_editable(self, dispatcher, user):\n if dispatcher.doc.get('locked'): return False\n return user.get('role') == 'admin' and \\\n dispatcher.doc['name'] != 'system'", "title": "" }, { "docid": "630675bbf89f0851ee05cf48168d1ed7", "score": "0.5548789", "text": "def have_permission(self, user):\n return user.is_admin", "title": "" }, { "docid": "a11aa00f8553ff4bebe017f62a804104", "score": "0.5547597", "text": "def get_org_edit_permissions(user):\n return user.is_org_admin", "title": "" }, { "docid": "be8d00003cedda4c2350a5f56e4de2c2", "score": "0.5501724", "text": "def if_has_access(self, user):\n # author, chair, discussant, moderator\n if user in self.authors or user.is_chair(self.conference) or \\\n user.id == self.uploader_id or self.sessions.filter(\n or_(discussant_paper_session.c.user_id == user.id,\n and_(PaperSession.session_id == Session.id,\n moderator_session.c.session_id == Session.id,\n moderator_session.c.user_id == user.id))).first():\n return True\n return False", "title": "" }, { "docid": "c26c67aac8d47c275b7947c58f4fc3e8", "score": "0.5501187", "text": "def staff(self, staff):\n\n self._staff = staff", "title": "" }, { "docid": "e0f2f38dea0ec1c2ea6cf1d151b55921", "score": "0.5500297", "text": "def test_dark_launch_global_staff(self):\n now = datetime.datetime.now(pytz.UTC)\n tomorrow = now + datetime.timedelta(days=1)\n\n self.course.start = tomorrow\n self.test_course.start = tomorrow\n self.course = self.update_course(self.course, self.user.id)\n self.test_course = self.update_course(self.test_course, self.user.id)\n\n self.login(self.global_staff_user)\n self.enroll(self.course, True)\n self.enroll(self.test_course, True)\n\n # and now should be able to load both\n self._check_staff(self.course)\n self._check_staff(self.test_course)", "title": "" }, { "docid": "3ebbe2dcc472c0735e8f5ca754cd56f6", "score": "0.5496188", "text": "def _is_reviewer_or_superuser(user):\n if user.is_superuser:\n return True\n # Raise an exception if the Reviewers group does not\n # exist, because this is a critical problem.\n try:\n reviewers_group = Group.objects.get(name=\"Reviewers\")\n except Group.DoesNotExist:\n raise Exception(\"Reviewers user group does not exist.\")\n return reviewers_group in user.groups.all()", "title": "" }, { "docid": "f0075c8dca9dc2fbf192b9e827cd1d10", "score": "0.5492786", "text": "def is_patient(request):\n for group in request.user.groups.all():\n if group.name == \"Patient\":\n return True\n return False", "title": "" }, { "docid": "067fb3ad99799f4790a710b463b729bc", "score": "0.54893976", "text": "def user_is_allowed_to_view(self):\n\n info = getUtility(IContactInformation)\n\n if info.is_client_assigned():\n return True\n elif 
self._is_user_admin():\n return True\n return False", "title": "" }, { "docid": "425f806d323fcce013eab809a3d10f58", "score": "0.5466786", "text": "def if_in_grandstaff(self, staff, mvt):\n\t\tanswer = 'TBD'\n\t\tif self.grandstaff == 'no':\n\t\t\tanswer = 'no'\n\t\telse:\n\t\t\tfor i in range(len(self.grandstaff[mvt])):\n\t\t\t\t#interval = self.grandstaff[mvt][i].split('-')\n\t\t\t\tfor j in range(len(self.grandstaff[mvt][0])):\n\t\t\t\t\tstaff_beg = self.grandstaff[mvt][0][j]\n\t\t\t\t\tstaff_end = self.grandstaff[mvt][1][j]\n\t\t\t\t\tif staff in range(int(staff_beg)-1, int(staff_end) -1 + 1):\n\t\t\t\t\t\tanswer = ['yes', j]\n\t\t\tif answer == 'TBD':\n\t\t\t\tanswer = 'no'\n\t\treturn answer", "title": "" }, { "docid": "01c15f2751929d7e8c711e1df18629ce", "score": "0.54658854", "text": "def _adviceIsEditableByCurrentUser(self, org_uid):\n # is_complete by default for \"non finances\" advices\n is_complete = True\n if org_uid in finance_group_uids():\n is_complete = self._is_complete()\n return is_complete and super(CustomMeetingItem, self)._adviceIsEditableByCurrentUser(org_uid)", "title": "" }, { "docid": "5051fb3472a8b36dc679bd61de562f74", "score": "0.545331", "text": "def viewable_by(self, user: Optional[User]) -> bool:\n allowed_to_read = True\n if self.groups.exists(): # pylint: disable=E1101\n if not Group.objects.intersection(user.groups, self.groups).exists():\n allowed_to_read = False\n return allowed_to_read", "title": "" }, { "docid": "e74e378f0a165c9a94cfc8e6d487a402", "score": "0.54457724", "text": "def is_admin(self, user):\n if user.is_superuser:\n return True\n\n if user.groups.filter(pk=self.admins_group.pk).exists():\n return True\n\n else:\n return False", "title": "" }, { "docid": "646809b97862806ca8b20e474b0a0412", "score": "0.5444191", "text": "def has_object_permission(self, request, view, obj):\n if request.user.is_staff is True or request.user.is_superuser is True or request.user.id == obj.id:\n return True\n return False", "title": "" }, { "docid": "5f38a0e2b31424fba98cd91ba85db642", "score": "0.543633", "text": "def test_staff_router(self):\n ct = ContentType.objects.get(name='template', app_label='theming')\n Permission.objects.all().delete()\n perm3 = Permission.objects.create(codename='ik_action3', name=\"Can do action 3\", content_type=ct)\n m3 = Member.objects.get(username='member3')\n m3.is_staff = True\n m3.email_verified = True\n m3.save()\n add_permission_to_user(perm3, m3)\n self.client.login(username='member3', password='admin')\n response = self.client.get(reverse('ikwen:staff_router'), follow=True)\n final = response.redirect_chain[-1]\n location = final[0].replace('?splash=yes', '').strip('/').split('/')[-1]\n self.assertEqual(location, 'ikwen-service-2')", "title": "" }, { "docid": "dd125848bd1d4842edd470222cac972a", "score": "0.5430362", "text": "def has_write_permission(request):\n # # query here to see if the current user is an admin for the project connected to the group that they are\n # # trying to write....How to do this\n # group = request.data.get('group')\n\n if request.user.is_superuser or request.user.is_admin():\n return True\n else:\n return False", "title": "" }, { "docid": "f1690b754b02b5a1e7116d6e3376cc14", "score": "0.5421486", "text": "def has_permission(self, request, view):\n feedback = Feedback.objects.filter(saloon__id=view.kwargs['pk'], user=request.user)\n if feedback:\n return False\n return True", "title": "" }, { "docid": "387cf4ac0bec41db1eea2beb6d021d45", "score": "0.54148227", "text": "def 
validate_user(request):\n if not request.user.is_superuser or not request.user.is_staff:\n raise Http404\n\n return True", "title": "" }, { "docid": "f6870800c3a4e706e2afaa284bb183a2", "score": "0.54089737", "text": "def test_404_with_non_staff_user(self):\n # Making sure that user is not a staff / course's staff.\n self.user.is_staff = False\n self.user.save()\n\n # Assert the user's role\n self.assertFalse(self.user.is_staff)\n self.assertFalse(CourseStaffRole(self.course.id).has_user(self.user))\n\n # Now, Make request to deletion handler\n transcript_delete_url = self.get_url_for_course_key(self.course.id, edx_video_id='test_id', language_code='en')\n response = self.client.delete(transcript_delete_url)\n self.assertEqual(response.status_code, 404)", "title": "" }, { "docid": "1fa6b2e8edd83dc25605b7f35de3a46e", "score": "0.54056907", "text": "def test_global_staff_access(self):\n self.login(self.global_staff_user)\n\n # and now should be able to load both\n urls = [reverse('instructor_dashboard', kwargs={'course_id': str(self.course.id)}),\n reverse('instructor_dashboard', kwargs={'course_id': str(self.test_course.id)})]\n\n for url in urls:\n self.assert_request_status_code(200, url)", "title": "" }, { "docid": "26506daaa1551092df292a97a9a043c6", "score": "0.5395496", "text": "def has_object_permission(self, request, view, obj):\n if request.method in permissions.SAFE_METHODS:\n return True\n return obj.id == request.user.id or request.user.is_staff", "title": "" }, { "docid": "fe2aedb7cd8b2108d6b4c6790b7fe6c5", "score": "0.53883994", "text": "def Caduceus_Staff(self):\n\t\tprint(self.name.title() + \" Staff.\")", "title": "" }, { "docid": "7863b899c33f77d40b10f69fb115b6c3", "score": "0.5384721", "text": "def test_create_competence_as_staff(self):\n staff = User.objects.create_user(username=\"staffname\", is_staff=True)\n data = {\n 'name': 'Busskörkort'\n }\n self.client.force_authenticate(user=staff)\n response = self.client.post(self.url, data=data)\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)", "title": "" }, { "docid": "bdfd8a2a3daa68c8a04a2dc88532b1a5", "score": "0.5378981", "text": "def is_nurse(user):\n if user:\n return user.groups.filter(name='Nurse').count() != 0\n return False", "title": "" }, { "docid": "6031f2ff2c319c0b9f44618254ab78d1", "score": "0.5377442", "text": "def test_list_staff_user(self):\n user = self.template_users['staff_user']\n self.client.login(email=user['email'], password=user['password'])\n\n # User orders\n url = reverse(self.list_url_user, kwargs={'user_pk': self.template_users['normal_user1']['id']})\n response = self.client.get(url)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(len(response.data['results']), len(self.objects))\n\n # Company orders\n url = reverse(self.list_url_company, kwargs={'company_pk': self.company['id']})\n response = self.client.get(url)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(len(response.data['results']), len(self.objects))", "title": "" }, { "docid": "0fbf93610c5f5a17cfaf6815c8d80cc0", "score": "0.5375452", "text": "def test_admin_user_only_access(self):\n\n url = V1_REVIEW_LIST_URL\n data = {\"title\": \"New title\"}\n\n reviewers = models.Reviewer.objects.filter(is_staff=False)\n\n create_random_reviews(random.randint(4, 20), reviewers)\n\n all_reviews = models.CompanyReview.objects.all()\n all_users = models.Reviewer.objects.all()\n\n for user in all_users:\n for review in all_reviews:\n 
self.client.force_authenticate(user=user)\n url = reverse(V1_REVIEW_DETAIL, kwargs={\"pk\": review.id})\n\n response = self.client.patch(url, data, format=\"json\")\n\n if user.is_staff:\n self.assertEqual(response.status_code, 200)\n\n else:\n self.assertEqual(response.status_code, 403)", "title": "" }, { "docid": "c48c98078fed1a9adff05a04212fc337", "score": "0.5370931", "text": "def has_permission(self, request, view):\n return request.method in permissions.SAFE_METHODS and request.user.is_consultant()", "title": "" }, { "docid": "80df47b36e22cfd0bfe69d2b0fdf6810", "score": "0.5369014", "text": "def is_member_everyone(target_user, requesting_user):\n return True", "title": "" } ]
d089ca98a7f206ce1b664456d5053423
Write grasp metrics to the database
[ { "docid": "b4e254f3886d18043c38f274672b02c7", "score": "0.61728704", "text": "def write_grasp_metrics(grasp_metric_dict, data, force_overwrite=False):\n for grasp_id, metric_dict in grasp_metric_dict.iteritems():\n grasp_key = GRASP_KEY + '_' + str(grasp_id)\n if grasp_key in data.keys():\n grasp_metric_data = data[grasp_key][GRASP_METRICS_KEY]\n\n for metric_tag, metric in metric_dict.iteritems():\n if metric_tag not in grasp_metric_data.attrs.keys():\n grasp_metric_data.attrs.create(metric_tag, metric)\n elif force_overwrite:\n grasp_metric_data.attrs[metric_tag] = metric\n else:\n logging.warning('Metric %s already exists for grasp %s and overwrite was not requested. Aborting write request' %(metric_tag, grasp_id))\n return False\n return True", "title": "" } ]
[ { "docid": "163855fe95f27b3266f7cf87530e0800", "score": "0.65698516", "text": "def write(self, metrics):\n raise NotImplementedError", "title": "" }, { "docid": "ff5e1267431f6473fa5a4050fcf11be3", "score": "0.64918494", "text": "def put_genomicsMetrics(db, metrics):\n return db, metrics", "title": "" }, { "docid": "5a6e9d86f83cc52cf2d1df303049eb5d", "score": "0.6151252", "text": "def write_to_db():\n pass", "title": "" }, { "docid": "6001a5a65691bcfcbdd1c6931a135c75", "score": "0.60404927", "text": "def write_db_direct(self, data):\n db_client = self.get_db_client(data['metadata']['metrics'])\n\n converted_data = DataConverter.convert_data(data)\n\n if db_client:\n if db_client.write_points(converted_data):\n self.app.logger.info(\n \"Writing to InfluxDB hosted at %s \"\n \"has been successful for %s!\" %\n (self.db_host, data['metadata']['metrics']))\n else:\n self.app.logger.error(\n \"Writing to InfluxDB hosted at %s \"\n \"has FAILED for %s!\" %\n (self.db_host, data['metadata']['metrics']))\n else:\n self.app.logger.error(\n \"%s database not connected..\" %\n data['metadata']['metrics'])", "title": "" }, { "docid": "68d3388831c340b9a210664047d98250", "score": "0.6017884", "text": "def _write_metrics(metrics_mngrs, metrics, round_num):\n loop = asyncio.get_event_loop()\n\n if not isinstance(metrics, dict):\n raise TypeError('metrics should be type `dict`.')\n if not isinstance(round_num, int):\n raise TypeError('round_num should be type `int`.')\n logging.info('Metrics at round {:d}:\\n{!s}'.format(round_num,\n pprint.pformat(metrics)))\n loop.run_until_complete(\n asyncio.gather(*[m.release(metrics, round_num) for m in metrics_mngrs]))", "title": "" }, { "docid": "4deb35dc6b7334f9e190aa2dc429d09c", "score": "0.59698355", "text": "def upload_to_database(self, metric, metric_names, metric_types):\n self.create_table(database_name= self.database_name,\n table_name = self.metric_type,\n column_names = metric_names,\n column_types = metric_types)\n self.insert_into_database(database_name = self.database_name,\n table_name = self.metric_type,\n column_names = ['timestamp', 'metric'],\n values = metric)\n return True", "title": "" }, { "docid": "7e9cf3e5120ffc63c40bf492606deae2", "score": "0.5945567", "text": "def write():\n write_db()", "title": "" }, { "docid": "6a1dfec204351092f99f06d47ec27553", "score": "0.5842387", "text": "def create_metric_values(self):\n metric = Metric()\n self.__get_metrics_values_from_source_code()\n metric = self.__transform_values_into_metric(metric)\n metric.version_id = self.version.version_id\n self.session.add(metric)\n self.session.commit()", "title": "" }, { "docid": "23783b7682750dbc80f035d0de13006b", "score": "0.5779823", "text": "def record_metering_data(self, data):\n with PoolConnection(self.conn_pool) as db:\n db.execute('INSERT INTO samples ')", "title": "" }, { "docid": "0ee3aa8af672603fe1021dd17036be5b", "score": "0.57068247", "text": "def write_metric_definitions(conn: object, metric_definitions: list):\n if not check_table_exist(conn, 'metrics_definition'):\n cols = ('metric_id', 'metric_name', 'description', 'metric_type',\n 'metric_data_type', 'units', 'accuracy', 'sensing_interval',\n 'discrete_values', 'data_type')\n\n metric_definitions_table = [(i['Id'], i['Name'], i['Description'],\n i['MetricType'], i['MetricDataType'], i['Units'], i['Accuracy'], \n i['SensingInterval'], i['DiscreteValues'], \n util.data_type_mapping[i['MetricDataType']])for i in metric_definitions]\n\n # Sort\n metric_definitions_table = 
util.sort_tuple_list(metric_definitions_table)\n \n mgr = CopyManager(conn, 'metrics_definition', cols)\n mgr.copy(metric_definitions_table)\n \n conn.commit()", "title": "" }, { "docid": "e3612545ed41b3f5aa93eb43fd79aa6f", "score": "0.56972015", "text": "def proc_scrape():\n metrics = requests.get('http://192.168.2.151:9445/metrics').text\n collection_ts = int(datetime.utcnow().timestamp())\n records=[]\n for f in text_string_to_metric_families(metrics):\n if 'nvidia_' in f.name:\n for s in f.samples:\n record = {\n \"MeasureName\": '_'.join([f.name, f.type]),\n \"Dimensions\": [{\"Name\" : \"host\", \"Value\" : \"phoebe\"}] + [{\"Name\": k, \"Value\": str(v)} for k,v in s.labels.items()],\n \"MeasureValue\": str(s.value),\n \"Time\" : str(collection_ts),\n \"TimeUnit\" : \"SECONDS\"\n }\n records.append(record)\n else:\n pass\n r = timestream.write_records(\n DatabaseName = db,\n TableName = tbl,\n Records = records\n )\n return(r)", "title": "" }, { "docid": "55b53169a6dc9ebdf76029637a8e80f3", "score": "0.56568635", "text": "def metrics():", "title": "" }, { "docid": "e5fdb5f6b36791276992c1d7405a0042", "score": "0.5644987", "text": "def store_database(\n outputs, database_path, table_name=\"outputs\", scenario=0, run_idx=0, times=None,\n):\n if times:\n outputs.insert(0, column=\"times\", value=times)\n\n if table_name != \"mcmc_run_info\":\n outputs.insert(0, column=\"idx\", value=f\"run_{run_idx}\")\n outputs.insert(1, column=\"Scenario\", value=f\"S_{scenario}\")\n\n store_db = Database(database_path)\n store_db.dump_df(table_name, outputs)", "title": "" }, { "docid": "3dc3c1bca18ee9d25b1fc1fc2af464eb", "score": "0.56437945", "text": "def __save_metrics__(rmses, maes, save_path):\n metrics_results = pd.DataFrame(columns=['RMSE', 'MAE'])\n #rmse = np.mean(rmses)\n #mae = np.mean(maes)\n record = pd.Series([rmses, maes], index=['RMSE', 'MAE'])\n metrics_results = metrics_results.append(record, ignore_index=True)\n save_path = save_path+'metrics_results.csv'\n metrics_results.to_csv(save_path, index=False)\n print(\"Metrics results can be found here: \" + save_path)", "title": "" }, { "docid": "f701f7df69331d2a71eb1a1bb0171133", "score": "0.56424224", "text": "def write_from_db():\n\n\tquery_start_date = datetime(2019,1,1).replace(tzinfo=timezone.utc).timestamp()\n\tquery_end_date =datetime(2020,6,20).replace(tzinfo=timezone.utc).timestamp()\n\tfile = open(DATASETS_PATH + 'dataset.csv', 'w', newline='')\n\tfield_names = [\"time\", \"neg_ns\", \"pos_ns\", \"neutral_ns\",\"sum_pos_ns\",\"sum_neg_ns\",\n\t\"sum_pos_s\",\"sum_neg_s\",\"comp_ns\",\"neg_s\", \"pos_s\", \"neu_s\",\"comp_s\",\"coin\",\"open\",\n\t\"close\", \"high\", \"low\"]\n\twriter = csv.DictWriter(file,field_names)\n\twriter.writeheader()\n\n\tfor crypto in crypto_subreddits.keys():\n\t\tdata = get_time_series_data(crypto, query_start_date, query_end_date)\n\t\n\t\t#write rows to csv file\n\t\tfor key in data.keys():\n\t\t\twriter.writerow(data[key])\n\t\tprint(len(data), \" written from \" + crypto)\n\tfile.close()", "title": "" }, { "docid": "8ab3e82c37637a0e3122f31e4badbc02", "score": "0.56163716", "text": "def save(self, db=None):\n for k, v in self._properties.items():\n metric = self.tme_manager.get(sprint=self.sprint,\n team=self.team, key=k, db=db)\n if not metric:\n metric = self.tme_manager.create(sprint=self.sprint,\n team=self.team, key=k, db=db)\n metric.value = v\n metric.save()\n return True", "title": "" }, { "docid": "ecf3a7e1acbbfd4ddcd0aa4196beabbb", "score": "0.559254", "text": "def 
InsertMetricFile(control, filename):", "title": "" }, { "docid": "bc0602501780ca7d61f31266b7dd6ea6", "score": "0.55788183", "text": "def save(connection, site_metric: dict):\n cursor = connection.cursor()\n try:\n url = site_metric.get('url')\n timestamp = site_metric.get('timestamp')\n status_code = site_metric.get('status_code')\n response_time = site_metric.get('response_time')\n regex_valid = site_metric.get('regex_valid')\n\n query = f'INSERT INTO weblog (url, checked_at, ' \\\n f'status_code, response_time, valid_regex) ' \\\n f'VALUES (\\'{url}\\', \\'{timestamp}\\', {status_code}, ' \\\n f'{response_time}, {regex_valid})'\n\n cursor.execute(query)\n connection.commit()\n logger.info('object saved into database')\n except (Exception, psycopg2.Error) as error:\n connection.rollback()\n raise RuntimeError(f'Consumer Operation Error: {error}') from error\n finally:\n cursor.close()", "title": "" }, { "docid": "50260010a34d94f955728d14989bb676", "score": "0.5559327", "text": "def write_to_db(df):\n\tcon = MySQLdb.connect(user='root',host='127.0.0.1',db='pa')\n\tsql.write_frame(df,con=con,name='pa',if_exists='replace',flavor='mysql')", "title": "" }, { "docid": "b1a2a637f0a43df5af90f2b7e6d793a2", "score": "0.55452704", "text": "def save_metrics(metrics: Dict[str, float], file_path: str) -> None:\n with open(file_path, 'w') as file:\n json.dump(metrics, file)", "title": "" }, { "docid": "4cc4a8d3956576078f2fba0bb152d84f", "score": "0.55340236", "text": "def record_metrics(metrics, args):\n with open('interpretation_metrics/model_metrics_{}'.format(args.file_num), 'a') as f:\n f.write(\"META DATA\\n\")\n f.write(\"---------\\n\")\n f.write(\"Model Name: {}\\n\".format(args.model_name))\n f.write(\"Attack Target: {}\\n\".format(args.attack_target))\n f.write(\"Gradient Model File: {}\\n\".format(args.gradient_model_file))\n f.write(\"Predictive Model File: {}\\n\".format(args.predictive_model_file))\n f.write(\"Baseline Model File: {}\\n\".format(args.baseline_model_file))\n f.write(\"Cuda: {}\\n\".format(args.cuda))\n\n # #############################\n # SIMPLE GRADIENT MODEL METRICS\n # #############################\n\n f.write(\"\\nGradient Combined\\n\")\n f.write(\"----------------------------------------\\n\")\n for key, val in metrics['simple_gradient_combined'].items():\n f.write(\"{}: {:.3f}\\n\".format(key, val))\n\n f.write(\"\\nGradient Regularized\\n\")\n f.write(\"----------------------------------------\\n\")\n for key, val in metrics['simple_gradient_regularized'].items():\n f.write(\"{}: {:.3f}\\n\".format(key, val))\n\n f.write(\"\\nGradient Baseline\\n\")\n f.write(\"----------------------------------------\\n\")\n for key, val in metrics['simple_gradient_baseline'].items():\n f.write(\"{}: {:.3f}\\n\".format(key, val))\n\n f.write(\"\\nGradient Evil Twin\\n\")\n f.write(\"----------------------------------------\\n\")\n for key, val in metrics['simple_gradient_evil_twin'].items():\n f.write(\"{}: {:.3f}\\n\".format(key, val))\n\n f.write(\"\\nGradient Simple Combined\\n\")\n f.write(\"----------------------------------------\\n\")\n for key, val in metrics['simple_gradient_simple_combined'].items():\n f.write(\"{}: {:.3f}\\n\".format(key, val))\n\n # #############################\n # SMOOTH GRADIENT MODEL METRICS\n # #############################\n\n f.write(\"\\nSmoothGrad Combined\\n\")\n f.write(\"----------------------------------------\\n\")\n for key, val in metrics['smooth_gradient_combined'].items():\n f.write(\"{}: {:.3f}\\n\".format(key, val))\n\n 
f.write(\"\\nSmoothGrad Regularized\\n\")\n f.write(\"----------------------------------------\\n\")\n for key, val in metrics['smooth_gradient_regularized'].items():\n f.write(\"{}: {:.3f}\\n\".format(key, val))\n\n f.write(\"\\nSmoothGrad Baseline\\n\")\n f.write(\"----------------------------------------\\n\")\n for key, val in metrics['smooth_gradient_baseline'].items():\n f.write(\"{}: {:.3f}\\n\".format(key, val))\n\n f.write(\"\\nSmoothGrad Evil Twin\\n\")\n f.write(\"----------------------------------------\\n\")\n for key, val in metrics['smooth_gradient_evil_twin'].items():\n f.write(\"{}: {:.3f}\\n\".format(key, val))\n\n f.write(\"\\nSmoothGrad Simple Combined\\n\")\n f.write(\"----------------------------------------\\n\")\n for key, val in metrics['smooth_gradient_simple_combined'].items():\n f.write(\"{}: {:.3f}\\n\".format(key, val))\n\n # #################################\n # INTEGRATED GRADIENT MODEL METRICS\n # #################################\n\n f.write(\"\\nInteGrad Combined\\n\")\n f.write(\"--------------------------------------------\\n\")\n for key, val in metrics['integr_gradient_combined'].items():\n f.write(\"{}: {:.3f}\\n\".format(key, val))\n\n f.write(\"\\nInteGrad Regularized\\n\")\n f.write(\"--------------------------------------------\\n\")\n for key, val in metrics['integr_gradient_regularized'].items():\n f.write(\"{}: {:.3f}\\n\".format(key, val))\n\n f.write(\"\\nInteGrad Baseline\\n\")\n f.write(\"--------------------------------------------\\n\")\n for key, val in metrics['integr_gradient_baseline'].items():\n f.write(\"{}: {:.3f}\\n\".format(key, val))\n\n f.write(\"\\nInteGrad Evil Twin\\n\")\n f.write(\"--------------------------------------------\\n\")\n for key, val in metrics['integr_gradient_evil_twin'].items():\n f.write(\"{}: {:.3f}\\n\".format(key, val))\n\n f.write(\"\\nInteGrad Simple Combined\\n\")\n f.write(\"--------------------------------------------\\n\")\n for key, val in metrics['integr_gradient_simple_combined'].items():\n f.write(\"{}: {:.3f}\\n\".format(key, val))\n\n f.write(\"\\nModel Accuracies\\n\")\n f.write(\"------------------\\n\")\n f.write(\"{}: {:.3f}\\n\".format(\"Combined Model Acc\", metrics['combined_model']['accuracy']))\n f.write(\"{}: {:.3f}\\n\".format(\"Regularized Model Acc\", metrics['regularized_model']['accuracy']))\n f.write(\"{}: {:.3f}\\n\".format(\"Baseline Model Acc\", metrics['baseline_model']['accuracy']))\n f.write(\"{}: {:.3f}\\n\".format(\"Evil Twin Model Acc\", metrics['evil_twin_model']['accuracy']))\n f.write(\"{}: {:.3f}\\n\".format(\"Simple Combined Model Acc\", metrics['simple_combined_model']['accuracy']))", "title": "" }, { "docid": "af5e723f8c01ad596983affb2ee4b722", "score": "0.5515534", "text": "def write_database(data,database,dataout):\n \n if not os.path.exists(database):\n output = FileTools.safe_hdf5_open(database,'w')\n else:\n output = FileTools.safe_hdf5_open(database,'a')\n\n obsid = BaseClasses.DataStructure.getObsID(data)\n\n if obsid in output:\n grp = output[obsid]\n else:\n grp = output.create_group(obsid)\n\n if 'Level3Stats' in grp:\n del grp['Level3Stats']\n stats = grp.create_group('Level3Stats')\n\n for dname, dset in dataout.items():\n if dname in stats:\n del stats[dname]\n stats.create_dataset(dname, data=dset)\n output.close()", "title": "" }, { "docid": "547533eadcd6baada24ab2e932fd1e1e", "score": "0.5485183", "text": "def write_loss(self):\n pd.DataFrame(self.metrics).to_csv(os.path.join(self.save_dir, 'lossfile.csv'))", "title": "" }, { "docid": 
"7ec5667d93c48b7ccb22ad27fba31c45", "score": "0.5473562", "text": "def do_extra(session: scoped_session, sql: str, kind: str, gauge_help: str, reporter: Reporter) -> None:\n\n for metric, count in session.execute(sqlalchemy.text(sql)):\n reporter.do_report(\n str(metric).split(\".\"), count, kind=kind, kind_help=gauge_help, tags={\"metric\": metric}\n )", "title": "" }, { "docid": "b4e591fe339bc6f324e42e1a643efd81", "score": "0.54515225", "text": "def test_measurement_columns(app):\n \n with app.app_context():\n measurement = _get_measurement()\n measurement.value = str(measurement.value) + \"kg\"\n db.session.add(measurement)\n with pytest.raises(StatementError):\n db.session.commit()\n \n db.session.rollback()\n \n measurement = _get_measurement()\n measurement.time = time.time()\n db.session.add(measurement)\n with pytest.raises(StatementError):\n db.session.commit()", "title": "" }, { "docid": "631a2516a99abd12d376ab7626474a58", "score": "0.5443657", "text": "def upload_to_salesforce(self, metric):\n metric_data = {\n \"patientId\": self.database_name[1:],\n \"metricList\": [{\n \"metric\": self.metric_type,\n \"metricValue\": metric[1]}\n ]\n }\n self.sf.apexecute('FMMetrics/insertMetrics', method='POST', data=metric_data)\n return True", "title": "" }, { "docid": "4ea10fb811b6336cc82d566ba9c24c3b", "score": "0.5432976", "text": "def log_metrics(self):\n pass # TODO", "title": "" }, { "docid": "2f2f5cb9e9083b096eeaad1206f03b85", "score": "0.54164493", "text": "def _write_to_tensorboard(self, list_metrics, stage=\"train\"):\n with self.writer.as_default():\n for key, value in list_metrics.items():\n tf.summary.scalar(stage + \"/\" + key, value.result(), step=self.steps)\n self.writer.flush()", "title": "" }, { "docid": "908fa9160d57249d1dfcedc00d4b4e4c", "score": "0.5398339", "text": "def writer(db_usrnm= None, db_psswd=None,db_host=None, db_name=None,action=None):\n engine=create_engine(f'postgresql+psycopg2://{db_usrnm}:{db_psswd}@{db_host}/{db_name}')\n Session=sessionmaker(bind=engine)\n \n Base=declarative_base()\n \n class Event(Base):\n \"\"\"\n Mapper for the events table\n\n \"\"\"\n __tablename__='events'\n event_id=Column(Integer, primary_key=True)\n date =Column(DateTime)\n time =Column(Time)\n location =Column(String)\n title =Column(String)\n surtitle =Column(String)\n subtitle =Column(String)\n artists =Column(String)\n musical_pieces =Column(String) \n additional_information =Column(String)\n biography =Column(String)\n performance_times =Column(String)\n performance_duration =Column(String)\n image_link =Column(String)\n \n s=Session()\n r=s.query(Event).all()\n if action=='elastic' or action=='both':\n es=Elasticsearch({'elasticsearch'})\n \n if not es.indices.exists('events'):\n actions=[{\n '_index':'events',\n '_type':'doc',\n '_source':{\n 'event_id': row.event_id,\n 'date': row.date,\n 'time': row.time if type(row.time) == str else row.time.strftime('%H:%M'),\n 'location': row.location,\n 'title': row.title,\n 'surtitle': row.surtitle,\n 'subtitle': row.subtitle,\n 'artists': row.artists,\n 'musical_pieces': row.musical_pieces,\n 'additional_information': row.additional_information,\n 'biography': row.biography,\n 'performance_times': row.performance_times,\n 'performance_duration': row.performance_duration,\n 'image_link': row.image_link,\n }} for row in r]\n\n helpers.bulk(es,actions)\n if action == 'plot-image' or action=='both':\n plt_dst=os.getcwd()+'/plots/'\n print('plotting in ',plt_dst)\n\n df=pd.read_sql_table('events',engine)\n aggregation={\n 
'Events':pd.NamedAgg(column='event_id',aggfunc='count')\n }\n events_agg=df.groupby(\n pd.Grouper(\n key='date',\n freq='D'\n )\n ).agg(**aggregation)\n fig, ax = plt.subplots(nrows=1,ncols=1, figsize=(12,9))\n\n ax.bar(x=list(map(lambda x: x.strftime(r'%d.%m.%Y'),events_agg.index)), height=events_agg.Events, color='g')\n ax.set_title('Events per day',size='x-large',y=1.05)\n ax.set_xlabel('Date',size='x-large',y=-1.5)\n ax.set_ylabel('Number of events', rotation=90,size='x-large')\n ax.set_ylim([0,5])\n ax.grid(b=True, which='major',axis='y')\n plt.tight_layout(w_pad=0.5)\n\n try:\n plt.savefig(f'{plt_dst}events_per_day_fig.png',edgecolor='w', pad_inches=.5, bbox_inches='tight') \n except FileNotFoundError:\n os.makedirs(plt_dst)\n plt.savefig(f'{plt_dst}events_per_day_fig.png',edgecolor='w', pad_inches=.5, bbox_inches='tight')", "title": "" }, { "docid": "ccf48b213ea3d388ee96b36504d4b0ea", "score": "0.53856194", "text": "def write_metrics(\n model_metrics: dict[str, str | float],\n writers: list[str],\n folder: str | None = None,\n):\n # Write to file as each run is computed\n if model_metrics == {} or model_metrics is None:\n return\n\n # Write to CSV\n metrics_df = pd.DataFrame(model_metrics, index=[0])\n result_folder = Path(\"runs\") if folder is None else Path(f\"runs/{folder}\")\n result_path = result_folder / f\"{model_metrics['model_name']}_{model_metrics['device']}.csv\"\n Path.mkdir(result_path.parent, parents=True, exist_ok=True)\n if not result_path.is_file():\n metrics_df.to_csv(result_path)\n else:\n metrics_df.to_csv(result_path, mode=\"a\", header=False)\n\n if \"tensorboard\" in writers:\n write_to_tensorboard(model_metrics)", "title": "" }, { "docid": "98c9b6d40709646fcf167cf9fee78bdd", "score": "0.53832257", "text": "def set_metric(self, metric, timestamp, host, value):\n new_entry = MetricTable.insert(metric=metric,\n timestamp=timestamp,\n host=host,\n value=value)\n self.session.add(new_entry)\n self.session.commit()", "title": "" }, { "docid": "1d799c28c9c2656a0b4200162792c027", "score": "0.53814185", "text": "def update_metrics(conn, metrics):\n sql = ''' UPDATE metrics\n SET passengers_refused = ? 
,\n passengers_separated = ?'''\n cur = conn.cursor()\n cur.execute(sql, metrics)", "title": "" }, { "docid": "5ef8e7b8b6c0cec17cc9e27d3b0b4ead", "score": "0.5380576", "text": "def write(self, dataframe):", "title": "" }, { "docid": "c26ad9bfea28c2f11d8c3f4e415f7f48", "score": "0.53684664", "text": "def write_summaries(self, log_data, global_step):\n for metrics, data in log_data.items():\n if metrics not in self.writers:\n self.writers[metrics] = self._build_writer(metrics)\n\n name = log_data.get_group_name(metrics) or metrics\n self.writers[metrics].add_scalar(name, log_data[metrics], global_step)", "title": "" }, { "docid": "eb641ad16a4652a3d565c19889e5af49", "score": "0.53674245", "text": "def save_metrics(dest_path: str, metrics: Dict[str, List],\n file_name: str = None):\n if file_name is not None:\n dest_path = os.path.join(dest_path, file_name)\n with open(dest_path, 'w', newline='') as file:\n write = csv.writer(file)\n write.writerow(metrics.keys())\n write.writerows(zip(*metrics.values()))", "title": "" }, { "docid": "6cf064ff43a5f133111e1fa51ca0b6ef", "score": "0.5362528", "text": "def writeToFireBase():\n try:\n collections = COL_TELEMETRY.document(timestampStr).collections()\n for col, sensor in zip(collections, SENSORS):\n for sec in buffer.keys():\n data_per_timeframe = int(buffer[sec][sensor])\n col.document(\"0\").update({\n str(sec) : data_per_timeframe\n })\n buffer.clear()\n print(\"Buffer clear\")\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n print(exc_type, fname, exc_tb.tb_lineno)\n print(e)", "title": "" }, { "docid": "34028c1842fd5bbdf379080651ff5956", "score": "0.5357114", "text": "def test_03_post_metric(self):\n\n print(\"Testing post metric\")\n\n count = SystemMetric.query.count()\n\n request = add_metric()\n\n self.assertIn(\n 'Added metric with ID:',\n request.text\n )\n self.assertEqual(SystemMetric.query.count(), count + 1)", "title": "" }, { "docid": "65ef056d6831feccd9670d5a6f08ce5f", "score": "0.5339377", "text": "def insert_counts():\n log(f'Inserting {DATASET_ID} counts')\n\n sql = \"\"\"\n INSERT INTO counts\n (count_id, event_id, dataset_id, taxon_id, count, count_json)\n SELECT count_id,\n event_id,\n ? 
AS dataset_id,\n taxon_id,\n 1 AS count,\n count_json\n FROM maps_bands\n JOIN events\n ON sta = JSON_EXTRACT(event_json, '$.STA')\n AND net = JSON_EXTRACT(event_json, '$.NET')\n AND date = JSON_EXTRACT(event_json, '$.DATE')\n JOIN taxa USING (spec);\n \"\"\"\n with db.connect() as cxn:\n cxn.execute(sql, (DATASET_ID, ))\n cxn.commit()", "title": "" }, { "docid": "a5c5a1daec7925d1137cdaaf7f5ed2de", "score": "0.53332734", "text": "def saveMetricsResults():\n save_file_path = 'results_metrics.txt'\n print(\"[Results] Logging simulation info to '%s' file...\" % save_file_path)\n\n f = open(save_file_path, 'a')\n f.write(results_data_formatter.format(\n args.model_path,\n test_split,\n abs_rel.mean(),\n sq_rel.mean(),\n rms.mean(),\n log_rms.mean(),\n d1_all.mean(),\n a1.mean(), a2.mean(),\n a3.mean()))\n f.close()", "title": "" }, { "docid": "3f08a5bfdf21cb1e1258f91b8b03e6e8", "score": "0.5325614", "text": "def write_report(self):\n \n person1 = self.database.get_person_from_gramps_id(self.person_id)\n person2 = self.database.get_person_from_gramps_id(self.person2_id)\n self.__process_relationship(person1, person2)\n #(rank, ahnentafel, person_key) = self.__calc_person_key(person)\n #self.__process_person(person, rank, ahnentafel, person_key)", "title": "" }, { "docid": "9b2cd4d65fc29865dede09af01ce0ead", "score": "0.5324289", "text": "def save_metrics_report(score_file:dict, score_file_path:str, indentation=4):\n try:\n with open(score_file_path, \"w\") as f:\n json.dump(score_file, f, indent=indentation)\n logging.info(f\"Score File is saved at {score_file_path}\")\n except Exception as e:\n logging.exception(\"error while saving model: \", e)\n raise e", "title": "" }, { "docid": "3aace6b69ef4d7354b9c04ce43a6bc4e", "score": "0.5267325", "text": "def write(self, meters):\n metrics_str = self.format_metrics(meters)\n self.file.write(metrics_str)", "title": "" }, { "docid": "4da01887139acff62fbd5aef6330a458", "score": "0.52669847", "text": "def save_to_influx(dict):\n data_points = []\n\n measurements = list(dict.keys())\n # device and timestamp are recorded with every measurement\n measurements.remove('device')\n measurements.remove('timestamp')\n\n # build structure to insert into InfluxDB\n for measurement in measurements:\n data = {}\n data['measurement'] = measurement\n data['tags'] = {\n 'device': dict['device']\n }\n # convert string milliseconds to int nano-seconds\n data[\"time\"] = int(float(dict['timestamp']) * 1000) # nano-seconds utc\n data[\"fields\"] = { \"value\": float(dict[measurement])}\n\n print(data)\n data_points.append(data)\n\n # write multiple records at once\n client.write_points(data_points, time_precision='u')\n print(f'Saved {len(data_points)} measurements to InfluxDB')", "title": "" }, { "docid": "01a922152050f505c026352e46082c81", "score": "0.5245547", "text": "def write_to_db(self):\n fileName = Config().dq_db_file_path\n mode = \"update\" if os.path.isfile(fileName) else \"create\"\n\n writer = DQDB(fileName, mode = mode)\n writer.fill(self.runnr, self.getWritableResults())\n writer.close()", "title": "" }, { "docid": "f953e09a9c41382240c1b4618633b023", "score": "0.5233201", "text": "def metric(ctx, experiment):\n ctx.obj[\"experiment\"] = experiment", "title": "" }, { "docid": "25e6f4447426d7fe53183c1ce91968c4", "score": "0.52264816", "text": "def write_metrics(self, testbed_path: str = 'testbed') -> None:\n metrics_start = time.time()\n track_path=str(self.monitor.testbed_exp+\"/\"+self.monitor.exp_id+\"-training_track.txt\")\n 
IO_Functions()._write_list(self.training_track, track_path)\n ### Add elements to json experiment Description architecture\n eda_json = self.monitor.read_eda_json(self.monitor.testbed_exp, self.monitor.exp_id)\n\n ## Add values to platform_parameters\n eda_json['model_hyperparameters']['max_epochs'] = self.max_epochs\n\n ## Add dataset shape as number of records (inputs, targets)\n eda_json['dataset_config']['train_records'] = str(self.data.train_shape)\n eda_json['dataset_config']['valid_records'] = str(self.data.valid_shape)\n eda_json['dataset_config']['test_records'] = str(self.data.test_shape)\n\n ## Add values to platform_parameters\n eda_json['platform_parameters']['processing_mode'] = self.processing_mode\n\n\n ## Add values to results\n eda_json['results']['f1_score_weigted'] = self.test_f1_weighted\n eda_json['results']['f1_score_micro'] = self.test_f1_micro\n eda_json['results']['loss_validation'] = str(self.min_loss)\n eda_json['results']['time_latency'] = self.time_latency\n eda_json['results']['time_dataset'] = self.time_dataset\n eda_json['results']['time_training'] = self.time_training\n eda_json['results']['time_testing'] = self.time_testing\n\n ## End time metrics\n self.time_metrics = time.time()-metrics_start\n eda_json['results']['time_metrics'] = self.time_metrics\n\n ## Serialize the eda json and rewrite the file\n eda_json = json.dumps(eda_json, separators=(',', ': '), indent=2)\n file_path = str(self.monitor.testbed_exp+\"/\"+self.monitor.exp_id+\"-exp_description.json\")\n IO_Functions()._write_file(eda_json, file_path)\n\n\n ## End computational recording\n self.monitor.end_platform_recording()\n \n ## End power recording\n self.monitor.end_power_recording()\n\n sp.Popen([\"mv\",\"log.txt\",str(self.monitor.testbed_exp)])\n sp.Popen([\"mv\",\"F1_data.txt\",str(self.monitor.testbed_exp)])\n logger.info(\"Tesbed directory: {}\".format(self.monitor.testbed_exp))", "title": "" }, { "docid": "1807a7f835810889786d61c35d52200b", "score": "0.52192414", "text": "def writingToDatabase(db_user,db_password,hostname,db_port,db_name,df,table_name):\n try:\n engine = create_engine(f'postgresql://{db_user}:{db_password}@{hostname}:{db_port}/{db_name}')\n df.to_sql(table_name, engine, if_exists='append',index=False)\n print(\"Writting to the Datbase \")\n print(f'{df.shape[0]} records have been written')\n \n\n\n except (Exception, psycopg2.Error) as error :\n print (\"Error while writing data to PostgreSQL DB\", error)", "title": "" }, { "docid": "32c7f0a3751c5122bf94472b010088d5", "score": "0.5219137", "text": "def generate_metric_def_table_sql():\n metric_def_table_sql = \"CREATE TABLE IF NOT EXISTS metrics_definition \\\n (id SERIAL PRIMARY KEY, metric_id TEXT NOT NULL, metric_name TEXT, \\\n description TEXT, metric_type TEXT, metric_data_type TEXT, \\\n units TEXT, accuracy REAL, sensing_interval TEXT, \\\n discrete_values TEXT[], data_type TEXT, UNIQUE (id));\"\n return metric_def_table_sql", "title": "" }, { "docid": "e5d96699c9ae1c3062e6960ebcb37d55", "score": "0.5215024", "text": "def save_statistics(self, filepath=None):\n if filepath is None:\n filepath = os.path.join(self.config['log_dir'],\n 'model_statistics.txt')\n with open(filepath, 'w') as f:\n f.write(self.get_statistics())", "title": "" }, { "docid": "1b351d0ce8acbf5d165b688d28e31b46", "score": "0.52148855", "text": "def export_performance_metrics(filepath, metrics_table, header, sheet_name=\"metrics\"):\r\n\r\n book = xlwt.Workbook() # excel work book\r\n\r\n book = write_table_to_sheet([header] + 
metrics_table, book, sheet_name=sheet_name)\r\n\r\n book.save(filepath)\r\n logger.info(\"Exported per epoch performance metrics in '{}'\".format(filepath))\r\n\r\n return book", "title": "" }, { "docid": "34ccbab1c6ace9d612ca00d8a25ba099", "score": "0.5212526", "text": "def root():\n db.drop_all()\n db.create_all()\n\n # Get data from api, make objects with it, and add to db\n for row in df.index:\n db_comment = Comment(user=df.User[row],text=df.Text[row]) # rating = df.Rating[row]\n db.session.add(db_comment)\n\n db.session.commit()\n return 'Data stored'", "title": "" }, { "docid": "05d5d09b9e861eb50fac409999e73cab", "score": "0.520814", "text": "def write_metrics(self, testbed_path: str = 'testbed') -> None:\n metrics_start = time.time()\n track_path=str(self.monitor.testbed_exp+\"/\"+self.monitor.exp_id+\"-training_track.txt\")\n IO_Functions()._write_list(self.training_track, track_path)\n ### Add elements to json experiment Description architecture\n eda_json = self.monitor.read_eda_json(self.monitor.testbed_exp, self.monitor.exp_id)\n\n ## Add values to platform_parameters\n eda_json['model_hyperparameters']['max_epochs'] = self.max_epochs\n\n ## Add dataset shape as number of records (inputs, targets)\n eda_json['dataset_config']['train_records'] = str(self.data.train_shape)\n eda_json['dataset_config']['valid_records'] = str(self.data.valid_shape)\n eda_json['dataset_config']['test_records'] = str(self.data.test_shape)\n\n ## Add values to platform_parameters\n eda_json['platform_parameters']['processing_mode'] = self.processing_mode\n eda_json['platform_parameters']['gpu_id'] = self.idgpu[0]\n\n ## Add values to results\n eda_json['results']['f1_score_weigted'] = self.test_f1_weighted\n eda_json['results']['f1_score_micro'] = self.test_f1_micro\n eda_json['results']['time_latency'] = self.time_latency\n eda_json['results']['time_dataset'] = self.time_dataset\n eda_json['results']['time_training'] = self.time_training\n eda_json['results']['time_testing'] = self.time_testing\n\n ## End time metrics\n self.time_metrics = time.time()-metrics_start\n eda_json['results']['time_metrics'] = self.time_metrics\n\n ## Serialize the eda json and rewrite the file\n eda_json = json.dumps(eda_json, separators=(',', ': '), indent=2)\n file_path = str(self.monitor.testbed_exp+\"/\"+self.monitor.exp_id+\"-exp_description.json\")\n IO_Functions()._write_file(eda_json, file_path)\n\n\n ## End computational recording\n self.monitor.end_platform_recording()\n \n ## End power recording\n self.monitor.end_power_recording()\n\n logger.info(\"Tesbed directory: {}\".format(self.monitor.testbed_exp))", "title": "" }, { "docid": "079b5a7bb00976106ae79593e8a355a2", "score": "0.52056575", "text": "def _save_tensorboard_summaries(self, iteration, num_episodes,\n average_reward, average_steps_per_second):\n metrics = [('Train/NumEpisodes', num_episodes),\n ('Train/AverageReturns', average_reward),\n ('Train/AverageStepsPerSecond', average_steps_per_second)]\n for name, value in metrics:\n self._summary_writer.scalar(name, value, iteration)\n self._summary_writer.flush()", "title": "" }, { "docid": "60bb0b3485faa493a2ce8e801c00c1b1", "score": "0.52026093", "text": "def log(points):\n log = get_log(\"log\")\n\n conn = db.DB.conn()\n\n ifdb_points = []\n for pt in points:\n # Don't just write anything, pull out only the fields we need.\n ifdb_points.append(\n {\n \"measurement\": pt['measurement'],\n \"tags\": pt['tags'],\n \"fields\": pt['fields'],\n }\n )\n\n try:\n conn.write_points(ifdb_points)\n\n except 
InfluxDBClientError, e:\n if e.code == 404:\n log.warn(\n \"Database {} not present {}. Attempting to create.\".format(\n conn._database, e\n )\n )\n conn.create_database(conn._database)\n log.warn(\"Retrying write to new DB '{}'\".format(conn._database))\n conn.write_points(ifdb_points)", "title": "" }, { "docid": "d13419d31a973980be7d79523c5f468d", "score": "0.5198709", "text": "def write_summaries(self, step, writer, batch_type='training'):", "title": "" }, { "docid": "322d78d960a15791792e5c4b338d88ea", "score": "0.51956266", "text": "def save_data(df, database_filename):\n engine = create_engine(f'sqlite:///{database_filename}')\n df.to_sql('disaster', engine, index=False, if_exists=\"replace\")", "title": "" }, { "docid": "77fe17ce40e8d90fabd3dd0565d9ae1b", "score": "0.5181937", "text": "def write_metric_to_file(filename, percent):\n try:\n with open(filename, 'w') as metric_file:\n metric_file.write(str(percent).strip())\n except IOError:\n print u\"Warning: could not write metric data to {}\".format(filename)", "title": "" }, { "docid": "70430fc6ae72f0171dfe8d0ae73fe9da", "score": "0.5181523", "text": "def record_custom_metrics(metrics):\n newrelic.agent.record_custom_metrics(metrics)", "title": "" }, { "docid": "c3911c0213dc4bbe578e9d521b9c42a6", "score": "0.5175382", "text": "def write_to_tensorboard(\n model_metrics: dict[str, str | float],\n):\n scalar_metrics = {}\n scalar_prefixes: list[str] = []\n string_metrics = {}\n for key, metric in model_metrics.items():\n if isinstance(metric, (int, float, bool)):\n scalar_metrics[key] = metric\n else:\n string_metrics[key] = metric\n scalar_prefixes.append(metric)\n writer = SummaryWriter(f\"runs/{model_metrics['model_name']}_{model_metrics['device']}\")\n for key, metric in model_metrics.items():\n if isinstance(metric, (int, float, bool)):\n scalar_metrics[key.replace(\".\", \"/\")] = metric # need to join by / for tensorboard grouping\n writer.add_scalar(key, metric)\n else:\n if key == \"model_name\":\n continue\n scalar_prefixes.append(metric)\n scalar_prefix: str = \"/\".join(scalar_prefixes)\n for key, metric in scalar_metrics.items():\n writer.add_scalar(scalar_prefix + \"/\" + str(key), metric)\n writer.close()", "title": "" }, { "docid": "527270a712c2dad188f58637f514d5a6", "score": "0.51633537", "text": "def log_metrics(self, metrics_by_name, info):\n for metric_name, metric_ptr in metrics_by_name.items():\n\n if metric_name not in self.saved_metrics:\n self.saved_metrics[metric_name] = {\n \"values\": [],\n \"steps\": [],\n \"timestamps\": [],\n }\n\n self.saved_metrics[metric_name][\"values\"] += metric_ptr[\"values\"]\n self.saved_metrics[metric_name][\"steps\"] += metric_ptr[\"steps\"]\n\n timestamps_norm = [ts.isoformat() for ts in metric_ptr[\"timestamps\"]]\n self.saved_metrics[metric_name][\"timestamps\"] += timestamps_norm\n\n self.save_json(self.saved_metrics, \"metrics.json\")", "title": "" }, { "docid": "fefefcbc95b9b481188a010bd34ad64a", "score": "0.5160489", "text": "def save_data(df, database_filename):\n engine = create_engine('sqlite:///{}'.format(database_filename))\n df.to_sql('Tweets', engine, index=False)", "title": "" }, { "docid": "4453ac2ac30c5806b6365a749f16935e", "score": "0.5152921", "text": "def commit(self, timestamp=None):\n try:\n cursor = self.storage.cursor()\n self.storage.start_transaction()\n\n # take the current timestamp and use it to make this value consistent\n if timestamp is None:\n cursor.execute('SELECT /* mycroft_holmes */ NOW()')\n timestamp = cursor.fetchone()[0]\n\n 
self.logger.info(\"Using timestamp %s\", timestamp)\n\n for feature_id, feature_metrics in self.data.items():\n for (metric, value) in feature_metrics.items():\n self.logger.info(\"Storing %s ...\", (feature_id, metric, value))\n\n cursor.execute(\n 'INSERT INTO /* mycroft_holmes */ features_metrics '\n '(feature, metric, value, timestamp) '\n 'VALUES (%(feature)s, %(metric)s, %(value)s, %(timestamp)s)',\n {\n 'feature': feature_id,\n 'metric': metric,\n 'value': value,\n 'timestamp': timestamp,\n }\n )\n\n # self.logger.debug('SQL: %s', cursor.statement)\n\n self.storage.commit()\n\n self.data = dict()\n self.logger.info('Data has been stored')\n\n except MySqlError as ex:\n self.logger.error('Storage error occured: %s', ex)\n raise ex", "title": "" }, { "docid": "d3729710d248ec4bde5e997838a2e565", "score": "0.5151634", "text": "def write(self):\n print(\"Data will be written to {}\".format(self.filename))\n self.xl_writer.write(self.reader.df)\n print(\"Data is now being written to {}.{} collection in MongoDB\".format(self.collname, self.summarycollname))\n self.mong_writer.write(self.reader.df)\n return", "title": "" }, { "docid": "1ba7098f70f52f68533b2f35cb3af966", "score": "0.51329887", "text": "def _write_db(self):\n self.state.set('spotlight', 'current', self.current_app_index)\n self.state.set('spotlight', 'queue', list(self.queue_data))\n self.state.set('spotlight', 'start_time',\n utctimestamp(self.start_time) if self.start_time is not None else None)\n self.state.set('spotlight', 'reminders', [utctimestamp(t) for t in self.reminders])\n self.state.write()", "title": "" }, { "docid": "83ddfc49c8f86214e306875d9682514a", "score": "0.51316017", "text": "def __add_metric(self, name, table_name):\n self.__metrics.append({'name': name, 'expressions': [{'tableName': table_name, 'columnName': name}]})", "title": "" }, { "docid": "193d07854ad8a795adccb976519ccdc6", "score": "0.5127245", "text": "def write(self, filename = \"index.txt\"):\n from tabulate import tabulate\n with open(filename, \"w\") as f:\n f.write(tabulate(self.problems, headers = \"keys\", tablefmt = \"psql\"))", "title": "" }, { "docid": "5237486d9e58ffa7d3c5105ab94e9ea2", "score": "0.5127164", "text": "def test_mongodb_save_basic_db(mongo_database):\n # Load DB\n mongodb = MongoDB(HWPCReport, MONGO_URI, MONGO_DATABASE_NAME, MONGO_INPUT_COLLECTION_NAME)\n\n mongodb.connect()\n\n # Check if save work\n basic_count = mongodb.collection.count_documents({})\n for report in gen_HWPCReports(2):\n mongodb.save(report)\n assert mongodb.collection.count_documents({}) == basic_count + 2", "title": "" }, { "docid": "383f78a3430eb69213bae09fc3947c8f", "score": "0.510998", "text": "def writeMonitoringData(rrd_data):\n with open(MONITORING, 'w') as f:\n ts = time()\n f.write(datetime.datetime.fromtimestamp(ts).strftime(\"%H:%M:%S:\") + str(ts) + \":\" + rrd_data)", "title": "" }, { "docid": "a710323516f2626325b2c1954f147ba4", "score": "0.510708", "text": "def save_db(df, db, scrapped_path, project_name):\n\n # Depending on project some structuration has to be done so that scrapped data fit database\n renaming_dict = {\n 'LBC':{'id_':'id_annonce', 'url':'url_annonce', 'description':'descr', 'date_absolue':'date_annonce'},\n 'PV':{'id_':'id_annonce', 'url':'url_annonce', 'date_absolue':'date_annonce'}\n }\n\n # Make the modification on the dataset\n now = f.get_now(original=True)\n df = df.rename(columns=renaming_dict[project_name])\n df['date_scrap'] = now\n df['processed'] = 0\n\n # Execute the insertion\n nb_lines = df.shape[0]\n if 
nb_lines > 0:\n table_name = f.get_raw_tablename(project_name) #raw_{}'.format(project_name.lower())\n db.execute_sql_insert(df, table_name)\n\n # Delete source file\n os.remove(path_source)\n \n # Display results\n print('> {} lines - new data {} saved.'.format(nb_lines, project_name))", "title": "" }, { "docid": "910eda23f27dabcd95c0c47da5e05556", "score": "0.5087439", "text": "def post_measurement(database: Database) -> dict:\n measurement = dict(bottle.request.json)\n metric_uuid = measurement[\"metric_uuid\"]\n if not (metric := latest_metric(database, metric_uuid)): # pylint: disable=superfluous-parens\n return dict(ok=False) # Metric does not exist, must've been deleted while being measured\n data_model = latest_datamodel(database)\n if latest := latest_measurement(database, metric_uuid):\n latest_successful = latest_successful_measurement(database, metric_uuid)\n latest_sources = latest_successful[\"sources\"] if latest_successful else latest[\"sources\"]\n copy_entity_user_data(latest_sources, measurement[\"sources\"])\n if not debt_target_expired(data_model, metric, latest) and latest[\"sources\"] == measurement[\"sources\"]:\n # If the new measurement is equal to the previous one, merge them together\n update_measurement_end(database, latest[\"_id\"])\n return dict(ok=True)\n return insert_new_measurement(database, data_model, metric, measurement, latest)", "title": "" }, { "docid": "75c593deeafc7e26e682df5a04d1dcba", "score": "0.5085263", "text": "def save_metrics(evaluation: Dict[str, Any], output_file: str):\n evaluation_dict = {k: v.__dict__ for k, v in evaluation.items()}\n\n with open(output_file, \"w\") as f:\n json.dump(evaluation_dict, f, indent=4)\n\n logger.info(\"Metric | Correct | Gold | Predicted | Aligned\")\n logger.info(\"-----------+-----------+-----------+-----------+-----------\")\n for metric in [\"Tokens\", \"Sentences\", \"Words\", \"UPOS\", \"XPOS\", \"UFeats\",\n \"AllTags\", \"Lemmas\", \"UAS\", \"LAS\", \"CLAS\", \"MLAS\", \"BLEX\"]:\n logger.info(\"{:11}|{:10.2f} |{:10.2f} |{:10.2f} |{}\".format(\n metric,\n 100 * evaluation[metric].precision,\n 100 * evaluation[metric].recall,\n 100 * evaluation[metric].f1,\n \"{:10.2f}\".format(100 * evaluation[metric].aligned_accuracy)\n if evaluation[metric].aligned_accuracy is not None else \"\"))", "title": "" }, { "docid": "8885289ed740afaa8dba68c8607593b2", "score": "0.5084035", "text": "def run_and_save_evaluation_metrics(df, target, model_uid, evaluation_list, db_schema_name, db_conn, log_to_db):\n main_df = pd.DataFrame()\n for evaluation_config in evaluation_list:\n temp_df = _evaluate_model(df[target], df[evaluation_config.evaluation_column],\n evaluation_config.scorer_callable, evaluation_config.metric_name)\n main_df = pd.concat([main_df, temp_df], axis=1)\n main_df = main_df.T\n main_df.reset_index(inplace=True)\n main_df.columns = ['scoring_metric', 'holdout_score']\n main_df['model_uid'] = model_uid\n main_df['holdout_type'] = 'test'\n if log_to_db:\n log_model_scores_to_mysql(main_df, db_schema_name, db_conn)\n main_df.to_csv(os.path.join('modeling', model_uid, 'diagnostics', 'evaluation_files', 'evaluation_scores.csv'),\n index=False)", "title": "" }, { "docid": "652df0a8d8b2525ca816f235d6579404", "score": "0.50831884", "text": "def write_dhw_sink_to_db(\n db_access,\n name,\n coord,\n heat_diss_tot_value,\n heat_diss_tot_value_unit,\n spatial_reference_id,\n network_id,\n network_graph_id ):\n\n appliance_id = db_access.add_citydb_object(\n insert_dhw_facilities,\n name = 'dhw_facility_' + name,\n 
heat_diss_tot_value = heat_diss_tot_value,\n heat_diss_tot_value_unit = heat_diss_tot_value_unit\n )\n\n node = write_terminal_element_to_db(\n db_access,\n name,\n coord,\n 'thermal-sink',\n spatial_reference_id,\n network_id,\n network_graph_id,\n cityobject_id = appliance_id\n )\n\n return node", "title": "" }, { "docid": "b13e12c5908a7fb878fd281888230f9c", "score": "0.50764656", "text": "def _save_tensorboard_summaries(self, iteration,\n num_episodes_train,\n average_reward_train,\n num_episodes_eval,\n average_reward_eval,\n average_steps_per_second):\n metrics = [('Train/NumEpisodes', num_episodes_train),\n ('Train/AverageReturns', average_reward_train),\n ('Train/AverageStepsPerSecond', average_steps_per_second),\n ('Eval/NumEpisodes', num_episodes_eval),\n ('Eval/AverageReturns', average_reward_eval)]\n for name, value in metrics:\n self._summary_writer.scalar(name, value, iteration)\n self._summary_writer.flush()", "title": "" }, { "docid": "38921f4c9b546f569b64469979b0d75a", "score": "0.5072812", "text": "def _write_data(self, step, dataset, eval_time, train_time=None,\n additional_losses=None):\n assert dataset in self.datasets, \"unknown dataset \"+str(dataset)\n\n # Write all the values to the file\n with self.writer.as_default():\n for key, metric in self.batch_metrics[dataset].items():\n tf.summary.scalar(key, metric.result(), step=step)\n\n for key, metric in self.per_class_metrics[dataset].items():\n tf.summary.scalar(key, metric.result(), step=step)\n\n for key, metric in self.losses[dataset].items():\n tf.summary.scalar(key, metric.result(), step=step)\n\n # Any other losses\n if additional_losses is not None:\n names, values = additional_losses\n\n for i, name in enumerate(names):\n # If TensorFlow string (when using tf.function), get the\n # value from it\n if not isinstance(name, str):\n name = name.numpy().decode(\"utf-8\")\n\n tf.summary.scalar(\"loss/%s\"%(name), values[i], step=step)\n\n # Regardless of mapping/task, log times\n tf.summary.scalar(\"step_time/metrics/%s\"%(dataset), eval_time, step=step)\n\n if train_time is not None:\n tf.summary.scalar(\"step_time/%s\"%(dataset), train_time, step=step)\n\n # Make sure we sync to disk\n self.writer.flush()", "title": "" }, { "docid": "962d3e479052898d9a6b89e8e3c9b70c", "score": "0.5065907", "text": "def _save_sql(self, path):\n s = json.dumps({\"name\": self.name})\n df = pd.DataFrame(\n [[pd.Timestamp(\"1990-01-01\"), 0, s, 0]],\n columns=[\"date\", \"netvalue\", \"comment\", \"totvalue\"],\n )\n df = df.append(self.price, ignore_index=True, sort=True)\n df.sort_index(axis=1).to_sql(\n \"xa\" + self.code, con=path, if_exists=\"replace\", index=False\n )", "title": "" }, { "docid": "930805a8c52a62d469f159c2326a6946", "score": "0.50630057", "text": "def _save_sql(self, path):\n s = json.dumps(\n {\n \"feeinfo\": self.feeinfo,\n \"name\": self.name,\n \"rate\": self.rate,\n \"segment\": self.segment,\n }\n )\n df = pd.DataFrame(\n [[pd.Timestamp(\"1990-01-01\"), 0, s, 0]],\n columns=[\"date\", \"netvalue\", \"comment\", \"totvalue\"],\n )\n df = df.append(self.price, ignore_index=True, sort=True)\n df.sort_index(axis=1).to_sql(\n \"xa\" + self.code, con=path, if_exists=\"replace\", index=False\n )", "title": "" }, { "docid": "58061fee8787ff14ec186d50fcd21d5c", "score": "0.5060511", "text": "def record(details):\n string = \"<{}>\\n\\tDate and time of test: {}\\n\\tTotal time: {}\\n\\tTotal updates: {}\\n\\tAverage update time: {}\\n\\n\";\n with open(\"performance_tests.txt\", \"a\") as file:\n 
file.write(string.format(details[\"description\"], datetime.now(), details[\"total time\"],\n details[\"total updates\"], details[\"average update time\"]));", "title": "" }, { "docid": "e559960fd842510218481529807e1354", "score": "0.5059203", "text": "def dump_metrics(path, **kwargs):\n metrics = {name: value for name, value in kwargs.items()}\n with open(path, 'w') as metrics_file:\n json.dump(metrics, metrics_file)", "title": "" }, { "docid": "71eeb06d0043a19dda7e893040ede59c", "score": "0.505472", "text": "def write_df(*args: Any) -> None:\n raise NotImplementedError(\n \"write_df() was renamed to write_data_file() in tensorboard-reducer v0.2.8\"\n )", "title": "" }, { "docid": "d890f4a6a256914838cd897da36191e6", "score": "0.50469923", "text": "def write_training_time(args):\n train_log = pd.read_pickle(os.path.join(args.temp_dir, 'log.pkl'))\n try:#if the log doesn't have a timereqd column, make one and save it\n _ =train_log['timereqd']\n except KeyError:\n print('log does not have timereqd column. Making one')\n train_log['timereqd'] = get_training_time(train_log)\n\n with open(args.temp_dir + '/log.pkl', 'wb') as f:\n pickle.dump(train_log, f, pickle.HIGHEST_PROTOCOL)", "title": "" }, { "docid": "a1d32365f090fbc9bf45614c88ef51c0", "score": "0.5024392", "text": "def save_report(self, tracked_agent: PerformanceTracker):\n # Table #\n self.save_table(tracked_agent)\n\n # Figures #\n self.save_figures(tracked_agent)", "title": "" }, { "docid": "7374c15b1511d2ff6c1ffad8a6c64c6a", "score": "0.50242394", "text": "def export(self, name, columns, points):\n data = [\n {\n \"name\": name,\n \"columns\": columns,\n \"points\": [points]\n }]\n try:\n self.client.write_points(data)\n except Exception as e:\n logger.error(\"Can not export stats to InfluxDB (%s)\" % e)", "title": "" }, { "docid": "af07895582d44370f229c7a0f371dfc0", "score": "0.5023863", "text": "def save_data(df, database_filename):\n engine = create_engine('sqlite:///../data/DisasterResponse.db')\n df.to_sql('disaster', engine, index=False, if_exists='replace')", "title": "" }, { "docid": "87bc2e81d1b3723c3d97779fa6d5b06c", "score": "0.5020676", "text": "def save_performance_samples(self, path, data):\n all_values = [v.value for v in data.value]\n samples = zip(data.sampleInfo, *all_values)\n\n with open(path, 'a') as f:\n for sample in samples:\n timestamp, values = sample[0].timestamp, sample[1:]\n if self.counter.unitInfo.key == 'percent':\n f.write('{},{}\\n'.format(str(timestamp), ','.join([str(v / 100) for v in values])))\n else:\n f.write('{},{}\\n'.format(str(timestamp), ','.join([str(v) for v in values])))", "title": "" }, { "docid": "2e2f744f82a96ee63379af91e24e3640", "score": "0.50200033", "text": "def report_metrics(data, groups):\n for group_name, pattern in groups.iteritems():\n metric = 'test_eng.coverage.{group}'.format(group=group_name.replace(' ', '_'))\n percent = data.coverage(pattern)\n\n if percent is not None:\n print u\"Writing to file {} ==> {}%.\".format(metric, percent)\n write_metric_to_file(metric, percent)", "title": "" }, { "docid": "b7c25c364ff1c2c4a60288c823f39b35", "score": "0.50140566", "text": "def send_metric(counters: List[SFX_OUTPUT_TYPE] = [], gauges: List[SFX_OUTPUT_TYPE] = []):\n if ingest:\n logging.debug(\"Counters to send: %s\", counters)\n logging.debug(\"Gauges to send: %s\", gauges)\n ingest.send(counters=map_datapoints(counters), gauges=map_datapoints(gauges))", "title": "" }, { "docid": "7d9b0a44fbb351c0a6e9422a277de8fe", "score": "0.50082034", "text": "def write_to_db(self) -> 
None:\n self.__handler.load_tweets_batch(self.__data[self.__DATA_TWEETS])\n self.__data[self.__DATA_TWEETS] = []\n\n # TODO Problematica catena di emoji, missclassification.\n self.__handler.load_tokens_batch(self.__data[self.__DATA_TOKENS])\n self.__data[self.__DATA_TOKENS] = []\n\n self.__handler.load_contained_ins_batch(\n self.__data[self.__DATA_CONTAINED_INS])\n self.__data[self.__DATA_CONTAINED_INS] = []", "title": "" }, { "docid": "48650034f1b3d148505c8be266e4edb1", "score": "0.5007238", "text": "def write(self, data, filename, db_name, write_mode=\"a\", dtype=\"float\"):\r\n self._check_directory(filename) \r\n # todo : overwrite check\r\n db = h5py.File(filename, write_mode)\r\n dataset = db.create_dataset(db_name, data.shape, dtype=dtype)\r\n dataset[:] = data[:]\r\n db.close()", "title": "" }, { "docid": "93fdec3501aec9abaeef9dafd80601d3", "score": "0.50052786", "text": "def export(self, name, columns, points):\n logger.debug(\"Export {} stats to InfluxDB\".format(name))\n # Manage prefix\n if self.prefix is not None:\n name = self.prefix + '.' + name\n # Create DB input\n if self.version == INFLUXDB_08:\n data = [{'name': name, 'columns': columns, 'points': [points]}]\n else:\n # Convert all int to float (mandatory for InfluxDB>0.9.2)\n # Correct issue#750 and issue#749\n for i, _ in enumerate(points):\n try:\n points[i] = float(points[i])\n except (TypeError, ValueError) as e:\n logger.debug(\"InfluxDB error during stat convertion %s=%s (%s)\" % (columns[i], points[i], e))\n\n data = [{'measurement': name,\n 'tags': self.parse_tags(self.tags),\n 'fields': dict(zip(columns, points))}]\n # Write input to the InfluxDB database\n try:\n self.client.write_points(data)\n except Exception as e:\n logger.error(\"Cannot export {} stats to InfluxDB ({})\".format(name, e))", "title": "" }, { "docid": "270125db2fd0f9906f54f03be2772d5e", "score": "0.5004135", "text": "def importMetrics(self):\n\n self.v2Metrics = self.metricDefintionV2(self.metrics)\n if self.v2Metrics:\n metrics = self.metrics\n\n else:\n metrics = self.metrics['result']\n\n # Loop through the metrics and call the API\n # to create/update\n for m in metrics:\n if self.v2Metrics:\n metric = metrics[m]\n metric['name'] = m\n else:\n metric = m\n self.createUpdate(metric)", "title": "" }, { "docid": "c00c13f8c1df3b65c3d2124a1a94be8b", "score": "0.500408", "text": "def log_metrics(\n cfg: Config,\n model,\n data: DatasetTriplet,\n step: int,\n save_to_csv: Optional[Path] = None,\n cluster_test_metrics: Optional[Dict[str, float]] = None,\n cluster_context_metrics: Optional[Dict[str, float]] = None,\n) -> None:\n model.eval()\n\n log.info(\"Encoding training set...\")\n train_inv_s = encode_dataset(\n cfg, data.train, model, recons=cfg.fdm.eval_on_recon, invariant_to=\"s\"\n )\n if cfg.fdm.eval_on_recon:\n # don't encode test dataset\n test_repr = data.test\n else:\n test_repr = encode_dataset(cfg, data.test, model, recons=False, invariant_to=\"s\")\n\n log.info(\"\\nComputing metrics...\")\n evaluate(\n cfg,\n step,\n train_inv_s,\n test_repr,\n name=\"x_zero_s\",\n eval_on_recon=cfg.fdm.eval_on_recon,\n pred_s=False,\n save_to_csv=save_to_csv,\n cluster_test_metrics=cluster_test_metrics,\n cluster_context_metrics=cluster_context_metrics,\n )", "title": "" }, { "docid": "b6c744d80c0cdda9b6dafed1ab27602e", "score": "0.4996482", "text": "def write(step, meta_loss, loss, accuracy, losses, accuracies, f):\n lstr = \"\"\n for l in losses:\n lstr += \"{:f};\".format(l)\n\n astr = \"\"\n for a in accuracies:\n astr += 
\"{:f};\".format(a)\n\n msg = \"{:d},{:f},{:f},{:f},{:s},{:s}\\n\".format(\n step, meta_loss, loss, accuracy, lstr, astr)\n\n with open(f, 'a') as fo:\n fo.write(msg)", "title": "" }, { "docid": "df8b2a04b9e2cdbf8433f2a8fa62e4a8", "score": "0.49936584", "text": "def __init__(self, db: \"hatspil.db.Db\") -> None:\n self.db = db\n self.collection = Collection(db, \"picard_metrics\")", "title": "" }, { "docid": "ec886ab35213b4faf9597bf1256ece80", "score": "0.4992751", "text": "def write(self,email,series):\n try:\n with self.connection.cursor() as cursor:\n # Create a new record\n sql = \"INSERT INTO `Entries` (`email`, `series`) VALUES (%s, %s)\"\n for tv_series in series:\n cursor.execute(sql, (email, tv_series.strip()))\n\n # connection is not autocommit by default. So you must commit to save\n # your changes.\n self.connection.commit()\n finally:\n self.connection.close()", "title": "" }, { "docid": "08cda6e33a4cbc8be430e9cb362cd4eb", "score": "0.49916524", "text": "def write(self, batch):\n for k, v in batch:\n self.db[k] = v", "title": "" }, { "docid": "40aaadcd086e968b7c846b6ce943e3d0", "score": "0.49860382", "text": "def log_data_stats(engine, fp=None, **kwargs):\n assert fp\n x, y = engine.state.batch\n output = {\n \"batch xmean\": x.mean().item(),\n \"batch xstd\": x.std().item(),\n \"batch ymedian\": y.median().item(),\n }\n output_items = \" - \".join([f\"{m}:{v:.4f}\" for m, v in output.items()])\n msg = f\"{engine.state.epoch} | {engine.state.iteration}: {output_items}\"\n\n with open(fp, \"a\") as h:\n h.write(msg)\n h.write(\"\\n\")", "title": "" }, { "docid": "495cc6a1bcbedd742637e079aa16d9b8", "score": "0.49838138", "text": "def write_to_file(model_path,sensitivity,specifity,accuracy,f1_score,mcc):\n fd = open(\"RNN_model_performance\",\"a+\")\n fd.write(str(time.time())+model_path + \"performance:\")\n fd.write(\"\\n\")\n fd.write(\"sensitivity:{}\\n\".format(sensitivity))\n fd.write(\"specifity:{}\\n\".format(specifity))\n fd.write(\"accuracy:{}\\n\".format(accuracy))\n fd.write(\"f1_score:{}\\n\".format(f1_score))\n fd.write(\"mcc:{}\\n\".format(mcc))\n fd.write(\"\\n\\n\")\n fd.close()", "title": "" }, { "docid": "447f99f34e105bd368f21ec16aa5b73d", "score": "0.49756965", "text": "def save_data(df, database_filename):\n engine = create_engine('sqlite:///{}'.format(database_filename))\n df.to_sql('mess_cat', engine, index=False)", "title": "" }, { "docid": "0c5201dcce02c8d601ff1e01ada3ab62", "score": "0.49660218", "text": "def write_values(self, inputs):\n query = 'INSERT INTO Barometer(Datetime, Temperature, Pressure) VALUES(?,?,?)'\n self.cursor.execute(query, (inputs))\n self.conn.commit()", "title": "" } ]
ffef628e950fce7d8be4f34b8515a83f
If no cars created, an appropriate message is displayed
[ { "docid": "ec556e3ffac45418701b4b8e014dfa8c", "score": "0.48073006", "text": "def test_context_data(self):\n response = self.client.get(reverse('popular'))\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, 'No cars added')\n self.assertQuerysetEqual(response.context['cars'], [])", "title": "" } ]
[ { "docid": "bdd0de10bfc411623eb167a11b334115", "score": "0.6560931", "text": "def display_no_car(form):\n return render_template(\"/customer/car_view.html\", cars=[], form=form, start_date=\"\", end_date=\"\")", "title": "" }, { "docid": "501ba74b54ecd9bb6f4aa11bed9e743a", "score": "0.63950825", "text": "def test_rating_view_no_cars(self):\n response = self.client.get(reverse('rate-car'))\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, 'Please add a car in order to give rate it')", "title": "" }, { "docid": "96668cc2b8135b47276232ecb8403de7", "score": "0.6317706", "text": "def admin_cars_create():\n if not current_user.isAdmin():\n abort(403)\n form = CarForm(request.form)\n if form.validate_on_submit():\n car = Car(make=form.make.data, \n color=form.color.data,\n body_type=form.body_type.data,\n seats=form.seats.data,\n cost_per_hour=form.cost_per_hour.data)\n # Insert the record in our database and commit it\n db.session.add(car)\n db.session.commit()\n flash('Car added.')\n return redirect(url_for('users.admin_cars_create'))\n # redirect user to the 'home' method of the user module. \n return render_template(\"users/admin/cars-create.html\", form=form)", "title": "" }, { "docid": "099be13744500f4e4ca83e20e85a2e96", "score": "0.6233999", "text": "def add_car(self, car):\r\n #Remember to consider all the reasons adding a car can fail.\r\n #You may assume the car is a legal car object following the API.\r\n # implement your code and erase the \"pass\"\r\n pass", "title": "" }, { "docid": "eed109ee65beaa92e6f6cfe18e146d67", "score": "0.6151259", "text": "def add_car(self, car):\n\n if car.get_name() in self.__cars:\n return False\n\n # Check that all the cells of the vehicle are exist and empty-\n for one_loc in car.car_coordinates():\n if one_loc not in self.cell_list():\n return False\n if self.cell_content(one_loc):\n return False\n\n for one_loc in car.car_coordinates():\n row, col = one_loc\n self.__board[row][col] = car.get_name()\n\n car_name = car.get_name()\n self.__cars[car_name] = car\n return True", "title": "" }, { "docid": "e35bb579457e9474b8b31299c3f25db0", "score": "0.61472803", "text": "def test_car_invalid_post_data_empty_fields(self):\n url = reverse('create-car')\n data = {\n 'make': '',\n 'model': ''\n }\n response = self.client.post(url, data)\n self.assertEquals(response.status_code, 200)\n self.assertFalse(Car.objects.exists())", "title": "" }, { "docid": "21937a34dc9377f0f6010536b4c26b8e", "score": "0.61160785", "text": "def check_for_cars(self):\n current_cars = self.outer_boundary.collidelistall(simulation.cars)\n current_cars = set([simulation.cars[c] for c in current_cars])\n cars_incoming = current_cars - self.cars\n cars_outgoing = self.cars - current_cars\n if cars_incoming:\n for car in cars_incoming:\n #car.change_color('instructions')\n car.render(simulation.screen)\n self.controller.reserve_spot(car)\n if cars_outgoing:\n for car in cars_outgoing:\n #car.change_color('exit')\n car.render(simulation.screen)\n self.controller.remove_reservation(car)\n car.approach_speed_limit()\n\n self.cars = current_cars", "title": "" }, { "docid": "840815128cf2e7b508c14b791f4d94f1", "score": "0.6010749", "text": "def admin_cars_report():\n if not current_user.isAdmin():\n abort(403), 503\n car = Car.query.filter_by(id=request.form['car_id']).first()\n if car:\n car_report = CarReport(car.id)\n db.session.add(car_report)\n db.session.commit()\n return '', 200\n return 'car not exist.', 404", "title": "" }, { "docid": 
"a79b3c8635542354759fde597d70b586", "score": "0.5988786", "text": "def add_car():\n form = CarForm()\n if form.validate_on_submit():\n plate = form.plate.data\n description = form.description.data\n car = Car(owner=current_user, plate=plate, description=description)\n db.session.add(car)\n db.session.commit()\n flash('Zaposano pojazd: {}'.format(description))\n return redirect(url_for('index'))\n return render_template('add_car.html', form=form)", "title": "" }, { "docid": "b9b6002118ef3afd8b34d50398395b7d", "score": "0.59066844", "text": "def init_car_details():\n with terminating_sn() as session:\n for car in cars_list:\n reg = car[0]\n category = car[1]\n colour = car[2]\n model = car[3]\n milage = car[4]\n location = car[5]\n data = Cars(reg, category, colour, model, milage, location)\n session.add(data)\n try:\n session.commit()\n except exc.IntegrityError as e:\n return {\"success\": False, \"msg\": e._message()}\n\n return {\"success\": True, \"msg\": \"Initial data migrated\"}", "title": "" }, { "docid": "ea5425715ee9754fd2ef4d77d7678a65", "score": "0.58803296", "text": "def test_create_car(self):\n\n response = self.client.post(\n reverse('car-list'),\n {'model': 'Mercedes Benz'},\n )\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\n car_data = response.data\n\n self.assertEqual(car_data.get('model'), 'Mercedes Benz')", "title": "" }, { "docid": "6f8e5361cde9356b58274470ce23dcdd", "score": "0.56544715", "text": "def test_success_post_car_view(self):\n url = reverse('create-car')\n response = self.client.post(url, {'make': 'Sample', 'model': 'Car'})\n self.assertRedirects(\n response, \n expected_url=reverse('popular'), \n status_code=302, \n target_status_code=200)\n self.assertTrue(Car.objects.exists())", "title": "" }, { "docid": "84c9edd7e5555e3385f798e405d12bdc", "score": "0.5644956", "text": "def cars():\n if (current_user.user_type != \"customer\"):\n return redirect(\"/unauthorised\")\n\n form = CarsSearchForm()\n if form.validate_on_submit():\n response = requests.post(\"http://127.0.0.1:5000/api/cars/available/property\", data=request.form)\n else:\n response = requests.get(\"http://127.0.0.1:5000/api/cars/status/available\")\n data = json.loads(response.text)\n return render_template(\"cars.html\", available_cars=data, form=form)", "title": "" }, { "docid": "193b267a69f113a46c38026c2605ec7b", "score": "0.55878174", "text": "def test_rating_view_cars(self):\n car = create_car(make='Car', model='Test')\n response = self.client.get(reverse('rate-car'))\n self.assertIn('form', response.context)", "title": "" }, { "docid": "025491ac8dd96097879609f3f11b3bf0", "score": "0.5583649", "text": "def create(self, cr, user, vals, context=None):\n if ('name' not in vals) or (vals.get('name')=='/'): \n vals['name'] = self.pool.get('ir.sequence').get(cr, user, 'rented.cars')\n return super(rented_cars, self).create(cr, user, vals, context)", "title": "" }, { "docid": "8a3f15a1e4f0b1a053bcf84e546cba4e", "score": "0.55331796", "text": "def test_no_creates(self):\n response = self.client.get(reverse('currencies:index'))\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, \"No currencies are available.\")\n self.assertQuerysetEqual(response.context['latest_create_list'], [])", "title": "" }, { "docid": "051e414c8fd7659e7f91a8e75dd30f30", "score": "0.54933804", "text": "def create(self, request, *args, **kwargs):\n return super(MyCarViewSet, self).create(request, *args, **kwargs)", "title": "" }, { "docid": "908d2db3868773c314e074fa170225d1", 
"score": "0.536929", "text": "def add_car(garage_id, car): # noqa: E501\n carDetail = CarDetail.from_dict(car)\n\n # added input validation - this should go into the model code\n # but the framework does not catch and wrap errors other than\n # can be expected from the yaml specification\n try:\n decimal.Decimal(carDetail.price)\n except decimal.InvalidOperation:\n return ApiResponse(code=604, type='error', message='invalid input, price must be decimal'), 400\n\n try:\n with get_db() as con:\n cursor = con.cursor()\n\n # TODO: verify isolation level\n # this calls for a transaction:\n # - insert the car (optimistically)\n # - then check garage capacity to eventually rollback\n cursor.execute('BEGIN TRANSACTION')\n\n check_stmt = '''\n select g.max_capacity, count(c.id) as no_cars\n from garage as g left join car as c\n on g.id = c.garage_id\n where g.id = ?\n group by g.id\n '''\n check_args = (garage_id,)\n check_record = cursor.execute(check_stmt, check_args).fetchone()\n\n # garage not found\n if check_record is None:\n return ApiResponse(code=602, type='error', message='cannot find garage {}'.format(garage_id)), 400\n\n from pprint import pprint\n pprint({ key: check_record[key] for key in check_record.keys() })\n\n # garage already full\n if check_record['no_cars'] > check_record['max_capacity']:\n return ApiResponse(code=603, type='error', message='garage {} is already full'.format(garage_id)), 400\n\n create_stmt = '''\n insert into car (registration, brand, model, price, garage_id)\n values (?, ?, ?, ? ,?);\n '''\n create_args = (carDetail.registration,\n carDetail.brand,\n carDetail.model,\n carDetail.price,\n garage_id\n )\n\n cursor.execute(create_stmt, create_args)\n cursor.execute('END TRANSACTION')\n \n carDetail.id = cursor.lastrowid\n\n return carDetail, 201, { 'Location': '{}/garages/{}/cars/{}'.format(api_base_url(), garage_id, carDetail.id) }\n\n except sqlite3.IntegrityError as err:\n msg, = err.args\n\n if 'UNIQUE' in msg:\n return ApiResponse(code=601, type='error', message='car registration already exists'), 400\n elif 'FOREIGN KEY' in msg:\n return ApiResponse(code=602, type='error', message='cannot find garage {}'.format(garage_id)), 400", "title": "" }, { "docid": "294ff55b67cff8968c709084475a8f95", "score": "0.5363036", "text": "def car_list(request):\n\n if request.method == 'GET':\n try:\n cars = Car.objects.all()\n serializer = CarSerializer(cars, many=True)\n return Response(serializer.data)\n except:\n return Response( status=status.HTTP_204_NO_CONTENT)\n\n\n\n elif request.method == 'POST':\n serializer = CarSerializer(data=request.data)\n if serializer.is_valid():\n #check if fits\n model = request.data['make']\n url = f'https://vpic.nhtsa.dot.gov/api/vehicles/getmodelsformake/{model}?format=json'\n try:\n response = requests.get(url)\n data = json.loads(response.text)\n data = data[\"Results\"]\n on_list = False\n for element in data:\n if element['Model_Name'] == request.data['model']:\n on_list = True\n break\n if not on_list:\n raise Http404\n except Http404 as err:\n return Response(status=status.HTTP_400_BAD_REQUEST)\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)", "title": "" }, { "docid": "65c6a7673312b840a178f635f490660b", "score": "0.5351675", "text": "def car_view():\n if g.type != \"Customer\":\n return \"Access Denied\"\n \"\"\"Customer view car\"\"\"\n if (g.type != \"Customer\"):\n return \"Access Denied\"\n form = 
UserCarSearchForm()\n if request.method == \"POST\":\n return search_car(form)\n if request.method == \"GET\":\n return display_no_car(form)", "title": "" }, { "docid": "d6ad6146faa5821afc767aba9536fa63", "score": "0.5335756", "text": "def test_delete_car(self):\n\n response = self.client.delete(\n reverse('car-list') + str(self.car.id) + '/',\n )\n\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n self.assertEqual(Car.objects.count(), 0)", "title": "" }, { "docid": "91ab406340dee5e29cd764a2cfd77355", "score": "0.52362394", "text": "def test_create_empty_pot(self):\n url = reverse('pot-list')\n self.client.force_authenticate(user=self.user)\n response = self.client.post(url)\n\n expected_error = {\n 'name': ['This field is required.'],\n 'identifier': ['This field is required.'],\n 'place': ['This field is required.'],\n 'plant': ['This field is required.']\n }\n self.assertEqual(response.data, expected_error)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "title": "" }, { "docid": "f6116d1d795e44410c682cabb24d10cf", "score": "0.52306664", "text": "def create(self, request):\n\n serializer = serializers.CarInstanceSerializer(data=request.data)\n\n if serializer.is_valid():\n # Create car instance\n car = helper.Car(**serializer.data)\n\n return Response(car.output)\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)", "title": "" }, { "docid": "9d558ce1f1512042b2000149624df00d", "score": "0.5211144", "text": "def test_list_cars(self):\n\n response = self.client.get(reverse('car-list'))\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertIn('results', response.data)\n\n results = response.data.get('results')\n\n self.assertEqual(len(results), 1)\n\n car_data = results[0]\n\n self.assertEqual(car_data.get('model'), 'Mercedes Benz')", "title": "" }, { "docid": "21e0ccaeb2a175ddafd957dc7a74af22", "score": "0.5189958", "text": "def next():\r\n try:\r\n nonlocal cars\r\n nonlocal counter\r\n print('car: ' + str(cars[counter][0]) + ', ' + 'parking type: ' + str(\r\n cars[counter][1]) + ', parking time: ' + str(cars[counter][2]))\r\n counter += 1\r\n except IndexError:\r\n print(\"no car\")", "title": "" }, { "docid": "a5a7a030d3d766b612358127dff0b7a1", "score": "0.518613", "text": "def confirm_no_recipe(self):\n self.clarify_fallback('Confirm recipe')\n print (\"hiha\")", "title": "" }, { "docid": "a26a88cab18b31d11595a9761393eacd", "score": "0.5185768", "text": "def clean_fields(self, exclude=None):\n if self.car:\n if not self.profile.car_set.filter(pk=self.car.id).exists():\n raise ValidationError(_('Car should be one of the user\\'s'))", "title": "" }, { "docid": "8f50d95259f2b9e0e15f3ac84c539130", "score": "0.5183534", "text": "def index(request):\n\n #generate a count of all the cars in the system\n\n cars_count = Car.objects.all().count()\n car_instance = carInstance.objects.all().count()\n\n #count the number of available cars\n cars_available = carInstance.objects.filter(status__exact = 'a').count()\n\n #counr the number of toyotas\n toyotas_available = CarMake.objects.filter(car_make__exact='Toyota').count()\n\n #Context specifies how the data will be presented in the rendered view\n context = {\n 'cars_count': cars_count,\n 'car_instance': car_instance,\n 'cars_available': cars_available,\n 'toyotas_available':toyotas_available,\n }\n return render(request, 'index.html', context=context)", "title": "" }, { "docid": "4b5ef4afdc0c854a6106350d650bc837", "score": "0.5175794", "text": "def 
no_new_recipe(self):\n self.clarify_fallback('Warning recipe')\n print (\"oeps\")", "title": "" }, { "docid": "224c90b21ffd5c43fc4208695044c5f9", "score": "0.5168944", "text": "def list_car():\n cars = CarApiHandler.car_handler.get_all()\n if cars is None:\n out = {'status': 'there are no cars in the database!'}\n return jsonify(out), 400\n c = []\n for car in cars:\n c.append(car.serialize())\n out = {'object': c}\n return jsonify(out), 200", "title": "" }, { "docid": "daa3fb9df94eafb4dd2a54c0c3ab3c18", "score": "0.51629484", "text": "def test_get_car_view(self):\n response = self.client.get(reverse('create-car'))\n self.assertIn('form', response.context)", "title": "" }, { "docid": "92ed84d0c06b1378f287a2c4b5565c0c", "score": "0.51563597", "text": "def check_category(self):\r\n print( f'You created {self.category} category')", "title": "" }, { "docid": "05a686488ea09f2fefe7a0075850e93e", "score": "0.51383066", "text": "def set_up_cars():\n data = []\n for car in get_cars_list():\n obj = Car.objects.create(car_make=car[\"Make_Name\"], model_name=car[\"Model_Name\"])\n data.append(obj)\n return data", "title": "" }, { "docid": "b40b5d4f9a5831e884a83cf791303508", "score": "0.5136973", "text": "def add_to_wishlist(request, car_id):\n car = Car.objects.get(id=car_id)\n user_profile = UserProfile.objects.get(user=request.user)\n\n wished_car, created = Wishlist.objects.get_or_create(\n car=car,\n user_profile=user_profile,\n )\n wished_car.save()\n\n if created:\n # When car is added to wishlist\n messages.success(request, f'{car.make} {car.model} {car.year} \\\n has been successfully added to your Wishlist')\n else:\n # when car has already been added to wishlist\n messages.info(request, f'{car.sku}, {car.make} {car.model} {car.year} \\\n is already in your wishlist')\n return redirect(reverse('car_detail', args=[car.id]))\n\n context = {\n 'wished_car': wished_car,\n }\n\n return redirect(reverse('view_wishlist'), context)", "title": "" }, { "docid": "fb3925a21d34baa605a263383ceb54a4", "score": "0.51342005", "text": "def create():\n return render_template('catalogs/create.html', catalog=None, error='')", "title": "" }, { "docid": "0e1c598b2be11608357e218852ddba81", "score": "0.5114945", "text": "def test_retrieve_car(self):\n\n response = self.client.get(\n reverse('car-list') + str(self.car.id) + '/',\n )\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n car_data = response.data\n\n self.assertEqual(car_data.get('model'), 'Mercedes Benz')", "title": "" }, { "docid": "266388be6be79acabaf3520cc9255b62", "score": "0.51104534", "text": "def testTheyExist(self):\n self._create()", "title": "" }, { "docid": "5152ec1f516aaffd4edbfbf493bf5206", "score": "0.5108604", "text": "def admin_cars_delete():\n if not current_user.isAdmin():\n abort(403), 503\n car = Car.query.filter_by(id=request.form['car_id']).first()\n if car:\n db.session.delete(car)\n db.session.commit()\n return '', 200\n return 'car not exist.', 404", "title": "" }, { "docid": "a0c8326e890f5bd5da178e9d701ad5d5", "score": "0.5104021", "text": "def verify_unsatisfactory_results(self):\n\n # Click the button to create a new record\n found = False\n for results_form in self.s.doc.cssselect('form'):\n submit_button = results_form.cssselect('input[type=\"submit\"]')\n if (submit_button and\n 'Create a new record' in submit_button[0].get('value', '')):\n self.s.submit(results_form)\n found = True\n assert found, \"didn't find Create a new record in any form\"", "title": "" }, { "docid": "0e58ae9cb70e7562f601120ac669e889", 
"score": "0.50848496", "text": "def create_car_object():\n global web_results\n for index in range(len(get_year())):\n web_results.append(CarObject(get_make()[index], get_model()[index], get_mileage()[index], get_year()[index],\n get_engine_capacity()[index],\n get_fuel_type()[index], get_price()[index], today))", "title": "" }, { "docid": "574ad95e978ec47118abb1f7db541d16", "score": "0.5079116", "text": "def _fill_with_petrol(car: Car) -> None:\n print(f\"Filling {car} with petrol..\")", "title": "" }, { "docid": "574ad95e978ec47118abb1f7db541d16", "score": "0.5079116", "text": "def _fill_with_petrol(car: Car) -> None:\n print(f\"Filling {car} with petrol..\")", "title": "" }, { "docid": "15ba6270f7103619b3faac7464940cfe", "score": "0.50739354", "text": "def create_and_add_cars(board, info):\n for name, attributes in info.items():\n location_cord = (attributes[1][0], attributes[1][1])\n if name in COLORS and attributes[0] in range(2, 5) \\\n and attributes[2] in range(2):\n car = Car(name, attributes[0], location_cord, attributes[2])\n board.add_car(car)", "title": "" }, { "docid": "7c879a93b1e845b8044f309b9b72c27a", "score": "0.507318", "text": "def test_vehicle_creation(self):\n manufacturer = Manufacturer(\n name=\"CarbonWurks\",\n )\n manufacturer.save()\n\n self.assertEqual(manufacturer.name, 'CarbonWurks')", "title": "" }, { "docid": "8795bf7acb6ea331c58af9d0bcad4e54", "score": "0.5069418", "text": "def book_car():\n if g.type != \"Customer\":\n return \"Access Denied\"\n try:\n car = json.loads(str(request.args['car'].replace(\"'\", \"\\\"\")))\n start_date = datetime.strptime(request.args['start_date'], '%Y-%m-%d %H:%M:%S')\n end_date = datetime.strptime(request.args['end_date'], '%Y-%m-%d %H:%M:%S')\n except: \n return \"Missing start_date, end_date or car arguments\"\n total_cost = math.ceil((end_date - start_date).total_seconds()/3600) * car['Cost']\n action = \"confirm\"\n return render_template(\"customer/booking_detail.html\", car=car,start_date=start_date,end_date=end_date, total_cost=total_cost,action=action)", "title": "" }, { "docid": "18a5c72d90532799983883e3b5098e1f", "score": "0.5060563", "text": "def create_car(self):\n if randint(1, 6) == 1:\n car = Turtle()\n car.shape('square')\n car.shapesize(stretch_wid=1, stretch_len=2)\n car.penup()\n car.color(choice(COLORS))\n random_y = randint(-self.SCREEN_HEIGHT, self.SCREEN_HEIGHT)\n car.goto((self.SCREEN_WIDTH, random_y))\n self.all_cars.append(car)", "title": "" }, { "docid": "9928cbce33dac7271a7201ec8cdfde13", "score": "0.50369674", "text": "def create_car(self):\r\n random_chance = randint(1, 6)\r\n if random_chance == 1:\r\n new_car = Turtle(\"square\")\r\n new_car.penup()\r\n new_car.shapesize(stretch_wid=1, stretch_len=2)\r\n new_car.color(choice(COLORS))\r\n random_y = randint(-250, 250)\r\n new_car.goto(300, random_y)\r\n self.all_cars.append(new_car)", "title": "" }, { "docid": "7b843109e669ee8e58ab6034c5fccd75", "score": "0.50352097", "text": "def test_register_car_non_admin(self):\n\n allure.dynamic.title(\"Register car \"\n \"using non admin user credentials\")\n allure.dynamic.severity(allure.severity_level.BLOCKER)\n\n with allure.step(\"Verify user permissions\"):\n username = USER_LIST[1]['name']\n password = USER_LIST[1]['password']\n self.assertEqual(\"non_admin\",\n USER_LIST[1]['perm'])\n\n with allure.step(\"Send POST request\"):\n response = requests.post(url=self.URL +\n self.register_url,\n params=self.new_car,\n json=self.customer,\n auth=(username,\n password))\n\n with allure.step(\"Verify 
status code\"):\n self.assertEqual(200,\n response.status_code)\n\n with allure.step(\"Verify 'successful' flag\"):\n self.assertTrue(response.json()['registered_car']['successful'])\n\n with allure.step(\"Verify retrieved car details\"):\n self.assertDictEqual({\n \"name\": \"Creta\",\n \"brand\": \"Hyundai\",\n \"price_range\": \"8-14 lacs\",\n \"car_type\": \"hatchback\"\n }, response.json()['registered_car']['car'])\n\n with allure.step(\"Verify retrieved customer details\"):\n self.assertDictEqual(self.customer,\n response.json()['registered_car']['customer_details'])", "title": "" }, { "docid": "89156d8462a5a8c5b2f81d952e49fdb2", "score": "0.5033979", "text": "def post(self):\n # get the post body data\n try: \n post_data = request.get_json()\n \n query_car = Car.query.filter_by(\n name = post_data.get('name') \n ).first()\n \n if not query_car:\n car = Car (post_data.get('name'),\n post_data.get('make'),\n post_data.get('body'),\n post_data.get('colour'),\n post_data.get('seats'),\n post_data.get('location'),\n post_data.get('cost_per_hour'),\n post_data.get('manu_date')\n )\n db.session.add(car)\n db.session.commit()\n \n # parse into dictionary type\n new_car = new_car_dict(self, car)\n \n responseObject = {\n 'status': 'success',\n 'message': 'New car created',\n 'data': new_car\n }\n \n return make_response(jsonify(responseObject), 201)\n \n else:\n responseObject = {\n 'status': 'Not allowed',\n 'message': 'Car name has already existed'\n }\n return make_response(jsonify(responseObject), 401)\n except Exception as e:\n responseObject = {\n 'status': 'fail',\n 'message': str(e)\n }\n return make_response(jsonify(responseObject)), 500", "title": "" }, { "docid": "d0dee78db4b8dd3ec22ef45aade2f126", "score": "0.5006911", "text": "def status(self):\n if not self._is_parking_lot_created():\n return\n\n print(\"Slot No\\tRegistration No\\tColour\")\n for slot in self.slots.values():\n if not slot.available and slot.car:\n print(f\"{slot.slot_no} \\t {slot.car.registration_no} \\t {slot.car.colour}\")", "title": "" }, { "docid": "1b0db03acd43dd13859dd6d9d3489746", "score": "0.50016177", "text": "def display_not_enough_data():\n print(ColorfulView.format_string_to_red('You have no students/groups/assignments added'))", "title": "" }, { "docid": "763d09a2f2704e5cb458a9ebdb317336", "score": "0.4998177", "text": "def test_view_display(self):\n response = self.client.get(reverse('create-car'))\n self.assertEqual(response.status_code, 200)", "title": "" }, { "docid": "87d60f4a1fa8943c5cc707d25b1494e5", "score": "0.49766663", "text": "def sanity_check(self):\n\n if self.reactions == []:\n print(\"No reactions defined\")\n exit(1)\n\n if self.background_set == []:\n print(\"Empty background set\")\n exit(1)", "title": "" }, { "docid": "c3c815e7e202a3f540a606e4970e74fa", "score": "0.49648315", "text": "def create_car(make, model):\n return Car.objects.create(make=make, model=model)", "title": "" }, { "docid": "05343d32145df0070ba0eb0866ba6adc", "score": "0.49483994", "text": "def list(self, request):\n\n return Response({'message': 'Include car instance data'})", "title": "" }, { "docid": "c42c31b1047d6a545b4ae37b2b618913", "score": "0.49413767", "text": "def render_error(self):\n possible_cities = self.name\n if possible_cities is None:\n message = \"No location found. 
Please try again.\"\n else:\n message = \"Multiple cities found.</br>\"\n message += '</br>'.join([city for city in possible_cities])\n\n return self.render({'place_name': message})", "title": "" }, { "docid": "758b3fc8ef8c938b3fb1960cd83d89fb", "score": "0.49380752", "text": "def _fill_with_diesel(car: Car) -> None:\n print(f\"Filling {car} with diesel..\")", "title": "" }, { "docid": "758b3fc8ef8c938b3fb1960cd83d89fb", "score": "0.49380752", "text": "def _fill_with_diesel(car: Car) -> None:\n print(f\"Filling {car} with diesel..\")", "title": "" }, { "docid": "55d5db1436a77c87b5486378f5e0e985", "score": "0.49361703", "text": "def __init__(self) -> None:\n self._cars = {}", "title": "" }, { "docid": "94a0d4ee0d93eaa2d0e79ad064c07898", "score": "0.49347797", "text": "def get(self, car_name = None):\n try: \n # expose the list of cars\n if (car_name is None):\n cars = Car.query.all()\n cars_dict = {}\n\n for car in cars:\n new_car = new_car_dict(self, car)\n cars_dict[new_car['name']] = new_car\n \n responseObject = {\n 'status': 'success',\n 'message': 'Response to get all cars',\n 'data': cars_dict\n }\n \n return make_response(jsonify(responseObject)), 200\n else:\n # expose the single car\n car = Car.query.filter_by(name=car_name).first()\n \n new_car = new_car_dict (self, car)\n \n responseObject = {\n 'status': 'success',\n 'message': 'Response to get single car',\n 'data': new_car\n }\n return make_response(jsonify(responseObject)), 200\n except Exception as e:\n responseObject = {\n 'status': 'fail',\n 'message': str(e)\n }\n return make_response(jsonify(responseObject)), 500", "title": "" }, { "docid": "56e967f11f9105550dfef48efcdfabd8", "score": "0.49327004", "text": "def require_created(self):\n if not self.__created:\n self._create()\n self.__created = True", "title": "" }, { "docid": "dd260d502cc3d2d1eadacae4ab82798d", "score": "0.49312252", "text": "def raise_creation_errors(self, dompc):\n self.raise_submission_errors()", "title": "" }, { "docid": "83309bd41fd123977ce9c7bbf19e7d55", "score": "0.492659", "text": "def check_send_food_info():\n try:\n truck_list = get_truck_list()\n hipchat_message(\"chat_message.html\", {'trucks': truck_list})\n except Exception, e:\n print e", "title": "" }, { "docid": "c3836d77f22c629f227b9ac8238cd705", "score": "0.49193597", "text": "def is_car(cls):\n print('Is this Car? 
', cls.__isCar)", "title": "" }, { "docid": "53bd587989aefa62769b0c3745002a4c", "score": "0.49157196", "text": "def is_made(self):\n pass", "title": "" }, { "docid": "d0eabaa92ab1fcf649614b69f02f0a19", "score": "0.49122882", "text": "def get_car(car_id):\n carr = CarApiHandler.car_handler.get(car_id)\n if carr is None:\n out = {'status': 'car not found!'}\n return jsonify(out), 400\n out = {'object': carr.serialize()}\n return jsonify(out), 200", "title": "" }, { "docid": "ded741dd718c55234d041e16d7445603", "score": "0.4908681", "text": "def post(self, dealer_id, center_id):\n if Dealer.query.get(dealer_id) is None:\n return 'Wrong dealer id {}'.format(dealer_id), HTTPStatus.BAD_REQUEST\n if DealerCenter.query.get(center_id) is None:\n return 'Wrong dealer center id {}'.format(center_id), HTTPStatus.BAD_REQUEST\n\n data = api.payload\n if isinstance(data, list):\n data = data[0]\n\n if Car.query.filter_by(id=data['id'], car_model_id=data['car_model_id'])\\\n .first() is not None:\n return 'Car already exists', HTTPStatus.CONFLICT\n\n new_car = Car(\n id=data['id'],\n car_model_id=data['car_model_id'],\n manufacture_date=datetime.datetime.strptime(data['manufacture_date'], '%Y-%m-%d'),\n kilometrage=data['kilometrage'],\n dealer_center_id=center_id,\n price=data['price'],\n description=data.get('description', None)\n )\n\n db.session.add(new_car)\n db.session.commit()\n return 'Car successfully added', HTTPStatus.CREATED", "title": "" }, { "docid": "23fe2dff0f1a1ce8a9cf20f1d0cf26ed", "score": "0.49083608", "text": "def test_car_make(self) -> None:\n car_1 = Car.objects.get(id=1)\n car_2 = Car.objects.get(id=2)\n\n self.assertEqual(car_1.make, \"Toyota\")\n self.assertEqual(car_2.make, \"Volkswagen\")", "title": "" }, { "docid": "e5ab254512e1f1754da48292d7d0aa4f", "score": "0.4907637", "text": "def placeDoginCarrier(self):\r\n print(self.name + \" is in the car carrier!\")", "title": "" }, { "docid": "aa1fd5c155aa8db4d120ed86c543db6e", "score": "0.4905959", "text": "def __init__(self):\r\n \r\n self.top = NVTK.Toplevel(NewVehicleDialog.root)\r\n self.frm = NVTK.Frame(self.top, borderwidth=4, relief='ridge')\r\n self.frm.pack(fill='both', expand=True)\r\n\r\n self.label = NVTK.Label(self.frm, text=\"Add New Vehicle\")\r\n self.label.pack(padx=4, pady=4)\r\n\r\n self.lblMake = NVTK.Label(self.frm, text='Make')\r\n self.lblMake.pack(padx=4, pady=4)\r\n\r\n self.make_value = NVTK.StringVar()\r\n self.cbMakes = NVTK.ttk.Combobox(self.frm, textvariable=self.make_value, width=50, state=\"readonly\")\r\n self.cbMakes['values'] = carDB.loadCarMakes()\r\n self.cbMakes.bind(\"<<ComboboxSelected>>\", self.MakeSelected)\r\n self.cbMakes.pack(pady=4, padx=4)\r\n\r\n\r\n #TODO make it so once make is selected it changes MakeID\r\n make = self.make_value.get()\r\n spacePos = make.find(' ')\r\n MakeID = make[0:spacePos]\r\n\r\n self.lblModel = NVTK.Label(self.frm, text = 'Model')\r\n self.lblModel.pack(padx=4, pady=4)\r\n\r\n self.model_value = NVTK.StringVar()\r\n self.cbModels = NVTK.ttk.Combobox(self.frm, textvariable=self.model_value, width=50, state=\"readonly\")\r\n if (MakeID == ''):\r\n for dat in self.cbModels.get():\r\n self.cbModels.set('')\r\n else:\r\n self.cbModels['values'] = carDB.loadCarModels(MakeID)\r\n self.cbModels.pack(pady=4, padx=4)\r\n\r\n self.lblModelNew = NVTK.Label(self.frm, text=\"If Model not present input here\")\r\n self.lblModelNew.pack(padx=4, pady=4)\r\n self.entryModel = NVTK.Entry(self.frm)\r\n self.entryModel.pack(pady=4, padx=4)\r\n \r\n\r\n self.lblColor = 
NVTK.Label(self.frm, text = 'Color')\r\n self.lblColor.pack(padx=4, pady=4)\r\n self.entryColor = NVTK.Entry(self.frm)\r\n self.entryColor.pack(pady=4, padx=4)\r\n\r\n self.b_cancel = NVTK.Button(self.frm, text='Cancel')\r\n self.b_cancel['command'] = self.top.destroy\r\n self.b_cancel.pack(padx=4, pady=4) \r\n\r\n self.b_OK = NVTK.Button(self.frm, text='OK')\r\n self.b_OK['command'] = self.addVehicle\r\n self.b_OK.pack(padx=4, pady=4)", "title": "" }, { "docid": "7923922ed811b5c93b20b354447fd5ba", "score": "0.49006245", "text": "def test_create_missing_field(self):\n self.client.force_authenticate(user=self.admin)\n\n data = dict()\n\n response = self.client.post(\n reverse('retreat:retreat-list'),\n data,\n format='json',\n )\n\n content = {\n \"price\": [\"This field is required.\"],\n \"timezone\": [\"This field is required.\"],\n \"display_start_time\": [\"This field is required.\"],\n \"type\": [\"This field is required.\"],\n \"name\": [\"This field is required.\"],\n }\n\n self.assertEqual(\n response.status_code,\n status.HTTP_400_BAD_REQUEST,\n response.content\n )\n\n self.assertEqual(\n json.loads(response.content),\n content,\n response.content\n )", "title": "" }, { "docid": "c51398b73f00e0f4923a283ca5468e8c", "score": "0.4893992", "text": "def test_no_customer_success(self):\n q = self.generate_query('promote_courses', ())\n res = self.execute_query(q)\n assert len(res) == 0, \"There are no customers\"", "title": "" }, { "docid": "6419d552764c4662dcd609dba0b1dc2b", "score": "0.48815712", "text": "def add_car(self):\n possible_directions = {'h':['l','r'], 'v':['u','d']}\n starting_points = {\n 'l': (self.right, self.top + self.buffer),\n 'r': (self.left, self.bottom - self.buffer - 15),\n 'u': (self.right - self.buffer - 15, self.bottom),\n 'd': (self.left + self.buffer, self.top)}\n\n direction = random.choice(possible_directions[self.orientation])\n starting_point = starting_points[direction]\n simulation.cars.append(Car(starting_point, direction))", "title": "" }, { "docid": "98c80d0e43e4c5391ad37f8e4d125f80", "score": "0.48794788", "text": "def error(self):\n if len(self.cards) == 0:\n raise RuntimeError", "title": "" }, { "docid": "7ea436e4ef0d19d0bc1d857c5355a2b4", "score": "0.4873793", "text": "def add_car(self, id_: str, fuel: int) -> None:\n\n # Check to make sure the identifier isn't already used.\n if id_ not in self._cars:\n self._cars[id_] = Car(fuel)", "title": "" }, { "docid": "464736f80dc0f57997a0a24e93c26db2", "score": "0.48688722", "text": "def test_view_with_existing_movie_no_image(self):\n a_movie = create_movie('fubar', 'SF', 'fubar', 'fubar',\n 'fubar', 'fubar', 'A')\n response = self.client.get(reverse('movielists:movie_details',\n args=(a_movie.id,)))\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, a_movie.title)", "title": "" }, { "docid": "5e4e3058c1515ee791e8dbffef3d23de", "score": "0.4866042", "text": "def error_NotFound(self):\n window = Tk()\n window.withdraw()\n # To prevent Tk window show up when message box used\n messagebox.showinfo('Error', 'Title of input series is NOT IN DATABASE'\n ' or is TOO BOARD. 
Please Reenter.')", "title": "" }, { "docid": "9a10d388263a8f6b2899461df3c895b9", "score": "0.4866037", "text": "def test_model_can_create_a_vehicle(self):\n old_count = VehicleDetails.objects.count()\n self.vehicleData.save()\n new_count = VehicleDetails.objects.count()\n self.assertNotEqual(old_count, new_count)", "title": "" }, { "docid": "c425e84e5b82f5ef6af5064413513483", "score": "0.4865762", "text": "def check(self):\n\t\tif not self.cite_id:\n\t\t\terrstr = \"No citation ID\"\n\t\t\traise RuntimeError(errstr)\n\t\tif not self.title:\n\t\t\terrstr = \"No title\"\n\t\t\traise RuntimeError(errstr)\n\t\tif not self.year:\n\t\t\terrstr = \"No year\"\n\t\t\traise RuntimeError(errstr)\n\t\tif not self.how_published:\n\t\t\terrstr = \"No publication description\"\n\t\t\traise RuntimeError(errstr)\n\t\tif len(self.authors) == 0:\n\t\t\terrstr = \"No authors\"\n\t\t\traise RuntimeError(errstr)", "title": "" }, { "docid": "d40888ca9b01aa29a0e935f54545fd98", "score": "0.4860018", "text": "def check_exists():\n if request.method == 'POST':\n if \":\" in request.form[\"name\"]:\n category, competence = request.form[\"name\"].split(\": \")\n c_query = s.query(Competence). \\\n join(CompetenceDetails). \\\n filter(CompetenceDetails.title == competence). \\\n first()\n if c_query:\n return json.dumps({'success': True}), 200, {'ContentType': 'application/json'}", "title": "" }, { "docid": "bfc4e42f4a599b1fd02a9a41e6a3cbbc", "score": "0.48587167", "text": "def test_no_creators(self):\n product = self.amazon.lookup(ItemId=\"8420658537\")\n assert_false(product.creators)", "title": "" }, { "docid": "406c6c7cb9942e69088ea8be7d2a23d0", "score": "0.4855977", "text": "def test_no_items(self):\n\n data = self.post(\n self.url,\n {\n \"items\": [],\n \"location\": None,\n },\n expected_code=400\n ).data\n\n self.assertIn('Line items must be provided', str(data))\n\n # No new stock items have been created\n self.assertEqual(self.n, StockItem.objects.count())", "title": "" }, { "docid": "dedd61ffe9ed98a8d9a17b15fb0c761f", "score": "0.48478046", "text": "def add_booking(self, **property_list):\n # Check for anyone who rent at the moment first\n query = f\"\"\"select * from (select * from {self.table} where {self.CAR_ID} = %s) as b where not (b.{self.FROM} >= %s or b.{self.TO} <= %s)\"\"\"\n records = self.execute_return(query, (car_id, to_time, from_time))\n\n # If there is no one booking, then add booking\n if len(records) > 0:\n raise Exception(\"This car has been booked at this time.\")\n else:\n # Add to Google Calendar first\n response = self.calendar.add_event(user_id, car_id, datetime.datetime.strptime(\n from_time, \"%Y-%m-%d %H:%M:%S\"), datetime.datetime.strptime(to_time, \"%Y-%m-%d %H:%M:%S\"), booking_detail)\n event_id = response[\"id\"]\n\n query = f\"insert into {self.table} \" + \\\n \" ({}, {}, {}, {}, {}, {}) values(%s, %s, %s, %s, %s, %s)\".format(\n *self.property_list[1:])\n return self.execute_no_return(query, (car_id, user_id, booking_detail, from_time, to_time, event_id))", "title": "" }, { "docid": "0f38acc9fafeb9aa8d027dbcd479bb0a", "score": "0.48328292", "text": "def test_occurrence_created_with_success(self):\n\n occurrences = Occurrence.objects.all()\n self.assertEqual(len(occurrences), 3)", "title": "" }, { "docid": "8acf860bae595f2824699db60baf8994", "score": "0.48309845", "text": "def test_empty(self):\n\n data = self.post(self.url, {}, expected_code=400).data\n\n self.assertIn('This field is required', str(data['items']))\n self.assertIn('This field is required', 
str(data['location']))\n\n # No new stock items have been created\n self.assertEqual(self.n, StockItem.objects.count())", "title": "" }, { "docid": "1fb5df6aaf64b684f346a8c7ae72a72e", "score": "0.48299348", "text": "def test_input_corretto(self):\n num_rec_init = Recensione.objects.count()\n Recensione.objects.create(\n attivita=self.attivita,\n autore=self.profilo1,\n voto=1,\n testo='Wow',\n data=timezone.now().date()\n )\n self.assertGreater(Recensione.objects.count(), num_rec_init,\n msg=\"Non viene creata recensione\")", "title": "" }, { "docid": "bbabb789d64f0ad1d9163979a7b460e8", "score": "0.48273933", "text": "def test_multipla_is_not_a_car(self):\n\n self.assertRaises(\n ValidationError,\n CarSerializer.validate_model,\n CarSerializer(),\n 'Multipla',\n )", "title": "" }, { "docid": "15d0aac5ccdfa9ccc137308474bb5c3e", "score": "0.4824083", "text": "def test_create_books_no_data(self):\n url = reverse('book-list')\n response = self.client.post(url)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "title": "" }, { "docid": "15e043dde9df1e36dc92f116fbb2e9b3", "score": "0.4823163", "text": "def admin_cars():\n if not current_user.isAdmin():\n abort(403)\n cars = Car.query.order_by(desc(Car.id)).all()\n return render_template(\"users/admin/cars.html\", cars=cars)", "title": "" }, { "docid": "a55529018e33b7087197a14424fb7f45", "score": "0.4821608", "text": "def addBooking():\n form = request.form\n checkFieldsExist(form, \"car_id\", \"customer_id\", \"start_datetime\", \"end_datetime\")\n\n car = Car.query.get(form[\"car_id\"])\n customer = User.query.get(form[\"customer_id\"])\n description = \"car details - Make: {}, Model: {}, Body Type: {}, Colour: {}, Seats: {}, Location: {}, Cost Per Hour: ${}\\nBooked by {}\".format(\n car.make, car.model, car.body_type, car.colour, car.seats, car.location, car.cost_per_hour, customer.username)\n event = create_event(\n form[\"start_datetime\"]+\":00\", form[\"end_datetime\"]+\":00\", \"Booking for car {} {}\".format(car.make, car.model), \n description=description)\n if event != None:\n event_id = event['id']\n else:\n event_id = None\n\n newBooking = Booking(\n car_id=form[\"car_id\"],\n user_id=form[\"customer_id\"],\n start_datetime=form[\"start_datetime\"],\n end_datetime=form[\"end_datetime\"],\n status=\"active\",\n calendar_id=event_id)\n \n car.status = \"unavailable\"\n\n db.session.add(newBooking)\n db.session.commit()\n\n return bookingSchema.jsonify(newBooking)", "title": "" }, { "docid": "47330619f833ed36317b5c1fbc07dd3f", "score": "0.4817881", "text": "def generate_successful_msg(self, request, obj, *args, **kwargs):\n return \"\"", "title": "" }, { "docid": "f0efbd5ad37ce463a384a18dd2f87df1", "score": "0.48154417", "text": "def test_create_client_empty_data(app):\n with pytest.raises(ValidationError):\n client.ClientService.objects_new()", "title": "" }, { "docid": "36ea2066c2316df08ae42ecd0eafe8b2", "score": "0.48053408", "text": "def test_empty_submission(self):\n\n items_page = ItemsPage(self.driver)\n items_page.click_create_new_item()\n create_item_page = CreateItemPage(self.driver)\n create_item_page.click_save()\n assert create_item_page.check_empty_or_not(), \"Empty Error didn't occur\"", "title": "" }, { "docid": "b878cb12026042e1cc2347ce0c9128ca", "score": "0.47913918", "text": "def test_api_can_create_a_vehicle(self):\n vehicle_data = {\n \"vehicle_type\": \"motorcycle\",\n \"vin\": \"116\",\n \"make\": \"bike3\",\n \"model\": \"bike44\",\n \"year\": 2015,\n \"seat_capacity\": 2,\n 
\"roof_rack_availability\": False,\n \"haul_capacity\": None,\n \"sidecar_availability\": True\n }\n response = self.client.post('/api/vehicle/', vehicle_data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)", "title": "" }, { "docid": "0f2d74f218e69db489fb2bab574111da", "score": "0.47890836", "text": "def test_created_successfully(self):\n\n payload = {'name':\"Cabbage\"}\n self.client.post(INGREDIENT_URL, payload)\n\n exists = Ingredient.objects.filter(name=payload['name']).exists()\n\n self.assertTrue(exists)", "title": "" }, { "docid": "99da865f6a9d7f1dea54a49fb68869b0", "score": "0.47859472", "text": "def get_free_car(self):\n query = f\"\"\"select COUNT(c.{CarDatabase.ID}) from {CAR_TABLE} as c \n where c.{CarDatabase.ID} not in \n (select i.{IssuesDatabase.CAR_ID} as ID from {ISSUES_TABLE} as i \n WHERE {IssuesDatabase.FROM} between current_date() and date_add(current_date(), interval 1 day\n ) \n union \n select b.{BookingDatabase.CAR_ID} as ID from {BOOKING_TABLE} as b \n WHERE {BookingDatabase.FROM} between current_date() and date_add(current_date(), interval 1 day)\n )\"\"\"\n records = self.execute_return(query)\n return records", "title": "" }, { "docid": "473da9f029936b4173465dc47d5112ec", "score": "0.4784191", "text": "def test_new_recipe_without_name_raises_error(self):\n with self.assertRaises(Exception):\n models.Recipe.objects.create(\n name=None,\n description=None\n )", "title": "" }, { "docid": "da52547e9143052881fa9f84cda85c7d", "score": "0.47785142", "text": "def IsCreated(self):", "title": "" }, { "docid": "8d7305be486a90638b1979ae87f7bb56", "score": "0.47778273", "text": "def new():\n catalog, error = Catalog.create(name=request.form.get('name'),\n user=current_user())\n if error:\n return render_template('catalogs/create.html',\n catalog=request.form,\n error=error)\n else:\n return redirect(url_for('catalogs.index'))", "title": "" }, { "docid": "265b5436bd27d7dcdf158dfbbfc7606d", "score": "0.4770154", "text": "def test_vehicle_creation(self):\n vehicle = Vehicle(\n make=\"Ford\",\n model=\"Focus\",\n generation=\"MK3\"\n )\n vehicle.save()\n\n self.assertEqual(vehicle.make, 'Ford')\n self.assertEqual(vehicle.model, 'Focus')\n self.assertEqual(vehicle.generation, 'MK3')", "title": "" } ]
4d4b550de13792535a2bbaa8a9d4149b
Populate the indexing item with a Property, for both lists and dictionaries.
[ { "docid": "7f99c19011917125cda8bfca1c99d0fd", "score": "0.5757633", "text": "def __getitem__(self, item: Union[int, str]):\n if item not in self._items.keys():\n shape = Properties._shapes_map.get(self.service_name, {}).get(self.shape_name)\n member = shape[\"value\"][\"shape\"]\n if isinstance(item, str):\n property_item = Properties(self.step_name, f\"{self.path}['{item}']\", member)\n else:\n property_item = Properties(self.step_name, f\"{self.path}[{item}]\", member)\n self._items[item] = property_item\n\n return self._items.get(item)", "title": "" } ]
[ { "docid": "006a309296cbac0e8bfcae5d3ad05ceb", "score": "0.6605211", "text": "def __index_data_for_property(self, data, property_getter):\n self._get_logger().debug(\"Creating index on getter '{}' for #{} entries\".format(property_getter, len(data)))\n return {property_getter(data_item): data_item for data_item in data}", "title": "" }, { "docid": "af4f6cf1d72abe3656b43c21c89886a5", "score": "0.65228707", "text": "def indexedproperty(fget=None):\n return IndexedProperty(fget)", "title": "" }, { "docid": "92d557122ce646302d2f10b59c53c925", "score": "0.6252321", "text": "def BuildListOfItemIndicesWithPropertyValueLookup(item,net,level,args):\n log = Log('BuildListOfItemIndicesWithPropertyValueLookup',item.GetName())\n errPrefix = 'in BuildListOfItemIndicesWithPropertyValueLookup, '\n argsType = str(type(args))\n if len(args) < 2:\n raise Exception, errPrefix + 'expected args to be property name and dict'\n if 'str' not in str(type(args[0])):\n raise Exception, errPrefix + 'first item in args must be a property name string'\n if 'dict' not in str(type(args[1])):\n raise Exception, errPrefix + 'second item in args must be a dictionary'\n try:\n value = item.GetProperty(args[0])\n if value is None:\n if len(args) > 2:\n value = args[2]\n else:\n raise Exception,'key not found'\n idx = item.GetIdx()\n dict = args[1]\n dict[idx] = value\n except Exception, e:\n #print 'in BuildListOfItemIndicesWithPropertyValueLookup: no property value for ' + args[0] \\\n # + ' in item ' + str(item.GetName())\n dict = args[1]\n idx = item.GetIdx()\n # use default, if one exists\n if len(args) > 2:\n dict[idx] = args[2]\n else:\n dict[idx]=0.0\n return True", "title": "" }, { "docid": "da1ab2f04b0b6851f6fbf328057910c6", "score": "0.5989224", "text": "def _expand_index_map(self, active_ctx, active_property, value, index_key, as_graph, options):\n rval = []\n for k, v in sorted(value.items()):\n ctx = JsonLdProcessor.get_context_value(\n active_ctx, k, '@context')\n if ctx:\n active_ctx = self._process_context(active_ctx, ctx, options)\n\n expanded_key = self._expand_iri(active_ctx, k, vocab=True)\n if index_key == '@id':\n # expand document relative\n k = self._expand_iri(active_ctx, k, base=True)\n elif index_key == '@type':\n k = expanded_key\n\n v = self._expand(\n active_ctx, active_property,\n JsonLdProcessor.arrayify(v),\n options, inside_list=False)\n for item in v:\n if as_graph and not _is_graph(item):\n item = {'@graph': [item]}\n if index_key == '@type':\n if expanded_key == '@none':\n # ignore @none\n item\n elif item.get('@type'):\n types = [k]\n types.extend(item['@type'])\n item['@type'] = types\n else:\n item['@type'] = [k]\n elif expanded_key != '@none' and index_key not in item:\n item[index_key] = k\n\n rval.append(item)\n return rval", "title": "" }, { "docid": "b906ca9962b2c19394d48607256202ec", "score": "0.59756225", "text": "def BuildListOfItemIndicesWithPropertyValueDict(item,net,level,args):\n log = Log('BuildListOfItemIndicesWithPropertyValueDict')\n dict = args[0]\n key = args[1]\n valuedict = args[2]\n if len(args) > 3:\n defaultValue = args[3]\n else:\n defaultValue = None\n # get index \n idx = item.GetIdx()\n nodeType = item.GetProperty(key)\n try:\n keyval = valuedict[nodeType]\n dict[idx] = keyval\n except Exception, e:\n if defaultValue != None:\n dict[idx] = defaultValue\n else:\n raise Exception,\\\n 'key value not found for node type \"%s\": %s' % (str(nodeType),str(e))\n return True", "title": "" }, { "docid": "a50998f25a329b3af89cbc1267b6449b", "score": "0.5946126", "text": 
"def __getitem__(self, item):\n\n return self.properties[item]", "title": "" }, { "docid": "047fc8af1178c4e1f7f99054a68dd25f", "score": "0.5840876", "text": "def __getitem__(self, index):\n \n try:\n fget = self.getter\n except AttributeError:\n raise NotImplementedError('no property getter defined')\n \n index = self.moduserindex(index)\n if isinstance(index, self.iterable_indices):\n return [fget(self.modindex(i)) for i in index]\n return fget(self.modindex(index))", "title": "" }, { "docid": "2a58e3fc4504e0be9787fee5b91e451a", "score": "0.57897544", "text": "def create_property(property_info):\n return insert('property', property_info.keys(), property_info.values())", "title": "" }, { "docid": "be89afc500b2779efdfeba9a06e89e5c", "score": "0.5775146", "text": "def __getitem__(self, item: Union[int, str]):\n if item not in self._items.keys():\n shape = Properties._shapes_map.get(self.service_name, {}).get(self.shape_name)\n member = shape[\"member\"][\"shape\"]\n if isinstance(item, str):\n property_item = Properties(self.step_name, f\"{self.path}['{item}']\", member)\n else:\n property_item = Properties(self.step_name, f\"{self.path}[{item}]\", member)\n self._items[item] = property_item\n\n return self._items.get(item)", "title": "" }, { "docid": "a08f134e2d8d6397669b451c80c3f1db", "score": "0.57573456", "text": "def __setitem__(self, index, value):\n try:\n fset = self.setter\n except AttributeError:\n raise NotImplementedError('no property setter defined')\n \n index = self.moduserindex(index)\n if isinstance(index, self.iterable_indices):\n # If the value is a sized collection (and not a str), it must be of\n # the same size as the index in order to correlate it.\n try:\n if isinstance(value, str):\n raise TypeError()\n if len(value) != len(index):\n raise ValueError(\"length mismatch; can't broadcast to index\")\n for i, v in zip(index, value):\n fset(self.modindex(i), v)\n except TypeError:\n # We're here on a non-iterable; broadcast it.\n for i in index:\n fset(self.modindex(i), value)\n else:\n # Non-iterable index; just do the one.\n fset(self.modindex(index), value)", "title": "" }, { "docid": "b819670556c041c4cbb266b22c8a88af", "score": "0.57130283", "text": "def __getitem__(self, index) -> dict:\n raise NotImplementedError", "title": "" }, { "docid": "94b434df8e36bfb04deccdaa83e1f3ff", "score": "0.57105374", "text": "def index_item(self, item: object, index: int) -> None:\n indexes = retrieve_possible_object_indexes(item)\n for var_name, value in indexes.items():\n if var_name in self.index_blacklist:\n continue\n if var_name not in self.index_map:\n # if the first item value is None, create the index without assigning type\n value_type = type(value)\n if value_type is NoneType:\n self.index_map.update({var_name: Index()})\n else:\n self.index_map.update({var_name: Index(type(value))})\n try:\n self.index_map[var_name].add(value, index)\n except TypeError:\n self.index_map.pop(var_name)\n self.index_blacklist.add(var_name)", "title": "" }, { "docid": "33dc8656ab135b297e615e76ea76ddbf", "score": "0.5705661", "text": "def member_property_field_index(self, member_property_field_index):\n\n self.container['member_property_field_index'] = member_property_field_index", "title": "" }, { "docid": "c3bd9792ef06df12799a3c7a4273e37a", "score": "0.55968416", "text": "def __setitem__(self, key, value):\n\n self.properties[key] = value", "title": "" }, { "docid": "d26d4774bd146a27e135c43264af8586", "score": "0.55942667", "text": "def _from_index(cls, index: core.PositionalIndex) -> 
WordIndex:\n # probably a better way to do this, but this just sets an empty data\n # and overrides the value of `self.index`\n cls_item = cls([])\n cls_item.index = index\n return cls_item", "title": "" }, { "docid": "dfa2069c491d844a13c8894f86a0686a", "score": "0.55760324", "text": "def __setitem__(self, index: int, data: typing.Any) -> None:", "title": "" }, { "docid": "9f28007f2afa155f3b28c0c336d96533", "score": "0.5555733", "text": "def __setitem__(self, key, value):\n index, prop = key\n if index not in self._p._data[self.DAT_KEY]:\n self._p._data[self.DAT_KEY][index] = {}\n self._p._data[self.DAT_KEY][index][prop] = value", "title": "" }, { "docid": "9d6f0ac005bbe998400b1a03156bb6c1", "score": "0.5522841", "text": "def __getitem__(self, n):\n if len(self._list_properties) == 1:\n return getattr(self, list(self._list_properties.items())[0][0])[n]\n else:\n raise Exception(\n 'Not simply a list type, __getitem__ not supported'\n )", "title": "" }, { "docid": "522f99c010382d59dcb5124fd137201c", "score": "0.55208164", "text": "def __init__(self):\n self.idx = dict()\n self.value = []", "title": "" }, { "docid": "31fb9b3a14879ed92074414bd71238bd", "score": "0.55151385", "text": "def __getitem__(self, index):\n if isinstance(index, basestring):\n self.__schema.checkProp(index)\n for row in self:\n yield row[index]\n else:\n raise ValueError(\"Bad item spec %r\" % index)", "title": "" }, { "docid": "7c253c6273e7e7ca9562d37411d49e1c", "score": "0.5496389", "text": "def __setitem__(self, index, new_item):\r\n pass", "title": "" }, { "docid": "9c76d5c8e879df81a15ed522c82b4459", "score": "0.549574", "text": "def add_property_id(property_name, property_dict, property_value):\n for i in range(1, 100):\n temp_property = property_name + str(i)\n if temp_property not in property_dict:\n property_dict[temp_property] = property_value\n return property_dict", "title": "" }, { "docid": "b6ae50b3916a6e593abd873eb1cb5f3b", "score": "0.54796356", "text": "def _indexedProps(spec):\n return [prop for prop, propclass in spec.getprops().items()\n if isinstance(propclass, hyperdb.String) and propclass.indexme]", "title": "" }, { "docid": "a2440a122d97e7b62280c2b14fb1a92f", "score": "0.5466197", "text": "def __getitem__(self, index) -> typing.Any:", "title": "" }, { "docid": "af94f313ef684b9445ba3e09c926fc0a", "score": "0.5455197", "text": "def __getitem__(self, index):\n pass", "title": "" }, { "docid": "af94f313ef684b9445ba3e09c926fc0a", "score": "0.5455197", "text": "def __getitem__(self, index):\n pass", "title": "" }, { "docid": "af94f313ef684b9445ba3e09c926fc0a", "score": "0.5455197", "text": "def __getitem__(self, index):\n pass", "title": "" }, { "docid": "a8ddd86317feb1abc64b0801b57e4701", "score": "0.5449253", "text": "def __getitem__(self, key):\n return self.properties[key]", "title": "" }, { "docid": "7e49874a47887e624d61ae665b2d0c11", "score": "0.5445597", "text": "def setindex(object, index, value):\n object[index] = value", "title": "" }, { "docid": "900935613720dd8b5e322300a7c94bbe", "score": "0.543754", "text": "def set_property_item(self):\n return {\n 'house': HouseItem(),\n 'room': RoomItem(),\n 'office': OfficeItem(),\n 'garage': GarageItem(),\n 'land': LandItem(),\n 'commercial': CommercialItem(),\n 'storeroom': StoreRoomItem(),\n 'building': BuildingItem(),\n }.get(self.property_type, 'Not a valid property')", "title": "" }, { "docid": "af23586d8b876d2d4c9038269485c476", "score": "0.53941655", "text": "def __setitem__(self, index, value, _raise=True):\n if not isinstance(index, int):\n 
if _raise:\n index_type = type(index)\n raise TypeError('Index must be int, not {}'.format(index_type))\n return\n\n if index >= len(self):\n raise IndexError(index)\n\n value = self._value_to_graph_object(index, value, _raise=_raise)\n if isinstance(value, (PlotlyDict, PlotlyList)):\n super(PlotlyList, self).__setitem__(index, value)", "title": "" }, { "docid": "d6cb171dd7e17ba115037c8938a8ab82", "score": "0.5388489", "text": "def get_for_indexer(self, value):", "title": "" }, { "docid": "0b76eb46ecb702f5602ca563698593a9", "score": "0.538201", "text": "def __setitem__(key, object):", "title": "" }, { "docid": "c159017cce0e2f1146a1711db257260b", "score": "0.5352744", "text": "def __init__(self, param):\n \n if isinstance(param, str):\n self._index = json.loads(param)\n else:\n self._index = param", "title": "" }, { "docid": "57d9db597ded6bbe82bd3746d72f47f2", "score": "0.53435624", "text": "def vertex_property(G, vproperty: str, idx: int) -> Any:\n return G.vertex_properties[vproperty][idx]", "title": "" }, { "docid": "6b04d57765c46c4e025207d3df508c04", "score": "0.53399456", "text": "def __init__(self, index):\n self.index = int(index)", "title": "" }, { "docid": "8f497abe01bacd8ff552f7ec183ae7cb", "score": "0.53398365", "text": "def crescentindex(index):", "title": "" }, { "docid": "b6a60029cc5f6383e30b374cbe181267", "score": "0.5337533", "text": "def __getitem__(self, x):\n return self.index[x]", "title": "" }, { "docid": "988a050bf885f1e6a7caed2a97137983", "score": "0.5304007", "text": "def __setitem__(self, index, newItem):\n self._items[index] = newItem", "title": "" }, { "docid": "c8002b3babdeec6f2b4c35f52410faa9", "score": "0.52869576", "text": "def build_index(self, data) -> None:", "title": "" }, { "docid": "9ebde3c94ab95fd3ef2909811611d7bf", "score": "0.5278785", "text": "def __setitem__(self, dot_path, value):\n if self.ctor_properties is None:\n raise exception.BananaTypeCheckerBug(\n \"Component type can't have properties\"\n )\n\n if len(dot_path.properties) == 0:\n for arg in self.ctor_properties:\n if arg.param_name == dot_path.varname.inner_val():\n if not can_be_cast_to(value, arg.param_type):\n raise exception.BananaArgumentTypeError(\n expected_type=arg.param_type,\n received_type=value,\n where=dot_path.span\n )\n else:\n return\n else:\n for arg in self.ctor_properties:\n if arg.param_name == dot_path.varname.inner_val():\n if isinstance(arg.param_type, Any):\n return\n elif isinstance(arg.param_type, Object):\n next_dot_path = dot_path.next_dot_path()\n sub_arg_type = arg.param_type[next_dot_path]\n if not can_be_cast_to(value, sub_arg_type):\n raise exception.BananaArgumentTypeError(\n expected_type=sub_arg_type,\n received_type=value,\n where=next_dot_path.span\n )\n else:\n return\n else:\n raise exception.BananaPropertyDoesNotExists(\n dot_path.next_dot_path(),\n arg.param_type\n )\n\n raise exception.BananaPropertyDoesNotExists(dot_path, on_type=self)", "title": "" }, { "docid": "2cc5443214319839f26e11b27d8867cf", "score": "0.5274287", "text": "def test_multi_type_indexer():\n ob = Test.MultiTypeIndexerTest()\n spam = Test.Spam(\"spam\")\n\n ob[0, \"one\", spam] = \"zero one spam\"\n assert ob[0, \"one\", spam] == \"zero one spam\"\n\n ob[1, \"nine\", spam] = \"one nine spam\"\n assert ob[1, \"nine\", spam] == \"one nine spam\"\n\n with pytest.raises(TypeError):\n ob = Test.MultiTypeIndexerTest()\n _ = ob[0, 1, spam]\n\n with pytest.raises(TypeError):\n ob = Test.MultiTypeIndexerTest()\n ob[0, 1, spam] = \"wrong\"", "title": "" }, { "docid": 
"b8602e28e7f983b5041f552935aea2eb", "score": "0.526272", "text": "def __setitem__(name, object):", "title": "" }, { "docid": "b8602e28e7f983b5041f552935aea2eb", "score": "0.526272", "text": "def __setitem__(name, object):", "title": "" }, { "docid": "1201a5180a24bee9fe63bb4dea8c6e69", "score": "0.526234", "text": "def item(self, *args) -> \"adsk::core::Ptr< adsk::core::Property >\" :\n return _core.Properties_item(self, *args)", "title": "" }, { "docid": "63ec0cb50da0bd625bb13146a5ddea93", "score": "0.5262104", "text": "def __setitem__(self, index, value):\n key = self.keys[index]\n self.nml[key] = value", "title": "" }, { "docid": "2dab6038d5bbb8a1612d8a0deaf2a59a", "score": "0.5260968", "text": "def __getitem__(self, index):\n raise NotImplementedError", "title": "" }, { "docid": "24858aa5771eeaf230550df26655fc47", "score": "0.526041", "text": "def __init__(self):\n self.nums = []\n self.index_dict = {}", "title": "" }, { "docid": "cc5324dea8823cc0c31c6a81fe7a15eb", "score": "0.5259636", "text": "def __init__(self, *args, **kwargs):\n # Set up an index dictionary where we store the position each key was inserted\n self.indices = {}\n # Set up lazy but indexable key/value data\n self._keys = IndexableLazyIterable()\n self._values = IndexableLazyIterable()\n super().__init__(*args, **kwargs)", "title": "" }, { "docid": "4715ee5df0e304aed0022b2866ed2e2a", "score": "0.52590847", "text": "def __getitem__(self, index) -> object:\n return self.get_at_index(index)", "title": "" }, { "docid": "f828012d6ca5170c80e5cd3e3610063b", "score": "0.5250207", "text": "def __init__(self):\n self.mapping = {} # key is item, value is index in items list\n self.items = [] # list of items", "title": "" }, { "docid": "02cd4bf9ea709f08ff9e2f3fb8aee53e", "score": "0.52475625", "text": "def __setitem__(self, key, obj):\n return setattr(self, key, obj)", "title": "" }, { "docid": "eab887eb0486a7061ed0513f2b6a3f0d", "score": "0.5238907", "text": "def add_properties(self, key, property_vector):\n if self.num_structures != len(property_vector):\n raise AttributeError(\n \"Length of property_vector must match number of structures\"\n f\" {len(property_vector)} != {self.num_structures}.\"\n )\n\n for prop, entry in zip(property_vector, self._entries):\n entry.data[\"properties\"][key] = prop", "title": "" }, { "docid": "6749b08dfc4a6f2076a8fed93e717acb", "score": "0.5236672", "text": "def __init__(self, key=None, index=0):\n self.key = key\n self.index = 0\n self.items = []", "title": "" }, { "docid": "2e4420be93a0d103c335fb7e0ffaf9d0", "score": "0.5226456", "text": "def set_as_object(self, index: int = None, value: Any = None):\n if index is None and not (value is None):\n self.set_as_array(value)\n else:\n self[index] = value", "title": "" }, { "docid": "096658232893ca706a240082ea0e6c67", "score": "0.52223235", "text": "def __setitem__(self, index, value):\n self.attribute_values[index] = value", "title": "" }, { "docid": "a590e4429a0035797fa3318dcf561eec", "score": "0.5217201", "text": "def __getitem__(self, item):", "title": "" }, { "docid": "8a17c7d40ee5c42ebb4e8f77264ec544", "score": "0.5198154", "text": "def __init__(self):\n self.key_index_map = dict()\n self.index_key_map = dict()\n self.index = 0", "title": "" }, { "docid": "1d9a6ddae8668c2fa6a2da8dbca0ff02", "score": "0.5194165", "text": "def __getitem__(self, index):\n return self._mapping._list[index]", "title": "" }, { "docid": "f5051c0485e5b31197b2f39894ed40da", "score": "0.518643", "text": "def __setitem__(self, index: Union[int, Int], value: T) -> 
None:\r\n        self._validate_index_type_is_int(index=index)\r\n        index_: int = self._get_builtin_int_from_index(index=index)\r\n        self._value[index_] = value\r\n        self._append_setitem_expression(index=index, value=value)", "title": "" }, { "docid": "895876bc1804cab3d21c59cc16cbd9e3", "score": "0.5185782", "text": "def __setitem__(self,ind,item):\n        self.Values[ind] = item", "title": "" }, { "docid": "0d85a08b78768345285c7ea1c7bd977b", "score": "0.51800245", "text": "def test_setitem(self, index):\n        builtin = [0, 1, 2, {\"a\": 1}]\n        ds = DatasetList(builtin)\n\n        ds[index] = \"test\"\n        builtin[index] = \"test\"\n\n        assert ds == builtin\n        assert len(ds) == len(builtin)", "title": "" }, { "docid": "f1ec8ed723936864d53d1ecf8294f8eb", "score": "0.5174047", "text": "def __getitem__(self, idx):", "title": "" }, { "docid": "e32c4a2aa8417a3abd5ce8f9a249f039", "score": "0.5169639", "text": "def __getitem__(self, idx):\n        pass", "title": "" }, { "docid": "2074262f4e523d57d8b40b46a3243c92", "score": "0.51582843", "text": "def _setIndexed(self, value):\n        if value:\n            self._indexed = True\n        else:\n            self._indexed = False", "title": "" }, { "docid": "d6990285e81a99661425fc43fcda3d2b", "score": "0.5147966", "text": "def __setitem__(self, index, value):\n\n        self._dict_content[index] = value", "title": "" }, { "docid": "94d46aa7ddd572a8d9f2ee504da4111d", "score": "0.5144396", "text": "def create_property():\n\n    return {\n        'Category': None,\n        'Name': None,\n        'Units': 'Unitless',\n        'Description': None\n    }", "title": "" }, { "docid": "2d614ef2a3b649cc586750f3f22741dd", "score": "0.51280236", "text": "def __init__(self):\n        self.values = []\n        self.index = {}", "title": "" },
{ "docid": "87ade92f7f21c7bb164f406d19097906", "score": "0.51105964", "text": "def __index__(self, *args, **kwargs): # real signature unknown\n        pass", "title": "" }, { "docid": "e28e180293d720a57da6662ccbf7f3b4", "score": "0.51046556", "text": "def insertListDataProperty(self, props, ref, tag, attr, max, displayName):\n        xobj = Xref.get(ref)\n        curstart = xobj.datasize # we re-use the size property as the start property\n        self.insertUintProperty(props, 'start ' + attr, curstart, max=max - 1)\n\n        props.Append(wx.propgrid.PropertyCategory('%s data from %s %d'%(displayName, attr, curstart)))\n\n        # collect the data\n        dataMap = {}\n        for child in xobj.elem:\n            if tag == etree.QName(child).localname:\n                dataMap[child.get(attr, \"?\")] = child.text\n        \n        # display up to 10 data for editing\n        for i in range(curstart, min(curstart + 10, max)):\n            name = \"%s %d\"%(attr, i)\n            value = dataMap.get(str(i), 0)\n            if tag == bG.suint8:\n                self.insertUintProperty(props, name, value, max=255)\n            elif tag == bG.sint16:\n                self.insertIntProperty(props, name, value, min=-0x80000, max=0x7FFF)\n            elif tag == bG.sfrequency: # uint32\n                self.insertUintProperty(props, name, value, max=0xFFFFFFFF)", "title": "" }, { "docid": "651ef05895ea511a109fd7aa08f46afc", "score": "0.50973976", "text": "def __init__(self, index):\n        self.index = index\n        # We iterate on this list while appending elements to it\n        self.item_list = []\n        self.item_set = set()\n\n        return", "title": "" }, { "docid": "5a1f35b6a466dc1869854e9b56fb6f56", "score": "0.5083721", "text": "def __init__(self, data=None): \r\n        if 
data: \r\n for key, value in enumerate(data): \r\n self[key] = value", "title": "" }, { "docid": "b774585dca9c00ae119b8d6583d4d08e", "score": "0.50763243", "text": "def __getitem__(self, item):\n if isinstance(item, int):\n return self.entries[item]\n elif isinstance(item, str):\n return self.entries[self.path_index[item]]\n else:\n raise TypeError", "title": "" }, { "docid": "4b8373813f8affb612d5a0d1d31d6f89", "score": "0.5069966", "text": "def put(self, index: int, value: Any):\n self[index] = value", "title": "" }, { "docid": "ba0e9808dc05fc0df84d835a476d00d2", "score": "0.50682294", "text": "def _assign_getitem(self, inst, index):\n new_expr = Expr.getitem(inst.value.value, index, inst.loc)\n new_inst = Assign(value=new_expr, target=inst.target, loc=inst.loc)\n\n self.func_ir._definitions[inst.target] = [new_expr]\n self.calltypes[new_expr] = signature(\n self.typemap[inst.target.name],\n self.typemap[new_expr.value.name],\n self.typemap[new_expr.index.name]\n )\n\n return new_inst", "title": "" }, { "docid": "0ecd92bd9b7d79d742bf4ba02a58e1c1", "score": "0.506673", "text": "def set(self, index, value):", "title": "" } ]
7546c3e37e518b99914041ff80f2a2e3
Return the field value in the job data identified by jobid. If jobid does not identify a job, the default value is returned. If the named field does not exist, an AttributeError exception is raised.
[ { "docid": "256ec9577e03e7f7ece7d395387949d5", "score": "0.8610517", "text": "def getfield(self, jobid, name, default=None):\n if name not in Fields:\n raise AttributeError(\"Job record has no attribute '%s'\" % name)\n key = _key(jobid)\n if not self.__client.exists(key):\n return default\n raw = self.__client.hget(key, name)\n if raw is None:\n return default\n return Fields[name].loads(raw)", "title": "" } ]
[ { "docid": "69bf28909f7acc163c363d958c69d13c", "score": "0.6560788", "text": "def _get_field_data_value(self, field_id, key_name):\n d = self._get_field_data_dict(field_id)\n return d.get(key_name, None)", "title": "" }, { "docid": "9ab5e206528d04f8a1e7e2c834bb5f4d", "score": "0.65405697", "text": "def get(self, jobid, default=None):\n key = _key(jobid)\n if not self.__client.exists(key):\n return default\n item = self.__client.hgetall(key)\n return {k: Fields[k].loads(v) for k, v in item.iteritems()}", "title": "" }, { "docid": "4b8ad7a9adca3a2e9e7d286bd6560f68", "score": "0.6382924", "text": "def get(self, field_id):\n return self.field_id", "title": "" }, { "docid": "ad87a61333e4dee0514907813dbf8a7c", "score": "0.61702687", "text": "def get_field_value(data, field) -> Any:\n try:\n return _get_field_value(data, field)\n except FieldValueNotFoundError:\n return None", "title": "" }, { "docid": "d90061f56596b4145356a55e9bc3970c", "score": "0.6109488", "text": "def get_field(self, field):\n if not self.has_field(field):\n raise ValueError('field ' + str(field) + ' does not exist')\n return self.data[field]", "title": "" }, { "docid": "109cb35e6ffe9c0c9e6498a3f54ee942", "score": "0.608097", "text": "def get(self, field):\r\n field_name = getattr(field, 'name', field)\r\n return self[field_name].value", "title": "" }, { "docid": "1835339430b6a13631d44017551588a9", "score": "0.6039868", "text": "def _get_field_value(row, field_name):\n if field_name not in row:\n return MISSED\n value = row[field_name]\n if not value:\n return None\n if field_name in INTEGER_FIELDS:\n return int(value)\n return value", "title": "" }, { "docid": "59d5adb1aa6c75604dae60d5de8c8c21", "score": "0.60119843", "text": "def get_field(self, key, field):\n return self.__data[key][field]", "title": "" }, { "docid": "5025f9b9bbcd05a0d20f989d62d1136f", "score": "0.5990031", "text": "def getField(fieldname):\n return data[fieldname]", "title": "" }, { "docid": "d39155004b4a5bdc23921742b863345f", "score": "0.59368664", "text": "def get_job(self, job_id):\n self.not_supported()", "title": "" }, { "docid": "2b62e7c4ec58467307ef2d33661165e1", "score": "0.59266347", "text": "def data_field_name_get(self, field_id, *unused):\n for field_name_, data_ in list(self.data_dict.items()):\n if data_.id == field_id:\n return data_.name\n raise KeyError(\"Field %d not found in learn obj %s\"%(field_id, self.name_get()))", "title": "" }, { "docid": "435b906af3a2e85ecdad53f0ee4c38d0", "score": "0.5923209", "text": "def get(self, job_id, strict=True):\n if self.get_job_status(job_id, strict=strict):\n key = '{}/{}'.format(job_id, self.name)\n return self._cache[key]", "title": "" }, { "docid": "f83f8ad021c170bf1f7f6c269118f5d8", "score": "0.5858183", "text": "def get(self, job_id):\n if job_id not in self._jobs:\n raise JobNotFound(job_id)\n return self._jobs[job_id]", "title": "" }, { "docid": "5176c590acd1b7e319a884d28b38ad74", "score": "0.58578396", "text": "def GetValue(self, field, default=None):\n if self.HasValidSummary():\n return self.task_summary_json.get('shards')[0].get(field)\n return default", "title": "" }, { "docid": "d2640d5bd7274abdd5a5687d6c6395a5", "score": "0.5854203", "text": "def get(self, record, field, default=NOTHING):\n try:\n field_cache = self._data[field]\n if field.depends_context:\n field_cache = field_cache[record.env.cache_key(field)]\n return field_cache[record._ids[0]]\n except KeyError:\n if default is NOTHING:\n raise CacheMiss(record, field)\n return default", "title": "" }, { "docid": 
"09c310b94c3e7fe95e5d9b35e2b93937", "score": "0.581818", "text": "def get_field_value(data, field, default=None):\n if not data:\n return default\n\n try:\n if '.' in field:\n current, rest = field.split('.', 1)\n if isinstance(data, list) and current.isdigit():\n return get_field_value(data[int(current)], rest, default)\n if current in data:\n return get_field_value(data[current], rest, default)\n\n return data.get(field, default)\n except:\n LOGGER.exception(\"failed to get_field_value()\")\n return None", "title": "" }, { "docid": "a15810267b3c4fbcdb37af949098f594", "score": "0.57955545", "text": "def get_field(self, field_name: str) -> Any:\n fields = self.get_fields(field_name)\n if fields is not None:\n if len(cast(PropertyValues, fields)) != 0:\n return fields[0]\n return None", "title": "" }, { "docid": "751f628eb92a6f514798bb5d911132f9", "score": "0.5784027", "text": "def get_job(self, job_id):\n return self.session.get(self.job_url + job_id)", "title": "" }, { "docid": "2aaf481cbce3c1287f74e386d33c629f", "score": "0.5748026", "text": "def get(self, name):\n field = self.getField(name)\n if field is not None:\n return field.stringValue()\n else:\n return None", "title": "" }, { "docid": "6248a0386b52478fd0d4f9c89c5a9893", "score": "0.57389915", "text": "def getValue(self, name):\n\t\tfor field in self.dataFields:\n\t\t\tif field.name == name:\n\t\t\t\treturn field.value\n\n\t\treturn None", "title": "" }, { "docid": "d0440f2ac6023911632aafd97544ef2c", "score": "0.57366264", "text": "def getValueOf(self, id_):\n return self.descendent(id_).value", "title": "" }, { "docid": "476b18c2d3a396c50321f38d1c9855e3", "score": "0.5736383", "text": "def getField(self, field):\r\n return self.record[field]", "title": "" }, { "docid": "68db7c557585c7a53003c020858764e1", "score": "0.57351136", "text": "def get_job(self, job_id: str) -> dict:\n full_name = _JOB_NAME_PATTERN.format(parent=self._parent, job_id=job_id)\n return self._api_client.projects().locations().pipelineJobs().get(\n name=full_name).execute()", "title": "" }, { "docid": "683378aa0b780c89941589549901bfa4", "score": "0.5732435", "text": "def job_fetch(self, id):\n job = False, None\n try:\n job = True, Job.fetch(id, connection=rq_instance.connection)\n except NoSuchJobError as e:\n job = False, str(e)\n except Exception as e:\n job = False, str(e)\n return job", "title": "" }, { "docid": "0dd0459df5aa77fb787d6658eb15ec51", "score": "0.56980526", "text": "def get_field(entry, field):\n return entry.fields.get(field, \"\")", "title": "" }, { "docid": "06682cd252edd845929a7bd7c1e299c1", "score": "0.5695814", "text": "def get_job_by_id(self, job_id):\n # FIXME. 
Make this an internal method It's ambiguous which job type\n # type you're asking for\n return _process_rget_with_job_transform_or_none(_to_url(\n self.uri, \"{r}/{i}\".format(i=job_id, r=ServiceAccessLayer.ROOT_JOBS)), headers=self._get_headers())", "title": "" }, { "docid": "76937dfd1f8506827deb9ac6166b8b9b", "score": "0.5680496", "text": "def getField(self,fieldName):\n return self.fields[fieldName]", "title": "" }, { "docid": "10348ac35ef6ee999384d9aabc97a25a", "score": "0.5669427", "text": "def getField(self, name):\n return self._fields.get(name, None)", "title": "" }, { "docid": "2166a0acd210bf99e405e44d3bb6b256", "score": "0.5666846", "text": "def lookup_field(self, match: dict, field_name: str, default):\n field_value = lookup_es_key(match, field_name)\n if field_value is None:\n field_value = self.rule.get(field_name, default)\n\n return field_value", "title": "" }, { "docid": "12e8c86dcfa18583ead45938312640d9", "score": "0.56639194", "text": "def find_job_id(*args, **kwargs):\n if len(args) > 0:\n return args[0]\n else:\n return kwargs.get(\"job_id\", -1)", "title": "" }, { "docid": "fb75b1c0b41adc3b8b8bd7642f329958", "score": "0.5635873", "text": "def get_field_id(self, filed_name: str) -> Optional[str]:\n try:\n json_string = self.__url_opener.url_read(self.__url + 'rest/api/2/field')\n except url_opener.UrlOpener.url_open_exceptions:\n return None\n for field in utils.eval_json(json_string):\n if field['name'] == filed_name:\n return field['id']\n logging.error(\"Error retrieving id for the field with name %s.\", filed_name)\n return None", "title": "" }, { "docid": "c6dee92f81ae4f2deb623a734aa51b12", "score": "0.562156", "text": "def getJob(self, ctx, jobID):\n pass", "title": "" }, { "docid": "4ded963db277aca34ba44ec1e392b735", "score": "0.5603905", "text": "def get_job(self, job):\n return self._get(_job.Job, job)", "title": "" }, { "docid": "0a2ec6d5ba431dc4347249a9359d7163", "score": "0.56019706", "text": "def get_field_from_data_or_instance(cls, field, data, instance, default=None):\n return data.get(field, getattr(instance, field, default))", "title": "" }, { "docid": "a9e4d76a80bedbdae492291e0e044982", "score": "0.55758405", "text": "def data_field_name_get(self, field_id, action_name=None):\n def _data_field_name_get_helper(dict_to_search, field_id, ret_list):\n if dict_to_search is None:\n return False\n for field_name_, data_ in list(dict_to_search.items()):\n if data_.id == field_id:\n ret_list.append(data_.name)\n return True\n found = False\n for field_name_, field_ in list(dict_to_search.items()):\n found |= _data_field_name_get_helper(field_.container_dict, field_id, ret_list)\n return found\n\n ret_list = []\n if action_name is not None:\n found = _data_field_name_get_helper(\n self.action_dict[self.action_dict_allname[action_name]].data_dict,\n field_id, ret_list)\n if found:\n return ret_list[0]\n found = _data_field_name_get_helper(self.data_dict, field_id, ret_list)\n if found:\n return ret_list[0]\n # Error 404\n if (action_name):\n raise KeyError(\"Failed to find field %d for action %s in table %s\"\n % (field_id, action_name, self.name))\n else:\n raise KeyError(\"Failed to find field %d in table %s\"\n % (field_id, self.name))", "title": "" }, { "docid": "bf69666b4f38514abc75f21432b4f1fb", "score": "0.5569437", "text": "def get_job_object(self, id=None, name=None):\n if id == None and name == None:\n return dumps({\"error\": \"Job ID is not defined\"})\n \n jobs = job.get_jobs(id, name)\n return jobs[0]", "title": "" }, { "docid": 
"95109e8488d8a25a9a472aaf84984863", "score": "0.55692536", "text": "def get_job(self, jq_id):\n return self.job_queue.get_job_instance(jq_id)", "title": "" }, { "docid": "9ab1dc17309750f9bf59fdded791d4b7", "score": "0.5553132", "text": "def get_value_for_job(data, target_job_type):\n # All data is in a single line, just return that.\n if ';' not in data:\n return data\n\n result = ''\n for line in data.splitlines():\n job_type, value = (line.strip()).split(';')\n if job_type == target_job_type or (job_type == 'default' and not result):\n result = value\n\n return result", "title": "" }, { "docid": "20520552a37fa79eebb0a7fe62d9a939", "score": "0.55300856", "text": "def GetValue(self, identifier, default_value=None):\n return self._value_dict.get(identifier, default_value)", "title": "" }, { "docid": "14ca052064a639620f1a15bf499e7d30", "score": "0.551852", "text": "def get_field_name(self, field_id):\n return self.field_id_to_name_dict.get(field_id, None)", "title": "" }, { "docid": "2fc23343bc4253f3f0951eda419bb9c6", "score": "0.55110323", "text": "def _get_field_data_dict(self, field_id):\n sf = self._sf[self._sf['Field of study ID'] == field_id]\n if len(sf) == 0:\n return None\n return sf[0]", "title": "" }, { "docid": "eb4d3bcba3dbc17705fa8e9e7960ebe5", "score": "0.5492504", "text": "def get_value(self, key: AbstractField) -> Any:\n return self._params[key] if key in self._params else None", "title": "" }, { "docid": "fa50d8ae81fa50712398d2dec1785698", "score": "0.5481109", "text": "def get_field(self, key):\n for field in self.fields():\n if field.name() == key:\n return field", "title": "" }, { "docid": "184775e87c6303515c464126ae12cf8b", "score": "0.5480014", "text": "def get_field(self, idx):\n return self._fieldvalues[idx]", "title": "" }, { "docid": "70ae6a0a512963525e4f2ef885abe27b", "score": "0.54702836", "text": "def __getitem__(self, jobid):\n key = _key(jobid)\n if not self.__client.exists(key):\n raise KeyError(\"Job not found: %s\" % jobid)\n item = self.__client.hgetall(key)\n return {k: Fields[k].loads(v) for k, v in item.iteritems()}", "title": "" }, { "docid": "504c32cc89d0613247aee5b4eb6f6ef2", "score": "0.54698247", "text": "def get_details(job_id):\n c = get_configuration(id=job_id)\n if not c:\n logger.error(\"Failed to find job congifuration for job details. 
id: '{}'.\".format(job_id))\n return None\n\n job_cron_string = \"{} {} {} {} {}\".format(c.month, c.day, c.day_of_week, c.hour, c.minute)\n return JobDetails(c.status, c.class_name, c.id, c.name, c.description, c.resource_config_filename, c.translation_config_filename, job_cron_string)", "title": "" }, { "docid": "c0cbe0da46a9fb0ca06645ff05522818", "score": "0.54669106", "text": "def update(self, jobid, **fields):\n badfields = fields.viewkeys() - Fields.viewkeys()\n if badfields:\n raise AttributeError(\n \"Job record has no attribute%s %s\"\n % (\n \"\" if len(badfields) == 1 else \"s\",\n \", \".join(\"'%s'\" % name for name in badfields),\n ),\n )\n key = _key(jobid)\n if not self.__client.exists(key):\n raise KeyError(\"Job not found: %s\" % jobid)\n deleted_fields = [k for k, v in fields.items() if v is None]\n if deleted_fields:\n self.__client.hdel(key, *deleted_fields)\n fields = {\n k: Fields[k].dumps(v) for k, v in fields.items() if v is not None\n }\n if fields:\n self.__client.hmset(key, fields)\n self.__expire_key_if_status_is_ready(key)", "title": "" }, { "docid": "d73d511ccef94799d1f6e5b894281ba3", "score": "0.5457875", "text": "def getProperty(self, id, default=None):\n return getattr(self, id, default)", "title": "" }, { "docid": "e56b600c275812f94643197da310a2e3", "score": "0.54506665", "text": "def getJobInput(self, jobId):\n return EDJob.getDataInputFromId(jobId)", "title": "" }, { "docid": "3c0bfbad308f029b83033c6426de455b", "score": "0.5428267", "text": "def get_model_field_value(model_instance, field_str):\n return getattr(model_instance, field_str)", "title": "" }, { "docid": "f02934589200c5c11f8112e90682a95d", "score": "0.54194504", "text": "def get(self, job_id, query_id=None):\n job = Job.fetch(job_id)\n return serialize_job(job)", "title": "" }, { "docid": "b815fdbb6da97b20bfe622d199d6a57f", "score": "0.54106903", "text": "def serializable_value(self, field_name):\r\n try:\r\n field = self._meta.get_field_by_name(field_name)[0]\r\n except FieldDoesNotExist:\r\n return getattr(self, field_name)\r\n return getattr(self, field.attname)", "title": "" }, { "docid": "b815fdbb6da97b20bfe622d199d6a57f", "score": "0.54106903", "text": "def serializable_value(self, field_name):\r\n try:\r\n field = self._meta.get_field_by_name(field_name)[0]\r\n except FieldDoesNotExist:\r\n return getattr(self, field_name)\r\n return getattr(self, field.attname)", "title": "" }, { "docid": "452afdf879a57cfe9a4a3a1b143d033a", "score": "0.53896767", "text": "def field(self, name):\n try:\n return self.fields[name.title()]\n except KeyError:\n return ''", "title": "" }, { "docid": "dd9bff18411db4b8ba1044d10c552d18", "score": "0.53895646", "text": "def _get_field(group_dict, field):\r\n value = group_dict[field]\r\n if field == 'kernel':\r\n return KernelString(value)\r\n if value is None: # handle null dates as later than everything else\r\n if field.startswith('DATE('):\r\n return rpc_utils.NULL_DATE\r\n if field.endswith('_time'):\r\n return rpc_utils.NULL_DATETIME\r\n return value", "title": "" }, { "docid": "7938831638e23f07d40c639cb9615c3d", "score": "0.53774536", "text": "def get_field(cls, field_name):\n try:\n field = cls._meta.get_field_by_name(field_name)\n return field[0]\n except FieldDoesNotExist:\n return None", "title": "" }, { "docid": "b11f0dce1bb22cae7172d9d05107016a", "score": "0.53687364", "text": "def get_simple_value(response, field_id):\n for entry in response:\n if entry['field_id'] == field_id:\n return entry['value'], entry['id']\n return '', ''", "title": "" }, 
{ "docid": "1951afbcd2cf4445f94dd6ea7b28db56", "score": "0.5367949", "text": "def get_result(self, job_uuid):\n with self.jobs_look:\n value = self.jobs[job_uuid].get()\n return value", "title": "" }, { "docid": "ee47f42ec35f6d2cf7e2e64742048e53", "score": "0.5356755", "text": "def findField(field_id, item=None):\n \n if item is None:\n # This applies to any root item\n for i in p6.api.getApp().items:\n if p6.storage.interfaces.IWork in \\\n zope.interface.implementedBy(i.__class__):\n result = p6.metadata.interfaces.IMetadataStorage(i).getMetaValue(\n field_id)\n \n else:\n result = p6.metadata.interfaces.IMetadataStorage(item).getMetaValue(field_id)\n\n return result", "title": "" }, { "docid": "ffa9af7eacdcd70812cea8e870ba3002", "score": "0.5348314", "text": "def _get_field(self, field, schema):\n if field not in schema._declared_fields:\n raise FieldError(\n 'Invalid field specified: {}.'.format(self.value))\n return schema._declared_fields[field]", "title": "" }, { "docid": "5b9078697c7a07e2b6174aac05e6dacf", "score": "0.53425574", "text": "def get(cls, id_: Union[int, str], short_version: bool = False) -> \"Field\":\n fields = cls.list_all(short_version=short_version)\n for field in fields:\n if field.id_ == id_:\n return field\n raise APIException(f\"Can't find field with ID {id_}\")", "title": "" }, { "docid": "1da4f25675c4c118dba89d0ccec21c70", "score": "0.5330967", "text": "def get_value(self, field_name):\n if field_name in self.fields:\n return self._dict['attributes'][field_name]\n elif field_name.upper() in ['SHAPE', 'SHAPE@', \"GEOMETRY\"]:\n return self._dict['geometry']\n return None", "title": "" }, { "docid": "479a132bfeb9d5016e7af366344dc6f1", "score": "0.5320402", "text": "def retrieve_form_field(self, field_id):\n return self.start().uri('/api/form/field') \\\n .url_segment(field_id) \\\n .get() \\\n .go()", "title": "" }, { "docid": "1f57acee69b99bc21983110cdccd53b1", "score": "0.53164184", "text": "def _get_field_if_exists(name: str, field: str) -> str:\n reg_data = _get_registry_data()\n if name not in reg_data.keys():\n raise ValueError(f\"Project '{name}' not found in registry.\")\n contents = \"(None)\"\n if field in reg_data[name].keys():\n contents = reg_data[name][field]\n if len(contents) == 0:\n contents = \"(None)\"\n return contents", "title": "" }, { "docid": "46a47270ab041034c691dc1f8e7c10dd", "score": "0.531415", "text": "def get_job_by_id(self, job_id: str) -> SparkJob:\n with self.lock:\n return self.job_by_id[job_id]", "title": "" }, { "docid": "616c9c1846a2c361de03425bff2105f0", "score": "0.5308739", "text": "def get_value(self, instance):\n return getattr(instance, self.get_field_name(instance))", "title": "" }, { "docid": "7744cb7e1b37d30e42fb8d2735716228", "score": "0.5304506", "text": "def __getattr__(self, name: str) -> Any:\n if name in self.field_metadata:\n return self.field_metadata.get(name)", "title": "" }, { "docid": "63eae92773d7f5d57ae8665162cd3ebf", "score": "0.53040594", "text": "def get(self, job_id):\n job = database.get_manual_job(job_id)\n if job is None:\n flask.abort(404)\n if \"get-status\" in request.args:\n if app.debug:\n print(\n f\"Getting manual constellation job status{job_id}...\",\n file=sys.stderr,\n )\n job_details = database.export_job_details(job)\n if job_details is None:\n flask.abort(404)\n if app.debug:\n print(f\"Got job details for job {job_id}.\", file=sys.stderr)\n return job_details, 200\n elif \"get-results\" in request.args:\n if app.debug:\n print(\n f\"Getting manual constellation job results 
{job_id}...\",\n file=sys.stderr,\n )\n exportable_job = database.export_job(job)\n if exportable_job is None:\n flask.abort(404)\n elif exportable_job[\"status\"] != database.JobStatus.COMPLETED.value:\n flask.abort(404)\n if app.debug:\n print(f\"Got job results for job {job_id}.\", file=sys.stderr)\n return exportable_job, 200\n return \"No arguments supplied.\", 400", "title": "" }, { "docid": "1e6bee6d49525b318f95ab6b8b405a30", "score": "0.5303035", "text": "def getJob(self, ctx, jobID):\n self.send_getJob(ctx, jobID)\n return self.recv_getJob()", "title": "" }, { "docid": "f1ebf4d8ad946096f24469725b261d07", "score": "0.5302303", "text": "def getSubfieldValue(self, values, idx, subfield, default=None):\n try:\n return values[idx].get(subfield, default)\n except IndexError:\n return default", "title": "" }, { "docid": "274a52904b86017f7c2cabbd749d7e1d", "score": "0.53003174", "text": "def get_field_value(self, attribute):\n attribute_variable = \"{}_keyname\".format(attribute)\n try:\n attribute_value = getattr(self, attribute_variable)\n return self.event_json.get(attribute_value, '')\n except AttributeError:\n attribute_value = getattr(self, attribute)\n return attribute_value", "title": "" }, { "docid": "6823d98a549236f1ee44e1531e11f1b9", "score": "0.52991307", "text": "def get(self, job_id):\n job = database.get_auto_job(job_id)\n if job is None:\n flask.abort(404)\n if \"get-status\" in request.args:\n if app.debug:\n print(\n f\"Getting auto constellation job status{job_id}...\", file=sys.stderr\n )\n job_details = database.export_job_details(job)\n if job_details is None:\n flask.abort(404)\n if app.debug:\n print(f\"Got job details for job {job_id}.\", file=sys.stderr)\n return job_details, 200\n elif \"get-results\" in request.args:\n if app.debug:\n print(\n f\"Getting auto constellation job results {job_id}...\",\n file=sys.stderr,\n )\n exportable_job = database.export_job(job)\n if exportable_job is None:\n flask.abort(404)\n elif exportable_job[\"status\"] != database.JobStatus.COMPLETED.value:\n flask.abort(404)\n if app.debug:\n print(f\"Got job results for job {job_id}.\", file=sys.stderr)\n return exportable_job, 200\n return \"No arguments supplied.\", 400", "title": "" }, { "docid": "01f87005c155179d7189d8273f4ee57e", "score": "0.52982914", "text": "def get_field_data(self, field_name, record_type=\"\"):\n return self.get_fields_data([field_name], record_type=record_type)[0]", "title": "" }, { "docid": "a2bbd775436e83a3b60ff3ab35496d38", "score": "0.52921826", "text": "def _GetField(site, field):\n try:\n return getattr(site, field)\n except AttributeError:\n logging.warn('site %s is missing attribute %s' % (site.key().id(), field))\n return None", "title": "" }, { "docid": "0c3acc530b3f6f018ccc6d4f2d868ad8", "score": "0.5291736", "text": "def retrieve_job(self, job_id):\n logging.info(\"Retrieving results for job %s\", job_id)\n with self._session() as session:\n r = session.get(self.jobs_id.format(self.uri, str(job_id)))\n if r.status_code != 200:\n logging.error(\"failed to retrieve job because: %s\", r.text)\n r.raise_for_status()\n job = r.json()\n return job", "title": "" }, { "docid": "4e3b0149919b3ffcc1f0e03e30332fd5", "score": "0.5283221", "text": "def field_id(self, key):\n\n if isinstance(key, basestring):\n try:\n id = self.fields_by_name[key]\n except KeyError:\n sys.exit(\"Error: field name '%s' does not exist\" % key)\n return id\n elif isinstance(key, int):\n try:\n id = self.fields_by_column_number[key]\n except KeyError:\n sys.exit(\"Error: field 
column number '%s' does not exist\" %\n key)\n return id", "title": "" }, { "docid": "afa659d6140b4ee94f2502c2e6967379", "score": "0.52775043", "text": "def getvalue(self, key, default=None):\n\n items = self.__fields.get(key, [])\n if not items:\n return default\n if len(items) == 1:\n return items[0].value\n return [item.value for item in items]", "title": "" }, { "docid": "3bc79755eadd805065b83931c55cfa37", "score": "0.52733624", "text": "def data_field_id_get(self, field_name, *unused):\n return self._data_field_metadata_get(\"id\", field_name)", "title": "" }, { "docid": "3ea3725a41fdee8b05de5b215d271fc6", "score": "0.5267372", "text": "def get_field(field, read_str, default):\n\n if field in read_str:\n field_start = read_str.index(field)+len(field)\n rem = read_str[field_start:]\n if \",\" in rem:\n field_end = rem.index(\",\")+field_start\n else:\n field_end = rem.index(\"]\")+field_start\n value = read_str[field_start:field_end]\n else:\n value = default\n\n return value", "title": "" }, { "docid": "7bebc0675a7063111e5dbc6f84b09d96", "score": "0.5240296", "text": "def try_parse_field(field_name, value, parser_dict):\n parser = parser_dict.get(field_name) # None if no such entry\n if parser is not None:\n return try_or_none(parser)(value)\n else:\n return value", "title": "" }, { "docid": "7bebc0675a7063111e5dbc6f84b09d96", "score": "0.5240296", "text": "def try_parse_field(field_name, value, parser_dict):\n parser = parser_dict.get(field_name) # None if no such entry\n if parser is not None:\n return try_or_none(parser)(value)\n else:\n return value", "title": "" }, { "docid": "1cdbe70b95cb4cc03565d2e7e3019987", "score": "0.5237294", "text": "def get_job( self, id: str ) -> Optional[Job]:\n\n with self.transaction() as session:\n job: JobMetadata = session.query( JobMetadata ).filter_by( id = id ).first()\n return job.export() if job else None", "title": "" }, { "docid": "0b5833b47c71c6580a7c4caffdaae616", "score": "0.52363205", "text": "def job_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"job_id\")", "title": "" }, { "docid": "7ea3f62e28d766f37933df11d307ab86", "score": "0.52339745", "text": "async def get_job(self, processor_name: str, job_id: str) -> PYJobOutput:\n try:\n job = await db_get_processing_job(job_id)\n return job.to_job_output()\n except ValueError:\n raise HTTPException(\n status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,\n detail=f\"Processing job with id '{job_id}' of processor type '{processor_name}' not existing\"\n )", "title": "" }, { "docid": "be91ba2114dc130c31b17f1518dd4495", "score": "0.5233446", "text": "def _get_field_option(self,field_extras,option_name,default):\n if not field_extras: return default\n return field_extras.get(option_name,default)", "title": "" }, { "docid": "fc83252afffabc08d32b741c616c0f5c", "score": "0.5232333", "text": "def _get_raw_value_for_field(self, payload, field):\n obj = None\n if self.is_data_table():\n field_id = str(field['id'])\n\n if payload['cells'].get(field_id):\n obj = payload['cells'][field_id].get('value', None) # 'value' is not always present for datatables\n else:\n LOG.warning(\"field_id no longer exists: %s\", field_id)\n LOG.debug(payload)\n raise ValueError()\n\n else:\n prefix = field.get('prefix')\n\n type_name = self.get_type_name(field['type_id'], pretty=False)\n\n if prefix is None and type_name == 'incident' and not field['internal']:\n # This is lame and required only because the server isn't setting the\n # prefix for custom fields.\n #\n prefix = \"properties\"\n\n obj = payload\n\n 
if prefix is not None:\n obj = payload.get(prefix)\n\n if obj is not None and isinstance(obj, dict):\n obj = obj.get(field['name'])\n\n return obj", "title": "" }, { "docid": "e4339488653171650ab84d107d0d92a9", "score": "0.5226257", "text": "def job_id(self):\n return self.details_property(KEY_JOB_ID)", "title": "" }, { "docid": "12aa9cc5963769510453cfdaff5aee19", "score": "0.52244663", "text": "def find_by_id(cls, job_id):\n return cls.query.filter_by(id=job_id).first()", "title": "" }, { "docid": "5361d7b48ed968743d5a1c037d91381b", "score": "0.5223882", "text": "def get(self, attr):\n\n if attr in self.fields:\n index = self.fields.index(attr)\n return self.values[index]\n return None", "title": "" }, { "docid": "4721bb6f79bf09063bf6c7836d4dbab4", "score": "0.52192557", "text": "def get(self, jobId):\r\n return status.__status.get(jobId,'None')", "title": "" }, { "docid": "c9168eb3203f2e7e169e2f9d936142aa", "score": "0.51978153", "text": "def job_id(self) -> Optional[str]:\n return str(self._job_uuid) if isinstance(self._job_uuid, UUID) else None", "title": "" }, { "docid": "5bd73425ef4cc89f7d3e54b6d708e904", "score": "0.5196384", "text": "def get_job(self, job_id):\n response = get_json(self.session, self.async_endpoint + \"/job/\" + job_id)\n return Job(response.json(), 'QVM')", "title": "" }, { "docid": "a863e559584d92edb84c11103a8fb80d", "score": "0.51941633", "text": "def get_entryfield(self, source, field_id, default=None, parent_entry_id=None, entry_id=None, entry_position=None):\r\n\r\n source = self.get_storage(source)\r\n return source.get_entryfield(field_id, default, parent_entry_id, entry_id, entry_position)", "title": "" }, { "docid": "d8b8cbc8efd9621b7f06498a912a90c2", "score": "0.51857007", "text": "def _extract_job_id(job_name: str) -> Optional[str]:\n p = re.compile(\n 'projects/(?P<project_id>.*)/locations/(?P<region>.*)/pipelineJobs/(?P<job_id>.*)'\n )\n result = p.search(job_name)\n return result.group('job_id') if result else None", "title": "" }, { "docid": "c2b076048d42239b0ef6cf8fbade245f", "score": "0.51836276", "text": "def get_job_output(job_id: Optional[pulumi.Input[str]] = None,\n location: Optional[pulumi.Input[str]] = None,\n project: Optional[pulumi.Input[Optional[str]]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetJobResult]:\n ...", "title": "" }, { "docid": "40c7cee52f8544f8f9f104338afb9747", "score": "0.5180899", "text": "def find_by_job_id(cls, job_id):\n return cls.query.filter_by(job_id=job_id).first()", "title": "" }, { "docid": "0430c4889b2774a029c2eb57b8c04ebc", "score": "0.5180141", "text": "def get_field(self):\n return self._field", "title": "" }, { "docid": "5757f68355c80cdffadf1f79168af9ef", "score": "0.51624775", "text": "def get_job_details(self, job_id):\n if isinstance(job_id, int):\n log.warning('deprecated: job_id support for int for backward compatibility only')\n job_id = str(job_id)\n path = 'JobDetails'\n payload = {\n 'JobManager_JobDetailRequest': {\n '@jobId': job_id\n }\n }\n res = self.request('POST', path, payload=payload)\n data = res.json()\n try:\n job_details = data['job']['jobDetail']\n except KeyError:\n try:\n job_details = data['JobManager_JobDetailResponse']['job']['jobDetail']\n except KeyError:\n # Make new request with xml because Commvault seems to have\n # broken the json request on this route.\n headers = self.headers.copy()\n headers['Content-type'] = 'application/xml'\n payload_nondict = ('<JobManager_JobDetailRequest jobId=\"{}\"/>'\n .format(job_id))\n res = 
self.request('POST', path, headers=headers, payload_nondict=payload_nondict)\n data = res.json()\n job_details = data['job']['jobDetail']\n except TypeError:\n msg = 'No job details found for job {}'.format(job_id)\n raise_requests_error(404, msg)\n if not job_details:\n msg = 'No job details found for job {}'.format(job_id)\n raise_requests_error(404, msg)\n return job_details", "title": "" }, { "docid": "781fd4deb98671855ec81485b3ca18a6", "score": "0.5158926", "text": "def fetch_job(self, job_id: str) -> Optional['Job']:\n try:\n job = self.job_class.fetch(job_id, connection=self.connection, serializer=self.serializer)\n except NoSuchJobError:\n self.remove(job_id)\n else:\n if job.origin == self.name:\n return job", "title": "" }, { "docid": "ae6af1a08a3165d50678f45bfdce2ee9", "score": "0.5154483", "text": "def get_job(self, job_id):\n response_data = self.layer1.describe_job(self.name, job_id)\n return Job(self, response_data)", "title": "" } ]
56a33b94c67f641b5f4000ff1f60be92
Display favorites from Favorite table
[ { "docid": "7c0370360c0dcd5614b530927e1608e2", "score": "0.7621096", "text": "def display_favorites(self):\n self.methods_for_db.get_favorites_products()\n sentence_for_favorites = \"your favorite products!\"\n if self.methods_for_db.favorites == []:\n print(self.messages[\"skip_a_lign\"])\n print(self.messages[\"-\"])\n print(self.messages[\"no_favorites\"])\n print(self.messages[\"-\"])\n else:\n self.select_header(sentence_for_favorites)\n for favorite_id, favorite in enumerate(self.methods_for_db.favorites):\n print(\"{}\".format(favorite_id + 1))\n self.show_product_informations(favorite)", "title": "" } ]
[ { "docid": "ba80cdffd086ef672938b89a19b59f09", "score": "0.75344425", "text": "def show_favorites(self, client_id):\n favorites = self.favorite_m.retrieve_favorites(client_id)\n print(\"\\n****** FAVORIS ******\\n\")\n for i, dictionary in enumerate(favorites):\n code = dictionary['code']\n store_names = self.favorite_m.retrieve_stores(code)\n store_names = [store['store'] for store in store_names]\n stores = \", \".join(store_names)\n print(c.display_favorites.format(\n i=i + 1, stores=stores, **dictionary))", "title": "" }, { "docid": "84e98c94b94ea95905eb429c9dedfdd8", "score": "0.7481881", "text": "def show_favorites():\n\n # Get user from flask session\n user = User.query.get(flask_session.get('current_user'))\n\n if user.favorites:\n\n # Get all favorite_notes for current_user\n favorite_notes = db.session.query(FavoriteNote).filter(FavoriteNote.user_id == user.user_id).all()\n\n return render_template(\"favorites.html\", favorite_notes=favorite_notes)\n\n else:\n\n return render_template(\"favorites.html\", favorite_notes=None)", "title": "" }, { "docid": "d8541298ede2394d0240829aa812b72c", "score": "0.7441504", "text": "def list_user_favorites(request):\n user_favorites = Favorite.objects.filter(user=request.user)\n\n return render(request, 'library/user_favorites_list.html', {'user_favorites': user_favorites})", "title": "" }, { "docid": "8fed46284c45097cc75c5bc908ccd5f8", "score": "0.73683846", "text": "def favourite_list():\n try:\n username = mongo.db.users.find_one(\n {\"username\": session[\"user\"]})[\"username\"]\n except Exception:\n flash('Please login in order to access your list of favourites.')\n return redirect(url_for('login'))\n return render_template('pages/favourites.html',\n favourite_exoplanets=mongo.db[username].find())", "title": "" }, { "docid": "591b9ea6f9c74949386c8c694cc32b08", "score": "0.7104876", "text": "def favorites(request) -> Response:\n return Response()", "title": "" }, { "docid": "288e2d4326bd00f84ebf8c1f3523bc6b", "score": "0.7069665", "text": "def get_favorites(self):\n from app.models.favorite import Favorite\n\n join_predicate = ((Team.espn_id == Favorite.team) & (Team.sport_type == Favorite.sport_type))\n\n favorites = Team.select().join(Favorite, on=join_predicate).where(Favorite.user_id == self.id)\n\n return favorites", "title": "" }, { "docid": "086cbf83ac93ca0706276efad8027670", "score": "0.7057788", "text": "def favorites(request):\n user_id = request.user.id\n user = get_object_or_404(User, id=user_id)\n recorded = Favorites.objects.filter(users=user)\n favorite_recorded = []\n for record in recorded:\n rec = (\n Products.objects.get(id=record.products_id),\n Products.objects.get(id=record.substitute_id),\n )\n favorite_recorded.append(rec)\n\n recording = False\n context = {\n \"recording\": recording,\n \"favorite_recorded\": favorite_recorded,\n }\n\n return render(request, \"food_substitute/favorites.html\", context)", "title": "" }, { "docid": "e56106ead9b739f2c558f81acbad6cb1", "score": "0.6993792", "text": "def find_show_favorites_list(cls, guidebox_id, user_id):\n \n favorite = Favorite.query.filter_by(guidebox_id=guidebox_id, user_id=user_id).all()\n return favorite", "title": "" }, { "docid": "48d4ab5217428d7d71e354ebc2b9ea5e", "score": "0.69307876", "text": "def favorite():\n if request.method == \"GET\":\n all_places = db.execute(\"SELECT location FROM key\")\n fav_places = db.execute(\"SELECT location FROM favorites WHERE user = :user\", user=session[\"user_id\"])\n length = len(fav_places)\n places = []\n if 
fav_places:\n for i in range(length):\n places.append(db.execute(\"SELECT location FROM key WHERE id = :id1\", id1=(fav_places[i]['location'])))\n non_fav_places = diff(all_places, places)\n return render_template(\"favorite.html\", non_fav_places=non_fav_places, places=places)\n else:\n name = request.form.get(\"location\")\n key = db.execute(\"SELECT id FROM key WHERE location = :location\", location=name)\n result = db.execute(\"INSERT INTO favorites (location, user) VALUES(:location, :user)\",\n location=key[0]['id'], user=session[\"user_id\"])\n all_places = db.execute(\"SELECT location FROM key\")\n fav_places = db.execute(\"SELECT location FROM favorites WHERE user = :user\", user=session[\"user_id\"])\n length = len(fav_places)\n places = []\n if fav_places:\n for i in range(length):\n places.append(db.execute(\"SELECT location FROM key WHERE id = :id1\", id1=(fav_places[i]['location'])))\n non_fav_places = diff(all_places, places)\n return render_template(\"favorite.html\", all_places=all_places, places=places)", "title": "" }, { "docid": "4a297443e3047dbb7f20754bc4abe7e9", "score": "0.6917918", "text": "def display_favorite_menu(self):\n if self.session['connected'] is False:\n self.log_in()\n id_client = self.client_m.get_id_client(self.session['user'])\n self.show_favorites(id_client)\n response = self.get_response(c.display_favorite_menu, \"bq\")\n next_step = {\n \"b\": self.display_main_menu,\n \"q\": self.quit_menu\n }\n next_step[response]()", "title": "" }, { "docid": "de98299b8876bfae976a05d0e18ceefd", "score": "0.6912191", "text": "def get_user_favorites(username):\n favs_of_user = UserFavorite.query.filter(username==username).all()\n\n return favs_of_user", "title": "" }, { "docid": "11cc6c5dadf55a837792ce3d9525ee67", "score": "0.6892073", "text": "def showfavourites(request):\n\n email = request.user.email\n favourites = Favourite.objects.all().filter(email_user=email)\n favourite_list = []\n for i in range(len(favourites)):\n new_code = favourites[i].favourite_barcode\n product = Product.objects.get(barcode=new_code)\n favourite_list.append(product)\n \n return render(request, 'food/favourite.html', locals())", "title": "" }, { "docid": "0fd1d877046bdfa90d38c4ccd286e139", "score": "0.688519", "text": "def _fav_list(username):\n fav_list = Favourite.objects.filter(Q(user__username=username))\n serializer = FavouriteModelSerializer(fav_list, many=True)\n return Response(serializer.data)", "title": "" }, { "docid": "afa5de1d11bea13132c6d3c70ec0c064", "score": "0.67605203", "text": "def FavoritesFor(self, buffer=None, index=None):\r\n\r\n who = buffer.get_all_screen_names(index)\r\n new = modal_dialog(gui.FavoritesDialog, parent=self.session.frame, users=who, results_per_api=200)\r\n who = new.users.GetValue()\r\n name = _(\"%s's likes\") % who\r\n self.session.register_buffer(name, buffers.Favorites, username=who, count=new.retrieveCount.GetValue(), maxAPIPerUpdate=new.maxAPIPerUpdate.GetValue(), prelaunch_message=_(\"Loading likes for %s.\") % who)", "title": "" }, { "docid": "fa6f8dc61d04d22d13fd86bf58c8a0a6", "score": "0.67360866", "text": "def favorite(request):\n favorite = Favorite(request)\n if favorite.login_validate():\n product = []\n product = favorite.return_favorite_list()\n return render(request, 'substitute/favorite.html',\n {'product': product})\n else:\n return redirect('homepage')", "title": "" }, { "docid": "094174a05091635d4862571ff6202e14", "score": "0.6723262", "text": "def favorite_data(self, data, favorites):\n os.system(\"clear\")\n 
print(\"Voici les informations sur ce favori :\")\n print(data[0])\n print(\"Nutriscore : \" + data[1])\n print(\"Ce produit peut être acheté chez : \" + data[2])\n print(\"Page du produit : \" + data[3])\n print(\"Pour revenir au menu principal, entrez q\")\n n = input(\"Entrez votre choix puis appuyez sur <Entrée> pour valider :\")\n while n.lower() != \"q\":\n print(\"Vous n'avez pas entré une valeur valide\")\n n = input(\"Entrez votre choix puis appuyez sur <Entrée> pour valider :\")\n if n.lower() == \"q\":\n self.menu()", "title": "" }, { "docid": "afc9cc21713b0653e7158814e21ebb3d", "score": "0.6682353", "text": "def list_favorites():\n favorites = config.get('Favorites', {})\n tuples = [(name, favorites[name]['Project'], favorites[name]['Task']) for name in favorites]\n return [Favorite(*t) for t in tuples]", "title": "" }, { "docid": "e447c948538ebe1a878d4547f4186cf0", "score": "0.66458076", "text": "def Favorites(self):\r\n\r\n self.session.register_buffer(_(\"Likes\"), buffers.Favorites, prelaunch_message=_(\"Loading likes\"))", "title": "" }, { "docid": "9071783e2f75b4e27ded30436882bd81", "score": "0.6624388", "text": "def get(self, request, username):\n\n return _fav_list(username)", "title": "" }, { "docid": "9751877a0955220a56bb2a578a4d2b5e", "score": "0.65690494", "text": "def favoriteseller_list(request):\n if request.method == 'GET':\n with sqlite3.connect(Connection.db_path) as conn:\n conn.row_factory = sqlite3.Row\n db_cursor = conn.cursor()\n\n db_cursor.execute(\"\"\"\n SELECT\n f.id, \n f.seller_id,\n f.customer_id,\n u.id user_id,\n u.first_name || ' ' || u.last_name AS full_name\n FROM \n bangazonapi_favorite f\n JOIN\n bangazonapi_customer c ON f.customer_id = c.id\n JOIN\n auth_user u ON c.user_id = u.id;\n \"\"\")\n dataset = db_cursor.fetchall()\n\n favorite_seller = {}\n\n for row in dataset:\n favorite = Favorite()\n favorite.customer_id = row[\"customer_id\"]\n favorite.seller_id = row[\"seller_id\"]\n\n uid = row[\"user_id\"]\n\n if uid in favorite_seller:\n favorite_seller[uid]['favorites'].append(favorite)\n\n else:\n favorite_seller[uid] = {}\n favorite_seller[uid][\"id\"] = uid\n favorite_seller[uid][\"full_name\"] = row[\"full_name\"]\n favorite_seller[uid][\"favorites\"] = [favorite]\n\n list_of_users_with_favorites = favorite_seller.values()\n\n template = 'users/list_withFavorites.html'\n context = {\n 'favoriteseller_list': list_of_users_with_favorites\n }\n\n return render(request, template, context)", "title": "" }, { "docid": "b4608ffc6d6a164ed2ef200a12a67266", "score": "0.65661657", "text": "def get_favorites(user):\n return list(Favorite.objects.filter(User=user).values_list('Project__pk', flat=True))", "title": "" }, { "docid": "c76deb5562deae2d59e5c3589703adcb", "score": "0.6530957", "text": "def list_favorite_queries():\n\n headers = [\"Name\", \"Query\"]\n rows = [(r, favoritequeries.get(r)) for r in favoritequeries.list()]\n\n if not rows:\n status = \"\\nNo favorite queries found.\" + favoritequeries.usage\n else:\n status = \"\"\n return [(\"\", rows, headers, status)]", "title": "" }, { "docid": "dda9e48fc8a873b0caae1733c06baf70", "score": "0.6526526", "text": "def fav_select(obj, favorites):\n return obj.id in favorites", "title": "" }, { "docid": "bf8158e8a3360beb586ddf244fc8027c", "score": "0.6521394", "text": "def get(self, request, username):\n\n return get_fav_list(username)", "title": "" }, { "docid": "28ec4deaf2b5460565cdacc2916c83b1", "score": "0.6503984", "text": "def enter_favorite(query):\n try:\n custom_faves = 
Dict['favorites']\n except KeyError:\n custom_faves = []\n custom_faves.append(query)\n Dict['favorites'] = custom_faves\n Dict.Save()", "title": "" }, { "docid": "b6d8a75736552d6f8520f2a91770f356", "score": "0.6412661", "text": "def favorites(self, request, *args, **kwargs):\n user = request.user\n if not hasattr(user, 'userfavorite'):\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n favorite = user.userfavorite\n serializer_class = self.get_serializer_class()\n\n serializer = serializer_class(\n favorite.variations.select_related(\n 'size', 'product',\n ).prefetch_related(\n Prefetch('product__images'),\n Prefetch('quantities__address'),\n ),\n many=True,\n )\n return Response(serializer.data, status=status.HTTP_200_OK)", "title": "" }, { "docid": "58a81754be05f3f16d8dc660433f1ce7", "score": "0.63533014", "text": "def auto_fav(q: str, count: int = 100):\n result = search_tweets(q, count)\n a = result[\"statuses\"][0][\"user\"][\"screen_name\"]\n print (a)\n success = 0\n for tweet in result[\"statuses\"]:\n if fav_tweet(tweet) is not None:\n success += 1\n print (\"We Favorited a total of %i out of %i tweets\" % (success, len(result[\"statuses\"])))", "title": "" }, { "docid": "14acbd55e97b71de71384aad9282be51", "score": "0.6335619", "text": "def list(self, request):\n customer = Customer.objects.get(user=request.auth.user)\n sellers_i_love = Favorite.objects.filter(customer=customer)\n\n serializer = FavoriteSerializer(\n sellers_i_love, many=True, context={'request': request})\n return Response(serializer.data)", "title": "" }, { "docid": "f8e666221d4ce7f8abd97f7fd337ecef", "score": "0.6332512", "text": "def favorite(self, request, pk=None):\n obj = self.get_object()\n try:\n FavoriteItem.objects.create(user=request.user, item=obj)\n except IntegrityError:\n pass\n return Response(status=status.HTTP_200_OK)", "title": "" }, { "docid": "def740cdc2c45acd6fe4b1069ea35e54", "score": "0.63314444", "text": "def get_rec_from_favorites(user_data):\r\n find_fav_rec = user_data[\"favorites\"]\r\n\r\n friend_watched = friend_watched_list(user_data)\r\n\r\n rec_fav = []\r\n\r\n for movie in find_fav_rec:\r\n if movie not in friend_watched:\r\n rec_fav.append(movie)\r\n return rec_fav", "title": "" }, { "docid": "974e90a7dbfd84d2eb917d9fe07a40b6", "score": "0.6292259", "text": "def favlist(self, user_id, page=1, count=100):\n params = {\n 'user_id': user_id,\n 'count': count,\n 'page': page,\n }\n res = self.session.get(self.urls['favlist'], params=params)\n if res.status_code != 200:\n raise Exception()\n return json.loads(res.text)", "title": "" }, { "docid": "d88e9fd51f655aa9fa0e777f8a503735", "score": "0.6271748", "text": "def favor_recipes(request):\n user = request.user\n latest = models.Recipe.objects.annotate(\n is_favorite=Exists(\n models.FavorRecipe.objects.filter(\n user_id=user.id,\n recipe_id=OuterRef('pk'),\n )),\n is_purchas=Exists(\n models.Purchase.objects.filter(\n user_id=user.id,\n recipe_id=OuterRef('pk'),\n ),\n )).filter(favor_recipe__user=user)\n paginator = Paginator(latest, PAGINATOR_SIZE)\n page_number = request.GET.get('page')\n page = paginator.get_page(page_number)\n return render(\n request,\n \"favor_recipes.html\",\n {\"page\": page, \"paginator\": paginator, 'favor': True}\n )", "title": "" }, { "docid": "7c9da811eb2a5c183c4a5b733e7c6c68", "score": "0.62199694", "text": "def favorites(self):\n\n # to be optimized later if still exists\n member_usernames = set(member.username for member in self.card.members)\n # FIXME: don't reference parent\n 
board_user_stats = [(nb_cards, username) for username, nb_cards in self.member_stats.iteritems()]\n board_user_stats.sort(reverse=True)\n # Take the 5 most popular that are not already affected to this card\n favorites = [username for (__, username) in board_user_stats\n if username not in member_usernames]\n\n self._favorites = [component.Component(usermanager.UserManager.get_app_user(username), \"friend\")\n for username in favorites[:5]]\n return self._favorites", "title": "" }, { "docid": "2b705b89e4f9a61f73d705c4a13fc1cd", "score": "0.6217827", "text": "def favorites(self):\n result = (db.session.query(Purchase, Product)\n .filter(Product.id == Purchase.product_id)\n .filter(Product.active.is_(True))\n .filter(Purchase.user_id == self.id)\n .filter(Purchase.revoked.is_(False))\n .group_by(Purchase.product_id)\n .order_by(func.sum(Purchase.amount).desc())\n .all())\n return [purchase.product_id for purchase, _ in result]", "title": "" }, { "docid": "5743e67acdd4d49732effbe0c4841d72", "score": "0.620639", "text": "def favorites_choice(self, favorites):\n os.system(\"clear\")\n if favorites != None:\n print(\"Voici vos favoris :\")\n for favorite in favorites:\n print(str(favorites.index(favorite) + 1) + \" - \" + str(favorite))\n n_list = [str(favorites.index(favorite) + 1) for favorite in favorites]\n n = input(\n \"Entrez le numéro d'un produit ou q pour revenir au menu principal puis appuyez sur <Entrée> :\"\n )\n while n not in n_list and n.lower() != \"q\":\n print(\"Vous n'avez pas entré une valeur valide\")\n n = input(\n \"Entrez le numéro d'un produit ou q pour revenir au menu principal puis appuyez sur <Entrée> :\"\n )\n if n in n_list:\n data = self.db.get_product_data(favorites[int(n) - 1])\n self.favorite_data(data, favorites)\n elif n.lower() == \"q\":\n self.menu()\n else:\n print(\"Vous n'avez aucun favori\")\n n = input(\n \"Entrez q pour revenir au menu principal puis appuyez sur <Entrée>\"\n )\n if n != \"q\":\n print(\"Vous n'avez pas entré une valeur valide\")\n elif n.lower() == \"q\":\n self.menu()", "title": "" }, { "docid": "89364bdf649a4e035f7b0f942b0f846e", "score": "0.62023014", "text": "def toggle_favorite(request):\n if not request.is_ajax():\n return HttpResponseForbidden()\n\n try:\n app, model, pk = request.POST['app'], request.POST['model'], int(request.POST['pk'])\n\n except ValueError:\n return HttpResponseBadRequest()\n\n try:\n ctype = ContentType.objects.get(app_label=app, model=model)\n obj = ctype.get_object_for_this_type(id=pk)\n\n except Favorite.DoesNotExist:\n return HttpResponseNotFound()\n\n try:\n fave = Favorite.objects.get(content_type=ctype, object_id=obj.id, user=request.user)\n\n except Favorite.DoesNotExist:\n # Does not exist, so create it\n fave = Favorite(content_object=obj, user=request.user)\n fave.save()\n is_favorite = True\n\n else:\n # Exists, so delete it\n fave.delete()\n is_favorite = False\n\n # Return the current total number for UI updates\n favorites_count = Favorite.objects.filter(content_type=ctype, user=request.user).count()\n\n return JsonResponse({\n 'app': app,\n 'model': model,\n 'pk': pk,\n 'is_favorite': is_favorite,\n 'count': favorites_count,\n })", "title": "" }, { "docid": "b6e792d80d7e39c7454a955d06810638", "score": "0.62016433", "text": "def favorite_detail(request, pk):\n product_detail = get_object_or_404(Product, pk=pk)\n return render(request, 'substitute/favorite_detail.html',\n {'product': product_detail})", "title": "" }, { "docid": "0838592136336d71fb0464a34761b7f2", "score": "0.61695236", 
"text": "def fav_tweet(tweet: Twitter):\n try:\n result = t.favorites.create(_id=tweet[\"id\"])\n print \"Favorited: %s\" % (result[\"text\"])\n return result\n except TwitterHTTPError as e:\n print \"Error: \", e\n return None", "title": "" }, { "docid": "13f7d4da9b43a4d4333039565b222336", "score": "0.60823476", "text": "def FavoriteTweet(self, buffer=None, index=None):\r\n\r\n call_threaded(self.session.favorite_tweet, buffer, index)", "title": "" }, { "docid": "d015d4448edb8b906cb54cc07e0d01f5", "score": "0.607299", "text": "def store_favorites(n_clicks, selected_rows, existing_favorites, data):\n favorites_list = existing_favorites\n for selected_row in selected_rows:\n if data[selected_row] not in favorites_list:\n favorites_list.append(data[selected_row])\n return favorites_list", "title": "" }, { "docid": "27d5877ba6f50753152008732f95b10f", "score": "0.6070252", "text": "def favourite(exoplanet_id):\n try:\n username = mongo.db.users.find_one(\n {\"username\": session[\"user\"]})[\"username\"]\n favourite = mongo.db.exoplanets.find_one(\n {\"_id\": ObjectId(exoplanet_id)})\n except Exception:\n flash('Please log in in order to add any exoplanet to your list of favourites')\n return redirect(url_for('login'))\n\n # check if the exoplanet is already in the favourites list\n already_favourite = mongo.db[username].find_one(\n {\"_id\": ObjectId(exoplanet_id)})\n if already_favourite:\n return render_template('pages/alreadyFavourite.html',\n favourite=favourite)\n else:\n mongo.db[username].insert(favourite)\n return redirect(url_for('favourite_list'))", "title": "" }, { "docid": "9ba50eb0ffb7fc1831e4503916db0e57", "score": "0.6058243", "text": "def __repr__(self):\r\n\r\n return \"<Favorite fav_id=%s>\" % (self.fav_id)", "title": "" }, { "docid": "6b69344d03d31375f33c6cd71859ac9c", "score": "0.6037774", "text": "def get_user_favourites(user_id):\n\n try:\n connection = open_connection_if_not_already_open()\n with connection.cursor() as cursor:\n cursor.execute(\n 'SELECT Id, Name, Blurb, ImageName FROM Recipes INNER JOIN UserFavourites ON UserFavourites.RecipeId = Recipes.Id WHERE UserFavourites.UserId = \"{}\";'.format(\n user_id))\n returned_tuples = cursor.fetchall()\n values_list = [{\"Id\": individual_tuple[0], \"Name\": individual_tuple[1], \"Blurb\": individual_tuple[2],\n \"ImageName\": individual_tuple[3]} for individual_tuple in returned_tuples]\n values_list = add_average_review_score_to_dictionary_list(values_list)\n return values_list\n except Exception as e:\n print(\"ERROR: {}\".format(e))", "title": "" }, { "docid": "6606786682cc2e864aac3a0e12343e3e", "score": "0.6020843", "text": "def new_favorite(app, init_db):\n favorite_object = {\n 'title': 'calory',\n 'description': '[email protected]',\n 'meta_data': '{color is grey}',\n 'rank': 2\n }\n\n favorite = Favorite(**favorite_object)\n return favorite", "title": "" }, { "docid": "5850519d722e42077e9417b261d572c9", "score": "0.5968852", "text": "def small_to_large_favourites():\n username = mongo.db.users.find_one(\n {\"username\": session[\"user\"]})[\"username\"]\n sized_exoplanets = mongo.db[username].find()\n favourite_exoplanets = sorted(sized_exoplanets,\n key=lambda i: float(i['mass']))\n return render_template('pages/favourites.html',\n favourite_exoplanets=favourite_exoplanets)", "title": "" }, { "docid": "7d36c90adaddec95b2941cd886c10e4a", "score": "0.5967099", "text": "def open_favorites(self):\r\n file = open(\"favoriteroutes.txt\",\"r\")\r\n print(file.read())\r\n file.close", "title": "" }, { "docid": 
"5af0ebc86970c4b97289f9d95271b1ad", "score": "0.5950597", "text": "def favorite(self):\n return self._info[\"favorite\"] == 1", "title": "" }, { "docid": "1e6302736fa39769def0071d86578691", "score": "0.59315133", "text": "def favorites(self, page=1, screenname=None):\n\n input_data = {'page': page}\n if screenname == None:\n data = self.__get_data('favorites', input_data)\n else:\n data = self.__get_data('favorites/%s' % screenname, input_data)\n\n for x in data:\n yield Status(x)", "title": "" }, { "docid": "e6d9e4451b05aec4a4757c649de48681", "score": "0.5930816", "text": "def favorite(self):\n return True if self._info[\"favorite\"] == 1 else False", "title": "" }, { "docid": "52db33975dcdbd0dcdb39789dd07b5a8", "score": "0.59234", "text": "def toggle_fav_art_to_db():\n\n user_id = session['current_user']\n art_id = request.args.get(\"art_id\")\n # getting art_id via js object key rather than \"name\" HTML element\n\n favorite_art = UserArt.query.filter_by(user_id=user_id, art_id=art_id).first()\n\n if favorite_art:\n # If favorite already exists in db for this user, remove it\n # user_list.artworks.pop(favorite)\n db.session.delete(favorite_art)\n db.session.commit()\n\n else:\n new_favorite_art = UserArt(user_id=user_id, art_id=art_id)\n # If favorite already exists in db for this user, add it\n # user_list.artworks.append(favorite)\n db.session.add(new_favorite_art)\n db.session.commit()\n\n return \"Success!\"", "title": "" }, { "docid": "241a1f2e81dfaf125e550aba11863f26", "score": "0.58907163", "text": "def favorite(self):\n return self.is_type(\"favorite\")", "title": "" }, { "docid": "ecead11fe0538c682c70219417b78223", "score": "0.5888142", "text": "def fav_recipes():\n\n # get user_id from session\n user_id = session['user']\n\n #Use Ajax request to get data\n title = request.args.get(\"title\") \n url = request.args.get(\"fav-url\")\n img = request.args.get(\"img\")\n\n\n # Check user info against database\n recipe_query = Recipe.query.filter_by(user_id=user_id, title=title).first()\n\n if recipe_query == None:\n recipe = Recipe(user_id=user_id, title=title, url=url, img=img)\n db.session.add(recipe)\n db.session.commit()\n\n return jsonify('Recipe added to favorites!')\n \n else: \n return jsonify('Recipe already in favorites!')", "title": "" }, { "docid": "225665e73609f13fada51c03015f4c3a", "score": "0.5883362", "text": "def test_remove_get_favorite(self):\n favitem = FavoriteItem(name='Product 1')\n favitem.save()\n self.assertFalse(Favorites.objects.get_favorite(self.user, favitem))\n\n fav = Favorites.objects.add_favorite(self.user, favitem)\n fav_len = len(Favorites.objects.all())\n self.assertTrue(fav_len == 1)\n\n # delete it\n fav_del = Favorites.objects.delete_favorite(self.user, favitem)\n\n fav_len = len(Favorites.objects.all())\n self.assertTrue(fav_len == 0)\n\n termprint(\"INFO\", my_favorites(self.user))\n self.assertFalse(my_favorites(self.user))", "title": "" }, { "docid": "c601ee37efffb836ff3aeff49f84024f", "score": "0.5875409", "text": "def save_favorite(request, pk_prod):\n favorite = Favorite(request, pk_prod)\n if favorite.login_validate():\n favorite.saved_favorite()\n product = favorite.return_favorite_list()\n return render(request, 'substitute/favorite.html',\n {'product': product})", "title": "" }, { "docid": "5cdedb396692a4efc5b8ef6734bb1270", "score": "0.58580416", "text": "def set_favorite(self, value):\n return self.set_type(\"favorite\", value)", "title": "" }, { "docid": "2a1e29cf33987570f69e72612c4aee27", "score": "0.5850121", "text": "def 
user_favourites_count(self):\n\t\treturn self.obj[\"user\"][\"favourites_count\"]", "title": "" }, { "docid": "313e51c9ed8493b9e5460461a7ccf902", "score": "0.5834429", "text": "def get_fav_boards(self):\n raise NotImplementedError()", "title": "" }, { "docid": "6f039e97d7996bf32e75374e340407ff", "score": "0.58123624", "text": "def favourite_book(user,book):\n print(f\"{user.title()} likes {book.title()} to read.\")", "title": "" }, { "docid": "c67de8315897e9a0c5ec715c3c8aeaae", "score": "0.58107525", "text": "def favorite_book(request, pk):\n book = get_object_or_404(Book, pk=pk)\n\n # if the request is a GET (the user requests the /book/<book>/favorite url)\n # book.favorites.add(request.user)\n if request.method == 'GET':\n if request.user in book.favorites.all():\n book.favorites.remove(request.user)\n messages.success(request, f'{ book.title } has been removed from your favorites.')\n else:\n book.favorites.add(request.user)\n messages.success(request, f'{ book.title } has been added to your favorites!')\n\n # redirect to a new URL:\n return HttpResponseRedirect(request.GET.get(\"next\"))", "title": "" }, { "docid": "1f5d01bc2fa1a0c9a342e189db58e89b", "score": "0.5770329", "text": "def update_favorites(self, new_favorites):\n current_favorites = self.get_favorites()\n current_favorites_tuples = [(team.espn_id, team.sport_type) for team in current_favorites]\n new_favorites_tuples = [(team.espn_id, team.sport_type) for team in new_favorites]\n remove_list = []\n add_list = []\n\n # finding favorites that must be removed\n for team in current_favorites:\n if (team.espn_id, team.sport_type) not in new_favorites_tuples:\n remove_list.append(team)\n\n # finding favorites that must be added\n for team in new_favorites:\n if (team.espn_id, team.sport_type) not in current_favorites_tuples:\n add_list.append(team)\n\n # removing old favorites and adding new ones\n self.add_favorite(add_list)\n self.remove_favorite(remove_list)", "title": "" }, { "docid": "0e5101c4e2b03e791236ea4e1f06dd54", "score": "0.57529324", "text": "async def get_favorites_service(call):\n audio_source = None\n if \"audio_source\" in call.data:\n audio_source = call.data[\"audio_source\"]\n\n d = hass.data[DOMAIN]\n list_info = {}\n list_idx = 0\n for item in reversed(d.favorites):\n if audio_source is None or audio_source == item[\"source\"]:\n if (\n \"media_stream_image\" in item\n and item[\"media_stream_image\"] is not None\n ):\n img = item[\"media_stream_image\"]\n else:\n img = \"/static/icons/tile-win-310x150.png\"\n list_info[list_idx] = {}\n list_info[list_idx][\"title\"] = item[\"name\"]\n if item[\"name\"].startswith(item[\"source\"]):\n list_info[list_idx][\"name\"] = item[\"name\"]\n else:\n list_info[list_idx][\"name\"] = (\n ais_global.G_NAME_FOR_AUDIO_NATURE.get(\n item[\"source\"], item[\"source\"]\n )\n + \" \"\n + item[\"name\"]\n )\n list_info[list_idx][\"thumbnail\"] = img\n list_info[list_idx][\"uri\"] = item[\"media_content_id\"]\n list_info[list_idx][\"audio_type\"] = item[\"source\"]\n list_info[list_idx][\"icon_type\"] = ais_global.G_ICON_FOR_AUDIO.get(\n item[\"source\"], \"mdi:play\"\n )\n list_info[list_idx][\"icon_remove\"] = \"mdi:delete-forever\"\n list_info[list_idx][\"editable\"] = True\n if audio_source == ais_global.G_AN_PODCAST:\n list_info[list_idx][\"icon\"] = \"mdi:podcast\"\n else:\n list_info[list_idx][\"icon\"] = \"mdi:play\"\n list_info[list_idx][\"id\"] = item[\"id\"]\n list_idx = list_idx + 1\n\n # create lists\n if audio_source is None:\n # get all items\n 
hass.states.async_set(\"sensor.aisfavoriteslist\", -1, list_info)\n else:\n # check if the change was done form remote\n import homeassistant.components.ais_ai_service as ais_ai\n\n if audio_source == ais_global.G_AN_RADIO:\n hass.states.async_set(\"sensor.radiolist\", -1, list_info)\n if (\n ais_ai.CURR_ENTITIE == \"input_select.radio_type\"\n and ais_ai.CURR_BUTTON_CODE == 23\n ):\n ais_ai.set_curr_entity(hass, \"sensor.radiolist\")\n hass.async_add_job(\n hass.services.async_call(\n \"ais_ai_service\", \"say_it\", {\"text\": \"Wybierz stację\"}\n )\n )\n elif audio_source == ais_global.G_AN_PODCAST:\n hass.states.async_set(\"sensor.podcastnamelist\", -1, list_info)\n if (\n ais_ai.CURR_ENTITIE == \"input_select.podcast_type\"\n and ais_ai.CURR_BUTTON_CODE == 23\n ):\n ais_ai.set_curr_entity(hass, \"sensor.podcastnamelist\")\n hass.async_add_job(\n hass.services.async_call(\n \"ais_ai_service\", \"say_it\", {\"text\": \"Wybierz audycję\"}\n )\n )\n elif audio_source == ais_global.G_AN_MUSIC:\n hass.states.async_set(\"sensor.youtubelist\", -1, list_info)\n elif audio_source == ais_global.G_AN_SPOTIFY:\n hass.states.async_set(\"sensor.spotifylist\", -1, list_info)\n elif audio_source == ais_global.G_AN_AUDIOBOOK:\n hass.states.async_set(\"sensor.audiobookschapterslist\", -1, list_info)", "title": "" }, { "docid": "d1cc71498480897213a3a1fec3892d83", "score": "0.57421863", "text": "def show_artist(artist_id):\n\n artist = Artist.query.filter_by(artist_id=artist_id).first()\n if 'current_user' in session:\n user_id = session['current_user']\n favorite = UserArtist.query.filter_by(user_id=user_id, artist_id=artist_id).first()\n is_favorited = favorite is not None\n else:\n is_favorited = False\n\n return render_template(\"artists.html\", artist=artist, is_favorited=is_favorited)", "title": "" }, { "docid": "3e8efc492d1e15e0b4a3eef205ebca95", "score": "0.56800467", "text": "def retrieve(self, request, pk=None):\n try:\n single_favorite = Favorite.objects.get(pk=pk)\n serializer = FavoriteSerializer(single_favorite, context={'request': request})\n return Response(serializer.data)\n except Exception as ex:\n return HttpResponseServerError(ex)", "title": "" }, { "docid": "eafb7590b7dd1e011932c4fd36db3798", "score": "0.56782305", "text": "def is_favorite(self, scheme, user):\n f = Favorite.all().filter('user = ', user).filter('scheme = ', scheme).get()\n if f:\n return f.date\n else:\n return None", "title": "" }, { "docid": "ebd80a33a2f76a1a6ae843c32a7334d4", "score": "0.5665364", "text": "def fav_products_page_returns_alt_and_fav_products(self):\n\n fake_fav_product = FavouriteProduct.objects.order_by('?').first()\n\n self.client.login(username='FakeUser', password='fake_password')\n response = self.client.get(reverse('altproduct:fav_products'))\n\n fake_fav_products = response.context['fav_products']\n # See test_product_returned_is_alternative test for object_list use\n category = Category.objects.get(id=fake_fav_products.object_list.category.id)\n\n self.assertEqual(fake_fav_products.object_list.id, fake_fav_product.product_id)\n self.assertEqual(True, category.alternative)", "title": "" }, { "docid": "e871a7ce57e70aac226016159bfd0b55", "score": "0.5662184", "text": "def toggle_fav_artist_to_db():\n\n user_id = session['current_user']\n artist_id = request.args.get(\"artist_id\")\n\n favorite_artist = UserArtist.query.filter_by(user_id=user_id, artist_id=artist_id).first()\n\n if favorite_artist:\n db.session.delete(favorite_artist)\n db.session.commit()\n\n else:\n new_favorite_artist = 
UserArtist(user_id=user_id, artist_id=artist_id)\n db.session.add(new_favorite_artist)\n db.session.commit()\n\n result = \"Success!\"\n\n return result", "title": "" }, { "docid": "3ce04e893769f035b87169652f5f6faa", "score": "0.56491673", "text": "def large_to_small_favourites():\n username = mongo.db.users.find_one(\n {\"username\": session[\"user\"]})[\"username\"]\n larged_sized = mongo.db[username].find()\n favourite_exoplanets = sorted(larged_sized,\n key=lambda i: float(\n i['mass']), reverse=True)\n return render_template('pages/favourites.html',\n favourite_exoplanets=favourite_exoplanets)", "title": "" }, { "docid": "24323782aaf6a1fbd41ebaae7584c41e", "score": "0.56363595", "text": "def add_fav_board(self):\n raise NotImplementedError()", "title": "" }, { "docid": "0576f16435984f3c315706094bc1a2ec", "score": "0.5634527", "text": "def favouritesAnzeigen(self, RestaurantListe, x):\n return [restaurant.Name for restaurant in RestaurantListe if restaurant.Profil in self.__Lieblingsessen and restaurant.Rating >= x]", "title": "" }, { "docid": "d930a0fea88a8b182fc89c7829580c19", "score": "0.56246", "text": "def update_favorite_review(self, review_id):\n\n rev = Review.query.get(review_id)\n\n if self.is_favorite_review(review_id):\n\n # If the user has favorited the item, remove the favorite from the db\n self.favorite_reviews.remove(rev)\n db.session.commit()\n return \"Unfavorited\"\n\n else:\n\n # If the user has not favorited the review, add it to the db\n self.favorite_reviews.append(rev)\n db.session.commit()\n return \"Favorited\"", "title": "" }, { "docid": "03462ba41354663c2e3b9a48a394b083", "score": "0.5609158", "text": "def get_flavors(self):\n print(self.restaurant_name + \" has:\")\n for flavor in self.flavors:\n print(\" -\" + flavor.title())", "title": "" }, { "docid": "40c2e8528802319282b312a0b24ef75a", "score": "0.5596299", "text": "def get_query_set(self):\n return super(FavoriteManager, self).get_query_set().filter(spod__status=2)", "title": "" }, { "docid": "a21166e731235e68a5332e9de28e8d4f", "score": "0.557874", "text": "def fav_tweet(api: TwitterAPI, tweet) -> bool:\n r = api.request('favorites/create', {'id': tweet.id})\n\n if r.status_code != 200:\n return False\n\n return True", "title": "" }, { "docid": "ed2bb53946589459522a0c340e396c0c", "score": "0.5572109", "text": "def test_add_favorite(self):\n product_to_fav = Product(\n id_product=\"fav01\",\n product_name_fr=\"test_favori\",\n nutriscore_score=0,\n nutriscore_grade='A'\n )\n product_to_fav.save()\n url = reverse('admin_favorite',\n args=['fav01', 'add'])\n response = self.client_login.get(url)\n user = self.User.objects.get(username='test')\n favorites = Product.objects.filter(favorites=user)\n\n self.assertEqual(response.status_code, 302)\n self.assertEqual(len(favorites), 12)\n self.assertTrue(product_to_fav in user.product_set.all())", "title": "" }, { "docid": "42967fa817638c95fe30946652754a81", "score": "0.55718726", "text": "def favorite_count(self):\n return tweet_counts.get_favorite_count(self)", "title": "" }, { "docid": "d105280d89dddd413e0ce8d5a64debe5", "score": "0.55715925", "text": "def favourite_detailed(exoplanet_id):\n username = mongo.db.users.find_one(\n {\"username\": session[\"user\"]})[\"username\"]\n detailed_exoplanet = mongo.db[username].find_one(\n {\"_id\": ObjectId(exoplanet_id)})\n return render_template('pages/detailedExoplanet.html',\n detailed_exoplanet=detailed_exoplanet)", "title": "" }, { "docid": "6fcfc93c9f65a1677b10192f2133731e", "score": "0.556414", "text": "def 
post(self, request, *args, **kwargs):\n _user = self.request.user\n pk_entry = self.kwargs['pk']\n _entry = Entry.objects.get(id=pk_entry)\n \"\"\" Add to favorites \"\"\"\n Favorites.objects.create(\n user=_user,\n entry=_entry,\n )\n\n return HttpResponseRedirect(\n reverse(\n 'favorites_app:ProfilePage',\n )\n )", "title": "" }, { "docid": "bbd3b5f62f10cf1b7f329320112ab1d0", "score": "0.55613714", "text": "def getFavorites(self, username):\n conn = self.getConnection()\n cur = conn.cursor()\n cur.execute(\"SELECT * FROM WATCHLIST WHERE username=? ORDER BY \\\n LASTWATCHED DESC\", (username,))\n\n rows = cur.fetchall()\n conn.close()\n\n return self.createList(rows, 2)", "title": "" }, { "docid": "8d02b189e1767a5373fe9963e4c49fe3", "score": "0.5556697", "text": "def favourite_gas_giants():\n username = mongo.db.users.find_one(\n {\"username\": session[\"user\"]})[\"username\"]\n\n favourite_exoplanets = mongo.db[username].find(\n {'type': 'gas giant'})\n return render_template('pages/favourites.html',\n favourite_exoplanets=favourite_exoplanets,\n gas=True)", "title": "" }, { "docid": "43a5a6b5b56b786822c7b0a9d2bef719", "score": "0.5549737", "text": "def make_favorite(self, twitter):\n return twitter.favorite_create(self.id)", "title": "" }, { "docid": "f0dea74a604e97ac70cf92fe9a748afd", "score": "0.55400246", "text": "def favorite(self, recipe):\n self.favorites.add(recipe)", "title": "" }, { "docid": "1d7f29bbce1d7c806c7b4597ff5db546", "score": "0.54908615", "text": "def show_recommendations():\n\t\n\tlogged_in_user_id = session['user_id']\n\tlogged_in_username = session['username']\n\n\trecommendations = model.session.query(model.Recommendation).filter(\n\t\tmodel.Recommendation.recipient_id == logged_in_user_id, model.Recommendation.pending==True).all()\n\n\t# for rec in recommendations:\n\t# \tprint rec.id\n\t# \tprint rec.restaurant.name\n\t# \tprint rec.recommender.username \n\t# \tprint rec.recipient.username\n\n\t# create a dict with all needed info to render restaurant info & edit recommendation\n\trec_data = {}\n\tfor rec in recommendations:\n\t\trec_data[rec.id] = {}\t\t\t\t\t\t# item.id == bookmark id \n\t\trec_data[rec.id][\"bkm_id\"] = rec.id\n\t\trec_data[rec.id][\"rest_id\"] = rec.restaurant.id\n\t\trec_data[rec.id][\"recommender_username\"] = rec.recommender.username\n\t\trec_data[rec.id][\"recipient_username\"] = rec.recipient.username\n\t\trec_data[rec.id][\"rest_name\"] = rec.restaurant.name\n\t\trec_data[rec.id][\"rest_cuisine\"] = rec.restaurant.cuisine\n\t\trec_data[rec.id][\"rest_address\"] = rec.restaurant.address\n\t\trec_data[rec.id][\"rest_city\"] = rec.restaurant.city\n\t\trec_data[rec.id][\"rest_state\"] = rec.restaurant.state\n\t\trec_data[rec.id][\"rest_url\"] = rec.restaurant.url\n\t\trec_data[rec.id][\"rec_pending\"] = rec.pending\n\n\t# print rec_data\n\n\treturn render_template(\"recommendations.html\", recommendations = rec_data, username = logged_in_username)", "title": "" }, { "docid": "ddd40f7c544db6b547953523fb9a365e", "score": "0.5489843", "text": "def update_favorite_product(self, asin):\n\n product = Product.query.get(asin)\n\n if self.is_favorite_product(asin):\n\n # If the user has favorited the item, remove the favorite from the db\n self.favorite_products.remove(product)\n db.session.commit()\n return \"Unfavorited\"\n\n else:\n\n # If the user has not favorited the product, add it to the db\n self.favorite_products.append(product)\n db.session.commit()\n return \"Favorited\"", "title": "" }, { "docid": "0ef98607e1e6d4b0f7e22b53f0922675", 
"score": "0.5478395", "text": "def show_collection(collection_id):\n\n collection = Collection.query.filter_by(collection_id=collection_id).first()\n if 'current_user' in session:\n user_id = session['current_user']\n favorite = UserCollection.query.filter_by(user_id=user_id, collection_id=collection_id).first()\n is_favorited = favorite is not None\n else:\n is_favorited = False\n\n return render_template(\"museums.html\", collection=collection, is_favorited=is_favorited)", "title": "" }, { "docid": "7a39d2093170633328b865bab57945a6", "score": "0.5473548", "text": "def get_favorites(self, source_lang=None, target_lang=None, cleanup=True):\n for page in self._favorites_pager(source_lang, target_lang):\n for entry in page[\"results\"]:\n yield self._process_fav_entry(entry, cleanup)", "title": "" }, { "docid": "63e76f88e90ac72b58698debd8d673b5", "score": "0.54668707", "text": "def test_add_get_favorite(self):\n favitem = FavoriteItem(name='Product 1')\n favitem.save()\n self.assertFalse(Favorites.objects.get_favorite(self.user, favitem))\n\n fav = Favorites.objects.add_favorite(self.user, favitem)\n fav_len = len(Favorites.objects.all())\n self.assertTrue(fav_len == 1)\n\n # should not duplicate\n if not Favorites.objects.get_favorite(self.user, favitem):\n termprint(\"WARNING\", 'Not found....adding')\n fav_duplicate = Favorites.objects.add_favorite(self.user, favitem)\n\n fav_len = len(Favorites.objects.all())\n termprint(\"INFO\", \"%s rows found \" % fav_len)\n self.assertTrue(fav_len == 1)\n\n favget = Favorites.objects.get_favorite(self.user, favitem)\n self.assertTrue(favget)\n\n # test the templat tag get\n termprint(\"INFO\", my_favorites(self.user))\n self.assertTrue(my_favorites(self.user))", "title": "" }, { "docid": "d93ff5834f56e9ec1eeb9fd2931516f8", "score": "0.5456899", "text": "def add_to_favourite(recipieid, favourite):\n login_data = login_authorize(request, db)\n # if login is unsuccessful redirecting to login page again\n if not login_data[\"success\"]:\n return redirect(url_for(\"login\"))\n\n query = {\"_id\": ObjectId(recipieid)}\n update = dict(isFavourate={\"0\": False, \"1\": True}[str(favourite)])\n db[\"recipes\"].update_one(query, {\"$set\": update})\n return redirect(url_for(\"profile\"))", "title": "" }, { "docid": "8339fbf0e668f597d23f63fcce04072a", "score": "0.54560065", "text": "def get_favorite_roms(self):\n token = \"favorities\"\n s = self.util.load_value_db(token)\n if s:\n connection = sqlite3.connect(self.util.frontend_db)\n cursor = connection.cursor()\n sql = \"SELECT rom, name, custom_options, status, roms.id, year, manufacturer, \"\n sql += \"display_type, display_screen, input_players, input_control, input_buttons \"\n sql += \"FROM roms LEFT JOIN rom_info ON roms.id = rom_info.id WHERE roms.id IN \"\n sql += \"(\"+s+\") AND disabled = 0 AND found = 1 ORDER BY name ASC;\"\n\n cursor.execute(sql)\n return cursor.fetchall()\n else: return [[\"\",\"EMPTY ROM LIST\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",0,0]]", "title": "" }, { "docid": "b7443397e1d9354fc8a7611a536fe3bf", "score": "0.54468787", "text": "def get_queryset(self):\n user = self.request.user\n print(\"Usuario en request: \", user)\n return user.s7_user.favorito.songs.all()", "title": "" }, { "docid": "67d9a0bc2ff2359a27873c82184ff0d9", "score": "0.5432009", "text": "def favorite_gist(request, gist_id: str) -> Response:\n gist = get_gist_by_id(gist_id)\n faved_gist = FavoriteGist.objects.get_or_create(\n gist_id=gist_id,\n git_username=gist.github_user,\n description=gist.description\n 
)\n return Response(status=status.HTTP_201_CREATED)", "title": "" }, { "docid": "2779e0b0f073ee9d24228e75290af820", "score": "0.54233533", "text": "def add_to_fav(title):\n\n fav_dict = dict()\n # check if favorites exists\n if \"favorites\" in session:\n # add fav key to fav_dict\n fav_dict = session.get(\"favorites\")\n \n else:\n # create a new fav key\n session[\"favorites\"] = dict()\n \n print(f\"fav_dict before: {fav_dict}\")\n\n # store movie in favorites dict\n fav_dict[title] = title\n\n print(f\"fav_dict after: {fav_dict}\")\n\n # add key to session\n session[\"favorites\"] = fav_dict\n\n return redirect(url_for('homepage'))", "title": "" }, { "docid": "66b2ac975002391e81b2b946f4fd730d", "score": "0.5420327", "text": "async def get_favorite_books(self):\n if self._favorite_books is None:\n self._favorite_books = await self._state.get_related(self._relationships[\"favorite-books\"])\n return self._favorite_books", "title": "" }, { "docid": "827e6989ab3055d40a5ac28c569e2de0", "score": "0.54190373", "text": "def view_wishlist(request):\n user = UserAccount.objects.get(user=request.user)\n wishlist = Product.objects.filter(userwishlists__user_profile=user)\n context = {\n 'wishlist': wishlist,\n }\n return render(request, 'wishlists/wishlist.html', context)", "title": "" }, { "docid": "1f8196bd11fb7832359184cf6ef0a924", "score": "0.54181105", "text": "def print_favorite_added_message(self):\n print(self.messages[\"skip_a_lign\"])\n print(self.messages[\"favorites\"])\n print(self.messages[\"skip_a_lign\"])", "title": "" }, { "docid": "27bc175af8cece71d3296e06376ea421", "score": "0.5414401", "text": "def createFavorite(self,fin,outFavorite,foutMapping_user,foutMapping_steps):\n # fin = 'file/ted/testUser.csv'\n # fout = 'file/ted/testFavorite.csv'\n data = pd.read_table(fin)\n favorite = pd.DataFrame()\n user = data['Anon Student Id'].drop_duplicates()\n user.to_csv(foutMapping_user)\n steps = data['Step Name'].drop_duplicates()\n steps.to_csv(foutMapping_steps)\n\n favorite['Anon Student Id'] = data['Anon Student Id'].replace(user.values,user.index )\n favorite['Step Name'] = data['Step Name'].replace(steps.values,steps.index )\n favorite['Correct First Attempt'] = data['Correct First Attempt']\n favorite['Row'] = data['Row']\n\n favorite = favorite[favorite['Correct First Attempt']==1]\n favorite.to_csv(outFavorite, index=False, header=False)", "title": "" }, { "docid": "854ce5e769a5f4967a43b4a98e559e97", "score": "0.5409132", "text": "def favourite_rocky_planets():\n username = mongo.db.users.find_one(\n {\"username\": session[\"user\"]})[\"username\"]\n\n favourite_exoplanets = mongo.db[username].find(\n {'type': 'rocky'})\n return render_template('pages/favourites.html',\n favourite_exoplanets=favourite_exoplanets,\n rocky=True)", "title": "" }, { "docid": "24aba59f4a8ab65b27cbf40948f2dfa0", "score": "0.54066736", "text": "def toggle_favorite():\n request_data = request.json\n user_id = request_data[\"user\"]\n photo_url = request_data[\"photo\"][\"photoUrl\"]\n\n photo = LikedPhoto.query.filter(LikedPhoto.photo_url == photo_url and\n LikedPhoto.user_id == user_id).first()\n if photo is None:\n \n lp = LikedPhoto(\n title = request_data[\"photo\"][\"title\"],\n author = request_data[\"photo\"][\"author\"],\n photo_url = request_data[\"photo\"][\"photoUrl\"],\n link = request_data[\"photo\"][\"link\"],\n tags = request_data[\"photo\"][\"tags\"],\n user_id = user_id \n )\n \n db_session.add(lp)\n db_session.commit()\n\n response = { \"status\": True, \"action\": \"liked\", \"id\": 
lp.id }\n else:\n db_session.delete(photo)\n db_session.commit()\n\n response = { \"status\": True, \"action\": \"unliked\" }\n\n return jsonify(response)", "title": "" }, { "docid": "a3af32f6c25ce14c9467310954314e6e", "score": "0.5402849", "text": "def display_flavors(self):\n print(\"Flavors available for today:\")\n for flavor in self.flavors:\n print(flavor.title())", "title": "" }, { "docid": "53208aeedd8e1be09e40652a9805092b", "score": "0.5402621", "text": "def get_favorite_reviews_for_product(self, asin):\n\n return self.favorite_reviews.filter_by(asin=asin).all()", "title": "" }, { "docid": "f02d196d2867ba95163a584abd44904e", "score": "0.5397619", "text": "def favorite_column_name(self, a_table_def: MetaDataTable) -> str:\n favorite_names = self._favorite_names_list\n for each_favorite_name in favorite_names:\n columns = a_table_def.columns\n for each_column in columns:\n col_name = each_column.name.lower()\n if col_name == each_favorite_name:\n return each_column.name\n for each_column in columns:\n col_name = each_column.name.lower()\n if each_favorite_name in col_name:\n return each_column.name\n for each_column in columns: # no favorites, just return 1st\n return each_column.name", "title": "" } ]
76688fe53cd914782ded9a48e64176b9
Return a discrete colormap from the continuous colormap cmap.
[ { "docid": "abf7018deae5cecaf093263ba6dfd96b", "score": "0.62132716", "text": "def cmap_discretize(self, cmap, N):\n\t\tif type(cmap) == str:\n\t\t\tcmap = cm.get_cmap(cmap)\n\t\t\tcolors_i = np.concatenate((np.linspace(0, 1., N), (0., 0., 0., 0.)))\n\t\t\tcolors_rgba = cmap(colors_i)\n\t\t\tindices = np.linspace(0, 1., N + 1)\n\t\t\tcdict = {}\n\t\tfor ki, key in enumerate(('red', 'green', 'blue')):\n\t\t\tcdict[key] = [(indices[i], colors_rgba[i - 1, ki], colors_rgba[i, ki]) for i in range(N + 1)]\n\t\treturn mc.LinearSegmentedColormap(cmap.name + \"_%d\" % N, cdict, 1024)", "title": "" } ]
[ { "docid": "64530397dc432e3fd67edf428f43658b", "score": "0.7468833", "text": "def _continuous_colormap(colormap, values, vmin=None, vmax=None):\n assert values is not None\n assert colormap.shape[1] == 3\n n = colormap.shape[0]\n vmin = vmin if vmin is not None else values.min()\n vmax = vmax if vmax is not None else values.max()\n assert vmin is not None\n assert vmax is not None\n denom = vmax - vmin\n denom = denom if denom != 0 else 1\n # NOTE: clipping is necessary when a view using color selector (like the raster view)\n # is updated right after a clustering update, but before the vmax had a chance to\n # be updated.\n i = np.clip(np.round((n - 1) * (values - vmin) / denom).astype(np.int32), 0, n - 1)\n return colormap[i, :]", "title": "" }, { "docid": "88ccaa58654ceafe08bf8ebf00bf2a7a", "score": "0.73542523", "text": "def _categorical_colormap(colormap, values, vmin=None, vmax=None):\n assert np.issubdtype(values.dtype, np.integer)\n assert colormap.shape[1] == 3\n n = colormap.shape[0]\n if vmin is None and vmax is None:\n # Find unique values and keep the order.\n _, idx = np.unique(values, return_index=True)\n lookup = values[np.sort(idx)]\n x = _index_of(values, lookup)\n else:\n x = values\n return colormap[x % n, :]", "title": "" }, { "docid": "808adccfe7a36483cd8b5928d53f8606", "score": "0.7243529", "text": "def discrete_cmap(N, base_cmap=None):\n # Note that if base_cmap is a string or None, you can simply do\n # return plt.cm.get_cmap(base_cmap, N)\n # The following works for string, None, or a colormap instance:\n\n base = plt.cm.get_cmap(base_cmap)\n color_list = base(np.linspace(0, 1, N))\n cmap_name = base.name + str(N)\n return base.from_list(cmap_name, color_list, N)", "title": "" }, { "docid": "f1cbb6add4b63323663684021d2b9be7", "score": "0.723842", "text": "def discrete_cmap(N, base_cmap=None):\n # Note that if base_cmap is a string or None, you can simply do\n # return plt.cm.get_cmap(base_cmap, N)\n # The following works for string, None, or a colormap instance:\n base = plt.cm.get_cmap(base_cmap)\n color_list = base(np.linspace(0, 1, N))\n cmap_name = base.name + str(N)\n return base.from_list(cmap_name, color_list, N)", "title": "" }, { "docid": "f1cbb6add4b63323663684021d2b9be7", "score": "0.723842", "text": "def discrete_cmap(N, base_cmap=None):\n # Note that if base_cmap is a string or None, you can simply do\n # return plt.cm.get_cmap(base_cmap, N)\n # The following works for string, None, or a colormap instance:\n base = plt.cm.get_cmap(base_cmap)\n color_list = base(np.linspace(0, 1, N))\n cmap_name = base.name + str(N)\n return base.from_list(cmap_name, color_list, N)", "title": "" }, { "docid": "78bcc7975467571a6224d3629e066937", "score": "0.722852", "text": "def discrete_cmap(N, base_cmap=None):\n # Note that if base_cmap is a string or None, you can simply do\n # return plt.cm.get_cmap(base_cmap, N)\n # The following works for string, None, or a colormap instance:\n base = plt.cm.get_cmap(base_cmap)\n color_list = base(np.linspace(0, 1, N))\n cmap_name = base.name + str(N)\n return matplotlib.colors.LinearSegmentedColormap.from_list(cmap_name, color_list, N)", "title": "" }, { "docid": "934f71112fb070f3acc13fbc94ade6e2", "score": "0.72011316", "text": "def discrete_cmap(N, base_cmap=None):\n\n\t# Note that if base_cmap is a string or None, you can simply do\n\t# return plt.cm.get_cmap(base_cmap, N)\n\t# The following works for string, None, or a colormap instance:\n\n\tbase = plt.cm.get_cmap(base_cmap)\n\tcolor_list = base(np.linspace(0, 1, 
N))\n\tcmap_name = base.name + str(N)\n\treturn base.from_list(cmap_name, color_list, N)", "title": "" }, { "docid": "bfa41f8d1c6869de71b095ac39dc5168", "score": "0.71725595", "text": "def discrete_cmap(N, base_cmap=None):\n\n # Note that if base_cmap is a string or None, you can simply do\n # return plt.cm.get_cmap(base_cmap, N)\n # The following works for string, None, or a colormap instance:\n\n base = plt.cm.get_cmap(base_cmap)\n color_list = base(np.linspace(0, 1, N))\n cmap_name = base.name + str(N)\n return base.from_list(cmap_name, color_list, N)", "title": "" }, { "docid": "bfa41f8d1c6869de71b095ac39dc5168", "score": "0.71725595", "text": "def discrete_cmap(N, base_cmap=None):\n\n # Note that if base_cmap is a string or None, you can simply do\n # return plt.cm.get_cmap(base_cmap, N)\n # The following works for string, None, or a colormap instance:\n\n base = plt.cm.get_cmap(base_cmap)\n color_list = base(np.linspace(0, 1, N))\n cmap_name = base.name + str(N)\n return base.from_list(cmap_name, color_list, N)", "title": "" }, { "docid": "bfa41f8d1c6869de71b095ac39dc5168", "score": "0.71725595", "text": "def discrete_cmap(N, base_cmap=None):\n\n # Note that if base_cmap is a string or None, you can simply do\n # return plt.cm.get_cmap(base_cmap, N)\n # The following works for string, None, or a colormap instance:\n\n base = plt.cm.get_cmap(base_cmap)\n color_list = base(np.linspace(0, 1, N))\n cmap_name = base.name + str(N)\n return base.from_list(cmap_name, color_list, N)", "title": "" }, { "docid": "961f9c07e886a09de8c424658a3110a3", "score": "0.7131422", "text": "def discrete_cmap(N, base_cmap=None):\n\n # Note that if base_cmap is a string or None, you can simply do\n # return plt.cm.get_cmap(base_cmap, N)\n # The following works for string, None, or a colormap instance:\n\n base = pyplot.cm.get_cmap(base_cmap)\n color_list = base(numpy.linspace(0, 1, N))\n cmap_name = base.name + str(N)\n return colors.LinearSegmentedColormap.from_list(cmap_name, color_list, N)", "title": "" }, { "docid": "5c2c3f876a592bee6bdb03661dce1075", "score": "0.70590246", "text": "def get_discrete_cmap(N, base_cmap=None):\n base = plt.cm.get_cmap(base_cmap)\n\n color_list = base(np.linspace(0, 1, N))\n cmap_name = base.name + str(N)\n\n try:\n cm = base.from_list(cmap_name, color_list, N)\n except AttributeError:\n cm = plt.cm.get_cmap(base_cmap, N)\n\n return cm", "title": "" }, { "docid": "0dc17ab83be8b8406f05264d8b68d864", "score": "0.69799864", "text": "def mk_discrete_cmap( lvls=None, cmap=None, arr=None,\\\n vmin=0, vmax=10, nticks=10, debug=False ):\n \n # define bins\n if isinstance( lvls, type(None) ):\n bounds = np.linspace( vmin, vmax, nticks, endpoint=True )\n else:\n bounds = lvls\n \n # get colormap if not provided\n if isinstance( cmap, type(None) ):\n cmap = get_colormap( np.array([vmin, vmax]) )\n\n if debug:\n print lvls, vmin, vmax, nticks\n \n # extract colors\n# cmaplist = [ cmap(i) for i in range( len(lvls ) ) ]\n cmaplist = [cmap(i) for i in range(cmap.N)]\n\n # force the first color entry to be grey\n# cmaplist[0] = (.5,.5,.5,1.0)\n\n # create the new discrete cmap\n cmap = cmap.from_list('Custom cmap', cmaplist, cmap.N )\n\n # make norm... 
- define the bins and normalize\n norm = mpl.colors.BoundaryNorm( bounds, cmap.N)\n\n return cmap, norm", "title": "" }, { "docid": "647b4d71623acb7710c9ba1de0f9fcca", "score": "0.675352", "text": "def inverseCmap(cmap_name):\n import matplotlib._cm as _cm\n import matplotlib as mpl\n try:\n cmap_data = eval('_cm._%s_data' % cmap_name)\n except:\n raise ValueError, \"Cannot obtain data for the colormap %s\" % cmap_name\n new_data = dict( [(k, [(v[i][0], v[-(i+1)][1], v[-(i+1)][2])\n for i in xrange(len(v))])\n for k,v in cmap_data.iteritems()] )\n return mpl.colors.LinearSegmentedColormap('%s_rev' % cmap_name,\n new_data, _cm.LUTSIZE)", "title": "" }, { "docid": "e47adf5fac5fae1ffff29925b8382db4", "score": "0.6712286", "text": "def colors_from_colormap_continuous(num, cmap): \n ints = np.arange(0,num+1,1)\n norm = clr.Normalize(vmin=ints[0], vmax=ints[-1], clip=True)\n mapper = cm.ScalarMappable(norm=norm, cmap=cmap)\n color_list = []\n for i in ints:\n color_list.append(mapper.to_rgba(i))\n \n return color_list", "title": "" }, { "docid": "cc3c1dfa02e1787041bfd66f2a95effa", "score": "0.649215", "text": "def _cmapDiscretize(cls, cmap: cm, n: int) -> mcolors.LinearSegmentedColormap:\n cdict = cmap._segmentdata.copy()\n # N colors\n colors_i = np.linspace(0,1.,n)\n # N+1 indices\n indices = np.linspace(0,1.,n+1)\n for key in ('red','green','blue'):\n # Find the N colors\n D = np.array(cdict[key])\n colors = np.interp(colors_i, D[:,0], D[:,1])\n #I = sp.interpolate.interp1d(D[:,0], D[:,1])\n #colors = I(colors_i)\n # Place these colors at the correct indices.\n A = np.zeros((n+1,3), float)\n A[:,0] = indices\n A[1:,1] = colors\n A[:-1,2] = colors\n # Create a tuple for the dictionary.\n L = []\n for l in A:\n L.append(tuple(l))\n cdict[key] = tuple(L)\n # Return colormap object.\n return mcolors.LinearSegmentedColormap('colormap', cdict, 1024)", "title": "" }, { "docid": "c62d1bb3b61f9b5feb5ca5ab05337f45", "score": "0.64542574", "text": "def colors_from_colormap(num, cmap):\n ## First check if colormap is discrete. 
Best way I can see to do it is \n ## through the colors attribute\n if getattr(cmap, \"colors\", False):\n ### Get maximally different colors\n num_colors_cmap = len(cmap.colors)\n skip_number = int(num_colors_cmap / num)\n return cmap.colors[::skip_number][:num]\n \n ints = np.arange(0,num+1,1)\n norm = clr.Normalize(vmin=ints[0], vmax=ints[-1], clip=True)\n mapper = cm.ScalarMappable(norm=norm, cmap=cmap)\n color_list = []\n for i in ints:\n color_list.append(mapper.to_rgba(i))\n \n return color_list", "title": "" }, { "docid": "ff9020459293a262a325c5e3f881c406", "score": "0.6398298", "text": "def inverse_cmap(cmap_name):\r\n import matplotlib._cm as _cm\r\n import matplotlib as mpl\r\n try:\r\n cmap_data = eval('_cm._%s_data' % cmap_name)\r\n except:\r\n raise ValueError, \"Cannot obtain data for the colormap %s\" % cmap_name\r\n new_data = dict( [(k, [(v[i][0], v[-(i+1)][1], v[-(i+1)][2])\r\n for i in xrange(len(v))])\r\n for k,v in cmap_data.iteritems()] )\r\n return mpl.colors.LinearSegmentedColormap('%s_rev' % cmap_name,\r\n new_data, _cm.LUTSIZE)", "title": "" }, { "docid": "b3d4d53b2061fe20a953f8eb9e634047", "score": "0.63774854", "text": "def cmap_discretize(self, cmap, N):\n\n if type(cmap) == str:\n cmap = plt.get_cmap(cmap)\n\n colors_i = np.concatenate((np.linspace(0, 1., N), (0.,0.,0.,0.)))\n colors_rgba = cmap(colors_i)\n indices = np.linspace(0, 1., N+1)\n cdict = {}\n\n for ki,key in enumerate(('red','green','blue')):\n cdict[key] = [ (indices[i], colors_rgba[i-1,ki], colors_rgba[i,ki]) for i in range(N+1) ]\n\n # Return colormap object.\n return _mcolors.LinearSegmentedColormap(cmap.name + \"_%d\"%N, cdict, 1024)", "title": "" }, { "docid": "d295d29b66139c47bba30855eab8c577", "score": "0.6323158", "text": "def cmap_discretize(cmap, N):\n \n if type(cmap) == str:\n cmap = plt.get_cmap(cmap)\n colors_i = np.concatenate((np.linspace(0, 1., N), (0.,0.,0.,0.)))\n colors_rgba = cmap(colors_i)\n indices = np.linspace(0, 1., N+1)\n cdict = {}\n for ki,key in enumerate(('red','green','blue')):\n cdict[key] = [ (indices[i], colors_rgba[i-1,ki], colors_rgba[i,ki])\n for i in xrange(N+1) ]\n\n # Return colormap object.\n return mcolors.LinearSegmentedColormap(cmap.name + \"_%d\"%N, cdict, 1024)", "title": "" }, { "docid": "39cce7dc6fde741d0aa39c4932495bbf", "score": "0.6306001", "text": "def cmap_discretize(cmap, N):\n if type(cmap) == str:\n cmap = get_cmap(cmap)\n colors_i = np.concatenate((np.linspace(0, 1., N), (0., 0., 0., 0.)))\n colors_rgba = cmap(colors_i)\n indices = np.linspace(0, 1., N + 1)\n cdict = {}\n for ki, key in enumerate(('red', 'green', 'blue')):\n cdict[key] = [(indices[i], colors_rgba[i - 1, ki], colors_rgba[i, ki]) for i in xrange(N + 1)]\n return matplotlib.colors.LinearSegmentedColormap(cmap.name + \"_%d\" % N, cdict, 1024)", "title": "" }, { "docid": "6b78879f4af57cc55ffcacf4993eb32e", "score": "0.6256127", "text": "def cmap_fromList(colorList=None, name='mycm', discrete=False):\n\tif colorList == None:\n\t\tcolorList =['#6C9F7B','#783F68','#F4F4F2','#22F322','#F3F322','#0000F3']\n\tif discrete==False:\n\t\tcdic = {'red':[], 'green':[], 'blue':[]}\n\t\tx = numpy.linspace(0.,1., len(colorList))\n\t\tfor ic in range(len(colorList)):\n\t\t\tr,g,b = matplotlib.colors.hex2color(colorList[ic])\n\t\t\tcdic['red'].append((x[ic], r, r))\n\t\t\tcdic['green'].append((x[ic], g, g))\n\t\t\tcdic['blue'].append((x[ic], b, b))\n\t\treturn(matplotlib.colors.LinearSegmentedColormap(name,cdic,1024))\n\telse:\n\t\treturn(matplotlib.colors.ListedColormap(colorList, name=name 
))", "title": "" }, { "docid": "a9fc2bba7b4d1f763a6326454ec81a33", "score": "0.6247591", "text": "def cmap_discretize(cmap, N):\n\n if type(cmap) == str:\n cmap = get_cmap(cmap)\n colors_i = np.concatenate((np.linspace(0, 1., N), (0.,0.,0.,0.)))\n colors_rgba = cmap(colors_i)\n indices = np.linspace(0, 1., N+1)\n cdict = {}\n for ki,key in enumerate(('red','green','blue')):\n cdict[key] = [ (indices[i], colors_rgba[i-1,ki], colors_rgba[i,ki]) for i in xrange(N+1) ]\n # Return colormap object.\n return matplotlib.colors.LinearSegmentedColormap(cmap.name + \"_%d\"%N, cdict, 1024)", "title": "" }, { "docid": "03690b0bc4d7fb3e2afdb7ffa0882ef0", "score": "0.62045914", "text": "def cmap_discretize(cmap, N):\n\n if type(cmap) == str:\n cmap = get_cmap(cmap)\n colors_i = np.concatenate((np.linspace(0, 1., N), (0.,0.,0.,0.)))\n colors_rgba = cmap(colors_i)\n indices = np.linspace(0, 1., N+1)\n cdict = {}\n for ki,key in enumerate(('red','green','blue')):\n cdict[key] = [ (indices[i], colors_rgba[i-1,ki], colors_rgba[i,ki]) for i in xrange(N+1) ]\n # Return colormap object.\n return colors.LinearSegmentedColormap(cmap.name + \"_%d\"%N, cdict, 1024)", "title": "" }, { "docid": "89b644deba35b9c06d5b377ec4359e07", "score": "0.61795497", "text": "def cmap_discretize(cmap, N):\n if type(cmap) == str:\n cmap = get_cmap(cmap)\n colors_i = np.concatenate((np.linspace(0, 1., N), (0., 0., 0., 0.)))\n colors_rgba = cmap(colors_i)\n indices = np.linspace(0, 1., N + 1)\n cdict = {}\n for ki, key in enumerate(('red', 'green', 'blue')):\n cdict[key] = [(indices[i], colors_rgba[i - 1, ki], colors_rgba[i, ki]) for i in xrange(N + 1)]\n return LinearSegmentedColormap(cmap.name + \"_%d\" % N, cdict, 1024)", "title": "" }, { "docid": "c10e67363ca60e2dd79dca3e2b5ac897", "score": "0.6128432", "text": "def _color_map(self, normalized=True, base_map_name='tab20'):\n\n base_map = matplotlib.cm.get_cmap(base_map_name, 22).colors\n cmap = np.zeros_like(base_map)\n cmap[0,-1] = 1\n cmap[1:-1] = base_map[:20]\n cmap[-1] = [1, 1, 1, 1]\n\n return matplotlib.colors.ListedColormap(cmap)", "title": "" }, { "docid": "39731a130b20336432206d345d6b712d", "score": "0.6064429", "text": "def colorize(value, vmin=0, vmax=1, cmap='viridis'):\n\n # normalize\n vmin = value.min() if vmin is None else vmin\n vmax = value.max() if vmax is None else vmax\n if vmin!=vmax:\n value = (value - vmin) / (vmax - vmin) # vmin..vmax\n else:\n # Avoid 0-division\n value = value*0.\n # squeeze last dim if it exists\n value = value.squeeze()\n\n cmapper = matplotlib.cm.get_cmap(cmap)\n value = cmapper(value,bytes=True) # (nxmx4)\n return np.ascontiguousarray(value[:, :, :3].transpose(2, 0, 1))", "title": "" }, { "docid": "d308ef3d2117c89122cad514f908c84b", "score": "0.60576123", "text": "def cmap_discretize(cmap, N):\n\n cdict = cmap._segmentdata.copy()\n # N colors\n colors_i = linspace(0,1.,N)\n # N+1 indices\n indices = linspace(0,1.,N+1)\n for key in ('red','green','blue'):\n # Find the N colors\n D = array(cdict[key])\n I = interpolate.interp1d(D[:,0], D[:,1])\n colors = I(colors_i)\n # Place these colors at the correct indices.\n A = zeros((N+1,3), float)\n A[:,0] = indices\n A[1:,1] = colors\n A[:-1,2] = colors\n # Create a tuple for the dictionary.\n L = []\n for l in A:\n L.append(tuple(l))\n cdict[key] = tuple(L)\n # Return colormap object.\n return matplotlib.colors.LinearSegmentedColormap('colormap',cdict,1024)", "title": "" }, { "docid": "5d508f178bdba7694e594ca9dcbaabf0", "score": "0.6025732", "text": "def get_density_cmap():\n # Add 
completely white color to Reds colormap in Matplotlib\n list_colors = plt.cm.datad['Reds']\n list_colors = list(list_colors)\n list_colors.insert(0, (1, 1, 1))\n list_colors.insert(0, (1, 1, 1))\n lscm = matplotlib.colors.LinearSegmentedColormap.from_list(\"my_Reds\", list_colors)\n return lscm", "title": "" }, { "docid": "65c43951cf88d99adbc2d306172b425c", "score": "0.5988357", "text": "def reverse_cmap(cmap, N=256):\n rev_cm_name = '_'.join([cmap.name, 'r'])\n try:\n rev_cm = mpl.colors.LinearSegmentedColormap(\n rev_cm_name, mpl_cm.revcmap(cmap._segmentdata), N=N)\n except AttributeError:\n seg_cmap = mpl.colors.LinearSegmentedColormap.from_list(\n 'temp', cmap.colors)\n rev_cm = mpl.colors.LinearSegmentedColormap(\n rev_cm_name, mpl_cm.revcmap(seg_cmap._segmentdata), N=N)\n\n return rev_cm", "title": "" }, { "docid": "f939807df6ccf5846a640e9592675a92", "score": "0.5987681", "text": "def get_cmap(cmap):\n AVAILABLE_CMAPS = {\n \"magma\": cv2.COLORMAP_MAGMA,\n \"jet\": cv2.COLORMAP_JET,\n \"gray\": None,\n \"kitti\": kitti_colormap,\n }\n assert cmap in AVAILABLE_CMAPS.keys()\n return AVAILABLE_CMAPS[cmap]", "title": "" }, { "docid": "e0b0612baec59e01fba5a99e2e88964d", "score": "0.5985926", "text": "def from_discrete_norm(cls, boundaries, cmap=mpl.cm.jet, norm_kws=dict(), **kwargs):\n if isinstance(cmap, str):\n cmap = mpl.cm.get_cmap(cmap)\n return cls(vmin=None, vmax=None, cmap=cmap, norm=discrete_norm(boundaries, cmap, values_are_bounds=True, **norm_kws), **kwargs)", "title": "" }, { "docid": "d68000b72e83726aa92738e667e157b3", "score": "0.593917", "text": "def getcmaprgb(N, cmap):\n return cmap(np.linspace(0,255,N).astype(int))", "title": "" }, { "docid": "7edb29d663edef03361e6c9f646bc7a1", "score": "0.5935895", "text": "def cmap_div():\n c = cm.get_cmap('gist_ncar_r', 256)\n new1 = c(np.linspace(0.1, 0.5, 128))\n new2 = c(np.linspace(0.6, 1., 128))\n newcmp = colors.ListedColormap(np.concatenate([new1, new2], axis = 0))\n return newcmp", "title": "" }, { "docid": "a7c112d36faa7d69c7dad2cdd71bf853", "score": "0.59170985", "text": "def get_cmap_colors(Nc, cmap='plasma'):\n scalarMap = cmx.ScalarMappable(norm=colors.Normalize(vmin=0, vmax=Nc),\n cmap=cmap)\n return [scalarMap.to_rgba(i) for i in range(Nc)]", "title": "" }, { "docid": "db45723aec289d5985c463336f3759f4", "score": "0.5911246", "text": "def make_colorbar_continuous(cls, data, colormap): \n \n return go.Scattergl(x=[None],y=[None], mode='markers', showlegend=False, \n marker=dict(colorscale=colormap, showscale=True, cmin=np.min(data), cmax=np.max(data), \n colorbar=dict(outlinecolor='black', outlinewidth=0.5)), hoverinfo='none')", "title": "" }, { "docid": "6bd30d7ff1437c71804934ebf5498b92", "score": "0.58975", "text": "def cmap_discretize(cmap, n):\n\n cdict = cmap._segmentdata.copy()\n # n colors\n colors_i = linspace(0, 1., n)\n # n+1 indices\n indices = linspace(0, 1., n+1)\n for key in ('red', 'green', 'blue'):\n # Find the n colors\n D = np.array(cdict[key])\n I = interpolate.interp1d(D[:, 0], D[:, 1])\n colors = I(colors_i)\n # Place these colors at the correct indices.\n A = np.zeros((n+1, 3), float)\n A[:, 0] = indices\n A[1:, 1] = colors\n A[:-1, 2] = colors\n # Create a tuple for the dictionary.\n L = []\n for l in A:\n L.append(tuple(l))\n cdict[key] = tuple(L)\n # Return colormap object.\n return LinearSegmentedColormap('colormap', cdict, 1024)", "title": "" }, { "docid": "8f0779b4a1dcd4da6e7233fe8185dcde", "score": "0.5896322", "text": "def getColormap(self):\n return self.getColorScaleBar().getColormap()", 
"title": "" }, { "docid": "9990c3d735a2cc58b9089aa3046d25ac", "score": "0.58815813", "text": "def shiftedColorMap(cmap, start=0, midpoint=0.5, stop=1.0, name=\"shiftedcmap\"):\n cdict = {\"red\": [], \"green\": [], \"blue\": [], \"alpha\": []}\n\n # regular index to compute the colors\n reg_index = np.linspace(start, stop, 257)\n\n # shifted index to match the data\n shift_index = np.hstack(\n [\n np.linspace(0.0, midpoint, 128, endpoint=False),\n np.linspace(midpoint, 1.0, 129, endpoint=True),\n ]\n )\n\n for ri, si in zip(reg_index, shift_index):\n r, g, b, a = cmap(ri)\n\n cdict[\"red\"].append((si, r, r))\n cdict[\"green\"].append((si, g, g))\n cdict[\"blue\"].append((si, b, b))\n cdict[\"alpha\"].append((si, a, a))\n\n newcmap = mpl.colors.LinearSegmentedColormap(name, cdict)\n\n return newcmap", "title": "" }, { "docid": "2c27a7296f24db49622d1d95816734bb", "score": "0.5859245", "text": "def generate_color_scale(color_ranges: int )-> matplotlib.colors.LinearSegmentedColormap:\r\n return plt.cm.get_cmap('hsv',color_ranges)", "title": "" }, { "docid": "9a85319256bc4e9e3b619c3072e9e04e", "score": "0.5843894", "text": "def _cmap_from_color(self, color):\n # Like so much else here, this is broadly useful, but keeping it\n # in this class to signify that I haven't thought overly hard about it...\n r, g, b, _ = to_rgba(color)\n h, s, _ = husl.rgb_to_husl(r, g, b)\n xx = np.linspace(-1, 1, int(1.15 * 256))[:256]\n ramp = np.zeros((256, 3))\n ramp[:, 0] = h\n ramp[:, 1] = s * np.cos(xx)\n ramp[:, 2] = np.linspace(35, 80, 256)\n colors = np.clip([husl.husl_to_rgb(*hsl) for hsl in ramp], 0, 1)\n return mpl.colors.ListedColormap(colors[::-1])", "title": "" }, { "docid": "ff87e6bd6e7c7b8b1558a11a3b6a44e9", "score": "0.58133584", "text": "def cmap(self):\n if self._cmap is None:\n cmapsize = np.max(np.unique(self.colormap(self.map)))\n cmfilename = \"cmap-{0}.dat\".format(self._name.replace(\" \", \"_\"))\n try:\n cmfile = open(cmfilename, \"rb\")\n r, g, b = pickle.load(cmfile)\n cmfile.close()\n if len(r) != cmapsize:\n raise IndexError\n except:\n log.debug(\"GENERATING CMAP: {}\".format(cmapsize))\n r = np.random.uniform(size=cmapsize)\n g = np.random.uniform(size=cmapsize)\n b = np.random.uniform(size=cmapsize)\n if cmapsize > 0:\n r[0] = 0.9\n g[0] = 0.9\n b[0] = 0.9\n r[2] = 0.5\n g[2] = 0.5\n b[2] = 0.5\n cmfile = open(cmfilename, \"wb\")\n pickle.dump((r, g, b), cmfile)\n cmfile.close()\n\n if cmapsize > 0:\n self._cmap = LinearSegmentedColormap.from_list(\"rands\",\n list(zip(r, g, b)))\n return self._cmap", "title": "" }, { "docid": "6ebc5de1476caf296990254a1afb4806", "score": "0.579725", "text": "def colormap(self):\n return self.__colormap", "title": "" }, { "docid": "710af6f5f9cacef6ae89404caf88ec94", "score": "0.57838565", "text": "def create_custom_colormap():\n\n cmap = np.zeros((180,360,3))\n\n x, y = np.mgrid[0:180:1, 0:360:1]\n pos = np.dstack((x, y))\n rv = multivariate_normal([0, 0], 10000* np.asarray([[2.0, 0.], [0., 0.5]])).pdf(pos)\n rv += multivariate_normal([0, 360], 10000* np.asarray([[2.0, -0.], [-0., 0.50]])).pdf(pos)\n cmap[:,:,2] = rv / np.max(rv)\n\n rv = multivariate_normal([0, 120], 10000* np.asarray([[2.5, 0.], [0., 0.5]])).pdf(pos)\n cmap[:,:,1] = rv / np.max(rv)\n\n rv = multivariate_normal([180, 120], 10000* np.asarray([[0.5, 0.], [0., 40]])).pdf(pos)\n cmap[:,:,0] = rv / np.max(rv)\n\n return cmap", "title": "" }, { "docid": "2b9c921f37da27da9253e26c84d0f4cb", "score": "0.5775305", "text": "def get_cmap(name):\n if name.lower() == \"accent\":\n warn(\"The 
`Accent` colormap is deprecated as of version\" +\n \" 0.2 of Fury and will be removed in a future \" +\n \"version. Please use another colormap\",\n DeprecationWarning)\n\n global dipy_cmaps\n if dipy_cmaps is None:\n filename = pjoin(DATA_DIR, \"dipy_colormaps.json\")\n with open(filename) as f:\n dipy_cmaps = json.load(f)\n\n desc = dipy_cmaps.get(name)\n if desc is None:\n return None\n\n def simple_cmap(v):\n \"\"\"Emulates matplotlib colormap callable\"\"\"\n rgba = np.ones((len(v), 4))\n for i, color in enumerate(('red', 'green', 'blue')):\n x, y0, y1 = zip(*desc[color])\n # Matplotlib allows more complex colormaps, but for users who do\n # not have Matplotlib fury makes a few simple colormaps available.\n # These colormaps are simple because y0 == y1, and therefor we\n # ignore y1 here.\n rgba[:, i] = np.interp(v, x, y0)\n return rgba\n\n return simple_cmap", "title": "" }, { "docid": "c91f226d101a0fa3476c7cde73e5f41f", "score": "0.57339555", "text": "def shifted_color_map(vmin, vmax, cmap = None):\n\n if cmap is None:\n cmap = colormaps.seismic\n \n midpoint = 1 - abs(vmax)/(abs(vmax) + abs(vmin))\n \n cdict = {\n 'red': [],\n 'green': [],\n 'blue': [],\n 'alpha': []\n }\n\n # regular index to compute the colors\n reg_index = linspace(0, 1, 257)\n\n # shifted index to match the data\n shift_index = hstack([\n linspace(0.0, midpoint, 128, endpoint=False), \n linspace(midpoint, 1.0, 129, endpoint=True)\n ])\n\n for ri, si in zip(reg_index, shift_index):\n r, g, b, a = cmap(ri)\n\n cdict['red'].append((si, r, r))\n cdict['green'].append((si, g, g))\n cdict['blue'].append((si, b, b))\n cdict['alpha'].append((si, a, a))\n\n return colors.LinearSegmentedColormap('shiftedcmap', cdict)", "title": "" }, { "docid": "5c99936c18abbd0e04821eb1124656a5", "score": "0.56820494", "text": "def _n_colors_from_colormap(n, cmap='Set1'):\n from matplotlib.cm import get_cmap\n cm = get_cmap(cmap)\n return [cm(1.*i/n) for i in range(n)]", "title": "" }, { "docid": "796f083076d81eb7b55876c8a2a9d276", "score": "0.5646625", "text": "def cmap(self):\n return self.vtab.cmap", "title": "" }, { "docid": "0f6a53ad59496c212861a625127eaf53", "score": "0.5640036", "text": "def get_colormap(name: str) -> IntensityMap:\n return _registered_colormaps[name]", "title": "" }, { "docid": "57fe30cfb9e4a7a1ff3f134efe64d7a4", "score": "0.5636796", "text": "def map_to_color(map, crange, args):\n\tmap = ((map.T-crange[0])/(crange[1]-crange[0])).T # .T ensures broadcasting for rgb case\n\tif args.reverse_color: map = 1-map\n\tif args.rgb: m_color = colorize.colorize(map, desc=args.color, driver=args.method, mode=args.rgb_mode)\n\telse: m_color = colorize.colorize(map[0], desc=args.color, driver=args.method)\n\tm_color = enmap.samewcs(np.rollaxis(m_color,2), map)\n\treturn m_color", "title": "" }, { "docid": "047cb69c758d77eba76107736291ea72", "score": "0.5622111", "text": "def _get_seq_cmap(seq):\n seq = [(None,) * 3, 0.0] + list(seq) + [1.0, (None,) * 3]\n cdict = {'red': [], 'green': [], 'blue': []}\n for i, item in enumerate(seq):\n if isinstance(item, float):\n r1, g1, b1 = seq[i - 1]\n r2, g2, b2 = seq[i + 1]\n cdict['red'].append([item, r1, r2])\n cdict['green'].append([item, g1, g2])\n cdict['blue'].append([item, b1, b2])\n\n return mc.LinearSegmentedColormap('CustomMap', cdict)", "title": "" }, { "docid": "c834004475c70f7c1d26caef60148e03", "score": "0.5620833", "text": "def ordered_cmap(N, base_cmap='nipy_spectral'):\n\n # Note that if base_cmap is a string or None, you can simply do\n # return 
plt.cm.get_cmap(base_cmap, N)\n # The following works for string, None, or a colormap instance:\n\n base = plt.cm.get_cmap(base_cmap)\n color_list = base(np.linspace(0, 1, N))\n color_list[0,:]=[0., 0., 0., 0.]\n cmap_name = base.name + str(N)\n return base.from_list(cmap_name, color_list, N)", "title": "" }, { "docid": "1b8093d91a1a59d4c186a601bd28054d", "score": "0.5569671", "text": "def _cmap_xmap(function, cmap):\n import matplotlib as mpl\n\n cdict = cmap._segmentdata\n function_to_map = lambda x : (function(x[0]), x[1], x[2])\n for key in ('red','green','blue'):\n cdict[key] = map(function_to_map, cdict[key])\n cdict[key].sort()\n assert (cdict[key][0]<0 or cdict[key][-1]>1), \"Resulting indices extend out of the [0, 1] segment.\"\n\n return mpl.colors.LinearSegmentedColormap('colormap',cdict,1024)", "title": "" }, { "docid": "2146f1bf7bbc0e669672524bf3ab7c1c", "score": "0.556244", "text": "def make_lut(colormap: Dict) -> numpy.ma.array:\n lut = numpy.zeros(shape=(256, 4), dtype=numpy.uint8)\n for i, color in colormap.items():\n lut[int(i)] = color\n return lut", "title": "" }, { "docid": "fd32c5171489deaa0cc09b1998414121", "score": "0.55511636", "text": "def build_lut(cmap):\n\n lut = np.empty(shape=(256, 3), dtype=np.uint8)\n max = 256\n # build lookup table:\n lastval, lastcol = cmap[0]\n for step, col in cmap[1:]:\n val = int(step * max)\n for i in range(3):\n lut[lastval:val, i] = np.linspace(lastcol[i], col[i], val - lastval)\n\n lastcol = col\n lastval = val\n\n return lut", "title": "" }, { "docid": "b3ab47157c8f9d5e1a1a86a692bbfb7f", "score": "0.55414176", "text": "def colorScalarMap(minValue=0,maxValue=10,colorMap=_mpl.cm.Reds):\n#\timport matplotlib\n\tnorm = _mpl.colors.Normalize(minValue,maxValue)\n\ts_m = _mpl.cm.ScalarMappable(cmap=colorMap, norm=norm)\n\ts_m.set_array([])\n\treturn s_m", "title": "" }, { "docid": "72d88e55fced28dd148facaf041e3953", "score": "0.5531602", "text": "def cmap(self, cmap):\n global cmaps\n if cmap is None:\n cmap = self._cached_cmap\n try:\n if parse_version(mpl.__version__) >= parse_version('3.5.0'):\n cm = copy.copy(mpl.colormaps[cmap])\n else:\n cm = copy.copy(get_cmap(cmap))\n except ValueError:\n raise NeXusError(f\"'{cmap}' is not registered as a color map\")\n cmap = cm.name\n if cmap != self._cached_cmap:\n if cmap not in cmaps:\n cmaps.insert(6, cmap)\n idx = self.cmapcombo.findText(cmap)\n if idx < 0:\n if cmap in divergent_cmaps:\n self.cmapcombo.addItem(cmap)\n else:\n self.cmapcombo.insertItem(7, cmap)\n self.cmapcombo.setCurrentIndex(self.cmapcombo.findText(cmap))\n else:\n self.cmapcombo.setCurrentIndex(idx)\n cm.set_bad(self.plotview.bad)\n self.plotview.image.set_cmap(cm)\n if self.symmetric:\n if self.is_qualitative_cmap(self._cached_cmap):\n self.axis.hi = self.axis.max\n self.make_symmetric()\n self.plotview.x, self.plotview.y, self.plotview.v = \\\n self.plotview.get_image()\n self.plotview.replot_image()\n elif self.qualitative:\n self.make_qualitative()\n self.plotview.x, self.plotview.y, self.plotview.v = \\\n self.plotview.get_image()\n self.plotview.replot_image()\n else:\n self.maxbox.setEnabled(True)\n self.minbox.setEnabled(True)\n self.maxslider.setEnabled(True)\n self.minslider.setEnabled(True)\n if self.is_symmetric_cmap(self._cached_cmap):\n self.axis.lo = self.axis.min\n elif self.is_qualitative_cmap(self._cached_cmap):\n self.axis.lo = self.axis.min\n self.axis.hi = self.axis.max\n self.plotview.replot_image()\n self._cached_cmap = self.cmap", "title": "" }, { "docid": "9aae5e8e7dba019cb9a34c3a7d71dad2", 
"score": "0.55068564", "text": "def colormap(self, *args):\n return self.send({'cmd': 'colormap', 'args': args})", "title": "" }, { "docid": "4ed902661a182d18e88a188e579e6399", "score": "0.5502435", "text": "def discrete_norm(values, cmap='viridis', values_are_bounds=False, **norm_kws):\n if values_are_bounds:\n boundaries = values\n else:\n boundaries = _vals_to_boundaries(values)\n if isinstance(cmap, str):\n cmap = mpl.cm.get_cmap(cmap)\n return mpl.colors.BoundaryNorm(boundaries=boundaries, ncolors=cmap.N, **norm_kws)", "title": "" }, { "docid": "44c5a1b2b1aab5af39f81a848913e015", "score": "0.5493861", "text": "def color_map(cat):\n # NOTE: This relies on remap_interval, which you must provide\n color_code = remap_interval( cat, -1, 1, 0, 255)\n return int(color_code)", "title": "" }, { "docid": "90fb584b8143ae31a0afcee5ac329be9", "score": "0.54769725", "text": "def colors(self, values, domain_min=None, domain_max=None):\n values = numpy.array(values, dtype=\"int64\")\n flat = numpy.ravel(values) % len(self._palette._colors)\n result = self._palette._colors[flat]\n return result.reshape(values.shape)", "title": "" }, { "docid": "ac1ec93fcbd379a61c3ecb2612c9ae42", "score": "0.54704547", "text": "def from_discrete_values(cls, values, cmap=mpl.cm.jet, norm_kws=dict(), **kwargs):\n return cls(vmin=None, vmax=None, cmap=cmap, norm=discrete_norm(values, cmap, values_are_bounds=False, **norm_kws), **kwargs)", "title": "" }, { "docid": "4bbbb186db42b0af142346f341a442ab", "score": "0.5453183", "text": "def get_colormap( arr, center_zero=True, minval=0.15, maxval=0.95, \\\n npoints=100, cb='CMRmap_r', maintain_scaling=True, \\\n negative=False, positive=False, divergent=False, \\\n sigfig_rounding_on_cb=2, buffer_cmap_upper=False, fixcb=None, nticks=10, \\\n verbose=True, debug=False ):\n\n# cb='Blues' # Kludge. Force blues for colorbar... \n# cb='Reds' # Kludge. Force Reds for colorbar... 
\n\n # Make sure cmap includes range of all readable levels (lvls)\n # i.e head of colormap often rounded for ascetic/readability reasons\n if buffer_cmap_upper:\n lvls, lvls_diff = get_human_readable_gradations( vmax=fixcb[1], \\\n vmin=fixcb[0], nticks=nticks, rtn_lvls_diff=True, \\\n sigfig_rounding_on_cb=sigfig_rounding_on_cb )\n\n # increase maximum value in color by 5% of level diff \n # to allow space for max lvl \n fixcb_ = ( fixcb[0], lvls[-1]+ ( lvls_diff*0.05 ))\n arr = np.array( fixcb_ )\n\n # Gotcha - make sure max(lvls) not increased > 0 \n if (max(lvls) <= 0) and ( not (arr.max() < 0 ) ):\n arr = np.array( [ arr[0], 0 ] )\n \n \n # make sure array has a mask\n if debug:\n print arr, [ ( i.min(), i.max() ) for i in [arr] ]\n print '>'*5, ('ask' not in str( type( arr ) )), type( arr )\n if 'ask' not in str(type( arr ) ):\n arr = np.ma.array(arr) \n# s_mask = arr.mask\n \n if debug:\n print '>'*5, ('ask' not in str( type( arr ) )), type( arr )\n \n # If postive/negative not given, check if +ve/-ve\n if not any( np.array([ positive, negative ]) ):\n if debug:\n print 'Testing if arr is +ve/-ve, for arr with min' +\\\n '{} and max {}'.format( arr.min(), arr.max() )\n\n # --- sequential\n # negative?\n arr.mask =False\n arr.mask[arr<=0]=True\n if arr.mask.all():\n negative = True\n\n # postive?\n arr.mask =False\n arr.mask[arr>=0]=True\n if arr.mask.all():\n positive = True\n\n # reverse colourbar if negative\n if negative:\n if cb == 'CMRmap_r':\n cb = cb[:-2]\n else:\n cb = cb+'_r'\n\n if verbose:\n logging.info( 'cmap is: >{}< & data is:'.format( cb ))\n logging.info( '< postive == {}, negative == {}, divergent == {} >'.format( \\\n positive, negative, (( not positive) and (not negative)) ))\n\n # load color map\n cmap = plt.get_cmap( cb )\n\n # Chop off bottom for 'gnuplot2' \n if (negative and ( 'gnuplot2' in cb)) or (positive and ( 'CMRmap' in cb)):\n cmap = matplotlib.colors.LinearSegmentedColormap.from_list( \\\n 'trunc({n},{a:.2f},{b:.2f})'.format(n=cmap.name, a=0, \\\n b=maxval-minval, ), cmap(np.linspace(0, maxval-minval, npoints)))\n if (positive and ( 'gnuplot2' in cb)) or (negative and ( 'CMRmap' in cb)):\n# if positive and ( 'CMRmap' in cb ): \n cmap = matplotlib.colors.LinearSegmentedColormap.from_list( \\\n 'trunc({n},{a:.2f},{b:.2f})'.format(n=cmap.name, a=minval,\\\n b=maxval), cmap(np.linspace(minval, maxval, npoints)))\n\n # --- divergent\n if ( ( not positive) and (not negative) ) or divergent:\n if debug:\n print 'diverging data? =={}, for values range: '.format( True ),\n arr.mask = False\n cmap = plt.cm.RdBu_r\n# cmap = plt.cm.Spectral\n # Centre color bar around zero\n if center_zero:\n vmin, vmax = arr.min(), arr.max()\n print 1-vmax/(vmax + abs(vmin)), vmin, vmax\n cmap = shiftedColorMap(cmap, midpoint=1-vmax/(vmax + abs(vmin)), \\\n maintain_scaling=maintain_scaling, arr=arr, name='shifted', \\\n verbose=verbose, debug=debug )\n\n # Use updated jet replacements from mpl\n # : https://github.com/bids/colormap\n# cmap = cmc # pink, orange, blue alterative... 
\n# cmap = cmd # green - blue alternative\n\n# arr.mask = s_mask\n\n if debug:\n print cb, center_zero \n if buffer_cmap_upper:\n return cmap, fixcb_\n else:\n return cmap", "title": "" }, { "docid": "15ace60c115603eeed629819e2e321e0", "score": "0.5445226", "text": "def get_lin_cmap(values, colors):\n\n # Normalized vmid\n x = [1.0*(v-values[0])/(values[-1]-values[0]) for v in values[1:-1]]\n\n # Adapt to seq\n c = mc.ColorConverter().to_rgb\n\n seq = [c(colors[0])]\n for i in range(len(x)):\n seq += [c(colors[i+1]), x[i], c(colors[i+1])]\n seq += [c(colors[-1])]\n\n return _get_seq_cmap(seq)", "title": "" }, { "docid": "94365f92247fc5bf3a794ca7c9469e55", "score": "0.54432136", "text": "def single_color_cmap(color=None):\n if color is None:\n color = [0, 0, 0]\n colors = np.ones((2, 4))\n for i in range(3):\n colors[0, i] = color[i]\n colors[1, i] = color[i]\n\n return LinearSegmentedColormap.from_list(\"single_color\" , colors, 2)", "title": "" }, { "docid": "13eab88b0db78a3a963566f8f1761179", "score": "0.5436824", "text": "def get_colormap(name: str) -> Dict:\n cmap_file = os.path.join(os.path.dirname(__file__), \"cmaps\", f\"{name}.npy\")\n cmap = numpy.load(cmap_file)\n return {idx: value.tolist() for idx, value in enumerate(cmap)}", "title": "" }, { "docid": "0a7e44aea7cdf75ba0665a69ac93ff90", "score": "0.5414513", "text": "def get_colormap(i, z, shift, colmap):\n\n col = int((normalize(i, z)) + (shift * len(colmap) / 100)) % len(colmap)\n return colmap[col]", "title": "" }, { "docid": "0287004468f501a18fd5da2fbd024a68", "score": "0.540365", "text": "def color_map(val):\n color_code = remap_interval(val, -1, 1, 0, 255)\n return int(color_code)", "title": "" }, { "docid": "7b723109f04c8a5e6c3fe2292b4a3ef8", "score": "0.54000956", "text": "def createcm(df,factor):\n color_range = ['#023858','#084081','#0868ac','#2b8cbe','#4eb3d3','#7bccc4','#a8ddb5','#ccebc5','#e0f3db','#f7fcf0','#fff7bc']\n colormap = cm.LinearColormap(\n color_range[::-1],\n vmin = 0, \n vmax = round(max(df[factor])+1),\n index = np.linspace(round(min(df[factor])),round(max(df[factor])+1),11),\n caption = ''.join([factor,' of each country'])\n )\n return colormap", "title": "" }, { "docid": "281ca8d5471ba20570b4e862a0fe5e27", "score": "0.5395399", "text": "def truncate_colormap(cmap, minval=0.0, maxval=1.0, n=10000):\n new_cmap = clr.LinearSegmentedColormap.from_list(\n 'trunc({n},{a:.2f},{b:.2f})'.format(n=cmap.name, a=minval, b=maxval),\n cmap(np.linspace(minval, maxval, n)))\n return new_cmap", "title": "" }, { "docid": "1120697588d1d1bccb9350eb3ac82117", "score": "0.5391788", "text": "def map(self, values):\n if self._logarithmic:\n assert np.all(values > 0)\n values = np.log(values)\n vmin, vmax = np.log(self.vmin), np.log(self.vmax)\n else:\n vmin, vmax = self.vmin, self.vmax\n assert values is not None\n # Use categorical or continuous colormap depending on the categorical option.\n f = (_categorical_colormap\n if self._categorical and np.issubdtype(values.dtype, np.integer)\n else _continuous_colormap)\n return f(self._colormap, values, vmin=vmin, vmax=vmax)", "title": "" }, { "docid": "17a7bf8a4e563d8afef9ad30a1e136bd", "score": "0.5383692", "text": "def get_discrete_midpt_cmap_norm(vmin, vmax, midpoint,\n bands_above_mdpt=5,\n bands_below_mdpt=5,\n remove_middle_color=False,\n this_cmap=plt.get_cmap('PuOr'),\n extend='both'):\n x = np.concatenate([np.linspace(start=vmin,\n stop=midpoint,\n num=bands_below_mdpt)[:-1],\n np.linspace(start=midpoint,\n stop=vmax,\n num=bands_above_mdpt)])\n if 
remove_middle_color:\n low_stop = 0.4\n high_start = 0.6\n else:\n low_stop = 0.5\n high_start = 0.5\n y = np.concatenate([np.linspace(start=0.0,\n stop=low_stop,\n num=bands_below_mdpt+1)[:-1],\n np.linspace(start=high_start,\n stop=1.0,\n num=bands_above_mdpt)])\n\n if extend is 'neither':\n y = y[1:-1]\n if extend is 'min':\n y = y[:-1]\n if extend is 'max':\n y = y[1:]\n mycmap, mynorm = from_levels_and_colors(x, this_cmap(y),\n extend=extend)\n return(mycmap, mynorm)", "title": "" }, { "docid": "a13f29480c59364f0a278f420f704c96", "score": "0.53554916", "text": "def GetColormap(self, *args):\n return self.send({'cmd': 'GetColormap', 'args': args})", "title": "" }, { "docid": "afb6165f02884c40c85036c14a89826f", "score": "0.5341889", "text": "def grayify_cmap(cmap):\n cmap = plt.cm.get_cmap(cmap)\n colors = cmap(np.arange(cmap.N))\n # convert RGBA to perceived greyscale luminance\n # cf. http://alienryderflex.com/hsp.html\n RGB_weight = [0.299, 0.587, 0.114]\n luminance = np.sqrt(np.dot(colors[:, :3] ** 2, RGB_weight))\n colors[:, :3] = luminance[:, np.newaxis]\n return cmap.from_list(cmap.name + \"_grayscale\", colors, cmap.N)", "title": "" }, { "docid": "1e86fd0bb7d4c37b14fb2d22f0f5a63e", "score": "0.5331777", "text": "def get_mpl_colormap(self, **kwargs):\r\n if not HAVE_MPL: # pragma: no cover\r\n raise RuntimeError('matplotlib not available.')\r\n\r\n cmap = LinearSegmentedColormap.from_list(self.name,\r\n self.mpl_colors, **kwargs)\r\n\r\n return cmap", "title": "" }, { "docid": "db026bdb24222dfe941f41e76d57ebdd", "score": "0.5319674", "text": "def cmap(self, *args):\n return self.send({'cmd': 'cmap', 'args': args})", "title": "" }, { "docid": "91c35af8309aa61e49e03cb8dae02fea", "score": "0.5314846", "text": "def getColorMapByKey(self, key):\n return self.cmLib.getColorMapByKey(key)", "title": "" }, { "docid": "6edb5b5ebd87b419992f925148859feb", "score": "0.5309055", "text": "def color_map(val):\n # NOTE: This relies on remap_interval, which you must provide\n color_code = remap_interval(val, -1, 1, 0, 255)\n return int(color_code)", "title": "" }, { "docid": "6edb5b5ebd87b419992f925148859feb", "score": "0.5309055", "text": "def color_map(val):\n # NOTE: This relies on remap_interval, which you must provide\n color_code = remap_interval(val, -1, 1, 0, 255)\n return int(color_code)", "title": "" }, { "docid": "6edb5b5ebd87b419992f925148859feb", "score": "0.5309055", "text": "def color_map(val):\n # NOTE: This relies on remap_interval, which you must provide\n color_code = remap_interval(val, -1, 1, 0, 255)\n return int(color_code)", "title": "" }, { "docid": "6edb5b5ebd87b419992f925148859feb", "score": "0.5309055", "text": "def color_map(val):\n # NOTE: This relies on remap_interval, which you must provide\n color_code = remap_interval(val, -1, 1, 0, 255)\n return int(color_code)", "title": "" }, { "docid": "6edb5b5ebd87b419992f925148859feb", "score": "0.5309055", "text": "def color_map(val):\n # NOTE: This relies on remap_interval, which you must provide\n color_code = remap_interval(val, -1, 1, 0, 255)\n return int(color_code)", "title": "" }, { "docid": "6edb5b5ebd87b419992f925148859feb", "score": "0.5309055", "text": "def color_map(val):\n # NOTE: This relies on remap_interval, which you must provide\n color_code = remap_interval(val, -1, 1, 0, 255)\n return int(color_code)", "title": "" }, { "docid": "8cbe97725cb8fb33cf06c638b28da479", "score": "0.53066945", "text": "def GetRedChannel(self) -> \"std::vector< double,std::allocator< double > >\":\n return 
_itkCustomColormapFunctionPython.itkCustomColormapFunctionUCRGBPUC_GetRedChannel(self)", "title": "" }, { "docid": "ac128376728e2c74c1facbc860cfbd82", "score": "0.529228", "text": "def colordict(self):\n d = {}\n i=0\n n = len(self.constraints)\n for c in self.constraints:\n #self.colordict[c] = colors[i % 6]\n d[c] = cm.jet(1.*i/n)\n i+=1\n return d", "title": "" }, { "docid": "27286a2b27524eb9966a4349a927bff9", "score": "0.5288192", "text": "def get_grayscale(c):\n min = min(c)\n max = max(c)\n\n greyscale_values = np.interp(c, [min, max], [0, 255]).astype(int)", "title": "" }, { "docid": "b843d307a05011453c1e4747dbebde62", "score": "0.5282837", "text": "def rgb_colormaps(color):\n\n if color == \"red\":\n cdict = {'red': ((0.0, 0.0, 0.0), (1.0, 1.0, 1.0)),\n\n 'green': ((0.0, 0.0, 0.0), (1.0, 0.0, 0.0)),\n\n 'blue': ((0.0, 0.0, 0.0), (1.0, 0.0, 0.0))}\n elif color == \"green\":\n cdict = {'red': ((0.0, 0.0, 0.0), (1.0, 0.0, 0.0)),\n\n 'green': ((0.0, 0.0, 0.0), (1.0, 1.0, 1.0)),\n\n 'blue': ((0.0, 0.0, 0.0), (1.0, 0.0, 0.0))}\n elif color == \"blue\":\n cdict = {'red': ((0.0, 0.0, 0.0), (1.0, 0.0, 0.0)),\n\n 'green': ((0.0, 0.0, 0.0), (1.0, 0.0, 0.0)),\n\n 'blue': ((0.0, 0.0, 0.0), (1.0, 1.0, 1.0))}\n else:\n raise ValueError(\"Wrong color specified. Allowed colors are 'red', 'green', 'blue'.\")\n\n cmap = LinearSegmentedColormap('BlueRed2', cdict)\n\n return cmap", "title": "" }, { "docid": "8d7af04a986993c99bf81f5e1feb8216", "score": "0.5282519", "text": "def mpl_colormap(self):\r\n return self.get_mpl_colormap()", "title": "" }, { "docid": "4883836f5285d355eac5105a19a36780", "score": "0.5246864", "text": "def apply_cmap(data: numpy.ndarray, cmap: Dict) -> Tuple[numpy.ndarray, numpy.ndarray]:\n if data.shape[0] > 1:\n raise Exception(\"Source data must be 1 band\")\n\n lookup_table = make_lut(cmap)\n\n # apply the color map\n data = lookup_table[data[0], :]\n data = numpy.transpose(data, [2, 0, 1])\n return data[:-1], data[-1]", "title": "" }, { "docid": "eea5074e8f922f072d46b2e2b8f072b4", "score": "0.5244483", "text": "def _make_default_colormap():\n colormap = np.array(cc.glasbey_bw_minc_20_minl_30)\n # Reorder first colors.\n colormap[[0, 1, 2, 3, 4, 5]] = colormap[[3, 0, 4, 5, 2, 1]]\n # Replace first two colors.\n colormap[0] = [0.03137, 0.5725, 0.9882]\n colormap[1] = [1.0000, 0.0078, 0.0078]\n return colormap", "title": "" }, { "docid": "b6d53f7fd6ed8fa24128ed64333973eb", "score": "0.52252215", "text": "def create_cityscapes_label_colormap():\n colormap = np.asarray([\n [128, 64, 128],\n [244, 35, 232],\n [70, 70, 70],\n [102, 102, 156],\n [190, 153, 153],\n [153, 153, 153],\n [250, 170, 30],\n [220, 220, 0],\n [107, 142, 35],\n [152, 251, 152],\n [70, 130, 180],\n [220, 20, 60],\n [255, 0, 0],\n [0, 0, 142],\n [0, 0, 70],\n [0, 60, 100],\n [0, 80, 100],\n [0, 0, 230],\n [119, 11, 32],\n ])\n return colormap", "title": "" }, { "docid": "ecba7f86109e0520bfd723960fa03c55", "score": "0.52242047", "text": "def cart2cmap(xyz, cmap):\n \n rthetaphi = cart2sph(xyz)\n phi = rthetaphi[1]\n theta = rthetaphi[2] + 180.0\n rgb = cmap[int(phi), int(theta)]\n\n return rgb", "title": "" }, { "docid": "c30c2ba60548d2a2198a472d6d3fd932", "score": "0.52128977", "text": "def make_cdf(img, n_bins=256):\n cdf = stats.cumfreq(img, n_bins, (0, 255))[0]\n cdf_ = {'cdf': np.array(cdf) / int(max(cdf)), 'n_items': int(max(cdf))}\n\n return cdf_", "title": "" }, { "docid": "06412308edea60c69ace3137e0605d50", "score": "0.5205457", "text": "def continuous_palette_for_color(color, bins=256):\n\n # A quick 
and dirty way to create a continuous color palette is to convert from the RGB color\n # space into the HSV color space and then only adapt the color's saturation (S component).\n\n hexs = webcolors.CSS3_NAMES_TO_HEX[color] if color[0] != \"#\" else color\n r, g, b = [(int(h[1:3], 16), int(h[3:5], 16), int(h[5:7], 16)) for h in hexs]\n h, s, v = colorsys.rgb_to_hsv(r, g, b)\n\n assert 0 < bins <= 256\n\n palette = []\n for i in range(bins):\n r, g, b = [int(v * 255) for v in colorsys.hsv_to_rgb(h, (1 / bins) * (i + 1), v)]\n palette.extend(r, g, b)\n\n return palette", "title": "" }, { "docid": "00fad312779cc2f4a87470caa3d34e91", "score": "0.52052486", "text": "def view_colormap(cmap, raw_file_path):\r\n # cmap = plt.cm.get_cmap(cmap)\r\n colors = cmap(np.arange(cmap.N))\r\n \r\n # cmap = grayscale_cmap(cmap)\r\n # grayscale = cmap(np.arange(cmap.N))\r\n \r\n fig, ax = plt.subplots(1, figsize=(6, 2),\r\n subplot_kw=dict(xticks=[], yticks=[]))\r\n ax.imshow([colors], extent=[0, 10, 0, 1])\r\n \r\n pltF.GCPE_savecolorbar(fig, raw_file_path)\r\n # ax[1].imshow([grayscale], extent=[0, 10, 0, 1])\r", "title": "" }, { "docid": "528408ef58b72b9f08198270c471845e", "score": "0.52025527", "text": "def colorbar(vmin, vmax, cmap):\n\n vmin = vmin / (np.pi*(4**2*40)*0.0014*0.3*(1/0.917)*10)\n vmax = vmax / (np.pi*(4**2*40)*0.0014*0.3*(1/0.917)*10)\n\n fig, ax = plt.subplots(1, 1)\n\n fraction = 1\n\n norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)\n cbar = ax.figure.colorbar(\n mpl.cm.ScalarMappable(norm=norm, cmap=cmap),\n ax=ax, pad=.05, extend='both', fraction=fraction)\n\n ax.axis('off')\n \n plt.savefig(str(path+'colorbar.jpg'),dpi=300)\n \n return", "title": "" }, { "docid": "22fee697a735989a83f519afcc166c5e", "score": "0.5186091", "text": "def create_pascal_label_colormap():\n colormap = np.zeros((256, 3), dtype=int)\n ind = np.arange(256, dtype=int)\n\n for shift in reversed(range(8)):\n for channel in range(3):\n colormap[:, channel] |= ((ind >> channel) & 1) << shift\n ind >>= 3\n print(\"shift\",shift)\n print(\"channel\",channel)\n #print(\"colormap\",colormap)\n return colormap", "title": "" }, { "docid": "fe455d012ebaea21e0f6b574125cdde3", "score": "0.5175574", "text": "def GetRedChannel(self) -> \"std::vector< double,std::allocator< double > >\":\n return _itkCustomColormapFunctionPython.itkCustomColormapFunctionUCRGBAPUC_GetRedChannel(self)", "title": "" }, { "docid": "4c9f8f0c0fb3db0c5854b51b69f6671d", "score": "0.5169313", "text": "def GetBlueChannel(self) -> \"std::vector< double,std::allocator< double > >\":\n return _itkCustomColormapFunctionPython.itkCustomColormapFunctionUCRGBPUC_GetBlueChannel(self)", "title": "" }, { "docid": "b91e8df9cffcafe996741a27cf048cd7", "score": "0.515642", "text": "def change_cmap(self):\n self.cmap = self.cmapcombo.currentText()", "title": "" } ]
db0af1a186c2fbf834cf81400266dddd
Wrapper around submit for the Python API.
[ { "docid": "1ed696ceed191d790787d19b8ed39d8d", "score": "0.6731706", "text": "def submit_api(config: dict, **kwargs):\n cfg = Config.from_dict(config)\n return submit(cfg=cfg, **kwargs)", "title": "" } ]
[ { "docid": "b3160e79d99c6bf5469e3b1f4a9dcb99", "score": "0.78354293", "text": "def submit(self):\n raise NotImplementedError", "title": "" }, { "docid": "685d566c2b16f9ade7eb9ad70c3d3e39", "score": "0.76533663", "text": "def submit(self):\n pass", "title": "" }, { "docid": "89dee93d52686478a9d0cf7419f822e9", "score": "0.7331223", "text": "def submit(self, req):\n \n cmd = req.cmd\n if req.args:\n cmd += req.args\n \n p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\n (out, err) = p.communicate()\n rc = p.returncode\n \n req.out = out\n req.err = err\n req.rc = rc", "title": "" }, { "docid": "acee74cfe601b21f61620d78cb22bd59", "score": "0.7179298", "text": "def user_submit(request, _form_data, url_mapping, client, _responses, action, what, args):\n url = _url_args(url_mapping[what][action], args)\n _responses.append(_form_request(request, _form_data, client, 'post', url))", "title": "" }, { "docid": "c83bbf9c46c92513c63d4809ff3c18ce", "score": "0.71219987", "text": "def submit():\n if request.headers['Content-Type'] == 'application/json':\n return Server.submit(request.json)\n else:\n return Server.submit(request.form)", "title": "" }, { "docid": "672e9e95dc61da6d8e11521ef0365ef4", "score": "0.7089498", "text": "def submit(self):\r\n return self._submit", "title": "" }, { "docid": "7a66a062f7d0b5e29a549d507adf2bae", "score": "0.6755731", "text": "def submitted(self):\n pass", "title": "" }, { "docid": "352d4c6e8813702f8aa675df686e536c", "score": "0.67519957", "text": "def submit(self, name=None, index=None, value=None, **args):\n fields = self.submit_fields(name, index=index, submit_value=value)\n if self.method.upper() != \"GET\":\n args.setdefault(\"content_type\", self.enctype)\n return self.response.goto(self.action, method=self.method,\n params=fields, **args)", "title": "" }, { "docid": "8f06e9ef9902b9116721227b88b7a08e", "score": "0.6736374", "text": "async def submit(ctx):\n # TODO: Submission Matrix", "title": "" }, { "docid": "f1d45db740a371082b69f21b73f597b7", "score": "0.66750336", "text": "def Submit(self, request, global_params=None):\n config = self.GetMethodConfig('Submit')\n return self._RunMethod(\n config, request, global_params=global_params)", "title": "" }, { "docid": "68370f252182b2b106655c9dbe621810", "score": "0.6654933", "text": "def test_submit(self):\n pass", "title": "" }, { "docid": "470d91477987029114bc8d323ab03254", "score": "0.6582727", "text": "def Submit(func):\n executor.submit(func)", "title": "" }, { "docid": "914df6d2e76137590499ce072446ca1b", "score": "0.65789574", "text": "def submit_job(self, job: Job):\n pass", "title": "" }, { "docid": "6dcf70fc81128877bac2437b7638fa4c", "score": "0.6540917", "text": "def post(self):", "title": "" }, { "docid": "6dcf70fc81128877bac2437b7638fa4c", "score": "0.6540917", "text": "def post(self):", "title": "" }, { "docid": "6dcf70fc81128877bac2437b7638fa4c", "score": "0.6540917", "text": "def post(self):", "title": "" }, { "docid": "6dcf70fc81128877bac2437b7638fa4c", "score": "0.6540917", "text": "def post(self):", "title": "" }, { "docid": "6dcf70fc81128877bac2437b7638fa4c", "score": "0.6540917", "text": "def post(self):", "title": "" }, { "docid": "69eafde7df4f2aa8121c8d0a3a3f4ce3", "score": "0.64982796", "text": "def submit(context, path):\n repo = context.obj['repo']\n repo.submit(path=path)\n context.exit(0)", "title": "" }, { "docid": "05ecdaff7dbade30bd204e9c09e72f10", "score": "0.64878976", "text": "def test_submit(self):\n assert LocalExecutor().submit(lambda: 1) == 1\n 
assert LocalExecutor().submit(lambda x: x, 1) == 1\n assert LocalExecutor().submit(lambda x: x, x=1) == 1\n assert LocalExecutor().submit(lambda: prefect) is prefect", "title": "" }, { "docid": "22c484d2e8901c8a38c76390b84331ed", "score": "0.6458699", "text": "def submit(self):\n return {}", "title": "" }, { "docid": "9fe2bae0f72dc62d3e6bb740a84de539", "score": "0.63994145", "text": "def _submit_job_request(self, request: Dict) -> Dict:\n pass", "title": "" }, { "docid": "2af0ad7280940ab0c2ba969e966f03b8", "score": "0.6372564", "text": "def post(self):\n pass", "title": "" }, { "docid": "2af0ad7280940ab0c2ba969e966f03b8", "score": "0.6372564", "text": "def post(self):\n pass", "title": "" }, { "docid": "2af0ad7280940ab0c2ba969e966f03b8", "score": "0.6372564", "text": "def post(self):\n pass", "title": "" }, { "docid": "2af0ad7280940ab0c2ba969e966f03b8", "score": "0.6372564", "text": "def post(self):\n pass", "title": "" }, { "docid": "2af0ad7280940ab0c2ba969e966f03b8", "score": "0.6372564", "text": "def post(self):\n pass", "title": "" }, { "docid": "edb15551b573081017c792b1b19bfd43", "score": "0.6364024", "text": "def post(self):\n pass", "title": "" }, { "docid": "edb15551b573081017c792b1b19bfd43", "score": "0.6364024", "text": "def post(self):\n pass", "title": "" }, { "docid": "edb15551b573081017c792b1b19bfd43", "score": "0.6364024", "text": "def post(self):\n pass", "title": "" }, { "docid": "edb15551b573081017c792b1b19bfd43", "score": "0.6364024", "text": "def post(self):\n pass", "title": "" }, { "docid": "368cf52e642582926523f08036364bdd", "score": "0.63634175", "text": "def send_request(self): \n payload = {'request': json.dumps(self.payload, separators=(',', ':')), 'submit': 'submit' }\n self.response = requests.post(self.base_url, data=payload)\n self.response.raise_for_status();", "title": "" }, { "docid": "a21d304f19457b4882b448409f550033", "score": "0.622106", "text": "def http_post() -> Any:", "title": "" }, { "docid": "f38c53a700b5e18c7efcb60e085f0e1d", "score": "0.6212556", "text": "def _submit_task(self, task_spec: Dict[str, Any]) -> Tuple[Hashable, Any]:", "title": "" }, { "docid": "e5421dd44af6c3ca1e166175d3a5f8e4", "score": "0.6191706", "text": "def Submit(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "title": "" }, { "docid": "89a16a7074ac380d0d40e1f7da7bcba3", "score": "0.61556774", "text": "def submit(func: Callable, *args, **kwargs) -> FutureWrapper:\n client = get_client()\n fut = client.submit(func, *args, **kwargs)\n return FutureWrapper.from_future(fut)", "title": "" }, { "docid": "ea7aaf35097fafa97378db38b1260c91", "score": "0.61552185", "text": "def post(self, *args, **kwargs):\n pass", "title": "" }, { "docid": "ea7aaf35097fafa97378db38b1260c91", "score": "0.61552185", "text": "def post(self, *args, **kwargs):\n pass", "title": "" }, { "docid": "0e9ff3ce5dab186c490ec904a7f9467d", "score": "0.6130024", "text": "def _submit_job(self, context: str) -> uuid.UUID:\n url = os.path.join(self._base_url, \"submit\")\n response = self._client.post(url, context)\n data: schemas.JobSubmitted = schemas.decode(\n schemas.JobSubmittedSchema, response.text\n )\n return data.id", "title": "" }, { "docid": "279472950f46eb6c1edd7278cafbb883", "score": "0.609114", "text": "def jip_submit(args):\n parser = __create_connection_options()\n parser.add_option(\"--cwd\", dest=\"cwd\", help=\"Working directory\", default=os.path.abspath(os.curdir))\n\n 
programm_args = None\n if \"--\" in args:\n i = args.index(\"--\")\n programm_args = args[i + 1:]\n args = args[:i]\n\n options, rest_args = parser.parse_args(args)\n connection_info = _get_connection_info(options)\n connection_info['url'] = __prepare_submit_url(connection_info['url'], \"cluster/%s/submit\" % connection_info['cluster'])\n\n job = {\n \"command\": \" \".join(programm_args),\n \"cwd\": options.cwd\n }\n\n try:\n response = send(requests.post, connection_info, json.dumps(job))\n if not handle_reponse(response):\n exit(1)\n if response.status_code == 200:\n print \"Job\", response.json['id'], \"submitted\"\n except requests.exceptions.Timeout, e:\n print >> sys.stderr, \"Connection timeout\"\n exit(1)", "title": "" }, { "docid": "22bfd19306996343d2123a5764e27722", "score": "0.6087373", "text": "def submit():\n try:\n proxyid = getProxyId()\n except errors.NoSuchProxyError:\n return 'Wrong or no client certificate', 401\n\n jmgr = jobmgr.JobManager()\n\n site = request.form.get('site', None)\n if not site:\n return 'No site given', 400\n xrsl_file = request.files.get('xrsl', None)\n if not xrsl_file:\n return 'No job description file given', 400\n jobdesc = xrsl_file.read()\n try:\n jobmgr.checkJobDesc(jobdesc)\n jobmgr.checkSite(site)\n except errors.InvalidJobDescriptionError as e:\n return 'Invalid job description', 400\n except errors.NoSuchSiteError as e:\n return 'Invalid site', 400\n else:\n try:\n jobid = jmgr.clidb.insertJobAndDescription(jobdesc, proxyid, site)\n except Exception as e:\n return 'Server error: {}'.format(str(e)), 500\n else:\n return str(jobid)", "title": "" }, { "docid": "f850bd0bd87b7d5f29bb002afb176ab2", "score": "0.60647935", "text": "def polling_submit_command(args: Dict[str, Any], client: Client, params: Dict[str, Any],\n exe_metrics: ExecutionMetrics) -> PollResult:\n if submission_id := args.get('submission_id'):\n res = client.submission_info(submission_id=submission_id)\n status = res.get('status')\n if status == 'finished':\n command_results = build_submission_command_result(client, res, args, exe_metrics, True)\n return PollResult(command_results)\n return PollResult(\n response=[CommandResults(outputs=res, # this is what the response will be in case job has finished\n outputs_prefix='Joe.Submission', outputs_key_field='submission_id',\n readable_output=f'Waiting for submission \"{res.get(\"submission_id\")}\" to finish...'),\n CommandResults(execution_metrics=exe_metrics.get_metric_list())], continue_to_poll=True,\n args_for_next_run={'submission_id': args.get('submission_id'), **args})\n else:\n if file := args.get('entry_id'):\n return file_submission(client, args, params, file, exe_metrics)\n elif url := args.get('url'):\n return url_submission(client, args, params, url, exe_metrics)\n else:\n raise DemistoException('No file or URL was provided.')", "title": "" }, { "docid": "6b0ef864c864a0ed26c3208df28b89cd", "score": "0.6022606", "text": "def submit( self ):\n\n (stdOut, stdErr) = self._call_condor( [self.submit_exe, self.script] )\n\n (condor_id, parent_id) = self._parse_submit_output( stdOut, stdErr )\n\n self.info = CondorDbInfo( condor_id, parent_id )\n\n # store state of job\n self.info.record( 'Submitted' )\n self._store_state()", "title": "" }, { "docid": "73f4a11f2d481847d5954b2c7e0babef", "score": "0.6017088", "text": "def submitNewEntry(self, params_list):\r\n pass", "title": "" }, { "docid": "675bb4cdf63723d54a30d871ae76f46b", "score": "0.6012116", "text": "def submit(args, syn):\n # check if evaluation is a number, if so 
it is assumed to be a evaluationId else it is a evaluationName\n if args.evaluation is not None:\n try:\n args.evaluationID = str(int(args.evaluation))\n except ValueError:\n args.evaluationName = args.evaluation\n\n # checking if user has entered a evaluation ID or evaluation Name\n if args.evaluationID is None and args.evaluationName is None:\n raise ValueError(\"Evaluation ID or Evaluation Name is required\\n\")\n elif args.evaluationID is not None and args.evaluationName is not None:\n sys.stderr.write(\n \"[Warning]: Both Evaluation ID & Evaluation Name are specified \\n EvaluationID will be used\\n\"\n )\n elif args.evaluationID is None: # get evalID from evalName\n try:\n args.evaluationID = syn.getEvaluationByName(args.evaluationName)[\"id\"]\n except Exception:\n raise ValueError(\n \"Could not find an evaluation named: %s \\n\" % args.evaluationName\n )\n\n # checking if a entity id or file was specified by the user\n if args.entity is None and args.file is None:\n raise ValueError(\"Either entityID or filename is required for a submission\\n\")\n elif args.entity is not None and args.file is not None:\n sys.stderr.write(\n \"[Warning]: Both entityID and filename are specified \\n entityID will be used\\n\"\n )\n elif (\n args.entity is None\n ): # upload the the file to synapse and get synapse entity id for the file\n if args.parentid is None:\n raise ValueError(\"parentID required with a file upload\\n\")\n if not os.path.exists(args.file):\n raise IOError(\"file path %s not valid \\n\" % args.file)\n # //ideally this should be factored out\n synFile = syn.store(\n synapseclient.File(path=args.file, parent=args.parentid),\n used=syn._convertProvenanceList(args.used, args.limitSearch),\n executed=syn._convertProvenanceList(args.executed, args.limitSearch),\n )\n args.entity = synFile.id\n\n submission = syn.submit(\n args.evaluationID, args.entity, name=args.name, team=args.teamName\n )\n sys.stdout.write(\n \"Submitted (id: %s) entity: %s\\t%s to Evaluation: %s\\n\"\n % (\n submission[\"id\"],\n submission[\"entityId\"],\n submission[\"name\"],\n submission[\"evaluationId\"],\n )\n )", "title": "" }, { "docid": "9cde6f22f493bb5fcc89c3a1516df4b7", "score": "0.59955055", "text": "def pinner_cs_submit_call():\n request = app.current_request\n complaint_id = process.file_complaint(request.query_params)\n t = j2_env.get_template('pinner_cs_submit.html')\n\n return Response(body=t.render(complaint_id=complaint_id), status_code=200, headers={'Content-Type': 'text/html'})", "title": "" }, { "docid": "f50c3a28d9b5bd40ffcf50af1a83a0ba", "score": "0.5991187", "text": "def submit(self, dispatcher, tracker, domain):\n print(\"####Came here\")\n # utter submit template\n # dispatcher.utter_template(\"utter_submit\", tracker)\n # tracker.trigger_followup_action('action_ack_makereservation')\n FollowupAction('action_search_flights')\n return []", "title": "" }, { "docid": "a1da93e84ae036bd14706207c74c943c", "score": "0.5988896", "text": "def submitBlock(self, block):\n return self.call(\"submitBlock\", [block])", "title": "" }, { "docid": "c932d0bf7f993a29f6acfc9122503689", "score": "0.59597635", "text": "def _submit(submitter: Submitter) -> Result:\n\n pre_val_dir = paths.get_pre_validation_dir(submitter)\n submission_time = datetime.now()\n\n if ApprovedSubmitters().submissions_closed() and not ApprovedSubmitters().elig_for_extension(submitter):\n return Result(False, f\"A new submission can't be made at {submission_time.strftime(fmt.DATETIME_TRACE_STRING)}.\"\n f\" Submissions have been 
closed\")\n\n size_check_result = _check_submission_file_size(pre_val_dir)\n if not size_check_result:\n return size_check_result\n\n return daemon.queue_submission(submitter, submission_time)", "title": "" }, { "docid": "f8e7e52bcedf88432438a86242a6fad5", "score": "0.5956966", "text": "def post(self):\n raise NotImplementedError()\n #return None, 201", "title": "" }, { "docid": "f7e7c6dfe90a052a05a129b58b9b5206", "score": "0.5953424", "text": "def submit(self, fn, *args, **kwargs):\n\n coro = fn(*args, **kwargs)\n return asyncio.run_coroutine_threadsafe(coro, self._loop)", "title": "" }, { "docid": "d07a804270b312ea55fccd6f37bb95b1", "score": "0.59471244", "text": "def submit(self):\n self._focus()\n getsys('submit')(f'python -m \"pypers.core.main\" -r')", "title": "" }, { "docid": "44af9f5366d0d8d9755d3f57699edb84", "score": "0.5938347", "text": "def submit_url(self, title, url, subverse):\n return self.submit_post(title, url, subverse, True)", "title": "" }, { "docid": "57aa1f043bb2ad7d442e57ff958e3879", "score": "0.5928879", "text": "def submit_helper(self, config, merge_run=None):\n report_path = config.report_path(None)\n assert os.path.exists(report_path), \"Passed an invalid report file. \" \\\n \"Should have never gotten here!\"\n\n result = None\n if config.submit_url:\n from lnt.util import ServerUtil\n for server in config.submit_url:\n self.log(\"submitting result to %r\" % (server,))\n try:\n result = ServerUtil.submitFile(server, report_path, False,\n merge_run=merge_run)\n except (urllib2.HTTPError, urllib2.URLError) as e:\n logger.warning(\"submitting to {} failed with {}\"\n .format(server, e))\n else:\n # Simulate a submission to retrieve the results report.\n # Construct a temporary database and import the result.\n self.log(\"submitting result to dummy instance\")\n\n import lnt.server.db.v4db\n import lnt.server.config\n db = lnt.server.db.v4db.V4DB(\"sqlite:///:memory:\",\n lnt.server.config.Config\n .dummy_instance())\n session = db.make_session()\n result = lnt.util.ImportData.import_and_report(\n None, None, db, session, report_path, 'json', 'nts')\n\n if result is None:\n fatal(\"Results were not obtained from submission.\")\n\n return result", "title": "" }, { "docid": "47c697f87156043d1613d50312067511", "score": "0.59283996", "text": "def submit(self):\n original_working_directory = os.getcwd()\n os.chdir(self.submit_dir_abs_path)\n submit_function_output = self.submit_function()\n os.chdir(original_working_directory)\n return submit_function_output", "title": "" }, { "docid": "d83f86d774411e14251b9f717925efb7", "score": "0.59282285", "text": "def submit_to_ebi(preprocessed_data_id, submission_type):\n submit_EBI(preprocessed_data_id, submission_type, True)", "title": "" }, { "docid": "99978f2176928c18d390acd1b451d025", "score": "0.59195936", "text": "def gather_submission(self):\n raise NotImplementedError", "title": "" }, { "docid": "6c9501f874f718ba120b0a64aa3685c8", "score": "0.5919432", "text": "def submitBuildRequest(ss, reason, props=None):", "title": "" }, { "docid": "cd91f21e28f501249ca30d265381262f", "score": "0.59104073", "text": "def submit(self, value):\r\n self.logger.warn(\"Setting values on submit will NOT update the remote Canvas instance.\")\r\n self._submit = value", "title": "" }, { "docid": "94fd5192c8a46b4f84e15fb70fcc4219", "score": "0.59068656", "text": "def submit_task(self, task, raw=False):\n return self.run_task(task=task, raw=raw)", "title": "" }, { "docid": "d9b85afe432743a84407fe2982af8c18", "score": "0.58896893", "text": "def 
SubmitItem(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "title": "" }, { "docid": "4edd50f9b233d407417d086a52ad541b", "score": "0.58822984", "text": "def submit(self, script: scripts.Script) -> str:\n return self.server.submit(script.remote.filepath('sbatch'))", "title": "" }, { "docid": "59d977e0735087a8e81fdc32899656ae", "score": "0.587871", "text": "def htcondor_submit(args,GcardID,file_extension):\r\n\r\n runscript_file = file_struct.runscript_file_obj.file_base + file_extension + file_struct.runscript_file_obj.file_end\r\n clas12condor_file = file_struct.condor_file_obj.file_base + file_extension + file_struct.condor_file_obj.file_end\r\n\r\n condorfile = 'submission_files/'+'condor_files/' + clas12condor_file\r\n\r\n subprocess.call(['chmod','+x',file_struct.runscript_file_obj.file_path + runscript_file])\r\n subprocess.call(['chmod','+x',\"condor_wrapper\"])\r\n submission = Popen(['condor_submit',condorfile], stdout=PIPE).communicate()[0]\r\n #The below is for testing purposes\r\n #submission = \"\"\"Submitting job(s)...\r\n #3 job(s) submitted to cluster 7334290.\"\"\"\r\n print(submission)\r\n words = submission.split()\r\n node_number = words[len(words)-1] #This might only work on SubMIT\r\n print(node_number)\r\n\r\n strn = \"UPDATE Submissions SET run_status = 'submitted to pool' WHERE GcardID = '{0}';\".format(GcardID)\r\n utils.sql3_exec(strn)\r\n\r\n timestamp = utils.gettime() # Can modify this if need 10ths of seconds or more resolution\r\n strn = \"UPDATE Submissions SET submission_timestamp = '{0}' WHERE GcardID = '{1}';\".format(timestamp,GcardID)\r\n utils.sql3_exec(strn)\r\n\r\n strn = \"UPDATE Submissions SET pool_node = '{0}' WHERE GcardID = '{1}';\".format(node_number,GcardID)\r\n utils.sql3_exec(strn)", "title": "" }, { "docid": "77c2ed83b98aefdba24145a7184671bd", "score": "0.5867448", "text": "def submit(\n *,\n file: UploadFile = File(...),\n db: Session = Depends(get_db),\n user: User = Depends(get_current_user),\n lab_id: int\n):\n if file.filename.split('.')[-1] not in config.ALLOWED_EXTENSIONS:\n raise HTTPException(\n status_code=status.HTTP_400_BAD_REQUEST,\n detail=\"Disallowed file type\"\n )\n if not lab_get(db, lab_id):\n raise HTTPException(\n status_code=status.HTTP_400_BAD_REQUEST,\n detail=f\"Lab '{lab_id}' doesn't exist\"\n )\n content = file.file.read()\n filename = str(uuid.uuid4())\n with open(os.path.join(config.UPLOAD_FOLDER, filename), \"wb\") as f:\n f.write(content)\n file.file.close()\n res = submission_create(db, {\n \"filename\": filename,\n \"origin_filename\": file.filename,\n \"code\": str(content, encoding=\"utf-8\"),\n \"total_score\": 0,\n \"status\": \"pending\",\n \"user_id\": user.id,\n \"lab_id\": lab_id\n })\n return res", "title": "" }, { "docid": "26d960d97b44d51e8562e46601fecbbf", "score": "0.586115", "text": "def spark_submit(self):\n return self._spark_submit", "title": "" }, { "docid": "602eb2c402d20b3b1174fbc62d54c44e", "score": "0.5852757", "text": "def test_submit_run(self):\n run_serializer = {}\n headers = { \n 'Accept': 'application/json',\n 'Content-Type': 'application/json',\n 'Authorization': 'Bearer special-key',\n }\n response = self.client.open(\n '/api/v1/runs/submit',\n method='POST',\n headers=headers,\n data=json.dumps(run_serializer),\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "title": "" }, 
{ "docid": "7d4c16be12b6a8c6e16c1018dde1c544", "score": "0.5846389", "text": "def post(self):\n return BlueprintsUpload().do_request()", "title": "" }, { "docid": "9e7903566d69666331590b1442939ff1", "score": "0.5844832", "text": "def submit(self, func, *args, **kwargs):\n self._pool.submit(func, *args, **kwargs)", "title": "" }, { "docid": "d8394e4449dbf0b1f0f9e4fc8766c65b", "score": "0.5822767", "text": "def submission(ctx, submission_id):\n \"\"\"\n Invoked by `evalai submission SUBMISSION_ID`.\n \"\"\"\n ctx.obj = Submission(submission_id=submission_id)\n if ctx.invoked_subcommand is None:\n display_submission_details(submission_id)", "title": "" }, { "docid": "6b50bf2d86db16881536104accf5d09f", "score": "0.5822314", "text": "def execute(self) -> None:\n runtime.submit(self)", "title": "" }, { "docid": "f70f83df49f206ac7fefa25dc0593d7a", "score": "0.58122593", "text": "def requires_submit(func):\n @wraps(func)\n def _wrapper(self, *args, **kwargs):\n if self._future is None:\n raise JobError(\"Job not submitted yet!. You have to .submit() first!\")\n return func(self, *args, **kwargs)\n return _wrapper", "title": "" }, { "docid": "d9da4fba61374fe7542d63b659a81727", "score": "0.5811575", "text": "def post(self, request, *args, **kwargs):", "title": "" }, { "docid": "5e4bf80226aa1dc221abc6b805609c7b", "score": "0.58094907", "text": "def submit(self):\n if self._body:\n self._body_response = Fedora.post_foxml(element=self._body)\n\n if self._annotation:\n self._annotation_response = Fedora.post_foxml(element=self._annotation)", "title": "" }, { "docid": "0159908909bebb9c8d907f9c625f93a6", "score": "0.58043236", "text": "def url_submission(client: Client, args: Dict[str, Any], params: Dict[str, Any], url: str,\n exe_metrics: ExecutionMetrics) -> PollResult:\n res = client.submit_url(url=url, params=params)\n exe_metrics.success += 1\n partial_res = CommandResults(readable_output=f'Waiting for submission \"{res.get(\"submission_id\")}\" to finish...')\n return PollResult(\n response=CommandResults(outputs=res, outputs_prefix='Joe.Submission', outputs_key_field='submission_id'),\n args_for_next_run={'submission_id': res.get('submission_id'), **args}, continue_to_poll=True,\n partial_result=partial_res)", "title": "" }, { "docid": "7bc602f52cf0cf00e1b24fa4e90e7c3b", "score": "0.5795174", "text": "def Submit(self, artist, track, starttime):\n\n return self.SubmitFull(artist, track, starttime, 'P', '', '', '', '', '')", "title": "" }, { "docid": "495724386f6e968c4e58a1b5cb362c51", "score": "0.5756369", "text": "def submit(control_file_name):\n _dict = read_control_file(control_file_name)\n return submit_json(_dict)", "title": "" }, { "docid": "240f1c2b65146b91784ba4b38e38e6e7", "score": "0.57443184", "text": "def post(self):\n self._handle(self.request.POST)", "title": "" }, { "docid": "240f1c2b65146b91784ba4b38e38e6e7", "score": "0.57443184", "text": "def post(self):\n self._handle(self.request.POST)", "title": "" }, { "docid": "95ab3d6613cb0478ada8a97ac100a1e7", "score": "0.57333755", "text": "def _submit(url,\n gremlin,\n graph,\n bindings=None,\n lang=\"gremlin-groovy\",\n aliases=None,\n op=\"eval\",\n processor=\"\",\n timeout=None,\n session=None,\n loop=None,\n username=\"\",\n password=\"\",\n future_class=None):\n future_class = graph.future_class\n future = future_class()\n future_conn = graph.connect(force_close=True)\n\n def on_connect(f):\n try:\n conn = f.result()\n except Exception as e:\n future.set_exception(e)\n else:\n stream = conn.send(gremlin, bindings=bindings, 
lang=lang,\n aliases=aliases, op=op, processor=processor,\n session=session, timeout=timeout)\n future.set_result(stream)\n\n future_conn.add_done_callback(on_connect)\n\n return future", "title": "" }, { "docid": "abb7a80ebf60e555fa970ea37225ef1f", "score": "0.5731762", "text": "def submit(self, url, customurl=None, password=None, reuse=None):\r\n response = kutt.submit(apikey=self._api, url=url, customurl=customurl,\r\n password=password, reuse=reuse, host_url=self._host_url)\r\n if (response['code'] == 200) or (response['code'] == 201):\r\n print('Target: '+response['data']['target'])\r\n\r\n if response['data']['password']:\r\n print(\"Your URL is now secured with password\")\r\n\r\n print(\"\\nShorted URL is: \"+response['data']['link'])\r\n\r\n else:\r\n print(response['data']['error'])", "title": "" }, { "docid": "93c4fb04d89b9b61dcbd965dfebe38cd", "score": "0.57311034", "text": "def submission():\n return create_submission(P_ensemble())", "title": "" }, { "docid": "32e5a41f56c37457aab02018e173d072", "score": "0.57238907", "text": "def submit(ctx, manifest_path, marketplace, skip, parameters):\n if parameters is not None:\n try:\n parameters = _parse_parameters(parameters)\n except:\n click.secho(UxString.invalid_parameter, fg=\"red\")\n return\n\n _publish(ctx.obj[\"config\"], manifest_path, marketplace, skip, parameters)", "title": "" }, { "docid": "7d6ea49bc466465108fa1180e104736e", "score": "0.5721265", "text": "def _handle_submit_response(self, thisTask, stdout, stderr, status_code):\n\n #print \"task: {ident}\".format(ident=thisTask.identifier)\n #print \"stdout:\"\n #print stdout\n #print \"stderr:\"\n #print stderr\n #print \"statuscode:\"\n #print status_code\n #return True\n\n if len(stderr.strip()):\n raise RuntimeError(\"An error occured trying to submit task '{:s}', \"\n \"condor_submit returned the following error:\\n'{:s}'\".format(thisTask.identifier,stderr))\n return False\n if status_code != 0:\n raise RuntimeError(\"An error occured trying to submit task '{:s}', \"\n \"condor_submit returned non-zero status code:\\n'{:d}'\".format(thisTask.identifier,status_code))\n return False\n #if not, extract task ID\n #NOTE: with -terse condor_submit reports only an ID range in the form\n # 'first - last' where first and last are of the form 'clusterID.index' with\n # index being an integer starting at 0\n fragments = stdout.strip().split(\"-\") #with --parsable flag output is jobID;clusterName\n if len(fragments)<1:\n raise RuntimeError(\"Task '{:s}' seems to have been submitted successfully,\"\n \"but no jobID could be extracted from condors response\"\n \"'{:s}'\".format(thisTask.identifier,stdout) )\n return False\n\n jobID = fragments[0].strip()\n\n thisTask.jobid = jobID\n thisTask.setStatusCode(taskStatus.submitted) #do not set by hand, this method also updates the timestamp of the task!\n\n return True", "title": "" }, { "docid": "f3bffaaf0425dda7f1fda30b61045f5f", "score": "0.57051635", "text": "def submit(\n self,\n dispatcher: CollectingDispatcher,\n tracker: Tracker,\n domain: Dict[Text, Any],\n ) -> List[Dict]:\n\n # utter submit template\n dispatcher.utter_template(\"utter_submit\", tracker)\n return []", "title": "" }, { "docid": "2e7fd54ba841d45c4d059405e08e203d", "score": "0.5703957", "text": "def submit(n_clicks, data, regions, reporting_date, username, log):\n if n_clicks:\n id_ = str(uuid.uuid4())\n\n # queue the task\n queue = Queue(connection=conn)\n queue.enqueue(_run_model, reporting_date, data, regions, username, log, False, job_id=id_)\n\n # log 
process id in dcc.Store\n return {\"id\": id_}\n\n return {}", "title": "" }, { "docid": "8b996dd80f3f9c17ab95fc51eecfa4bb", "score": "0.56948274", "text": "def requestSubmitted(request):", "title": "" }, { "docid": "b901be6bbf51faeb21e800659c1964a4", "score": "0.56898993", "text": "def submit(\n self,\n consistencyChecking: Boolean = ON,\n datacheckJob: Boolean = False,\n continueJob: Boolean = False,\n ):\n pass", "title": "" }, { "docid": "e7e481d9a128badd198067d54faa5f8f", "score": "0.56882316", "text": "def post(self):\n curuser = models.User(self)\n admin = models.Admin(self)\n assignment = models.Assignment(self.get_argument(\"md5hash\",\"\",True))\n thisview = views.submissionView()\n print self.get_argument(\"code\",\"\",True)\n \n if curuser.can_attempt(self.get_argument(\"assignment\",\"\",True), self.get_argument(\"task\",\"\",True)):\n resultset, score = thisview.getView(self, curuser, admin, assignment) \n \n e = curuser.store_solution(self.get_argument(\"assignment\",\"\",True),self.get_argument(\"task\",\"\",True),self.get_argument(\"code\", \"\", False),resultset, score)\n if isinstance(e,Exception):\n self.write(\"Error storing solution\\n\" + e)\n self.flush(True) \n else:\n self.write(\"\"\"You have found a way to post a submission \n after submitting the maximum number of times\n or after the deadline has passed.\n \n This submission will not be accepted.\n This incident will be reported.\"\"\")\n self.flush(True)\n #report incident \n \n ##I replaced the redirect with clientside code We only want to return \n ##The returnvalues of the tests one by one", "title": "" }, { "docid": "3851467bc9089c5118c4148495d84dca", "score": "0.56869674", "text": "def submitAndReturnOutput(sourceCode, language, input):\n languageID = languageIds[language]\n link = createSubmission(sourceCode, languageID, input)\n if not link:\n return None\n\n done = False\n attempts = 0\n\n while not done:\n time.sleep(2) # delay execution for 2 seconds\n done = isSubmissionFinished(link)\n if done == None:\n return None\n if done > 0:\n break\n if attempts >= 10:\n return None\n else:\n attempts = attempts+1\n\n if done == RESULT_SUCCESS:\n output = getSubmissionOutput(link)\n return output\n else:\n return done", "title": "" }, { "docid": "7d835e3d4473f6daf33b70e5a8b6f708", "score": "0.56849164", "text": "def submit_form(form, extra_values=None, open_http=None):\n values = form.form_values()\n if extra_values:\n if hasattr(extra_values, 'items'):\n extra_values = extra_values.items()\n values.extend(extra_values)\n if open_http is None:\n open_http = open_http_urllib\n if form.action:\n url = form.action\n else:\n url = form.base_url\n return open_http(form.method, url, values)", "title": "" }, { "docid": "ba9c8a000e968ce8c58896630d8ff0ef", "score": "0.56843185", "text": "def process_post(self):", "title": "" }, { "docid": "bfe49b4d4db348c83bbc349e1304c162", "score": "0.56841004", "text": "def _execute(self, remote_name=None):\n try:\n # Submit job with PbsScript object and remote workspace\n self.job_id = self.pbs_job.submit(remote_name=remote_name)\n except UITError as e:\n if 'allocation' in str(e):\n self.status_message = 'Submission failed because subproject allocation has expired or there are ' \\\n 'insufficient hours.'\n else:\n self.status_message = str(e)\n log.exception(e)\n raise e\n except Exception as e:\n try:\n self.client.call(f'ls {self.working_dir}/*.pbs')\n except:\n self.status_message = 'No PBS script created. 
Contact web site administrator for resolution.'\n else:\n self.status_message = f'Error submitting job on \"{self.system}\": {e}'\n log.exception(e)\n raise e", "title": "" }, { "docid": "f41c2097b981f173e8e888a3652e0ece", "score": "0.5682065", "text": "def post(self, *args, **kwargs):\r\n kwargs['method'] = 'POST'\r\n return self.request(*args, **kwargs)", "title": "" }, { "docid": "b19cf28eaf948f0cab82347c995af83a", "score": "0.5681193", "text": "def _submitRun(self, point, traj, step, moreInfo=None):\n info = {}\n if moreInfo is not None:\n info.update(moreInfo)\n info.update({'traj': traj,\n 'step': step\n })\n # NOTE: explicit constraints have been checked before this!\n self.raiseADebug(f'Adding run to queue: {self.denormalizeData(point)} | {info}')\n self._submissionQueue.append((point, info))", "title": "" }, { "docid": "5e812c527082bf80c021d3088f1568f9", "score": "0.5680801", "text": "def post(self, **kwargs):\n return self.run_method(requests.post, **kwargs)", "title": "" }, { "docid": "d49ba5757f9c8001320d4cf7a4a8835f", "score": "0.5679108", "text": "def submit(self, resubmit=False, targets=None, **extra_args):\n assert self._attached, (\"Task.submit() called on detached task %s.\" %\n self)\n assert hasattr(self._controller, 'submit'), \\\n (\"Invalid `_controller` object '%s' in Task %s\" %\n (self._controller, self))\n self._controller.submit(self, resubmit, targets, **extra_args)", "title": "" }, { "docid": "99633128d80bde129032a4192b4ccac8", "score": "0.5678626", "text": "def submitSolution(email_address, ch_resp, sid, output, source, state, ch_aux):\n source_64_msg = email.message.Message()\n source_64_msg.set_payload(source)\n email.encoders.encode_base64(source_64_msg)\n\n output_64_msg = email.message.Message()\n output_64_msg.set_payload(output)\n email.encoders.encode_base64(output_64_msg)\n values = { 'assignment_part_sid' : sid, \\\n 'email_address' : email_address, \\\n 'submission' : output_64_msg.get_payload(), \\\n 'submission_aux' : source_64_msg.get_payload(), \\\n 'challenge_response' : ch_resp, \\\n 'state' : state \\\n }\n url = submit_url() \n data = urllib.urlencode(values)\n req = urllib2.Request(url, data)\n response = urllib2.urlopen(req)\n string = response.read().strip()\n result = 0\n return result, string", "title": "" }, { "docid": "e140d37f80f697b0dfb112c477c66d03", "score": "0.56728923", "text": "def hook():\n\n # TODO just a demo here\n data = {\"resourceKind\": \"WorkflowTemplate\", \"resourceName\": \"tck\",\n \"submitOptions\": {'parameters': ['name={}'.format(generate_nebula_name()), 'nebula-version=v2-nightly']}}\n resp = requests.post('https://192.168.8.96:30080/api/v1/workflows/argo/submit', verify=False, json=data)\n return jsonify(success=True)", "title": "" }, { "docid": "e3ac6394255c39b9a34a9b8130d7b931", "score": "0.5664325", "text": "def submit(self, input_):\n endpoint = \"/changes/%s/submit\" % self.id\n base_url = self.gerrit.get_endpoint_url(endpoint)\n response = self.gerrit.requester.post(\n base_url, json=input_, headers=self.gerrit.default_headers\n )\n result = self.gerrit.decode_response(response)\n return self.gerrit.changes.get(result.get(\"id\"))", "title": "" }, { "docid": "7fd5912a93df45de60018d6bd7e2dfcd", "score": "0.5658424", "text": "def response_for_submission(self, submission):\n pass", "title": "" } ]
439fbb9be6d578e0ff2961c46bc9a7d4
Saves the coordinates of all zone-arrow and aligning vertex groups
[ { "docid": "c0af716381c56c14ccb6fc72e5d9de22", "score": "0.6373062", "text": "def save_vertex_groups(mesh):\n print('saving Zones vertex groups')\n current_object = bpy.context.object\n current_mode = bpy.context.mode\n mesh.update_from_editmode()\n vgroup_names = {vgroup.index: vgroup.name for vgroup in mesh.vertex_groups}\n for v in mesh.data.vertices:\n for g in v.groups:\n vg_name = vgroup_names.get(g.group, None)\n if not vg_name:\n continue\n co = None\n if vg_name.startswith('ZVG') or vg_name.startswith(\"ALIGN_\"):\n co = \";\".join([str(v.co[0]), str(v.co[1]), str(v.co[2])])\n mesh[vg_name] = co\n select_object(current_object.name)\n if current_mode == 'OBJECT':\n bpy.ops.object.mode_set(mode='OBJECT')\n elif current_mode == 'EDIT_MESH':\n bpy.ops.object.mode_set(mode='EDIT')", "title": "" } ]
[ { "docid": "1b3ae2c9b3a777eb8d116287449da2ec", "score": "0.5913553", "text": "def savepoint():", "title": "" }, { "docid": "61721e6bc0615cdba21eb803df749aa2", "score": "0.53940016", "text": "def save_as_ijk(markupnodes, dir_path, prefix):\n for i, node in enumerate(markupnodes):\n try:\n currentPoints = node.GetCurvePointsWorld()\n all_points = {\"0\": [], \"1\": [], \"2\": []}\n for controlpoint in range(0, currentPoints.GetNumberOfPoints()):\n pt = [0, 0, 0]\n currentPoints.GetPoint(controlpoint, pt)\n pt_ijk = get_ijk_point(pt)\n for i in range(3):\n all_points[str(i)].append(pt_ijk[i])\n\n with open(os.path.join(dir_path, f\"ijk_{prefix}_{str(i)}.json\"), \"w\") as fp:\n json.dump(all_points, fp)\n except AttributeError:\n continue", "title": "" }, { "docid": "137c3f58dac7d5cc0c812c63f4e61431", "score": "0.5373368", "text": "def _save_landmarks(self, *args, **kwargs):\n raise NotImplementedError", "title": "" }, { "docid": "e9f6a0ca1f8536586374e7e42a986eb9", "score": "0.5372607", "text": "def save(self):\r\n ret = [\"{\"]\r\n ret += ['\"player\" : '+str(self.parent.player)+',']\r\n ret += ['\"camera\" : '+str(self.parent.camera)+',']\r\n for id in self.positions:\r\n obj = self.lookup[id]\r\n pos = self.get_pos(id)\r\n parent = self.get_parent(id)\r\n print pos, parent\r\n ret += ['\"'+str(id)+'\" : {']\r\n ret += ['\"type\" : \"'+obj.type+'\",']\r\n if pos is not None:\r\n ret += ['\"pos\" : ['+str(pos[0])+','+str(pos[1])+'],']\r\n else:\r\n ret += ['\"pos\" : null,']\r\n if parent is not None:\r\n ret += ['\"parent\" : '+str(parent)+',']\r\n else:\r\n ret += ['\"parent\" : null,']\r\n ret += ['\"atts\" : \"'+obj.get_attributes()+'\",']\r\n ret += ['\"name\" : \"'+obj.name+'\",']\r\n ret += ['\"char\" : \"'+obj.char+'\",']\r\n if obj.delay:\r\n ret += ['\"delay\" : '+str(obj.delay)+',']\r\n else:\r\n ret += ['\"delay\" : null,']\r\n ret += ['\"fgcol\" : '+str(list(obj.fgcol))]\r\n ret += [\"},\"]\r\n ret[-1] = ret[-1][:-1]\r\n ret += [\"}\"]\r\n return ret", "title": "" }, { "docid": "f9a19b81ca45482585410c1ffea753a0", "score": "0.53704935", "text": "def save_positions(self, file):\n # this should be rewritten to use self.positions and recording.files\n if isinstance(file, basestring):\n file = files.StandardTextFile(file, mode='w')\n cells = self.all_cells\n result = numpy.empty((len(cells), 4))\n result[:,0] = cells\n result[:,1:4] = self.positions.T \n if rank() == 0:\n file.write(result, {'assembly' : self.label})\n file.close()", "title": "" }, { "docid": "1b3c2012d64d06258d008f345c892f67", "score": "0.53549695", "text": "def _old_place_and_route(self):\n \n # Sort out the y-positions of the modules in each column\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n #placement.find_pin_coords( self.connection_list, drawing_object_dict, inst_col_dict, True )\n # placement.yplacement(\n # drawing_object_dict,\n # self.connection_list,\n # inst_col_dict\n # )\n\n # Re-Scale the drawing positions of the objects to draw\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n for draw_obj in self.drawing_object_dict.values():\n\n if draw_obj.obj_type is 'module':\n x_pos = ( 150 * draw_obj.position.x )\n y_pos = ( draw_obj.position.y ) * 50\n elif draw_obj.obj_type is 'port':\n x_pos = 50 + ( 150 * draw_obj.position.x )\n y_pos = ( draw_obj.position.y ) * 50 \n\n draw_obj.setPosition( wx.Point( x_pos, y_pos ) )\n draw_obj._update_sizes()", "title": "" }, { "docid": "e99db405ce26b2ec45f9c4c9595da316", "score": "0.53541046", "text": "def 
organize_the_maze(self):\n with open(self.filename) as map_file:\n\n for x, line in enumerate(map_file):\n for y, col in enumerate(line):\n if col == self.constants.PATH_CHAR:\n self.paths.add((x, y))\n elif col == self.constants.START_CHAR:\n self.start.add((x, y))\n self.paths.add((x, y))\n elif col == self.constants.GOAL_CHAR:\n self.goal.add((x, y))\n self.paths.add((x, y))\n elif col == self.constants.WALL_CHAR:\n self.walls.add((x, y))", "title": "" }, { "docid": "73268e2a05665a794cc9265edc441909", "score": "0.5323381", "text": "def save_landmarks(self, *args, **kwargs):\n raise NotImplementedError", "title": "" }, { "docid": "8ea2f91050837313f92e04875c32e36e", "score": "0.52958155", "text": "def savegaze():\n from remodnav.tests import utils as ut\n import pylab as pl\n import datalad.api as dl\n\n # use two examplary files (lab + MRI) used during testing as well\n # hardcoding those, as I see no reason for updating them\n infiles = [\n op.join(\n 'data',\n 'raw_eyegaze', 'sub-32', 'beh',\n 'sub-32_task-movie_run-5_recording-eyegaze_physio.tsv.gz'),\n op.join(\n 'data',\n 'raw_eyegaze', 'sub-02', 'ses-movie', 'func',\n 'sub-02_ses-movie_task-movie_run-5_recording-eyegaze_physio.tsv.gz'\n ),\n ]\n # one call per file due to https://github.com/datalad/datalad/issues/3356\n for f in infiles:\n dl.get(f)\n for f in infiles:\n # read data\n data = np.recfromcsv(f,\n delimiter='\\t',\n names=['x', 'y', 'pupil', 'frame'])\n\n # adjust px2deg conversion factor according to datafile\n pxdeg, ext = (0.0266711972026, 'lab') if '32' in f \\\n else (0.0185581232561, 'mri')\n clf = EyegazeClassifier(\n px2deg=pxdeg,\n sampling_rate=1000.0)\n p = clf.preproc(data)\n # lets go with 10 seconds to actually see details. This particular time\n # window is within the originally plotted 50s and contains missing data\n # for both data types (lab & mri)\n events = clf(p[15000:25000])\n\n fig = pl.figure(\n # fake size to get the font size down in relation\n figsize=(14, 2),\n dpi=120,\n frameon=False)\n ut.show_gaze(\n pp=p[15000:25000],\n events=events,\n sampling_rate=1000.0,\n show_vels=True,\n coord_lim=(0, 1280),\n vel_lim=(0, 1000))\n pl.savefig(\n op.join('img', 'remodnav_{}.svg'.format(ext)),\n transparent=True,\n bbox_inches=\"tight\")\n pl.close()", "title": "" }, { "docid": "87958621d8a8748ae348e6213b7f2f78", "score": "0.52840453", "text": "def createZones(self):\n with open('assets/mapCoords.csv', 'r') as f:\n f.readline()\n for line in f:\n (name, x, y, w, h) = line.split(\",\")\n x = int(x)\n y = int(y)\n w = int(w)\n h = int(h)\n zone = Zone(self, name, (x, y), (w, h), self.scale)\n zone.zoneActivated.connect(self.parent.log.addEntry)\n self.zones[name] = zone", "title": "" }, { "docid": "30224afcf7d9137fb552241cf31c222f", "score": "0.5280108", "text": "def save_areas(self):\n with open(\"resources/data/areas.wf\", \"w\") as file:\n for area in self.areas:\n a = self.areas[area]\n file.write(a.area_name)\n for spot in a:\n file.write(\",\" + spot.spot_id)\n file.write(\"\\n\")", "title": "" }, { "docid": "3b72877b250b8c8249e81a51c57ea96c", "score": "0.5275549", "text": "def get_persistence_from_cornerpoints(self):\n self.persistence_to_plot = [(c.k, (c.birth, c.death)) for c in self.cornerpoints]", "title": "" }, { "docid": "ebd943367241ce5242a3fb9ac89058d4", "score": "0.5227357", "text": "def save_positions(self, file): # @ReservedAssignment\n file_handle = open(file, \"w\")\n file_handle.write(self.positions)\n file_handle.close()", "title": "" }, { "docid": 
"abf3243d0b0f3a201c8733ed83037610", "score": "0.5193897", "text": "def groupe_point(self):\r\n zone_grp=gp.read_file(self.fichier_zone)\r\n #jointure\r\n self.df_trafic=gp.sjoin(self.df_trafic,zone_grp, predicate='within')", "title": "" }, { "docid": "5b192bd9fb1fd348bd361288b0b51259", "score": "0.5160164", "text": "def save_coordinates(self, filename):\n f=h5py.File(filename, 'w')\n f.attrs['resolution'] = self.resolution\n f.attrs['origin'] = self.origin\n f.attrs['dimensions'] = self.dimensions\n coords_files=['voxel_coordinates','triangles', 'depths','triangle_coordinates']\n for coords_file in coords_files:\n dset = f.require_dataset( coords_file ,\n shape = self.volume_surf_coordinates[coords_file].shape ,\n dtype = self.volume_surf_coordinates[coords_file].dtype,\n compression = \"gzip\", compression_opts = 9)\n dset[:] = self.volume_surf_coordinates[coords_file]\n f.close()", "title": "" }, { "docid": "70cb761d1bdfe71d68979b68f1319847", "score": "0.5118652", "text": "def save_arrays(coords, edof, dofs, bdofs, elementmarkers, boundaryElements, markerDict, name=\"unnamed_arrays\"):\n\n if not name.endswith(\".cfma\"):\n name = name + \".cfma\"\n with open(name, 'wb') as file:\n pickle.dump(coords, file)\n pickle.dump(edof, file)\n pickle.dump(dofs, file)\n #for key in bdofs.items():\n # print(key, markerDict[key])\n pickle.dump(bdofs, file)\n pickle.dump(elementmarkers, file)\n pickle.dump(boundaryElements, file)\n pickle.dump(markerDict, file)", "title": "" }, { "docid": "9ea1b3d93be18c9238b52a8c69133aa2", "score": "0.5108426", "text": "def annotate_all(self):\n for zone in self._zones:\n self.annotate_map(zone, save=True, show=False)", "title": "" }, { "docid": "8def09b4e4f5a2a7bdcd5a9061ae8ee7", "score": "0.50969034", "text": "def save_d(self):\n data = {\"pout\": dict((hash(x),x.pos) for x in self.points),\n \"lout\": [(map(hash,x.points),x.highlight) for x in self.links]\n }\n return data", "title": "" }, { "docid": "350c06f87bdd144c89216dbab13eb951", "score": "0.50834453", "text": "def save(self, fp):\n np.savez(fp, data=self.data, IDs=self.IDlst)", "title": "" }, { "docid": "183cd79a71489021ead197b50b789c93", "score": "0.5035973", "text": "def write_to_esmf_mesh(filename, center_lat, center_lon, corner_lat, corner_lon, mask, area=None):\n # create array with unique coordinate pairs\n # remove coordinates that are shared between the elements\n corner_pair = da.stack([corner_lon.T.reshape((-1,)).T, corner_lat.T.reshape((-1,)).T], axis=1)\n\n # REPLACED: corner_pair_uniq = dd.from_dask_array(corner_pair).drop_duplicates().to_dask_array(lengths=True)\n # following reduces memory by %17\n corner_pair_uniq = dd.from_dask_array(corner_pair).drop_duplicates().values\n corner_pair_uniq.compute_chunk_sizes()\n\n # check size of unique coordinate pairs\n dims = mask.shape\n nlon = dims[0]\n nlat = dims[1]\n elem_conn_size = nlon*nlat+nlon+nlat+1\n if corner_pair_uniq.shape[0] != elem_conn_size:\n print('The size of unique coordinate pairs is {} but expected size is {}!'.format(corner_pair_uniq.shape[0], elem_conn_size))\n print('Please check the input file or try to force double precision with --double option. 
Exiting ...')\n sys.exit(2)\n\n # create element connections\n corners = dd.concat([dd.from_dask_array(c) for c in [corner_lon.T.reshape((-1,)).T, corner_lat.T.reshape((-1,)).T]], axis=1)\n corners.columns = ['lon', 'lat']\n elem_conn = corners.compute().groupby(['lon','lat'], sort=False).ngroup()+1\n elem_conn = da.from_array(elem_conn.to_numpy())\n\n # create new dataset for output\n out = xr.Dataset()\n\n out['origGridDims'] = xr.DataArray(np.array(center_lon.shape, dtype=np.int32),\n dims=('origGridRank'))\n\n out['nodeCoords'] = xr.DataArray(corner_pair_uniq,\n dims=('nodeCount', 'coordDim'),\n attrs={'units': 'degrees'})\n\n out['elementConn'] = xr.DataArray(elem_conn.T.reshape((4,-1)).T,\n dims=('elementCount', 'maxNodePElement'),\n \t\t attrs={'long_name': 'Node indices that define the element connectivity'})\n out.elementConn.encoding = {'dtype': np.int32}\n\n out['numElementConn'] = xr.DataArray(4*np.ones(center_lon.size, dtype=np.int32),\n dims=('elementCount'),\n attrs={'long_name': 'Number of nodes per element'})\n\n out['centerCoords'] = xr.DataArray(da.stack([center_lon.T.reshape((-1,)).T,\n center_lat.T.reshape((-1,)).T], axis=1),\n dims=('elementCount', 'coordDim'),\n attrs={'units': 'degrees'})\n\n # add area if it is available\n if area:\n out['elementArea'] = xr.DataArray(area.T.reshape((-1,)).T,\n dims=('elementCount'),\n attrs={'units': 'radians^2',\n 'long_name': 'area weights'})\n\n # add mask\n out['elementMask'] = xr.DataArray(mask.T.reshape((-1,)).T,\n dims=('elementCount'),\n attrs={'units': 'unitless'})\n out.elementMask.encoding = {'dtype': np.int32}\n\n # force no '_FillValue' if not specified\n for v in out.variables:\n if '_FillValue' not in out[v].encoding:\n out[v].encoding['_FillValue'] = None\n\n # add global attributes\n out.attrs = {'title': 'ESMF unstructured grid file for rectangular grid with {} dimension'.format('x'.join(list(map(str,center_lat.shape)))),\n 'created_by': os.path.basename(__file__),\n 'date_created': '{}'.format(datetime.now()),\n 'conventions': 'ESMFMESH',\n }\n\n # write output file\n if filename is not None:\n print('Writing {} ...'.format(filename))\n out.to_netcdf(filename)", "title": "" }, { "docid": "4cec48d79d72f898ba849e6b7dbe2101", "score": "0.50202215", "text": "def create_arrows(self, arrows):\n arrows_dict = dict()\n for key, value in arrows.items():\n if key in self.created_arrows.keys():\n # update if already existe\n self.created_arrows[key].update()\n if not self.created_arrows[key].is_deleted:\n arrows_dict[key] = self.created_arrows[key]\n if self.current_id == self.created_arrows[key].player_id:\n data = \" \".join([\"arrow\", \"update\", str(self.created_arrows[key].id),\n str(self.created_arrows[key].pos.x), str(self.created_arrows[key].pos.y)])\n self.server.send(data)\n else:\n # create a new arrow\n pos = vec(int(value[\"pos\"][\"x\"]), int(value[\"pos\"][\"y\"]))\n dir = vec(float(value[\"dir\"][\"x\"]), float(value[\"dir\"][\"y\"]))\n vel = vec(float(value[\"vel\"][\"x\"]), float(value[\"vel\"][\"y\"]))\n a = Arrow(self,\n pos,\n dir,\n vel,\n int(value[\"damage\"]), key, self.current_id)\n arrows_dict[key] = a\n\n return arrows_dict", "title": "" }, { "docid": "4eb049abd60f3abf8c1eea3f4af15c7e", "score": "0.50167763", "text": "def save(self,filepath):\r\n\t\tnp.savez(filepath, K=self.K, _A=self._A, _G=self._G, counter=self.counter)", "title": "" }, { "docid": "ff847077966bbdf15bf1d611e3551d3c", "score": "0.49970272", "text": "def save_all_sidecar_files(cls):\n for map in cls.maps.values():\n 
map.save_sidecar_files()", "title": "" }, { "docid": "a991b0430ea1146c0d0b8970fad139aa", "score": "0.49947274", "text": "def save(self, f):\n for start, tr in zip(self.trajstart, self.tr2pos):\n f.write('%d\\n'%start)\n f.write('\\t'.join(['%d'%p for p in tr])+'\\n')", "title": "" }, { "docid": "1e7a43f4243cc67595d76db653d45396", "score": "0.4985213", "text": "def _make_swaz_drn_points():\n # only run one set\n import geopandas as gpd\n\n raise NotImplementedError('depreciated')\n paths = [\n \"{}/m_ex_bd_inputs/raw_sw_samp_points/drn/non_carpet_drains.shp\".format(smt.sdp),\n \"{}/m_ex_bd_inputs/raw_sw_samp_points/drn/carpet_drains.shp\".format(smt.sdp)\n ]\n for path in paths:\n data = gpd.read_file(path)\n base_dir = \"{}/m_ex_bd_inputs/raw_sw_samp_points/drn/other\".format(smt.sdp)\n for group in set(data.group):\n temp = data.loc[data.group == group]\n temp.to_file('{}/{}.shp'.format(base_dir, group), driver='ESRI Shapefile')", "title": "" }, { "docid": "805c4357003e5c6c45c64e5985b14944", "score": "0.4985099", "text": "def save(self, folder):\n precomputed_maps_proto_conversion.write_graph_array(\n self.graphs_list, folder, 'graphs_list')\n\n for int_array, name in [\n (self.stone_to_reward, 'stone_to_reward'),\n (self.drop_reward, 'drop_reward'),\n (self.edge_exists, 'edge_exists'),\n (self.stone_maps, 'stone_maps'),\n (self.potion_maps, 'potion_maps'),\n (self.react_result, 'react_result'),\n (self.partial_stone_map_update, 'partial_stone_map_update'),\n (self.potion_to_pair, 'potion_to_pair'),\n (self.perm_index_to_index, 'perm_index_to_index'),\n (self.index_to_perm_index, 'index_to_perm_index'),\n (self.missing_edge_no_change, 'missing_edge_no_change'),\n (self.update_partial_graph_no_change, 'update_partial_graph_no_change'),\n (self.partial_stone_map_to_stone_map, 'partial_stone_map_to_stone_map'),\n ]:\n precomputed_maps_proto_conversion.write_int_array(int_array, folder, name)\n\n for int_array, name in [\n (self.partial_graph_to_matching_graphs,\n 'partial_graph_to_matching_graphs'),\n (self.graphs_with_edge, 'graphs_with_edge'),\n (self.potion_masks, 'potion_masks'),\n (self.no_effect_from_partial_chem, 'no_effect_from_partial_chem'),\n ]:\n precomputed_maps_proto_conversion.write_bitfield_array(\n int_array, folder, name)\n\n precomputed_maps_proto_conversion.write_float_array(\n self.graph_index_distr, folder, 'graph_index_distr')\n\n for int_array, name in [\n (self.possible_latent_dims, 'possible_latent_dims'),\n (self.partial_graph_update, 'partial_graph_update'),\n (self.poss_p_maps, 'poss_p_maps'),\n (self.poss_s_maps, 'poss_s_maps'),\n ]:\n precomputed_maps_proto_conversion.write_list_ints_array(\n int_array, folder, name)\n\n precomputed_maps_proto_conversion.write_possible_latent_dirs(\n self.possible_latent_dirs, folder, 'possible_latent_dirs')\n precomputed_maps_proto_conversion.write_partial_potion_map_update(\n self.partial_potion_map_update, folder, 'partial_potion_map_update')\n\n proto = precomputed_maps_pb2.PartialGraphIndexToPossibleIndex(\n entries=self.partial_graph_index_to_possible_index)\n io.write_proto(\n os.path.join(folder, 'partial_graph_index_to_possible_index'),\n proto.SerializeToString())", "title": "" }, { "docid": "79516359c2d072bdd95525e29e1a0c63", "score": "0.49767894", "text": "def _write_nodes(self, bdf_file, size=8, is_double=False):\n self.grdset.write_card(bdf_file, size=size, is_double=is_double)\n self.grid.write_card(bdf_file, size=size, is_double=is_double)\n self.point.write_card(bdf_file, size=size, is_double=is_double)\n 
self.epoint.write_card(bdf_file, size=size, is_double=is_double)\n self.spoint.write_card(bdf_file, size=size, is_double=is_double)\n self.pointax.write_card(bdf_file, size=size, is_double=is_double)", "title": "" }, { "docid": "2c5bc6d1dbbb75fdba3b205aaf6d20b3", "score": "0.49724382", "text": "def snapshot(self, **kwargs):\n if 'ax' in kwargs:\n ax = kwargs['ax']\n else:\n fig = plt.figure(figsize=(18, 16))\n ax = fig.add_subplot(111, projection=\"3d\")\n ax.grid()\n ax.set_xlim3d([-0.2, 1.2])\n ax.set_ylim3d([-0.7, 0.7])\n ax.set_zlim3d([0, 1.4])\n if 'G' in kwargs:\n G = kwargs['G']\n else:\n G = np.identity(4)\n js = self.robot.joints\n N = len(js)\n for idx in range(N-1):\n pa = (G @ np.hstack((js[idx].origin1, ar([1]))))[0:3]\n pb = (G @ np.hstack((js[idx+1].origin1, ar([1]))))[0:3]\n line, = ax.plot(np.array([pa[0], pb[0]]), np.array([pa[1], pb[1]]),\n np.array([pa[2], pb[2]]), \"#ff9500\", lw=1.5)\n for joint in js:\n pa = (G @ np.hstack((joint.origin1, ar([1]))))[0:3]\n line, = ax.plot(np.array([pa[0]]), np.array([pa[1]]), np.array([pa[2]]),\n 'o',\n markeredgecolor='b', markerfacecolor='none', ms=8, lw=2)\n\n arrow_color = [\"r\", \"g\", \"b\"]\n head1 = (G @ np.hstack((self.robot.tool2world(np.array([0,0,0.1]))[0:3],\n ar([1]))))[0:3]\n head2 = (G @ np.hstack((self.robot.tool2world(np.array([0,0.1,0]))[0:3],\n ar([1]))))[0:3]\n head3 = (G @ np.hstack((self.robot.tool2world(np.array([0.1,0,0]))[0:3],\n ar([1]))))[0:3]\n arrow_heads = [head1, head2, head3]\n tail = (G @ np.hstack((self.robot.joints[-1].origin1, ar([1]))))[0:3]\n for idx, head in enumerate(arrow_heads):\n line, = ax.plot(ar([tail[0], head[0]]),\n ar([tail[1], head[1]]),\n ar([tail[2], head[2]]),\n arrow_color[idx],lw=1)\n line, = ax.plot(ar([0]), ar([0]), ar([0]), 'gs', markersize=16)\n line, = ax.plot(\n ar([G[0][3]]), ar([G[1][3]]), ar([G[2][3]]), 'gs', markersize=16\n )\n return ax", "title": "" }, { "docid": "3a8e02250ed60de22abb214a9cf2ab9a", "score": "0.49718317", "text": "def savepoint(optimistic=False):", "title": "" }, { "docid": "3a8e02250ed60de22abb214a9cf2ab9a", "score": "0.49718317", "text": "def savepoint(optimistic=False):", "title": "" }, { "docid": "6fcedafe5f3abf7bd973478c72b4a418", "score": "0.49568555", "text": "def save_positions(self, file):\n # first column should probably be indices, not ids. 
This would make it\n # simulator independent.\n if isinstance(file, basestring):\n file = files.StandardTextFile(file, mode='w')\n cells = self.all_cells\n result = numpy.empty((len(cells), 4))\n result[:,0] = cells\n result[:,1:4] = self.positions.T \n if rank() == 0:\n file.write(result, {'population' : self.label})\n file.close()", "title": "" }, { "docid": "628afc45fc4a25d350dbe6d5185f70c9", "score": "0.4947177", "text": "def _write_coords(self, bdf_file, size=8, is_double=False):\n self.coords.write_card(bdf_file, size, is_double)", "title": "" }, { "docid": "625557576997c45c20f844276f395980", "score": "0.4913042", "text": "def save_bridge_mappings(self):", "title": "" }, { "docid": "102c4995b44a5170318a781fb082acf1", "score": "0.49117684", "text": "def snapshot(self, file):\n\n np.savez(file,\n coords=self.coords, weights=self.weights, sigmas=self.sigmas,\n parents=self.parents,\n parent_coords=self._parents,\n parent_weights = self._parent_weights\n )", "title": "" }, { "docid": "f157cbb7fe793bfe4d32a04d12fbc3f5", "score": "0.4903753", "text": "def save(filename, anim, names=None, frametime=1.0/24.0, order='zyx', positions=False, orients=True):\n \n if names is None:\n names = [\"joint_\" + str(i) for i in range(len(anim.parents))]\n \n with open(filename, 'w') as f:\n\n t = \"\"\n f.write(\"%sHIERARCHY\\n\" % t)\n f.write(\"%sROOT %s\\n\" % (t, names[0]))\n f.write(\"%s{\\n\" % t)\n t += '\\t'\n\n f.write(\"%sOFFSET %f %f %f\\n\" % (t, anim.offsets[0,0], anim.offsets[0,1], anim.offsets[0,2]) )\n f.write(\"%sCHANNELS 6 Xposition Yposition Zposition %s %s %s \\n\" % \n (t, channelmap_inv[order[0]], channelmap_inv[order[1]], channelmap_inv[order[2]]))\n\n for i in range(anim.shape[1]):\n if anim.parents[i] == 0:\n t = save_joint(f, anim, names, t, i, order=order, positions=positions)\n\n t = t[:-1]\n f.write(\"%s}\\n\" % t)\n\n f.write(\"MOTION\\n\")\n f.write(\"Frames: %i\\n\" % anim.shape[0]);\n f.write(\"Frame Time: %f\\n\" % frametime);\n \n #if orients: \n # rots = np.degrees((-anim.orients[np.newaxis] * anim.rotations).euler(order=order[::-1]))\n #else:\n # rots = np.degrees(anim.rotations.euler(order=order[::-1]))\n rots = np.degrees(anim.rotations.euler(order=order[::-1]))\n poss = anim.positions\n \n for i in range(anim.shape[0]):\n for j in range(anim.shape[1]):\n \n if positions or j == 0:\n \n f.write(\"%f %f %f %f %f %f \" % (\n poss[i,j,0], poss[i,j,1], poss[i,j,2], \n rots[i,j,ordermap[order[0]]], rots[i,j,ordermap[order[1]]], rots[i,j,ordermap[order[2]]]))\n \n else:\n \n f.write(\"%f %f %f \" % (\n rots[i,j,ordermap[order[0]]], rots[i,j,ordermap[order[1]]], rots[i,j,ordermap[order[2]]]))\n\n f.write(\"\\n\")", "title": "" }, { "docid": "6e59dd7b1ec606049d1f7763486290d0", "score": "0.48840728", "text": "def save_position(self, doc, pnode, nodename=\"position\"):\n if self.ra is None:\n return\n node = ET.SubElement(pnode, nodename)\n for name in ('ra', 'dec', 'dist', 'rv', 'rapm', 'decpm'):\n v = getattr(self, name, None)\n if v is not None:\n xmlutil.savedata(doc, node, name, v)", "title": "" }, { "docid": "d6c8d116e22551e5fc1ef2e0c8785c9e", "score": "0.4873662", "text": "def update_positions(self, members, groupLocation):\r\n\t\tfor indiv in enumerate(members):\r\n\t\t\tindiv[1].location = (groupLocation + indiv[0], 17, 'red')", "title": "" }, { "docid": "0e39fd0988fcde24e2fe65dce91a7349", "score": "0.48719448", "text": "def save(self):\n\n\t\t# Write file header\n\t\twith open(self.filename, 'a') as f:\n\t\t\tf.write('STRING' + 
'\\n')\n\t\t\tf.write('space_dimension' + ' ' + '2' + '\\n' + '\\n')\n\t\t\tf.write('vertices' + '\\n')\n\n \t# Write file vertices\n\t\tvertices_data = {'label': [], 'x': [], 'y': []}\n\t\tcount = 0\n\t\tfor j, a in enumerate(self.voronoi.vertices):\t\t \n\t\t if count != 0:\n\t\t with open(self.filename, 'a') as f:\n\t\t xx = []\n\t\t xx.append(str(count))\n\t\t xx.append(str(a[0]*1000))\n\t\t xx.append(str(a[1]*1000))\n\t\t this_row = ' '.join(str(x) for x in xx)\n\t\t f.write(this_row + '\\n') \n\t\t vertices_data['label'].append(count )\n\t\t vertices_data['x'].append(a[0]*1000)\n\t\t vertices_data['y'].append(a[1]*1000)\n\t\t count += 1\n\n\t\tvertices_dataframe = pd.DataFrame(vertices_data)\n\t\tvertices_dataframe.set_index(['label'], inplace = True)\n\n\t\t# Write self.filename edges\n\t\twith open(self.filename, 'a') as f:\n\t\t\tf.write('\\n' + 'edges' + '\\n')\n\n\t\tedges_data = {'label': [], 'v1': [], 'v2': []}\n\t\tcount = 1\n\t\tridge_vertices = []\n\t\tfor a in self.voronoi.ridge_vertices:\n\t\t if a[0] > 0 and a[1] > 0:\n\t\t with open(self.filename, 'a') as f:\n\t\t #print(count ,a[0] , a[1])\n\t\t xx = []\n\t\t xx.append(str(count))\n\t\t xx.append(str(a[0]))\n\t\t xx.append(str(a[1]))\n\t\t this_row = ' '.join(str(x) for x in xx)\n\t\t #print(this_row)\n\t\t f.write(this_row + '\\n') \n\t\t edges_data['label'].append(count)\n\t\t edges_data['v1'].append(a[0] )\n\t\t edges_data['v2'].append(a[1])\n\t\t ridge_vertices.append(np.array([a[0], a[1]]))\n\t\t count += 1\n\t\tedges_dataframe = pd.DataFrame(edges_data)\n\t\tedges_dataframe.set_index(['label'], inplace = True)\n\n\n\t\t# Write file facets\n\t\tall_edges_list = []\n\t\tfor i in self.voronoi.regions:\n\t\t if all([a > 0 for a in i ]) and len(i) != 0:\n\t\t edge_list = []\n\t\t for ff in range(len(i)):\n\t\t try:\n\t\t p1 = i[ff]\n\t\t p2 = i[ff + 1]\n\t\t count = 0\n\t\t for jj, j in enumerate(ridge_vertices):\n\t\t count += 1\n\t\t if j[0] == p1 and j[1] == p2:\n\t\t if len(edge_list) == 0:\n\t\t edge_list.append(count)\n\t\t else:\n\t\t edge_list.append(count)\n\t\t elif j[1] == p1 and j[0] == p2:\n\t\t if len(edge_list) == 0:\n\t\t edge_list.append(-count)\n\t\t else:\n\t\t edge_list.append(-count)\n\t\t except:\n\t\t p1 = i[ff]\n\t\t p2 = i[0]\n\t\t count = 0\n\t\t for jj, j in enumerate(ridge_vertices):\n\t\t count += 1\n\t\t if j[0] == p1 and j[1] == p2:\n\t\t if len(edge_list) == 0:\n\t\t edge_list.append(count)\n\t\t else:\n\t\t edge_list.append(count)\n\n\t\t elif j[1] == p1 and j[0] == p2:\n\t\t if len(edge_list) == 0:\n\t\t edge_list.append(-count)\n\t\t else:\n\t\t edge_list.append(-count)\n\t\t all_edges_list.append(edge_list)\n\n\t\twith open(self.filename, 'a') as f:\n\t\t\tf.write('\\n' + 'faces' + '\\n')\n\n\t\tars = []\n\t\tares = []\n\t\ttots = []\n\t\ttot_area = []\n\n\t\tfor j, i in enumerate(all_edges_list):\n\t\t with open(self.filename, 'a') as f:\n\t\t f.write(str(j + 1) + ' ')\n\t\t points = []\n\t\t for k, ii in enumerate(i):\n\t\t if ii > 0:\n\t\t v1 = edges_dataframe.at[ii, 'v1']\n\t\t v2 = edges_dataframe.at[ii, 'v2']\n\t\t x1, y1 = vertices_dataframe.at[v1, 'x'], vertices_dataframe.at[v1, 'y']\n\t\t x2, y2 = vertices_dataframe.at[v2, 'x'], vertices_dataframe.at[v2, 'y']\n\t\t cw_or_not = -(x2 - x1) /(y2 + y1) \n\t\t area = x1*y2 - x2*y1\n\t\t ars.append(cw_or_not)\n\t\t tots.append(area)\n\t\t with open(self.filename, 'a') as f:\n\t\t f.write(str(ii) + ' ')\n\t\t if k ==0:\n\t\t points.append(v1)\n\t\t points.append(v2)\n\t\t else:\n\t\t if v1 not in points:\n\t\t 
points.append(v1)\n\t\t elif v2 not in points:\n\t\t points.append(v2)\n\t\t else:\n\t\t v2 = edges_dataframe.at[-ii, 'v1']\n\t\t v1 = edges_dataframe.at[-ii, 'v2']\n\t\t x1, y1 = vertices_dataframe.at[v1, 'x'], vertices_dataframe.at[v1, 'y']\n\t\t x2, y2 = vertices_dataframe.at[v2, 'x'], vertices_dataframe.at[v2, 'y']\n\t\t cw_or_not = -(x2 - x1) /(y2 + y1) \n\t\t area = x1*y2 - x2*y1\n\t\t ars.append(cw_or_not)\n\t\t tots.append(area)\n\t\t with open(self.filename, 'a') as f:\n\t\t f.write(str(ii) + ' ')\n\t\t if k ==0:\n\t\t points.append(v1)\n\t\t points.append(v2)\n\t\t else:\n\t\t if v1 not in points:\n\t\t points.append(v1)\n\t\t elif v2 not in points:\n\t\t points.append(v2)\n\t\t \n\t\t ares.append(np.sum(ars))\n\t\t tot_area.append(np.sum(tots))\n\t\t ars = []\n\t\t tots = [] \n\t\t with open(self.filename, 'a') as f:\n\t\t f.write('\\n')\n\n\t\t\n\t\t# Write bodies\n\t\twith open(self.filename, 'a') as f:\n\t\t\tf.write('\\n' + 'bodies' + '\\n')\n\n\t\tfor j, i in enumerate(range(len(all_edges_list))):\n\t\t with open(self.filename, 'a') as f:\n\t\t if ares[j] < 0:\n\t\t #f.write(str(i+1) + ' ' + str(i + 1) + ' ' + 'VOLUME' + ' ' + str(abs(tot_area[j])) + '\\n')\n\t\t f.write(str(i+1) + ' ' + str(i + 1) + ' ' + 'VOLUME' + ' ' + str(500) + '\\n')\n\t\t if ares[j] > 0:\n\t\t #f.write(str(i+1) + ' ' + str(-(i + 1)) + ' ' + 'VOLUME' + ' ' + str(abs(tot_area[j]*1)) + '\\n')\n\t\t f.write(str(i+1) + ' ' + str(-(i + 1)) + ' ' + 'VOLUME' + ' ' + str(500) + '\\n')\n\n\t\t# Write gogo function\n\t\twith open(self.filename, 'a') as f:\n\t\t f.write('\\n' + 'read' + '\\n' + '\\n' + 'gogo := { g 2;' + '\\n' \n\t\t + ' o;' + '\\n' + \n\t\t ' g 5;'+ '\\n' +\n\t\t ' r;' + '\\n'\n\t\t + ' g 20;' + '\\n' +\n\t\t ' r;' + '\\n' + \n\t\t ' g 20;' + '\\n' + \n\t\t ' V 3;' + '\\n' + \n\t\t ' r;' + '\\n' + \n\t\t ' g 20;' + '\\n' +\n\t\t ' };' + '\\n' + '\\n' )", "title": "" }, { "docid": "bef1a25ceffcc2a985a69bd07197dd3c", "score": "0.48649603", "text": "def save_areas(history,outdir,index=0):\n if not os.path.exists(outdir): # if the folder doesn\"t exist create it\n os.makedirs(outdir)\n filename = \"%s/areas_%03d\"%(outdir,index)\n wfile = open(filename,\"w\")\n for tissue in history:\n for area in tissue.mesh.areas: \n wfile.write(\"%.3e \"%area)\n wfile.write(\"\\n\")", "title": "" }, { "docid": "68fcc3f7239f5d0651a6df5a7bcb69ab", "score": "0.48639488", "text": "def xz_grouped(coords):\n if coords is None:\n position = self.devices['driver'].group_position(self.group_id)\n position = position.project([0, None, 1])\n position[1] = self.axes[1].position()\n return position\n else:\n self.devices['driver'].group_move_line(self.group_id, coords[0::2])\n self.axes[1].position(coords[1])\n if wait:\n self.devices['driver'].pause_for_stages()", "title": "" }, { "docid": "61a043755c317f09f36bd5e5b3c7ece5", "score": "0.48635125", "text": "def save_map(self, filename):\n pickle.dump((self.map, self.side), open(filename, 'ab'))", "title": "" }, { "docid": "f958b60e089141ac62009331de103739", "score": "0.4858415", "text": "def save(self, filename = 'array_zest', path = '/home/eric/dev/insitu/data/zs_recovery/'):\n filename = filename# + '_Lx_' + str(self.Lx) + 'm_Ly_' + str(self.Ly) + 'm'\n self.path_filename = path + filename + '.pkl'\n f = open(self.path_filename, 'wb')\n pickle.dump(self.__dict__, f, 2)\n f.close()", "title": "" }, { "docid": "8c686c8c7a18e752c53d78232a456dd7", "score": "0.48580047", "text": "def reset_write_positions(self):\n self.pane_write_positions = {}", "title": "" }, { "docid": 
"cf3e18c5ad9edd83414b7d467412c7ce", "score": "0.4856478", "text": "def saveShape(self, shape, file_name, file_type, normals=True):\n vtk_poly_data = self.shapeToVTKPolyData(shape)\n self.saveVtkPolyData(vtk_poly_data, file_name, file_type)", "title": "" }, { "docid": "e3326be3d5c209af9d85dfda6bd7f795", "score": "0.4850436", "text": "def save_target_regions(self):\n name = self.targ_areas_dropdown.currentText().strip()\n if name == '':\n self.targ_areas_dropdown.visual_warning()\n return\n elif name in self.dirs.settings.target_areas:\n msg = 'Overwrite this Save?'\n nuke = qg.QMessageBox.warning(self, 'WARNING', msg, qg.QMessageBox.No | qg.QMessageBox.Yes,\n qg.QMessageBox.No)\n if nuke == qg.QMessageBox.No:\n return\n curr_coords = self.dirs.settings.last_targ_areas\n curr_coords.name = name\n self.dirs.settings.target_areas[name] = curr_coords\n self.set_targ_areas_dropdown()", "title": "" }, { "docid": "bd40d6471d735ab7e258756f5324a921", "score": "0.48403686", "text": "def write_coordinates_to_shp_file(config, locator, list_geotranch, name):\n\n input_street_shp = locator.get_street_network()\n output_path_shp = locator.get_electric_network_output_location(name)\n\n geometry = [shapely.geometry.LineString(json.loads(g)) for g in list_geotranch]\n\n gdf_street = gpd.GeoDataFrame.from_file(input_street_shp)\n lat, lon = get_lat_lon_projected_shapefile(gdf_street)\n crs = get_projected_coordinate_system(lat, lon)\n gdf = gpd.GeoDataFrame(crs=crs, geometry=geometry)\n\n gdf.to_file(output_path_shp, driver='ESRI Shapefile', encoding='ISO-8859-1')", "title": "" }, { "docid": "8ae3a56391b60af815aaf334221a08e4", "score": "0.48379388", "text": "def __shpRecords(self):\n f = self.__getFileObj(self.shp)\n f.seek(100)\n recNum = 1\n for s in self._shapes:\n self._offsets.append(f.tell())\n # Record number, Content length place holder\n f.write(pack(\">2i\", recNum, 0))\n recNum += 1\n start = f.tell()\n # Shape Type\n if self.shapeType != 31:\n s.shapeType = self.shapeType\n f.write(pack(\"<i\", s.shapeType))\n # All shape types capable of having a bounding box\n if s.shapeType in (3, 5, 8, 13, 15, 18, 23, 25, 28, 31):\n try:\n f.write(pack(\"<4d\", *self.__bbox([s])))\n except error:\n raise ShapefileException(\"Falied to write bounding box for record %s. Expected floats.\" % recNum)\n # Shape types with parts\n if s.shapeType in (3, 5, 13, 15, 23, 25, 31):\n # Number of parts\n f.write(pack(\"<i\", len(s.parts)))\n # Shape types with multiple points per record\n if s.shapeType in (3, 5, 8, 13, 15, 23, 25, 31):\n # Number of points\n f.write(pack(\"<i\", len(s.points)))\n # Write part indexes\n if s.shapeType in (3, 5, 13, 15, 23, 25, 31):\n for p in s.parts:\n f.write(pack(\"<i\", p))\n # Part types for Multipatch (31)\n if s.shapeType == 31:\n for pt in s.partTypes:\n f.write(pack(\"<i\", pt))\n # Write points for multiple-point records\n if s.shapeType in (3, 5, 8, 13, 15, 23, 25, 31):\n try:\n [f.write(pack(\"<2d\", *p[:2])) for p in s.points]\n except error:\n raise ShapefileException(\"Failed to write points for record %s. Expected floats.\" % recNum)\n # Write z extremes and values\n if s.shapeType in (13, 15, 18, 31):\n try:\n f.write(pack(\"<2d\", *self.__zbox([s])))\n except error:\n raise ShapefileException(\"Failed to write elevation extremes for record %s. 
Expected floats.\" % recNum)\n try:\n if hasattr(s, \"z\"):\n f.write(pack(\"<%sd\" % len(s.z), *s.z))\n else:\n [f.write(pack(\"<d\", p[2])) for p in s.points]\n except error:\n raise ShapefileException(\"Failed to write elevation values for record %s. Expected floats.\" % recNum)\n # Write m extremes and values\n if s.shapeType in (13, 15, 18, 23, 25, 28, 31):\n try:\n if hasattr(s, \"m\"):\n f.write(pack(\"<%sd\" % len(s.m), *s.m))\n else:\n f.write(pack(\"<2d\", *self.__mbox([s])))\n except error:\n raise ShapefileException(\"Failed to write measure extremes for record %s. Expected floats\" % recNum)\n try:\n [f.write(pack(\"<d\", p[3])) for p in s.points]\n except error:\n raise ShapefileException(\"Failed to write measure values for record %s. Expected floats\" % recNum)\n # Write a single point\n if s.shapeType in (1, 11, 21):\n try:\n f.write(pack(\"<2d\", s.points[0][0], s.points[0][1]))\n except error:\n raise ShapefileException(\"Failed to write point for record %s. Expected floats.\" % recNum)\n # Write a single Z value\n if s.shapeType == 11:\n if hasattr(s, \"z\"):\n try:\n if not s.z:\n s.z = (0,)\n f.write(pack(\"<d\", s.z[0]))\n except error:\n raise ShapefileException(\"Failed to write elevation value for record %s. Expected floats.\" % recNum)\n else:\n try:\n if len(s.points[0]) < 3:\n s.points[0].append(0)\n f.write(pack(\"<d\", s.points[0][2]))\n except error:\n raise ShapefileException(\"Failed to write elevation value for record %s. Expected floats.\" % recNum)\n # Write a single M value\n if s.shapeType in (11, 21):\n if hasattr(s, \"m\"):\n try:\n if not s.m:\n s.m = (0,)\n f.write(pack(\"<1d\", s.m[0]))\n except error:\n raise ShapefileException(\"Failed to write measure value for record %s. Expected floats.\" % recNum)\n else:\n try:\n if len(s.points[0]) < 4:\n s.points[0].append(0)\n f.write(pack(\"<1d\", s.points[0][3]))\n except error:\n raise ShapefileException(\"Failed to write measure value for record %s. 
Expected floats.\" % recNum)\n # Finalize record length as 16-bit words\n finish = f.tell()\n length = (finish - start) // 2\n self._lengths.append(length)\n # start - 4 bytes is the content length field\n f.seek(start - 4)\n f.write(pack(\">i\", length))\n f.seek(finish)", "title": "" }, { "docid": "f7d3e3680c24438c806941989c08ea98", "score": "0.4833944", "text": "def save_position(self):\n self._position_provider.push_state()", "title": "" }, { "docid": "9f4631fa65cc5a322d00960c22193276", "score": "0.48319474", "text": "def _save_placement(self, placement):\n placement_data = json.dumps(placement)\n placement_zdata = zlib.compress(placement_data.encode())\n self.backend.put(z.path.placement(), placement_zdata)", "title": "" }, { "docid": "be9e358b47fbc33e8b736e7692e68f91", "score": "0.48266914", "text": "def _writeOutputCoordinates(self):\n tmpdic = {}\n\n for slit, value in self.slits.items():\n list = []\n\n fh = open('%s_coordinates.txt' % slit[:-5], 'w')\n fh.write('#File written by findSlitmaskPosition.py on %s\\n'\\\n % datetime.datetime.isoformat(datetime.datetime.now()))\n fh.write('#x\\ty\\tx2\\ty2\\tRA\\tDEC\\n')\n for tmp1, x, y, tmp2 in zip(value['coordinates'],\n value['coordinatesX'],\n value['coordinatesY'],\n value['coordinatesXY']):\n fh.write('%i %i %f %f %f %f \\n' % (x, y, tmp2[0], tmp2[1], tmp1[0], tmp1[1]))\n list.append([x, y, tmp2[0], tmp2[1], tmp1[0], tmp1[1]])\n\n tmpdic[slit] = list\n\n fh.close()\n\n write.cPickleDumpDictionary(tmpdic, 'coordinates.pk')", "title": "" }, { "docid": "68763fe9c950317f2e03601f6f7b53cb", "score": "0.48125944", "text": "def save_all(filepath, annotations_dir=ANNOTATIONS_DIR):\n nodes = getNodesByClass(\"vtkMRMLScalarVolumeNode\")\n assert len(nodes) == 1\n node = nodes[0]\n studyUID = os.path.basename(filepath)[: -len(\".nii.gz\")]\n dir_path = os.path.join(annotations_dir, studyUID)\n segnodes = getNodesByClass(\"vtkMRMLSegmentationNode\")\n print(segnodes)\n if len(segnodes) == 1:\n if not os.path.exists(dir_path):\n print(\"{} does not exists, Creating directory.\".format(dir_path))\n os.mkdir(dir_path)\n segnode = segnodes[0]\n slicer.util.saveNode(segnode, os.path.join(dir_path, studyUID + \".seg.nrrd\"))\n slicer.util.saveNode(node, os.path.join(dir_path, studyUID + \".nrrd\"))\n markupnodes = getNodesByClass(\"vtkMRMLMarkupsNode\")\n markuplinenodes = [node for node in markupnodes if \"MarkupsLine\" in str(node)]\n markupcurvenodes = [node for node in markupnodes if \"MarkupsCurve\" in str(node)]\n for i, markupnode in enumerate(markuplinenodes):\n slicer.util.saveNode(markupnode, os.path.join(dir_path, f\"L_{str(i)}.mrk.json\"))\n for i, markupnode in enumerate(markupcurvenodes):\n slicer.util.saveNode(markupnode, os.path.join(dir_path, f\"C_{str(i)}.mrk.json\"))\n save_resampled_points(markupcurvenodes, dir_path)\n save_as_ijk(markupcurvenodes, dir_path, \"C\")\n save_as_ijk(markuplinenodes, dir_path, \"L\")\n print(\"Saving all segmentations to {}\".format(dir_path))", "title": "" }, { "docid": "e4aca262e047dfa5537a76d8d5d474eb", "score": "0.4811768", "text": "def splineIK_organizeOutliner(self, *args):\n if self.jntLayoutBtn.getSelect()==1:\n pm.group(self.joints[:], name=self.jntNameFeild.getText() + '_JNT_GRP')\n pm.group(self.jntNameFeild.getText() + '_CRV', self.jntNameFeild.getText() + '_JNT_GRP', name=self.jntNameFeild.getText() + '_GRP')\n\n if self.jntLayoutBtn.getSelect()==2:\n pm.group(self.joints[0], name=self.jntNameFeild.getText() + '_IK_JNT_GRP')\n if self.chkboxGrp.getValue1()==1:\n 
pm.group(self.jntNameFeild.getText() + '_CRV', self.jntNameFeild.getText() + '_SIK', name=self.jntNameFeild.getText() + '_noTouch_GRP')\n pm.setAttr(self.jntNameFeild.getText() + '_noTouch_GRP.visibility', False)\n if self.chkboxGrp.getValue2()==0:\n pm.group(self.jntNameFeild.getText() + '_IK_JNT_GRP', self.jntNameFeild.getText() + '_noTouch_GRP', name=self.jntNameFeild.getText() + '_GRP')\n if self.chkboxGrp.getValue2()==1:\n pm.group(self.jntNameFeild.getText() + '_base_JNT', self.jntNameFeild.getText() + '_mid_JNT', self.jntNameFeild.getText() + '_end_JNT', name=self.jntNameFeild.getText() + '_INF_GRP')\n pm.setAttr(self.jntNameFeild.getText() + '_INF_GRP.visibility', False)\n pm.group(self.jntNameFeild.getText() + '_base_CTRLOffset_GRP', self.jntNameFeild.getText() + '_mid_CTRLOffset_GRP', self.jntNameFeild.getText() + '_end_CTRLOffset_GRP', name=self.jntNameFeild.getText() + '_CTRL_GRP')\n pm.group(self.jntNameFeild.getText() + '_IK_JNT_GRP', self.jntNameFeild.getText() + '_INF_GRP', name=self.jntNameFeild.getText() + '_JNT_GRP')\n pm.group(self.jntNameFeild.getText() + '_JNT_GRP', self.jntNameFeild.getText() + '_CTRL_GRP', self.jntNameFeild.getText() + '_noTouch_GRP', name=self.jntNameFeild.getText() + '_GRP')", "title": "" }, { "docid": "84f8471070ebe5df166ab5f75fc9844f", "score": "0.48105696", "text": "def save_map(self):\n directory = \"Output/Seed{0}-{1}x{1}/\".format(self.seed, self.n)\n file_name = \"w{0}sl{1:+.2f}.png\".format(self.weathering, self.sea_level)\n if not os.path.exists(directory):\n os.makedirs(directory)\n ImageOps.invert(self.map.convert(\"L\")).convert(\"1\").save(directory + file_name)", "title": "" }, { "docid": "48d3be14248619f77181245451ceba42", "score": "0.48094574", "text": "def addObstacleBehindEachDatamatrixMark(self):\n #import datamatrix_topology\n # import pdb\n # TODO optimization ne pas effacer les obstacles des anciennes datamatrix (a condition que la position soit la meme) mais juste rajouter les nouvelles\n try:\n for key in self._ObstacleBehindDatamatrix:\n self.delObstacle(key)\n\n except AttributeError:\n pass\n self._ObstacleBehindDatamatrix = []\n\n for k, aPose6D in self.aGlobalVs.iteritems():\n #boxObstacle = Obstacle3D(0.01, 0.8, 1.0, aPose6D=aPose6D)\n boxObstacle = Obstacle3D(0.01, 0.4, 2.0, aPose6D=aPose6D)\n nIdx = self.addStaticObstacle(boxObstacle)\n self._ObstacleBehindDatamatrix.append(nIdx)", "title": "" }, { "docid": "36a0198283d5eef003236582ecd9c608", "score": "0.48059186", "text": "def save(self):\n self.saveGraphicState()", "title": "" }, { "docid": "d31d782ad9a71a3063118d1976a6733e", "score": "0.48012415", "text": "def savelayers(grids, filename):\n layers = collections.OrderedDict()\n metadata = collections.OrderedDict()\n for key in grids.keys():\n layers[key] = grids[key]['grid'].getData()\n metadata[key] = {'description': grids[key]['description'], 'type': grids[key]['type'], 'label': grids[key]['label']}\n origin = {}\n header = {}\n mgrid = MultiHazardGrid(layers, grids[key]['grid'].getGeoDict(), origin, header, metadata=metadata)\n mgrid.save(filename)", "title": "" }, { "docid": "7af5f9cf27950b16eb0e99e5484f827b", "score": "0.47990644", "text": "def store_poses(self, moves):\n\n RosProxy().call_service(\n \"/%s/store_poses\" %\n (self.name), MoveAlong, moves)", "title": "" }, { "docid": "5e5da3318cba568d7829f54d2c217bc5", "score": "0.47980094", "text": "def setOrigin(self):\n for ebsdMap in self.ebsdMaps:\n ebsdMap.locateGrainID(clickEvent=self.clickSetOrigin)", "title": "" }, { "docid": 
"61d70588dfce7bddb577b63c67a0c0dc", "score": "0.47979137", "text": "def draw_arrows(self,surface):\n\n for s in self.sindices:\n a = self.arrows[s]\n x,y = self.state2coord(s, center = True)\n arrowpoints = [(y + z[0],x + z[1]) for z in self.t[self.allowed_actions[a]]]\n pygame.draw.lines(surface,(55,55,55),0, arrowpoints, 1)", "title": "" }, { "docid": "1cf39ca7bd4cfd94abc839614796251d", "score": "0.47974825", "text": "def place(self):\n gatesMap = self._gatesMap\n for i in range(len(self._gates) - 1, -1, -1):\n currentGate = self._gates[i]\n # put the IOs to their fixed location\n if (gatesMap[currentGate].isIO()):\n loc = self._grid.getIOLoc(currentGate)\n self._grid.fill(currentGate, loc[0], loc[1])\n self._gatesPlace[currentGate] = loc\n else:\n # fanout should be an object\n fanout = list(self._grid.getOutputs(currentGate))[0] # we only deal with trees\n if (gatesMap[fanout].isIO()):\n fanoutLoc = self._grid.getIOLoc(fanout)\n else:\n fanoutLoc = self._grid.getLoc(fanout)\n loc = self.getOptLoc(fanout, fanoutLoc, currentGate)\n self._grid.fill(currentGate, loc[0], loc[1])\n self._gatesPlace[currentGate] = loc", "title": "" }, { "docid": "ceb388468973b41cc832a653b2d23a35", "score": "0.47967815", "text": "def output_shape_to_off_file(self, off_filename):\n geometry_groups = self.find_geometry_groups()\n # Build up vertices, faces and winding order\n vertices = None\n faces = None\n winding_order = None\n for group in geometry_groups:\n new_vertices, new_faces, new_winding_order = self.get_geometry_from_group(group)\n vertices, faces, winding_order = self.accumulate_geometry(vertices, faces, winding_order, new_vertices,\n new_faces, new_winding_order)\n self.write_off_file(off_filename, vertices, faces, winding_order)", "title": "" }, { "docid": "68ee3878c6c1f7ffc8a41651eb6bdea2", "score": "0.47877416", "text": "def saveArrays(self):\n if (self.olrByYear is not None) and \\\n (self.olrYearAvgsByLatitude is not None):\n np.savez \\\n ( \\\n self.processedDataFname, \\\n olrByYear=self.olrByYear, \\\n olrYearAvgsByLatitude=self.olrYearAvgsByLatitude \\\n )", "title": "" }, { "docid": "3c174b88ccc1bc2d39450039176121bc", "score": "0.4777684", "text": "def save_graph(\n G,\n name,\n path = \"instances/erdos\"\n):\n degree_normalized = calc_degree( G, normalized = True )\n degree_not_normalized = calc_degree( G, normalized = False )\n betweenness_normalized = calc_betweenness( G, normalized = True )\n betweenness_not_normalized = calc_betweenness( G, normalized = False )\n closeness_normalized = calc_closeness( G, normalized = True )\n closeness_not_normalized = calc_closeness( G, normalized = False )\n eigenvector = calc_eigenvector( G ) # May raise nx.exception.PowerIterationFailedConvergence\n \n for node in G:\n G.nodes[node][\"d_n\"] = degree_normalized[ node ]\n G.nodes[node][\"d_p\"] = degree_not_normalized[ node ]\n G.nodes[node][\"b_n\"] = betweenness_normalized[ node ]\n G.nodes[node][\"b_p\"] = betweenness_not_normalized[ node ]\n G.nodes[node][\"c_n\"] = closeness_normalized[ node ]\n G.nodes[node][\"c_p\"] = closeness_not_normalized[ node ]\n G.nodes[node][\"e_n\"] = eigenvector[ node ]\n #end for\n \n data = nx.readwrite.json_graph.node_link_data( G )\n s = json.dumps( data )\n with open(\n \"{path}/{name}.g\".format(\n path = path,\n name = name\n ),\n mode = \"w\"\n \n ) as f:\n f.write(s)\n #end with open f", "title": "" }, { "docid": "f10ddc2b70344e1f3877165335554ed5", "score": "0.47731805", "text": "def export_poly(self, filename):\n mun = self.merge_adjacent_features([f 
for f in self.getFeatures()])\n mun = self.get_multipolygon(mun)\n with open(filename, 'w') as fo:\n fo.write('admin_boundary\\n')\n i = 0\n for part in mun:\n for j, ring in enumerate(part):\n i += 1\n prefix = '!' if j > 0 else ''\n fo.write(prefix + str(i) + '\\n')\n for p in ring:\n fo.write('%f %f\\n' % (p.x(), p.y()))\n fo.write('END\\n')\n fo.write('END\\n')\n return", "title": "" }, { "docid": "ff285f145a87ac88d4d727d00a7a5db8", "score": "0.4770954", "text": "def bgeo ():\n\n help(bgeo)\n\n import hou\n nodeSelect = hou.selectedNodes()\n black=hou.Color((0,0,0))\n pink=hou.Color((0.98,0.275,0.275))\n out= hou.node(\"/out\")\n\n for node in nodeSelect:\n parent = node.parent() #hou.node(\"..\")\n parentString =parent.name() \n getName = node.name()\n connectNode = node.outputs()\n outNull = node.createOutputNode(\"null\",getName.upper())\n outNull.setPosition(node.position())\n outNull.move([0, -.75])\n outNull.setColor(black)\n\n #set read node to read myWriteGeo\n myFile = outNull.createOutputNode(\"file\",getName.upper()+\"_CACHE\")\n myFile.setColor(pink)\n myFile.setParms({'file': '$HIP/cache/rop_sfx/bgeo.sc/$OS/v`padzero(3,chs(\"/out/$OS/version\"))`/$OS.$F5.bgeo.sc'})\n\n myWriteGeo= out.createNode(\"geometry\",getName.upper()+\"_CACHE\")\n myWriteGeo.setParms({\"soppath\":\"/obj/\"+parentString+\"/\"+getName.upper()})\n myWriteGeo.setParms({\"sopoutput\":\"$HIP/cache/rop_sfx/bgeo.sc/$OS/v`padzero(3, ch('version'))`/$OS.$F5.bgeo.sc\"})\n myWriteGeo.setParms({\"trange\":\"normal\"})\n\n #add create param for versionning and export format\n parm_group = myWriteGeo.parmTemplateGroup()\n parm_folder = hou.FolderParmTemplate(\"folder\",\"version\")\n parm_folder.addParmTemplate(hou.IntParmTemplate(\"version\",\"Version\",1))\n parm_group.append(parm_folder)\n myWriteGeo.setParmTemplateGroup(parm_group)", "title": "" }, { "docid": "7c1840e5f21799052a9a7cc3fca072ed", "score": "0.47690552", "text": "def snapout(cluster, filename, energies=False, radec=False):\n if radec:\n\n ra, dec, d0, pmra, pmdec, vr0 = sky_coords(cluster)\n\n if energies:\n np.savetxt(\n filename,\n np.column_stack(\n [\n cluster.m,\n cluster.x,\n cluster.y,\n cluster.z,\n cluster.vx,\n cluster.vy,\n cluster.vz,\n cluster.id,\n cluster.kw,\n cluster.kin,\n cluster.pot,\n cluster.etot,\n ra,\n dec,\n d0,\n pmra,\n pmdec,\n vr0,\n ]\n ),\n )\n else:\n np.savetxt(\n filename,\n np.column_stack(\n [\n cluster.m,\n cluster.x,\n cluster.y,\n cluster.z,\n cluster.vx,\n cluster.vy,\n cluster.vz,\n cluster.id,\n cluster.kw,\n ra,\n dec,\n d0,\n pmra,\n pmdec,\n vr0,\n ]\n ),\n )\n\n else:\n if energies:\n np.savetxt(\n filename,\n np.column_stack(\n [\n cluster.m,\n cluster.x,\n cluster.y,\n cluster.z,\n cluster.vx,\n cluster.vy,\n cluster.vz,\n cluster.id,\n cluster.kw,\n cluster.kin,\n cluster.pot,\n cluster.etot,\n ]\n ),\n )\n else:\n\n if len(cluster.kw)==cluster.ntot:\n np.savetxt(\n filename,\n np.column_stack(\n [\n cluster.m,\n cluster.x,\n cluster.y,\n cluster.z,\n cluster.vx,\n cluster.vy,\n cluster.vz,\n cluster.id,\n cluster.kw,\n ]\n ),\n )\n else:\n np.savetxt(\n filename,\n np.column_stack(\n [\n cluster.m,\n cluster.x,\n cluster.y,\n cluster.z,\n cluster.vx,\n cluster.vy,\n cluster.vz,\n cluster.id,\n ]\n ),\n )\n\n return 0", "title": "" }, { "docid": "f8d9889fc9b5f4aafa2e875a33e1f4a6", "score": "0.4760813", "text": "def save(self, location):\n\n np.save(location + \"pulls_a.npy\", self.pulls_a)\n np.save(location + \"clicks_a.npy\", self.clicks_a)", "title": "" }, { "docid": 
"01db561985bdb7510704fda364622a4b", "score": "0.4750638", "text": "def saveValues(self):\n \n self.log.debug(\"Entered saveValues()\")\n\n # Call save on the timestamp widgets.\n self.startPointDatetimeLocationWidget.saveTimestamp()\n self.endPointDatetimeLocationWidget.saveTimestamp()\n \n startPointPrice = self.startPointPriceLocationValueSpinBox.value()\n startPointY = self.convertObj.priceToSceneYPos(startPointPrice)\n\n endPointPrice = self.endPointPriceLocationValueSpinBox.value()\n endPointY = self.convertObj.priceToSceneYPos(endPointPrice)\n \n startPointDatetime = \\\n self.startPointDatetimeLocationWidget.getTimestamp()\n endPointDatetime = \\\n self.endPointDatetimeLocationWidget.getTimestamp()\n\n color = self.colorEditButton.getColor()\n\n startPointX = self.convertObj.datetimeToSceneXPos(startPointDatetime)\n endPointX = self.convertObj.datetimeToSceneXPos(endPointDatetime)\n\n # Position and start point should be the same values.\n\n posF = QPointF(startPointX, startPointY)\n startPointF = QPointF(startPointX, startPointY)\n endPointF = QPointF(endPointX, endPointY)\n\n # Set the values in the artifact.\n self.artifact.setPos(posF)\n self.artifact.setColor(color)\n self.artifact.setStartPointF(startPointF)\n self.artifact.setEndPointF(endPointF)\n\n self.log.debug(\"Exiting saveValues()\")", "title": "" }, { "docid": "01db561985bdb7510704fda364622a4b", "score": "0.4750638", "text": "def saveValues(self):\n \n self.log.debug(\"Entered saveValues()\")\n\n # Call save on the timestamp widgets.\n self.startPointDatetimeLocationWidget.saveTimestamp()\n self.endPointDatetimeLocationWidget.saveTimestamp()\n \n startPointPrice = self.startPointPriceLocationValueSpinBox.value()\n startPointY = self.convertObj.priceToSceneYPos(startPointPrice)\n\n endPointPrice = self.endPointPriceLocationValueSpinBox.value()\n endPointY = self.convertObj.priceToSceneYPos(endPointPrice)\n \n startPointDatetime = \\\n self.startPointDatetimeLocationWidget.getTimestamp()\n endPointDatetime = \\\n self.endPointDatetimeLocationWidget.getTimestamp()\n\n color = self.colorEditButton.getColor()\n\n startPointX = self.convertObj.datetimeToSceneXPos(startPointDatetime)\n endPointX = self.convertObj.datetimeToSceneXPos(endPointDatetime)\n\n # Position and start point should be the same values.\n\n posF = QPointF(startPointX, startPointY)\n startPointF = QPointF(startPointX, startPointY)\n endPointF = QPointF(endPointX, endPointY)\n\n # Set the values in the artifact.\n self.artifact.setPos(posF)\n self.artifact.setColor(color)\n self.artifact.setStartPointF(startPointF)\n self.artifact.setEndPointF(endPointF)\n\n self.log.debug(\"Exiting saveValues()\")", "title": "" }, { "docid": "da9fd2d96cc2ef6780021b14689a64c3", "score": "0.47392854", "text": "def saveValues(self):\n\n self.log.debug(\"Entered saveValues()\")\n\n # Get the colors.\n color = self.colorEditButton.getColor()\n textColor = self.textColorEditButton.getColor()\n \n # Call save on the timestamp widgets.\n self.originPointDatetimeLocationWidget.saveTimestamp()\n self.leg1PointDatetimeLocationWidget.saveTimestamp()\n self.leg2PointDatetimeLocationWidget.saveTimestamp()\n \n # Position and origin point should be the same values.\n originPointPrice = \\\n self.originPointPriceValueSpinBox.value()\n originPointY = self.convertObj.priceToSceneYPos(originPointPrice)\n leg1PointPrice = \\\n self.leg1PointPriceValueSpinBox.value()\n leg1PointY = self.convertObj.priceToSceneYPos(leg1PointPrice)\n leg1PointY = originPointY\n leg2PointPrice = \\\n 
self.leg2PointPriceValueSpinBox.value()\n leg2PointY = self.convertObj.priceToSceneYPos(leg2PointPrice)\n leg2PointY = originPointY\n\n originPointDatetime = \\\n self.originPointDatetimeLocationWidget.getTimestamp()\n leg1PointDatetime = \\\n self.leg1PointDatetimeLocationWidget.getTimestamp()\n leg2PointDatetime = \\\n self.leg2PointDatetimeLocationWidget.getTimestamp()\n \n originPointX = self.convertObj.datetimeToSceneXPos(originPointDatetime)\n leg1PointX = self.convertObj.datetimeToSceneXPos(leg1PointDatetime)\n leg2PointX = self.convertObj.datetimeToSceneXPos(leg2PointDatetime)\n\n posF = QPointF(originPointX, originPointY)\n originPointF = QPointF(originPointX, originPointY)\n leg1PointF = QPointF(leg1PointX, leg1PointY)\n leg2PointF = QPointF(leg2PointX, leg2PointY)\n\n # Set the values in the artifact.\n self.artifact.setPos(posF)\n self.artifact.setColor(color)\n self.artifact.setTextColor(textColor)\n self.artifact.setOriginPointF(originPointF)\n self.artifact.setLeg1PointF(leg1PointF)\n self.artifact.setLeg2PointF(leg2PointF)\n\n # No need to save the musicalRatios inside self.artifact,\n # because each time there is a rotation or a check-marking\n # action, the internal artifact was updated.\n # The same is the case for the self.artifact.setReversed().\n\n self.log.debug(\"Exiting saveValues()\")", "title": "" }, { "docid": "c3b8028101c7b74b9fc63285084ad46f", "score": "0.47388896", "text": "def create_map(self, file_name):\r\n\r\n # Combine substation and power station records\r\n gdf_e = self.gdf_s.append(self.gdf_p)\r\n \r\n # Plot connected transmission lines\r\n mask = (self.gdf_l['CONNECTED'] == True) | (self.gdf_l['IS_HVDC'] == True)\r\n self.add_lines(self.gdf_l[mask], colour='blue')\r\n print('Lines plotted')\r\n \r\n # Plot connected nodes\r\n mask = self.gdf_n['CONNECTED'] == True\r\n self.add_nodes(self.gdf_n[mask], 'blue')\r\n print('Nodes plotted')\r\n\r\n # Plot connected network elements\r\n mask = (gdf_e['NEAREST_NODE_DISTANCE_KM'] > 0) & (gdf_e['CONNECTED'] == True)\r\n self.add_elements(gdf_e.loc[mask], 'orange')\r\n print('Connected network elements plotted')\r\n \r\n # Plot disconnected network elements\r\n mask = (gdf_e['NEAREST_NODE_DISTANCE_KM'] > 0) & (gdf_e['CONNECTED'] == False)\r\n self.add_elements(gdf_e.loc[mask], 'red')\r\n print('Disconnected network elements plotted')\r\n\r\n self.m.save(os.path.join(self.output_dir, 'maps', file_name))", "title": "" }, { "docid": "2a31e96d497f8f8ddd2871a3f4947043", "score": "0.47381303", "text": "def _save_route(self):\n\t\tnew_route = []\n\t\ttmp = self.end\n\t\twhile tmp.added_by and tmp != self.start:\n\t\t\tnew_route.append(tmp)\n\t\t\twith ignored(ValueError):\n\t\t\t\ttmp.added_by.halls.remove(tmp)\n\t\t\ttmp = tmp.added_by\n\t\tnew_route.append(self.start)\n\t\tnew_route.reverse()\n\t\tself._routes.append(new_route)\n\t\tself._duplicate_nodes(new_route)", "title": "" }, { "docid": "16cc0653a9ebcd786edbbdc39e37e4c8", "score": "0.47371903", "text": "def export(self):\n if self._controller_drawing_area != None:\n self._controller_drawing_area.export()", "title": "" }, { "docid": "b307649a5c2cebec97245405738c6514", "score": "0.4720388", "text": "def save_structure(fileobj, landmarks, *, group_name=\"structure\", landmark_colors=None):\n with __create_h5_group(fileobj, group_name) as g:\n __save_structure_impl(g, landmarks, landmark_colors=landmark_colors)", "title": "" }, { "docid": "dce592e907aedf07613bdadf14cf2449", "score": "0.47189108", "text": "def positions():\r\n def array(x):\r\n return 
np.array(x)\r\n\r\n #dictionary giving ave lat and longtiude of a given country, was very annoying to make\r\n pos2={'AFG': array([65., 33.]),\r\n 'AGO': array([ 18.5, -12.5]),\r\n 'ALB': array([20., 41.]),\r\n 'AND': array([ 1.6, 42.5]),\r\n 'ARE': array([54., 24.]),\r\n 'ARG': array([-64., -34.]),\r\n 'ARM': array([45., 40.]),\r\n 'ATG': array([-61.8 , 17.05]),\r\n 'AUS': array([133., -27.]),\r\n 'AUT': array([13.3333, 47.3333]),\r\n 'AZE': array([47.5, 40.5]),\r\n 'BDI': array([30. , -3.5]),\r\n 'BEL': array([ 4. , 50.8333]),\r\n 'BEN': array([2.25, 9.5 ]),\r\n 'BFA': array([-2., 13.]),\r\n 'BGD': array([90., 24.]),\r\n 'BGR': array([25., 43.]),\r\n 'BHR': array([50.55, 26. ]),\r\n 'BHS': array([-76. , 24.25]),\r\n 'BIH': array([18., 44.]),\r\n 'BLR': array([28., 53.]),\r\n 'BLZ': array([-88.75, 17.25]),\r\n 'BMU': array([-64.75 , 32.3333]),\r\n 'BOL': array([-65., -17.]),\r\n 'BRA': array([-55., -10.]),\r\n 'BRB': array([-59.5333, 13.1667]),\r\n 'BRN': array([114.6667, 4.5 ]),\r\n 'BTN': array([90.5, 27.5]),\r\n 'BWA': array([ 24., -22.]),\r\n 'CAF': array([21., 7.]),\r\n 'CAN': array([-95., 60.]),\r\n 'CHE': array([ 8., 47.]),\r\n 'CHL': array([-71., -30.]),\r\n 'CHN': array([105., 35.]),\r\n 'CIV': array([-5., 8.]),\r\n 'CMR': array([12., 6.]),\r\n 'COD': array([25., 0.]),\r\n 'COG': array([15., -1.]),\r\n 'COL': array([-72., 4.]),\r\n 'COM': array([ 44.25 , -12.1667]),\r\n 'CPV': array([-24., 16.]),\r\n 'CRI': array([-84., 10.]),\r\n 'CUB': array([-80. , 21.5]),\r\n 'CYM': array([-80.5, 19.5]),\r\n 'CYP': array([33., 35.]),\r\n 'CZE': array([15.5 , 49.75]),\r\n 'DEU': array([ 9., 51.]),\r\n 'DJI': array([43. , 11.5]),\r\n 'DMA': array([-61.3333, 15.4167]),\r\n 'DNK': array([10., 56.]),\r\n 'DOM': array([-70.6667, 19. ]),\r\n 'DZA': array([ 3., 28.]),\r\n 'ECU': array([-77.5, -2. ]),\r\n 'EGY': array([30., 27.]),\r\n 'ERI': array([39., 15.]),\r\n 'ESP': array([-4., 40.]),\r\n 'EST': array([26., 59.]),\r\n 'ETH': array([38., 8.]),\r\n 'FIN': array([26., 64.]),\r\n 'FJI': array([175., -18.]),\r\n 'FRA': array([ 2., 46.]),\r\n 'FSM': array([158.25 , 6.9167]),\r\n 'GAB': array([11.75, -1. ]),\r\n 'GBR': array([-2., 54.]),\r\n 'GEO': array([43.5, 42. ]),\r\n 'GHA': array([-2., 8.]),\r\n 'GIB': array([-5.3667, 36.1833]),\r\n 'GIN': array([-10., 11.]),\r\n 'GMB': array([-16.5667, 13.4667]),\r\n 'GNB': array([-15., 12.]),\r\n 'GNQ': array([10., 2.]),\r\n 'GRC': array([22., 39.]),\r\n 'GTM': array([-90.25, 15.5 ]),\r\n 'GUY': array([-59., 5.]),\r\n 'HKG': array([114.1667, 22.25 ]),\r\n 'HND': array([-86.5, 15. ]),\r\n 'HRV': array([15.5 , 45.1667]),\r\n 'HTI': array([-72.4167, 19. ]),\r\n 'HUN': array([20., 47.]),\r\n 'IDN': array([120., -5.]),\r\n 'IND': array([77., 20.]),\r\n 'IRL': array([-8., 53.]),\r\n 'IRN': array([53., 32.]),\r\n 'IRQ': array([44., 33.]),\r\n 'ISL': array([-18., 65.]),\r\n 'ISR': array([34.75, 31.5 ]),\r\n 'ITA': array([12.8333, 42.8333]),\r\n 'JAM': array([-77.5 , 18.25]),\r\n 'JOR': array([36., 31.]),\r\n 'JPN': array([138., 36.]),\r\n 'KAZ': array([68., 48.]),\r\n 'KEN': array([38., 1.]),\r\n 'KGZ': array([75., 41.]),\r\n 'KHM': array([105., 13.]),\r\n 'KIR': array([173. , 1.4167]),\r\n 'KNA': array([-62.75 , 17.3333]),\r\n 'KOR': array([127.5, 37. 
]),\r\n 'KWT': array([47.6581, 29.3375]),\r\n 'LAO': array([105., 18.]),\r\n 'LBN': array([35.8333, 33.8333]),\r\n 'LBR': array([-9.5, 6.5]),\r\n 'LBY': array([17., 25.]),\r\n 'LCA': array([-61.1333, 13.8833]),\r\n 'LKA': array([81., 7.]),\r\n 'LSO': array([ 28.5, -29.5]),\r\n 'LTU': array([24., 56.]),\r\n 'LUX': array([ 6.1667, 49.75 ]),\r\n 'LVA': array([25., 57.]),\r\n 'MAC': array([113.55 , 22.1667]),\r\n 'MAR': array([-5., 32.]),\r\n 'MDA': array([29., 47.]),\r\n 'MDG': array([ 47., -20.]),\r\n 'MDV': array([73. , 3.25]),\r\n 'MEX': array([-102., 23.]),\r\n 'MHL': array([168., 9.]),\r\n 'MKD': array([22. , 41.8333]),\r\n 'MLI': array([-4., 17.]),\r\n 'MLT': array([14.5833, 35.8333]),\r\n 'MMR': array([98., 22.]),\r\n 'MNG': array([105., 46.]),\r\n 'MOZ': array([ 35. , -18.25]),\r\n 'MRT': array([-12., 20.]),\r\n 'MUS': array([ 57.55 , -20.2833]),\r\n 'MWI': array([ 34. , -13.5]),\r\n 'MYS': array([112.5, 2.5]),\r\n 'NAM': array([ 17., -22.]),\r\n 'NER': array([ 8., 16.]),\r\n 'NGA': array([ 8., 10.]),\r\n 'NIC': array([-85., 13.]),\r\n 'NIU': array([-129, -19.0333]),\r\n 'NLD': array([ 5.75, 52.5 ]),\r\n 'NOR': array([10., 62.]),\r\n 'NPL': array([84., 28.]),\r\n 'NRU': array([166.9167, -0.5333]),\r\n 'NZL': array([174., -41.]),\r\n 'OMN': array([57., 21.]),\r\n 'PAK': array([70., 30.]),\r\n 'PAN': array([-80., 9.]),\r\n 'PER': array([-76., -10.]),\r\n 'PHL': array([122., 13.]),\r\n 'PLW': array([134.5, 7.5]),\r\n 'PNG': array([147., -6.]),\r\n 'POL': array([20., 52.]),\r\n 'PRK': array([127., 40.]),\r\n 'PRT': array([-8. , 39.5]),\r\n 'PRY': array([-58., -23.]),\r\n 'PSE': array([35.25, 32. ]),\r\n 'QAT': array([51.25, 25.5 ]),\r\n 'ROU': array([25., 46.]),\r\n 'RUS': array([100., 60.]),\r\n 'RWA': array([30., -2.]),\r\n 'SAU': array([45., 25.]),\r\n 'SDN': array([30., 15.]),\r\n 'SEN': array([-14., 14.]),\r\n 'SGP': array([103.8 , 1.3667]),\r\n 'SLB': array([159., -8.]),\r\n 'SLE': array([-11.5, 8.5]),\r\n 'SLV': array([-88.9167, 13.8333]),\r\n 'SMR': array([12.4167, 43.7667]),\r\n 'SOM': array([49., 10.]),\r\n 'STP': array([7., 1.]),\r\n 'SUR': array([-56., 4.]),\r\n 'SVK': array([19.5 , 48.6667]),\r\n 'SVN': array([15., 46.]),\r\n 'SWE': array([15., 62.]),\r\n 'SWZ': array([ 31.5, -26.5]),\r\n 'SYC': array([55.6667, -4.5833]),\r\n 'SYR': array([38., 35.]),\r\n 'TCA': array([-71.5833, 21.75 ]),\r\n 'TCD': array([19., 15.]),\r\n 'TGO': array([1.1667, 8. 
]),\r\n 'THA': array([100., 15.]),\r\n 'TJK': array([71., 39.]),\r\n 'TKM': array([60., 40.]),\r\n 'TLS': array([125.5167, -8.55 ]),\r\n 'TON': array([-130., -20.]),\r\n 'TTO': array([-61., 11.]),\r\n 'TUN': array([ 9., 34.]),\r\n 'TUR': array([35., 39.]),\r\n 'TUV': array([178., -8.]),\r\n 'TZA': array([35., -6.]),\r\n 'UGA': array([32., 1.]),\r\n 'UKR': array([32., 49.]),\r\n 'URY': array([-56., -33.]),\r\n 'USA': array([-97., 38.]),\r\n 'UZB': array([64., 41.]),\r\n 'VCT': array([-61.2 , 13.25]),\r\n 'VEN': array([-66., 8.]),\r\n 'VGB': array([-64.5, 18.5]),\r\n 'VNM': array([106., 16.]),\r\n 'VUT': array([167., -16.]),\r\n 'WSM': array([-128.3333, -13.5833]),\r\n 'YEM': array([48., 15.]),\r\n 'ZAF': array([ 24., -29.]),\r\n 'ZMB': array([ 30., -15.]),\r\n 'ZWE': array([ 30., -20.])}\r\n return pos2", "title": "" }, { "docid": "df19e950f2ad469c6adc640feedeb314", "score": "0.47153595", "text": "def func01(dir_out: str, xyg: bool=1):\r\n from os.path import join\r\n import sqlite3\r\n\r\n DBNAME = 'r0.db'\r\n DROP_TABLE = \"drop table if exists r0\"\r\n CREATE_TABLE = \"\"\"\r\n create table r0 (\r\n \tlat integer,\r\n month integer,\r\n \tr0 real,\r\n \tprimary key (lat, month)\r\n \t)\r\n \"\"\"\r\n DELETE = \"delete from r0\"\r\n INSERT = \"\"\"\r\n insert into r0 (lat, month, r0)\r\n values (?, ?, ?)\r\n \"\"\"\r\n\r\n con = sqlite3.connect(join(dir_out, DBNAME))\r\n cur = con.cursor()\r\n cur.execute(DROP_TABLE)\r\n cur.execute(CREATE_TABLE)\r\n cur.execute(DELETE)\r\n con.commit()\r\n\r\n for m, x, y in _splines1():\r\n print(m)\r\n for x1, y1 in zip(x, y):\r\n cur.execute(INSERT, (int(x1), m, float(y1)))\r\n if xyg:\r\n _xygraph(dir_out, cur, m)\r\n con.commit()\r\n con.close()", "title": "" }, { "docid": "0cb33c343b08ba65d69fa37a26335ac9", "score": "0.47149047", "text": "def make_grid(obj):\n\n # Find the first quadrant and first vertex that lays in this first quadrant\n bm = bmesh.from_edit_mesh(obj.data)\n bm.verts.ensure_lookup_table()\n verts = [v for v in bm.verts if v.select]\n\n for v in verts:\n v.tag = True\n bmesh.update_edit_mesh(bpy.context.object.data)\n\n obj.vertex_groups.active_index = obj.vertex_groups['modifier_group'].index\n bpy.ops.object.vertex_group_select()\n\n bm = bmesh.from_edit_mesh(obj.data)\n bm.verts.index_update()\n bm.verts.ensure_lookup_table()\n verts = [v for v in bm.verts if v.select]\n\n # Create an initial map of all vertices based on the shape limits\n sorted_initial_vert_map = get_shape_limits(verts)\n\n # Create the shape grid and sort by x-coordinate first\n shape_grid = {}\n inner_columns = {}\n outer_columns = {}\n\n for indx, v in enumerate(sorted_initial_vert_map):\n if v.tag:\n shape_grid[v.index] = {\"vertex\": v,\n \"column\": indx,\n \"border_vertex\": True}\n\n outer_columns[indx] = {\"vertex\": v.index}\n else:\n shape_grid[v.index] = {\"vertex\": v,\n \"column\": indx}\n\n inner_columns[indx] = {\"vertex\": v.index}\n\n # Sort by z-coordinate\n verts_z = {v.index: v.co.z for v in verts}\n sorted_verts_z = sorted(verts_z, key=(lambda k: verts_z[k]), reverse=True)\n\n no_key_err = ShapeToolAsserts.ERR_CODES.NO_KEY_ERROR\n if (ShapeToolAsserts.check_dict_entries(sorted_verts_z, shape_grid) == no_key_err):\n Logger.log(\"Error occured: {}, falling back to default extrusion\".format(ShapeToolAsserts.errno()))\n x_displacement = str(0)\n y_displacement = str(0)\n smooth_amount = 5\n return shape_grid, None, None\n\n inner_rows = {}\n outer_rows = {}\n\n bm.verts.index_update()\n\n for indx, v in enumerate(sorted_verts_z):\n if 
bm.verts[v].tag:\n shape_grid[v].update(row=indx)\n outer_rows[indx] = {\"vertex\": bm.verts[v].index}\n else:\n shape_grid[v].update(row=indx)\n inner_rows[indx] = {\"vertex\": bm.verts[v].index}\n\n bmesh.update_edit_mesh(bpy.context.object.data)\n\n # Add boundaries to the new 2D grid by looping through the shape loop vertices.\n # First sort which of the edge vertices is larger (row/column wise) and find the\n # vertices that have row/column between these two edge vertices. Then take either\n # column/row value as the boundary for the vertex's row/column.\n bpy.ops.mesh.select_all(action='DESELECT')\n obj.vertex_groups.active_index = obj.vertex_groups['shape_intersection_group'].index\n bpy.ops.object.vertex_group_select()\n\n bm = bmesh.from_edit_mesh(obj.data)\n edges = [e for e in bm.edges if e.select]\n for v in bm.verts:\n v.select = False\n for e in edges:\n column_A = shape_grid[e.verts[0].index]['column']\n column_B = shape_grid[e.verts[1].index]['column']\n if column_A > column_B:\n column_B, column_A = column_A, column_B\n row_A = shape_grid[e.verts[0].index]['row']\n row_B = shape_grid[e.verts[1].index]['row']\n if row_A > row_B:\n row_B, row_A = row_A, row_B\n middle_columns = [column for column in range(column_A + 1, column_B)]\n middle_rows = [row for row in range(row_A + 1, row_B)]\n\n for column in middle_columns:\n try:\n vertex = shape_grid[inner_columns[column]['vertex']]\n except KeyError:\n pass\n else:\n row_1 = shape_grid[outer_columns[column_A]['vertex']]['row']\n row_2 = shape_grid[outer_columns[column_B]['vertex']]['row']\n if 'column_rows' in vertex.keys():\n vertex['column_rows'].append(row_1)\n else:\n vertex.update(column_rows=[row_1])\n for row in middle_rows:\n try:\n vertex = shape_grid[inner_rows[row]['vertex']]\n except KeyError:\n pass\n else:\n column_1 = shape_grid[outer_rows[row_A]['vertex']]['column']\n column_2 = shape_grid[outer_rows[row_B]['vertex']]['column']\n if 'row_columns' in vertex.keys():\n vertex['row_columns'].append(column_1)\n else:\n vertex.update(row_columns=[column_1])\n\n # Leave only the closest boundary rows/columns to the current vertex\n for v in shape_grid.keys():\n if 'border_vertex' not in shape_grid[v].keys():\n vertex = shape_grid[v]\n row_A = min(vertex['row_columns'])\n row_B = max(vertex['row_columns'])\n column_A = min(vertex['column_rows'])\n column_B = max(vertex['column_rows'])\n for column in vertex['row_columns']:\n if column > vertex['column'] and column <= row_B:\n row_B = column\n elif column < vertex['column'] and column >= row_A:\n row_A = column\n for row in vertex['column_rows']:\n if row > vertex['row'] and row <= column_B:\n column_B = row\n elif row < vertex['row'] and row >= column_A:\n column_A = row\n vertex['row_columns'] = (row_A, row_B)\n vertex['column_rows'] = (column_A, column_B)\n\n grid_mid = round(len(shape_grid)/2)\n Logger.log(\"Grid mid: {}\".format(grid_mid))\n\n try:\n middle_vertex_Y = shape_grid[inner_columns[grid_mid]['vertex']]\n except KeyError:\n middle_vertex_Y = shape_grid[outer_columns[grid_mid]['vertex']]\n try:\n middle_vertex_X = shape_grid[inner_rows[grid_mid]['vertex']]\n except KeyError:\n middle_vertex_X = shape_grid[outer_rows[grid_mid]['vertex']]\n\n bmesh.update_edit_mesh(bpy.context.object.data)\n\n return shape_grid, middle_vertex_X, middle_vertex_Y", "title": "" }, { "docid": "cf887240cf2c941f658473b743618778", "score": "0.47138003", "text": "def save_data(self, path):\n # checking the existence of the folder\n try:\n os.stat(path=path)\n except 
FileNotFoundError:\n print('creating folder {}'.format(path))\n os.mkdir(path=path)\n\n # saving indexes\n print('saving indexes ...')\n with open(os.path.join(path, 'index2node.json'), 'w', encoding='utf-8') as index2node_file:\n json.dump(obj=self.index2node, fp=index2node_file)\n with open(os.path.join(path, 'node2index.json'), 'w', encoding='utf-8') as node2index_file:\n json.dump(obj=self.node2index, fp=node2index_file)\n\n # saving node list\n print('saving nodes list ...')\n with open(os.path.join(path, 'nodes.txt'), 'w', encoding='utf-8') as nodes_file:\n nodes_file.write('\\t'.join([str(node) for node in self.nodes]))\n\n # saving edges list\n print('saving edges list ...')\n with open(os.path.join(path, 'edges.txt'), 'w', encoding='utf-8') as edges_file:\n edges_file.write('\\n'.join([str(node1)+'\\t'+str(node2) for node1, node2 in self.edges]))\n\n # saving neighbours\n print('saving neighbours ...')\n neighbours = self.neighbours.copy()\n for n in neighbours.keys():\n neighbours[n] = list(neighbours[n])\n with open(os.path.join(path, 'neighbours.json'), 'w', encoding='utf-8') as neighbours_file:\n json.dump(obj=neighbours, fp=neighbours_file)\n\n # saving negative nodes\n print('saving negative nodes ...')\n negative = self.negative.copy()\n for n in negative.keys():\n negative[n] = list(negative[n])\n with open(os.path.join(path, 'negative_nodes.json'), 'w', encoding='utf-8') as negative_file:\n json.dump(obj=negative, fp=negative_file)\n\n print('graph saved.')\n return", "title": "" }, { "docid": "a9779e23f837fbb731dd3af974af0433", "score": "0.47055027", "text": "def write_grads(dA, dW, db, layer):\n # grads['dA' + str(layer)] = dA\n grads['dW' + str(layer)] = dW\n grads['db' + str(layer)] = db", "title": "" }, { "docid": "a1ba2e6fd046dfaced8f4c4154541b5b", "score": "0.470401", "text": "def writePosFile(\n self,\n outfilename,\n nodalValues,\n fieldName='field',\n append=False):\n if append:\n outfile = open(outfilename, 'a')\n print 'Appending data to', outfilename, '...'\n else:\n outfile = open(outfilename, 'w')\n print 'Writing data to', outfilename, '...'\n\n elemVals = nodalValues[self.connectivity]\n X = self.x[self.connectivity]\n Y = self.y[self.connectivity]\n Z = self.z[self.connectivity]\n coords = np.vstack((X[:, 0], Y[:, 0], Z[:, 0],\n X[:, 1], Y[:, 1], Z[:, 1],\n X[:, 2], Y[:, 2], Z[:, 2])).T\n allData = np.hstack((coords, elemVals))\n # write header\n outfile.write('View \\\"%s\\\" {\\n' % fieldName) # field block\n nElems = self.connectivity.shape[0]\n # write triangles ST(x1,y1,z1,x2,y2,z2,x3,y3,z3){val1,val2,val3}\n fmt = 'ST(%.16g,%.16g,%.16g,%.16g,%.16g,%.16g,%.16g,%.16g,%.16g){%.16g,%.16g,%.16g};\\n'\n for iElem in range(nElems):\n outfile.write(fmt % tuple(allData[iElem, :]))\n\n # write footer\n outfile.write('};\\n') # closes field block", "title": "" }, { "docid": "81585699172bee01cdc9d910118c2de8", "score": "0.47017902", "text": "def write_to_cf(self,filename,spacing):\n \n pos_dict = nx.drawing.layout.planar_layout(self.graph)\n \n write_dict = {}\n write_dict['name'] = 'causal_graph'\n \n # write nodes\n write_dict['nodes'] = []\n for i in range(0,len(self.entity_list)):\n name = self.entity_list[i]\n \n write_dict['nodes'].append({})\n \n write_dict['nodes'][-1]['id'] = 'node' + str(i)\n write_dict['nodes'][-1]['name'] = name\n write_dict['nodes'][-1]['label'] = name\n write_dict['nodes'][-1]['type'] = 'basic'\n write_dict['nodes'][-1]['metadata'] = {}\n write_dict['nodes'][-1]['metadata']['x'] = spacing*pos_dict[i][0]\n 
write_dict['nodes'][-1]['metadata']['y'] = spacing*pos_dict[i][1]\n write_dict['nodes'][-1]['metadata']['label'] = ''\n write_dict['nodes'][-1]['metadata']['shape'] = 'ellipse'\n write_dict['nodes'][-1]['metadata']['fontSize'] = 14\n write_dict['nodes'][-1]['metadata']['sizeLabelMode'] = 5\n write_dict['nodes'][-1]['metadata']['font'] = {}\n write_dict['nodes'][-1]['metadata']['font']['size'] = 14\n write_dict['nodes'][-1]['metadata']['size'] = 14\n write_dict['nodes'][-1]['metadata']['labelNodeId'] = 'node' + str(i) + 'ID'\n write_dict['nodes'][-1]['metadata']['labelNodeOffset'] = {}\n write_dict['nodes'][-1]['metadata']['labelNodeOffset']['x'] = 0\n write_dict['nodes'][-1]['metadata']['labelNodeOffset']['y'] = 0\n write_dict['nodes'][-1]['metadata']['labelOffset'] = {}\n write_dict['nodes'][-1]['metadata']['labelOffset']['x'] = 0\n write_dict['nodes'][-1]['metadata']['labelOffset']['y'] = 0\n write_dict['nodes'][-1]['metadata']['shadow'] = {}\n write_dict['nodes'][-1]['metadata']['shadow']['color'] = '#00000080'\n write_dict['nodes'][-1]['metadata']['shadow']['size'] = 0\n write_dict['nodes'][-1]['metadata']['shadow']['x'] = 0\n write_dict['nodes'][-1]['metadata']['shadow']['y'] = 0\n \n # write edges\n write_dict['edges'] = []\n \n for i in range(0,len(self.edge_list)):\n \n item = self.edge_list[i]\n from_node = self.entity_list.index(item[0])\n to_node = self.entity_list.index(item[1])\n \n write_dict['edges'].append({})\n \n write_dict['edges'][-1]['id'] = 'node' + str(from_node) + '->node' + str(to_node)\n write_dict['edges'][-1]['from'] = item[0]\n write_dict['edges'][-1]['to'] = item[1]\n write_dict['edges'][-1]['type'] = 'directed'\n write_dict['edges'][-1]['metadata'] = {}\n write_dict['edges'][-1]['metadata']['isLabelDraggable'] = True\n write_dict['edges'][-1]['metadata']['label'] = ''\n \n \n write_dict['task'] = {}\n \n write_dict['metadata'] = {}\n \n write_dict['project_id'] = '123456789'\n write_dict['_fileType'] = 'graph'\n \n with open(filename + '.json', 'w') as json_file:\n json.dump(write_dict, json_file)", "title": "" }, { "docid": "754aa95287fc203657358d2c6b09d330", "score": "0.46976715", "text": "def writeSpawningStructure(self, path):\n if not self.altSelection or self.altStructure.sizePQ() == 0:\n print(\"cluster center\")\n self.pdb.writePDB(path)\n return self.trajPosition\n else:\n spawnStruct, trajPosition = self.altStructure.altSpawnSelection((self.elements, self.pdb))\n spawnStruct.writePDB(path)\n if trajPosition is None:\n trajPosition = self.trajPosition\n return trajPosition", "title": "" }, { "docid": "1a759b9be324b4500d85d3aa048723b9", "score": "0.4691824", "text": "def _update(self):\n \n positions = Die.POSITIONS[self._value]\n cx, cy = self._center\n for i in range(len(positions)):\n if positions[i] is None:\n self._pips[i].setDepth(25)\n else:\n self._pips[i].setDepth(15)\n dx, dy = positions[i]\n self._pips[i].moveTo((cx + dx * self._width,\n cy + dy * self._width))", "title": "" }, { "docid": "3c58a22c2f8a70b81e14bd52c1838569", "score": "0.46851102", "text": "def createGrids(workspace, zone):\r\n print \"Creating Predictor Grids:\"\r\n logger.info(\"Creating Predictor Grids:\")\r\n for zone in zones:\r\n\r\n # Grids to copy\r\n FBFM40_r = workspace +'/%s/fbfm40r' %(zone)\r\n ch_r = workspace +'/%s/chr' %(zone)\r\n evt_r = workspace +'/%s/evtr' %(zone)\r\n tlg_r = workspace +'/%s/trlstr' %(zone) #change back to trlstr\r\n kgd_r = workspace +'/%s/0kgdr' %(zone) #change back to 0kgdr for daymet mode\r\n bps_r = workspace +'/%s/bpsr' %(zone)\r\n # 
Grids to create p for prep\r\n nfdr_p = workspace +'/%s/%s_nfdr_p' %(zone, zone)\r\n dbh_p = workspace +'/%s/%s_dbh_p' %(zone, zone)\r\n bcf_p = workspace +'/%s/%s_bcf_p' %(zone, zone)\r\n lcr_p = workspace +'/%s/%s_lcr_p' %(zone, zone)\r\n rshd_p = workspace +'/%s/%s_rshd_p' %(zone, zone)\r\n site_p = workspace +'/%s/%s_site_p' %(zone, zone)\r\n # Grids to create p2 for prep\r\n site_p2 = workspace +'/%s/%s_site_p2' %(zone, zone)\r\n nfdr_p2 = workspace +'/%s/%s_nfdr_p2' %(zone, zone)\r\n dbh_p2 = workspace +'/%s/%s_dbh_p2' %(zone, zone)\r\n bcf_p2 = workspace +'/%s/%s_bcf_p2' %(zone, zone)\r\n lcr_p2 = workspace +'/%s/%s_lcr_p2' %(zone, zone)\r\n bcf_p3 = workspace +'/%s/%s_bcf_p3' %(zone, zone)\r\n lcr_p3 = workspace +'/%s/%s_lcr_p3' %(zone, zone)\r\n lcr_p4 = workspace +'/%s/%s_lcr_p4' %(zone, zone)\r\n #Grids to create r for resampled and reclassified\r\n nfdr_r = workspace +'/%s/nfdrr' %(zone)\r\n dbh_r = workspace +'/%s/dbhr' %(zone)\r\n bcf_r = workspace +'/%s/bcfr' %(zone)\r\n lcr_r = workspace +'/%s/lcrr' %(zone)\r\n rshd_r = workspace +'/%s/rshdr' %(zone)\r\n site_r = workspace +'/%s/siter' %(zone)\r\n # Location of DBF Lookup Tables\r\n nfdr_dbf = lukup + '/NFDR.dbf' #National Fire Danger Rating\r\n dbh_dbf = lukup + '/DBH.dbf' # Diameter at breast height\r\n bcf_dbf = lukup + '/BCF.dbf' # Bark Conversion Factor\r\n lcr_dbf = lukup + '/LCR.dbf' # Live Crown Ratio\r\n site_dbf = lukup + '/bps.dbf' # Site Map\r\n try:\r\n # Set the workspace\r\n gp.workspace = workspace +'/%s' %(zone)\r\n## gp.delete_management(nfdr_p)\r\n## gp.delete_management(dbh_p)\r\n## gp.delete_management(bcf_p)\r\n## ## gp.delete_management(lcr_p)\r\n## gp.delete_management(rshd_p)\r\n## gp.delete_management(site_p)\r\n##\r\n## gp.delete_management(site_p2)\r\n## gp.delete_management(nfdr_p2)\r\n## gp.delete_management(dbh_p2)\r\n## gp.delete_management(bcf_p2)\r\n## ## gp.delete_management(lcr_p2) \r\n## gp.delete_management(bcf_p3)\r\n## gp.delete_management(lcr_p3)\r\n## gp.delete_management(lcr_p4)\r\n##\r\n## gp.delete_management(nfdr_r)\r\n## gp.delete_management(dbh_r)\r\n## gp.delete_management(bcf_r)\r\n## ## gp.delete_management(lcr_r)\r\n## gp.delete_management(rshd_r)\r\n## gp.delete_management(site_r)\r\n # Make copies\r\n gp.CopyRaster_management(FBFM40_r, nfdr_p)\r\n gp.CopyRaster_management(ch_r, dbh_p)\r\n gp.CopyRaster_management(evt_r, bcf_p)\r\n gp.CopyRaster_management(tlg_r, lcr_p)\r\n gp.CopyRaster_management(kgd_r, rshd_r) # all rshd values set to 1.\r\n gp.CopyRaster_management(bps_r, site_p)\r\n\r\n # Carefully process the 32 bit and novalue treelist\r\n gp.CheckOutExtension('Spatial')\r\n gp.IsNull_sa(lcr_p, lcr_p2)\r\n gp.Con_sa(lcr_p2, -9999, lcr_p3, lcr_p, \"Value = 1\")\r\n gp.BuildRasterAttributeTable_management(lcr_p3, '#')\r\n # Join DBF files to the copied rasters\r\n gp.joinfield(lcr_p3, 'VALUE', lcr_dbf, 'VALUE')\r\n gp.joinfield(nfdr_p, 'VALUE', nfdr_dbf, 'VALUE')\r\n gp.joinfield(dbh_p, 'Value', dbh_dbf, 'VALUE')\r\n gp.joinfield(bcf_p, 'VALUE', bcf_dbf, 'VALUE')\r\n gp.joinfield(site_p, 'VALUE', site_dbf, 'VALUE')\r\n\r\n ## # reclass by the appropriate join field\r\n gp.CheckOutExtension(\"3d\")\r\n gp.Lookup_3d(site_p, 'Site', site_p2)\r\n gp.Lookup_3d(nfdr_p, 'NFDR', nfdr_p2)\r\n gp.Lookup_3d(dbh_p, 'DBH', dbh_p2)\r\n gp.Lookup_3d(bcf_p, 'BCF', bcf_p2)\r\n gp.Lookup_3d(lcr_p3, 'RATIO2', lcr_p4)\r\n \r\n # Integerize a few of these grids\r\n gp.Int_3d(site_p2, site_r)\r\n gp.Int_3d(nfdr_p2, nfdr_r)\r\n gp.Int_3d(dbh_p2, dbh_r)\r\n gp.Times_3d(bcf_p2, 1000, 
bcf_p3)\r\n gp.Int_3d(bcf_p3, bcf_r)\r\n gp.Int_3d(lcr_p4, lcr_r)\r\n except:\r\n print gp.GetMessages()", "title": "" }, { "docid": "ed1311a3564a7f303afc5e9dae9ce0b4", "score": "0.46832114", "text": "def mbesSaver(PtCloud):\n global N\n print(\"New data: \", N)\n N+=1\n\n# print(listener.lookupTransform('/mbes', '/map', rospy.Time()))\n\n# print(t.lookup_transform('mbes', 'map', rospy.Time()))\n# listener.waitForTransform('/mbes', '/map', rospy.Time(),rospy.Duration(0.1))\n PtCloud.header.stamp = listener.getLatestCommonTime('/mbes','/odom')\n PtCloud_map_frame = listener.transformPointCloud(\"/odom\", PtCloud)\n\n\n i=1\n for point in PtCloud_map_frame.points:\n line = \"%d %d %.10f %.10f %.10f \\n\"%(PtCloud.header.seq,i,point.x,point.y,point.z)\n i+=1\n f.write(line)", "title": "" }, { "docid": "42ce349a43993b18ce4e61111eb84f39", "score": "0.46829337", "text": "def update_internal_setpoints(self):\n self.__x = self._magnet_x.field()\n self.__y = self._magnet_y.field()\n self.__z = self._magnet_z.field()", "title": "" }, { "docid": "e81390c1b0a3b6f0ec92669b985cd89d", "score": "0.46823916", "text": "def record():\n map = {\n \"exit\": {},\n \"point\": {}\n }\n id = 1\n a = \"\" # user input\n i = 1 # index of the point of interest\n e = 1 # index of the exit point\n while a != \"q\":\n a = input(\"What do you want to add to the current map ? (i = point of interest, e = exit, q to quit and save)\\n\")\n if a == \"i\":\n wait_for_click()\n map[\"point\"][i] = get_pos()\n print(f\"Saved point of interest at pos {get_pos()}\\n\\n\")\n i += 1\n elif a == \"e\":\n wait_for_click()\n map[\"exit\"][e] = get_pos()\n print(f\"Saved exit point at pos {get_pos()}\\n\\n\")\n e += 1\n elif a == \"n\":\n print(f\"Map {id} saved. Now recording map {id+1}\\n\\n\")\n save_map(id, map)\n map = {\n \"exit\": {},\n \"point\": {}\n }\n i = 1\n e = 1\n id += 1\n elif a != \"q\":\n print(\"Wrong input\\n\")\n continue", "title": "" }, { "docid": "a48a5e5c33ee1a71f8ad7e69abada58f", "score": "0.4679628", "text": "def save_grid(filename, grid, subset_name='land_points',\n subset_meaning=\"water land\", global_attrs=None):\n\n try:\n arrcell = grid.arrcell\n except AttributeError:\n arrcell = None\n\n if grid.gpidirect is True:\n gpis = None\n else:\n gpis = grid.gpis\n\n if grid.shape is not None:\n if global_attrs is None:\n global_attrs = {}\n global_attrs['shape'] = grid.shape\n\n save_lonlat(filename, grid.arrlon, grid.arrlat, arrcell=arrcell,\n gpis=gpis, subset_points=grid.subset, subset_name=subset_name,\n subset_meaning=subset_meaning,\n global_attrs=global_attrs)", "title": "" }, { "docid": "7c9b391126ae262dae4385d53ca66175", "score": "0.4670126", "text": "def saveCameraPose(self):\n for idx in range(self.start_idx, self.end_idx + 1):\n self.ds.camera_pose[idx,:] = self.c_pose", "title": "" }, { "docid": "da16580394ed72fb125ede198526e3ee", "score": "0.46696466", "text": "def save_grid(self, path): \n self._grid.to_file(path, driver=\"GeoJSON\")", "title": "" }, { "docid": "2fbca20a318e723691d2006fa1402a41", "score": "0.46666437", "text": "def main():\n cur.execute(\"\"\"SELECT id,imagePath from marauder_floor\"\"\")\n floors = cur.fetchall()\n for floor in floors:\n floorId = floor[0]\n imagePath = floor[1] \n full_path = os.path.join(os.getcwd(),path_offset, imagePath)\n try:\n image = Image.open(full_path)\n except IOError,err:\n print \"Image {} not found. 
Skipping floor\".format(full_path)\n continue \n draw_image = ImageDraw.Draw(image)\n cur.execute(\"\"\"SELECT x,y from marauder_location where floor_id=%s\"\"\",[floorId])\n locations = cur.fetchall()\n for location in locations:\n x,y = location\n draw_image.ellipse((x-radius,y-radius,x+radius,y+radius), fill='blue')\n if SHOW:\n image.show()\n image_name = os.path.join(os.getcwd(),path_offset,\n \"floor_{}_points.png\".format(floorId))\n image.save(image_name,\"PNG\")\n image = None\n\n # overlay(2,6)", "title": "" }, { "docid": "bcfef149c076c38bfc09751c76b61a0e", "score": "0.4665938", "text": "def replaceAllElements(self):\n\n # replacing all detectors\n for detector in self.detectors:\n # little lazy fixing...\n# try:\n# self.session.add(detector.element)\n# except: qDebug('DB: trying add prevously added detector '+str(detector.element)+' ')\n\n detector.move(int(detector.element.pointX*self.lblGraphic.width()/1000),\n int(detector.element.pointY*self.lblGraphic.height()/1000))\n\n for out in self.outs:\n# try:\n# self.session.add(out.element)\n# except: pass\n out.move(int(out.element.pointX*self.lblGraphic.width()/1000),\n int(out.element.pointY*self.lblGraphic.height()/1000))\n\n for zone in self.zones:\n# try:\n# self.session.add(zone.element)\n# except: qDebug('DB: Trying adding prevously added zone '+str(zone))\n\n for zonePoint in zone.points:\n zone.points[zonePoint].move(zone.points[zonePoint].element.pointX*self.lblGraphic.width()/1000,\n zone.points[zonePoint].element.pointY*self.lblGraphic.height()/1000)", "title": "" }, { "docid": "b99dd2cf40c53c9a5e1c402c0bbba6da", "score": "0.46641013", "text": "def save_to_array(self,\n cellsize=None,\n NODATA_value=-9999.0,\n smooth=None,\n easting_min=None,\n easting_max=None,\n northing_min=None,\n northing_max=None,\n origin=None,\n verbose=False):\n\n\n verbose = False\n\n\n #Get extent and reference\n\n domain = self.domain\n\n volumes = domain.triangles\n\n #smooth = True\n\n x,y,a,v= self.get_vertex_values(xy=True, smooth=smooth)\n\n\n false_easting = 500000\n false_northing = 10000000\n\n\n\n geo_ref = self.domain.geo_reference\n\n xllcorner = geo_ref.get_xllcorner()\n yllcorner = geo_ref.get_yllcorner()\n\n if verbose:\n print()\n print(xllcorner)\n print(yllcorner)\n print(x)\n print(y)\n\n\n # Create grid and update xll/yll corner and x,y\n # Relative extent\n if easting_min is None:\n xmin = min(x)\n else:\n xmin = easting_min - xllcorner\n\n if easting_max is None:\n xmax = max(x)\n else:\n xmax = easting_max - xllcorner\n\n if northing_min is None:\n ymin = min(y)\n else:\n ymin = northing_min - yllcorner\n\n if northing_max is None:\n ymax = max(y)\n else:\n ymax = northing_max - yllcorner\n\n msg = 'Implementation of Quantity.save_to_array() is not completed'\n #raise Exception, msg\n\n\n msg = 'xmax must be greater than or equal to xmin.\\n'\n msg += 'I got xmin = %f, xmax = %f' %(xmin, xmax)\n assert xmax >= xmin, msg\n\n msg = 'ymax must be greater than or equal to xmin.\\n'\n msg += 'I got ymin = %f, ymax = %f' %(ymin, ymax)\n assert ymax >= ymin, msg\n\n\n if verbose: log.critical('Creating grid')\n\n xrange = xmax-xmin\n yrange = ymax-ymin\n\n if cellsize is None:\n cellsize = max(xrange,yrange)/10.0\n\n\n ncols = int(xrange / cellsize) + 1\n nrows = int(yrange / cellsize) + 1\n\n # New absolute reference and coordinates\n newxllcorner = xmin + xllcorner\n newyllcorner = ymin + yllcorner\n\n x = x + xllcorner - newxllcorner\n y = y + yllcorner - newyllcorner\n\n\n grid_values = num.zeros( (nrows*ncols, ), 
float)\n\n\n num_tri = len(v)\n norms = num.zeros(6*num_tri, float)\n\n\n #Use fast method to calc grid values\n from anuga.file_conversion.calc_grid_values_ext import calc_grid_values\n\n calc_grid_values(nrows, ncols, cellsize, NODATA_value,\n x,y, norms, v, a, grid_values)\n\n\n y_g = num.arange(nrows)*cellsize + yllcorner - newyllcorner\n x_g = num.arange(ncols)*cellsize + xllcorner - newxllcorner\n #print outside_indices\n\n if verbose:\n log.critical('Interpolated values are in [%f, %f]'\n % (num.min(grid_values), num.max(grid_values)))\n\n\n return x_g,y_g, grid_values.reshape(nrows,ncols)#[::-1,:]", "title": "" }, { "docid": "814156d624f01ceabc19ccc3e83f8f95", "score": "0.46631142", "text": "def save_points(fname, pts_lst, selected):\n\twith open(fname, 'w') as fp:\n\t\tfp.write('y;x;id\\n')\n\t\tfor i in selected:\n\t\t\tfp.write(';'.join([str(x) for x in pts_lst[i]]))\n\t\t\tfp.write('\\n')", "title": "" }, { "docid": "812f3dbaa64a6ad40bfbe5076113330d", "score": "0.4649053", "text": "def save_obj(self, name, vertices, faces, img_id=-1):\n obj_path = os.path.join(self.save_obj_dir, name)\n if img_id > -1:\n obj_path = os.path.join(self.save_obj_dir_list[img_id], name)\n\n with open(obj_path, 'w') as fp:\n for v in vertices:\n fp.write('v %f %f %f\\n' % (v[0], v[1], v[2]))\n\n for f in faces: # Faces are 1-based, not 0-based in obj files\n fp.write('f %d %d %d\\n' % (f[0] + 1, f[1] + 1, f[2] + 1))", "title": "" }, { "docid": "a3a20a2bc9987cd4bfddb6e234205822", "score": "0.4647506", "text": "def _updateinputindices(self):\r\n pl = self.createarea.children\r\n for x in pl :\r\n if isinstance(x, PlanetInput):\r\n x.index = len(pl) - pl.index(x) - 2", "title": "" }, { "docid": "e0e85d74d40c7a315fb61685dd94fd81", "score": "0.46471635", "text": "def _write_oriented_bbox(scene_bbox, out_filename):\n\n def heading2rotmat(heading_angle):\n rotmat = np.zeros((3, 3))\n rotmat[2, 2] = 1\n cosval = np.cos(heading_angle)\n sinval = np.sin(heading_angle)\n rotmat[0:2, 0:2] = np.array([[cosval, -sinval], [sinval, cosval]])\n return rotmat\n\n def convert_oriented_box_to_trimesh_fmt(box):\n ctr = box[:3]\n lengths = box[3:6]\n trns = np.eye(4)\n trns[0:3, 3] = ctr\n trns[3, 3] = 1.0\n trns[0:3, 0:3] = heading2rotmat(box[6])\n box_trimesh_fmt = trimesh.creation.box(lengths, trns)\n return box_trimesh_fmt\n\n if len(scene_bbox) == 0:\n scene_bbox = np.zeros((1, 7))\n scene = trimesh.scene.Scene()\n for box in scene_bbox:\n scene.add_geometry(convert_oriented_box_to_trimesh_fmt(box))\n\n mesh_list = trimesh.util.concatenate(scene.dump())\n # save to obj file\n trimesh.io.export.export_mesh(mesh_list, out_filename, file_type='obj')\n\n return", "title": "" }, { "docid": "2cacd820c47bd13ace91c1713caf117d", "score": "0.464223", "text": "def run(self):\n self._create_dir(0)\n shutil.copy(self.start_coordinates, self._get_md_dir(0) + \"/confout.gro\")\n for i in range(len(self.path) - 1):\n self._steer_between_points(i, i + 1)", "title": "" }, { "docid": "6612caaeeb0f20814b6daf6e5a8f830a", "score": "0.4635589", "text": "def saveValues(self):\n\n self.log.debug(\"Entered saveValues()\")\n\n # Get the colors.\n color = self.colorEditButton.getColor()\n textColor = self.textColorEditButton.getColor()\n \n # Call save on the timestamp widgets.\n self.startPointDatetimeLocationWidget.saveTimestamp()\n self.endPointDatetimeLocationWidget.saveTimestamp()\n \n # Position and start point should be the same values.\n startPointPrice = \\\n self.startPointPriceValueSpinBox.value()\n startPointY = 
self.convertObj.priceToSceneYPos(startPointPrice)\n #endPointPrice = \\\n # self.endPointPriceValueSpinBox.value()\n #endPointY = self.convertObj.priceToSceneYPos(endPointPrice)\n endPointY = startPointY\n \n startPointDatetime = \\\n self.startPointDatetimeLocationWidget.getTimestamp()\n endPointDatetime = \\\n self.endPointDatetimeLocationWidget.getTimestamp()\n\n self.log.debug(\"startPointDatetime == {}\".\\\n format(Ephemeris.datetimeToStr(startPointDatetime)))\n self.log.debug(\"endPointDatetime == {}\".\\\n format(Ephemeris.datetimeToStr(endPointDatetime)))\n\n startPointX = self.convertObj.datetimeToSceneXPos(startPointDatetime)\n endPointX = self.convertObj.datetimeToSceneXPos(endPointDatetime)\n\n self.log.debug(\"startPointX == {}\".format(startPointX))\n self.log.debug(\"endPointX == {}\".format(endPointX))\n\n posF = QPointF(startPointX, startPointY)\n startPointF = QPointF(startPointX, startPointY)\n endPointF = QPointF(endPointX, endPointY)\n\n # Set the values in the artifact.\n self.artifact.setPos(posF)\n self.artifact.setColor(color)\n self.artifact.setTextColor(textColor)\n self.artifact.setStartPointF(startPointF)\n self.artifact.setEndPointF(endPointF)\n\n # No need to save the musicalRatios inside self.artifact,\n # because each time there is a rotation or a check-marking\n # action, the internal artifact was updated.\n # The same is the case for the self.artifact.setReversed().\n\n self.log.debug(\"Exiting saveValues()\")", "title": "" }, { "docid": "dda0839b65a211b655bfa1a4124c8164", "score": "0.46355206", "text": "def save_map(self):\n\n self.logger.info('[*] Saving map template')\n\n o = 'map_template = (\\n'\n for z in range(int(self.args.zdim)):\n o += ' (\\n'\n for y in range(int(self.args.ydim)):\n o += ' ('\n tmp = ''\n for x in range(int(self.args.xdim)):\n tmp += '{},'.format(self.world.grid[x,y,z][0])\n o += tmp[:-1]\n o += '),\\n'\n o += ' ),\\n'\n\n o += ')\\n'\n\n try:\n with open(self.args.fpath,'w') as outfile:\n outfile.write(o)\n except:\n self.logger.error('\\tFile write failed to {} '.format(\n self.args.fpath\n ))\n else:\n self.logger.debug('\\tFile written successfully to {}'.format(\n self.args.fpath\n ))\n self.logger.debug('\\tList variable = map_template')", "title": "" } ]
80f239ecd1d7f5cd5b2419f1cea49b12
Receives a message with a color and puts it on the screen at an indicated position
[ { "docid": "e7aa712035f605d02fcca551bc774d29", "score": "0.6340553", "text": "def message_to_screen(msg,color,x,y):\n screen_text = font.render(msg, True, color)\n display.blit(screen_text, (x,y))", "title": "" } ]
[ { "docid": "b79ea9df88e1c50474bb5318da2ead30", "score": "0.7239434", "text": "def eyes_color(self, message):\n # TODO use opencv2 to edit frames\n r = message.data.get(\"r\")\n if r > 255/2:\n self.current_frame = self.draw_custom(self.alarm)\n g = message.data.get(\"g\")\n b = message.data.get(\"b\")\n if b > 255/2:\n self.current_frame = self.draw_custom(self.face)", "title": "" }, { "docid": "b723e583327f37ccb874b9dc9294576f", "score": "0.67338747", "text": "def message_color(msg):\n global color_lst_indx\n answer = \"\"\n\n for i in range(len(msg)):\n\n if color_lst_indx >= len(color_lst):\n color_lst_indx = 0\n \n answer += f\"\\033[48;5;232m\\033[38;5;{color_lst[color_lst_indx]}m{msg[i]}\\033[0m\"\n color_lst_indx += 1\n\n return answer", "title": "" }, { "docid": "f0a61237ea5b25f54bee1c26ca309f1a", "score": "0.6675935", "text": "def image_color_cb(self, image_message):\n self.process_message(image_message)", "title": "" }, { "docid": "5f33a118ec5b5f653020ccd44baa714e", "score": "0.6606109", "text": "def green(self, msg):\n return self._color(msg, \"green\")", "title": "" }, { "docid": "4befbf300b50de1de2726dcb548f16b9", "score": "0.6577085", "text": "def red(self, msg):\n return self._color(msg, \"red\")", "title": "" }, { "docid": "322675603624baa5d74ed3dcc6d0fc02", "score": "0.6445729", "text": "def _color(self, msg, color):\n kwargs = {\"fg\": color}\n return click.style(msg, **kwargs) if self.colorize else msg", "title": "" }, { "docid": "b9376721fa1fbaf6afe46ccfea1306de", "score": "0.6425976", "text": "def message_color1(msg):\n return f\"\\033[48;5;232m\\033[38;5;12m{msg}\\033[0m\"", "title": "" }, { "docid": "4cc0512aeeac66cd60e1f8bc35449eb6", "score": "0.6422162", "text": "def prep_msg(self, color):\n self.msg_image = self.font.render(self.msg, True, color)\n self.msg_image_rect = self.msg_image.get_rect()\n self.msg_image_rect.centerx, self.msg_image_rect.centery = self.pos", "title": "" }, { "docid": "fedddb9c2e9d3d77c75db0226e7a45dd", "score": "0.6398884", "text": "def Msg(msg):\n print color.B_YELLOW + '[' + color.B_GREEN + '!' 
+ color.B_YELLOW + '] %s' % (msg) + color.END", "title": "" }, { "docid": "17d4f0817472aff3cdf17aa5df50ddbd", "score": "0.63985634", "text": "def okay(msg):\n cprint(msg, \"green\")", "title": "" }, { "docid": "d8c111d9bc632a50ecf47a8d50357784", "score": "0.63843054", "text": "def status(\n screen: Any,\n msg: str,\n passed: Any,\n pos: list,\n sleep: float = rsleep,\n) -> None:\n curses.init_pair(5, 2, 0) # PASS\n curses.init_pair(6, 1, 0) # FAIL\n curses.init_pair(7, 3, 0) # CHECK\n curses.init_pair(8, 5, 0) # WARN\n curses.init_pair(17, 6, 0) # normal item\n\n logtext = curses.color_pair(17)\n passtext = curses.color_pair(5)\n failtext = curses.color_pair(6)\n checktext = curses.color_pair(7)\n warntext = curses.color_pair(8)\n\n if sleep < 0:\n sleep = 0.1\n\n passorfail = None\n if passed is True:\n passorfail = \"PASS\"\n passorfail_color = passtext\n elif passed is False:\n passorfail = \"FAIL\"\n passorfail_color = failtext\n elif passed is None:\n passorfail = \"CHCK\"\n passorfail_color = checktext\n elif str(passed).upper() == \"WARN\":\n passorfail = \"WARN\"\n passorfail_color = warntext\n\n screen.addstr(pos[0] + 1, pos[1], \"[\", logtext)\n screen.addstr(pos[0] + 1, pos[1] + 2, f\"{passorfail}\", passorfail_color)\n screen.addstr(pos[0] + 1, pos[1] + 7, \"]\", logtext)\n screen.addstr(pos[0] + 1, pos[1] + 9, f\"{msg}\", logtext)\n screen.refresh()\n time.sleep(sleep)", "title": "" }, { "docid": "8f6d6238494097b6fbb24ba8a1beba7d", "score": "0.6373931", "text": "def log(color=None, message='Your message goes here!'):\n NOW = datetime.datetime.now()\n CODING = { 'Red': 'emask', 'Yellow': 'kill', 'Blue': '_sse', 'Green': ': md'}\n if color in CODING:\n print (NOW.strftime(\"%m/%d/%y %I:%M%p\") + \" - %s (%s) \") % (message, CODING[color])\n else:\n print (NOW.strftime(\"%m/%d/%y %I:%M%p\") + \" - %s\") % (message)", "title": "" }, { "docid": "28d7e5f2c0f933fb07f7913a0a95acf0", "score": "0.6362864", "text": "def msg(self, chan, msg):\n msg = msg.strip('\\r')\n msg = msg.strip(' ')\n lines = [self.style.merge_colors(self.defaultcolor, line) for line in msg.split('\\n')]\n for i in lines:\n print(\"%r\" % (i))\n msg = '\\n'.join(lines)\n self._msg(chan, msg)", "title": "" }, { "docid": "c24ada7be4b0f317ba20437e1804aff9", "score": "0.6346308", "text": "def ok(message: str) -> None:\n\n print(f\"{COLOR.GREEN}{message}{COLOR.END}\")", "title": "" }, { "docid": "67de1cfb277db755d3775084c0e9c9fc", "score": "0.6316135", "text": "def yeah(self, message):\n return self.output(message, color=\"green\")", "title": "" }, { "docid": "ae2233a3b263a4fda8294ab50b2360d2", "score": "0.6282797", "text": "def success(cls, message):\n\n cls.colorprint(\"[success] %s\" % message, Fore.GREEN)", "title": "" }, { "docid": "459c284cd0e45a9342a818904544e4ba", "score": "0.6243398", "text": "def showSuccess_(self, msg):\n self.bgBrush = self.successBrush\n self.showMessage(msg)", "title": "" }, { "docid": "3dbdaa575faa6765f411187015cef98b", "score": "0.6215862", "text": "def configure_color(self, event):\n # Creates a new color dialog\n dialog = wx.ColourDialog(None)\n dialog.GetColourData().SetChooseFull(True)\n dialog.GetColourData().SetColour(wx.Colour(self.message_color[0],\n self.message_color[1],\n self.message_color[2]))\n # Asks the message color\n if dialog.ShowModal() == wx.ID_OK:\n # Reflects the new color\n data = dialog.GetColourData()\n self.message_color = data.GetColour().Get()\n self.button_color.SetBackgroundColour(self.message_color)\n 
self.button_color.SetForegroundColour(self.message_color)\n dialog.Destroy()\n return", "title": "" }, { "docid": "24d9ac5ac4a97264ba256bd89ddcd5df", "score": "0.6201863", "text": "def log(self, msg, color='green'):\n print(colorize(msg, color, bold=True))", "title": "" }, { "docid": "c269a63eae6c24e100492e8a5244add6", "score": "0.6198183", "text": "async def rainbow(self, ctx, *msg):\n msg = \" \".join(msg)\n new = \"$\\\\textsf{\"\n at = 1\n for idx, c in enumerate(msg):\n at = idx % len(COLOUR)\n if c == \" \":\n new += \" \"\n continue\n new += f\"\\\\color{{{COLOUR[at-1]}}}{c}\"\n if len(new) >= 1980:\n break\n new += \"}$\"\n await ctx.channel.send(new)", "title": "" }, { "docid": "1e439917fbbaa630fe7a27d52afd39ac", "score": "0.61888945", "text": "def test_colored_warning(self):\n colored_warning(message=\"TESTING!\")", "title": "" }, { "docid": "1bc103d1463492cf56a1245a660fb944", "score": "0.6150515", "text": "def receive_color(self, color):\n self.color_queried = False\n self.turn_played = True\n self.wc_color = color\n self.feed.add_msg(f\"Wild Card color is {color}.\")\n self._pass_turn_to_computer()", "title": "" }, { "docid": "ea6fd524d499f7818e73c3eb2cb4e794", "score": "0.61349857", "text": "def message(new_msg, colour=colours.white):\n\n\tset_colour(colour)\n\tnew_msg_lines = textwrap.wrap(new_msg, MSG_WIDTH)\n\n\tfor line in new_msg_lines:\n\t\tif len(game_msgs) == MSG_HEIGHT:\n\t\t\tdel game_msgs[0]\n\n\t\tgame_msgs.append((line, colour))", "title": "" }, { "docid": "f57f77cc35cf0ca5db52cdc3080943e9", "score": "0.61331606", "text": "def _set_color(self, message: str, color_code: str):\n return f\"{color_code}{message}{self.DEFAULT_COLOR}\"", "title": "" }, { "docid": "667af21bd6d58ee6bd84b008706e1e30", "score": "0.61150604", "text": "def send_back(mqtt_client):\n color = ''\n assert ev3.ColorSensor\n if ev3.ColorSensor().color == ev3.ColorSensor.COLOR_GREEN:\n color = 'green'\n elif ev3.ColorSensor().color == ev3.ColorSensor.COLOR_RED:\n color = 'red'\n elif ev3.ColorSensor().color == ev3.ColorSensor.COLOR_WHITE:\n color = 'white'\n elif ev3.ColorSensor().color == ev3.ColorSensor.COLOR_BLUE:\n color = 'blue'\n elif ev3.ColorSensor().color == ev3.ColorSensor.COLOR_BLACK:\n color = 'black'\n\n mqtt_client.send_message('color_seen', [color])", "title": "" }, { "docid": "cf31161ef2bc4de09c85c0e4e92ef7cf", "score": "0.61088413", "text": "def display_onscreen_message(self, message='', color='red'):\n full_message = [message]\n if len(message) > 30:\n full_message = []\n split_message = message.split(' ')\n line_length = 0\n # new_line = ''\n e = 0\n for i in range(len(split_message)):\n # print(i, len(split_message))\n line_length += len(split_message[i]) + 1\n if line_length >= 30:\n new_line = ' '.join(split_message[e:i])\n # print(new_line, len(new_line))\n full_message.append(new_line)\n # new_line = ''\n line_length = 0\n e = i\n if i == len(split_message) - 1:\n new_line = ' '.join(split_message[e:])\n # print(new_line, len(new_line))\n full_message.append(new_line)\n try:\n self.ax4.cla()\n self.ax4.relim()\n self.ax4.set_xticks([])\n self.ax4.set_yticks([])\n self.ax4.set_title('Message')\n for i in range(len(full_message)):\n self.ax4.text(0.05, 0.95 - i * 0.05,\n full_message[i],\n verticalalignment='top',\n horizontalalignment='left',\n transform=self.ax4.transAxes,\n color=color,\n fontsize=15)\n self.i_fig.canvas.draw()\n except:\n pass\n return", "title": "" }, { "docid": "2d73a5b734e33da0f88165182eae9d80", "score": "0.61076874", "text": "def 
display_message_at(message,color,width,line):\n text_mode(1)\n select_background_color(16)\n select_text_color(color)\n text_mode(4)\n move_cursor_to(line,0)\n terminal_erase(5)\n sys.stdout.write(message)\n text_mode(1)", "title": "" }, { "docid": "cd80a241faa9c5257dfc141e31373ab5", "score": "0.6105528", "text": "def error(message):\n msg(colored('[ERROR]: ', 'red') + message)", "title": "" }, { "docid": "3d9d05b0f559a61c5f9497679b0ea96f", "score": "0.6063612", "text": "def show_message(sh, r, c, message, type=\"error\", accumulate=True):\n cell = sh.cell(row=r, column=c)\n fill = cell.fill\n if type == \"error\":\n fill = PatternFill(\"solid\", fgColor=\"CC0000\")\n elif type == \"warning\":\n fill = PatternFill(\"solid\", fgColor=\"FFFF33\")\n elif type == \"info\":\n fill = PatternFill(\"solid\", fgColor=\"87CEEB\")\n cell.fill = fill\n if accumulate:\n comment = cell.comment\n if comment:\n comment.text += \"\\n\" + message\n else:\n comment = Comment(message, \"NIS\")\n else:\n comment = Comment(message, \"NIS\")\n cell.comment = comment\n # if type == \"error\":\n # sh.title = \"!\" + sh.title", "title": "" }, { "docid": "8841060058d0e81a4669fab986ae6587", "score": "0.605416", "text": "def pl_green(cls, text):\n cls.println(cls.green(text))", "title": "" }, { "docid": "e8721bb10cd2672f5f61d047ffd16481", "score": "0.6045872", "text": "def print_success(message):\n click.echo(click.style(message, fg='green'))", "title": "" }, { "docid": "2f338d1b5ed6996e2a8746c7babf5f3e", "score": "0.60401255", "text": "def hey(self, message):\n return self.output(message, color=\"blue\")", "title": "" }, { "docid": "ce95d631f0d9878b28d9f322b4664ec2", "score": "0.6034067", "text": "def colorize(self, message, record):\n if record.levelno in self.level_map:\n bg, fg, bold = self.level_map[record.levelno]\n params = []\n if bg in self.color_map:\n params.append(str(self.color_map[bg] + 40))\n if fg in self.color_map:\n params.append(str(self.color_map[fg] + 30))\n if bold:\n params.append('1')\n if params:\n message = ''.join((self.csi, ';'.join(params), 'm', message, self.reset))\n return message", "title": "" }, { "docid": "6c14294b77f36ed9aed786f27d7297dc", "score": "0.60299176", "text": "def render(self, color, reset):\n\n return '%s%s%s' % (color, self.msg, reset)", "title": "" }, { "docid": "606e77578ea4a1ca088a1a76105b6ba2", "score": "0.6021284", "text": "def Colorize(cls, vColor, vMessage):\n if vColor == \"red\":\n coloredMessage = \"\\e[1;31m\" + vMessage + \"\\e[0;m\"\n elif vColor == \"yellow\":\n coloredMessage = \"\\e[1;33m\" + vMessage + \"\\e[0;m\"\n elif vColor == \"green\":\n coloredMessage = \"\\e[1;32m\" + vMessage + \"\\e[0;m\"\n elif vColor == \"cyan\":\n coloredMessage = \"\\e[1;36m\" + vMessage + \"\\e[0;m\"\n elif vColor == \"purple\":\n coloredMessage = \"\\e[1;34m\" + vMessage + \"\\e[0;m\"\n elif vColor == \"white\":\n coloredMessage = \"\\e[1;37m\" + vMessage + \"\\e[0;m\"\n elif vColor == \"pink\":\n coloredMessage = \"\\e[1;35m\" + vMessage + \"\\e[0;m\"\n elif vColor == \"none\":\n coloredMessage = vMessage\n\n return coloredMessage", "title": "" }, { "docid": "e08c847e9cacc45c4fd49eb2bd042afb", "score": "0.6006577", "text": "def print_success(_msg):\n print(Color.green(_msg), file=sys.stderr, flush=True)", "title": "" }, { "docid": "5399fec72df1fe6fe77333ea2b4f23ee", "score": "0.600005", "text": "def colorize(msg, color, nocolor=False):\n # The nocolor is added to shut off completely. 
You may ask the point of this\n # someone want to pipe the output, but the asci characters will also printed\n if nocolor:\n return msg\n else:\n colors = {'green' : '\\x1b[32;01m%s\\x1b[0m',\n 'red' : '\\x1b[31;01m%s\\x1b[0m',\n 'yellow' : '\\x1b[33;01m%s\\x1b[0m',\n 'bold' : '\\x1b[1;01m%s\\x1b[0m',\n 'none' : '\\x1b[0m%s\\x1b[0m',\n }\n return colors[color if sys.stdout.isatty() else 'none'] % msg", "title": "" }, { "docid": "26e9ce2218a2c09c72adf6a4beb223b7", "score": "0.59940946", "text": "def info(msg):\n cprint(msg, \"cyan\")", "title": "" }, { "docid": "66c955a449659697e657d941a867d827", "score": "0.596796", "text": "def pl_red(cls, text):\n cls.println(cls.red(text))", "title": "" }, { "docid": "bedb843922349de1e2bde3860bd45214", "score": "0.59656334", "text": "def Info(cls, vMessage):\n call([\"echo\", \"-e\", cls.Colorize(\"green\", \"[*] \") + vMessage])", "title": "" }, { "docid": "ec3a52cbf99a3cab61d24931f384e39a", "score": "0.5965251", "text": "def lsassy_highlight(self, msg):\r\n if self.no_color:\r\n return msg\r\n return \"\\033[1;33m{}\\033[0m\".format(msg)", "title": "" }, { "docid": "4698d41a8e9744d98112a9aa4f036d1e", "score": "0.59556144", "text": "def colorize(self, color, message):\n return \"\\033[{0}m{1}\\033[0m\".format(COLORS[color], message)", "title": "" }, { "docid": "3e38e213997b675fc754f794e44b7a74", "score": "0.5948539", "text": "def white(self, msg):\n return self._color(msg, \"white\")", "title": "" }, { "docid": "7136693f10876f55f14a492951f85d2f", "score": "0.5939234", "text": "def warning(message, color=31):\n c_warning = '\\033[{0:d}m'.format(color) # Define warning color\n end_c = '\\033[0m' # Define Normal color\n print c_warning + message + end_c", "title": "" }, { "docid": "d1e3f4e122400412aa8688530bfeed81", "score": "0.5925797", "text": "def display(message, color):\n print(color + bcolors.BOLD + message + bcolors.ENDC)", "title": "" }, { "docid": "fa1069f6863802ae295ad62e5fe4ce0c", "score": "0.59257394", "text": "def ok(msg: str) -> str:\n return bcolors.OKGREEN + msg + bcolors.ENDC", "title": "" }, { "docid": "ba536f0e2408f6d6bdbeb70a9832f964", "score": "0.59141576", "text": "def success(self, msg, *args):\n if args:\n msg %= args\n self.echo(msg, color=\"green\")", "title": "" }, { "docid": "6ddfad1079fe2e02b7982e30cce6cf4d", "score": "0.591296", "text": "def update_result_message_label_text_color(self,new_color):\n self.result_messagge_label.config(fg = new_color)", "title": "" }, { "docid": "4f5843984222922ce8d92062809800ba", "score": "0.59056646", "text": "def display_message(self, msg):\n self._display_message(msg, self.COLOR_DEFAULT_MESSAGE)", "title": "" }, { "docid": "c91cd0e1105c93f2c44d885d9a6da573", "score": "0.58963937", "text": "def __log(self, msg: str, color: str = \"white\") -> bool:\n colors = {\"red\":\"31\", \"green\":\"32\", \"blue\":\"34\", \"cyan\":\"36\",\n \"white\":\"37\", \"yellow\":\"33\", \"magenta\":\"34\"}\n logtime = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.gmtime())\n log = \"\\033[1;\"+colors[color]+\"m\"+logtime+\" SERVER: \"+msg+\"\\033[0m\"\n print(log)\n return True", "title": "" }, { "docid": "9de6e978c76826f23997275148833b29", "score": "0.589526", "text": "async def blue(self, ctx):\n await ctx.send(\"ORANGE!\")", "title": "" }, { "docid": "8d8b53d2b30859aa467ff04bd837602a", "score": "0.58874685", "text": "def __drawMoveMsg__(self):\n GameRenderer.MOVE_MSG_SURFACE.fill(BLACK)\n\n prevMove = self.game.peekPrevMove()\n\n if prevMove is not None:\n side = self.game.sides[prevMove.sideId]\n prevMoveStr = 
prevMove.msg\n if prevMoveStr is not None:\n displayStr = f\"{prevMoveStr} {str(side)}\"\n text = GameRenderer.INFO_FONT.render(displayStr, 1, WHITE)\n GameRenderer.MOVE_MSG_SURFACE.blit(text, (0, 0))\n self.screen.blit(GameRenderer.MOVE_MSG_SURFACE, (10, 80))", "title": "" }, { "docid": "445025d56cd43f134ec8431e8c60dee9", "score": "0.5875596", "text": "def err_message(self, id, mes):\n self.error.append(1)\n self.timer_id = GLib.timeout_add_seconds(5, self.error_false)\n if not self.sbar:\n self.statusbar.push(id, mes)\n self.timer_id = GLib.timeout_add_seconds(5, self.statusbar.pop, id)\n else: # Show bar if it isn't there\n self.toggle_statusbar()\n self.statusbar.push(id, mes)\n self.timer_id = GLib.timeout_add_seconds(5, self.statusbar.pop, id)\n self.timer_id = GLib.timeout_add_seconds(5, self.toggle_statusbar)", "title": "" }, { "docid": "33d8e966229752bc6e30e6cc281c3c98", "score": "0.5869648", "text": "def colorprint(cls, message, color=\"\"):\n print(color + message + Style.RESET_ALL)", "title": "" }, { "docid": "dbe877efd02b548fd9f70b41a4049080", "score": "0.5856061", "text": "def print_ui_msg(msg):\n cprint(msg, fg('blue'))", "title": "" }, { "docid": "f4fee2968d09a4580d7f9bca54fbccbe", "score": "0.58539927", "text": "def info(message):\n msg(colored('[INFO]: ', 'green') + message)", "title": "" }, { "docid": "1ab81fd93d2fbe0a3b0da1b3d8650869", "score": "0.5852004", "text": "def yellow(self, msg):\n return self._color(msg, \"yellow\")", "title": "" }, { "docid": "a1062519d0f2e4e608f9770b0b2f11a6", "score": "0.5844738", "text": "def Warning(self, msg):\n self._Output(1, msg, self._color.YELLOW)", "title": "" }, { "docid": "12888b90d71ab25f9e75a47fccf87381", "score": "0.5840191", "text": "def display_info_message(message,color,width):\n text_mode(1)\n select_background_color(16)\n select_text_color(color)\n text_mode(4)\n cursor_navigate(5,2)\n terminal_erase(5)\n sys.stdout.write(message)", "title": "" }, { "docid": "508e9f0ea23fdb8c5cbb2cdbedd5f393", "score": "0.5829263", "text": "def maybe_colored(msg, color, opt):\n if opt.monochrome:\n return msg\n\n return colored(msg, color)", "title": "" }, { "docid": "a1245f56746d148ec04a946906ef4c36", "score": "0.5818706", "text": "def Notify(self, message, color=(255,255,255), bgcolor=(0,0,0), strobe = False, strobeColor = (0,0,1)):\n self.DisplayMessage(message, color, bgcolor, boxed=True)\n \n# pygame.event.clear() # clear previous events\n# \n# while True:\n# for event in pygame.event.get(pygame.KEYUP):\n# if (event.key == pygame.K_RIGHT):\n# return \n raw_input('Press return to continue')", "title": "" }, { "docid": "76a3f8227fd3b3aee2f798deb85aacd4", "score": "0.58183587", "text": "def drawMessage(self,title=\"\",message=\"\"): \n# block=[]\n #need to split the message, as windows length limited, \n #31car\n# for line in message.split('\\n'):\n# if len(line) >= 30 :\n# block.append(line[0:28]+\"-\")\n# block.append(\"-\"+line[28:])\n# else :\n# block.append(line)\n# bpy.ops.('INVOKE_DEFAULT')\n# setattr(bpy.types.Scene,propName,bpy.props.BoolProperty(update=updateF,\n# step=1,precision = 3,min=0.,max=1.0,soft_min=0.,\n# soft_max=1.0,subtype=\"COLOR\"))\n# setattr(self,propName+\"_o\",None)\n\n bpy.ops.upy.dialog_message('INVOKE_DEFAULT',messageString = message,\n width=300, height=300)\n# retval = Blender.Draw.PupBlock(title, block)\n return", "title": "" }, { "docid": "8f774e596bca45b2546c3f9f24bc6673", "score": "0.5815493", "text": "def info(msg: str) -> str:\n return bcolors.OKBLUE + msg + bcolors.ENDC", "title": "" }, { 
"docid": "bca7a5e973f4642ccd82865b885cdc55", "score": "0.5815492", "text": "async def onMessage(self, msg):\n await self.checkHighlights(msg)", "title": "" }, { "docid": "69782df070f0746d633a9999bcbdb111", "score": "0.5809571", "text": "def send_colors(self, checker, i: int, j: int):\n checker.colors = (\n self.rand_bytes_array[i],\n self.permuted_colors[i],\n self.rand_bytes_array[j],\n self.permuted_colors[j],\n )", "title": "" }, { "docid": "d08193a47d8bb642ec590ceee30175e8", "score": "0.5807658", "text": "def _errmsg(self, message):\n errWin = tk.Toplevel()\n tk.Label(errWin, text=message, foreground='white', background='red' ).pack()\n tk.Button( errWin,text='Ok', command=errWin.destroy ).pack()", "title": "" }, { "docid": "d07e8e458ed95a2e47ea520975794022", "score": "0.5806726", "text": "def display_ok(message):\n display(message, bcolors.OKGREEN)", "title": "" }, { "docid": "6c0856b9b9194e0524b31650987b5e90", "score": "0.58055514", "text": "def Error(self, msg):\n self._Output(0, msg, self._color.RED)", "title": "" }, { "docid": "0a340d69b71f142b416d7f68ff23aa4f", "score": "0.5798158", "text": "def process_colour_hook(info):\n sock, = hooks.parse_info(info)\n buf = sock.outbound_text\n newbuf = []\n\n # go through our outbound text and process all of the colour codes\n i = 0\n while i < len(buf):\n if buf[i] == base_colour_marker and i + 1 < len(buf):\n i = i + 1\n char = buf[i]\n\n # upper case is bright, lower case is dark\n shade = cLIGHT\n if char == char.lower():\n shade = cDARK\n\n # if it's a valid colour code, build it\n if base_colours.has_key(char.lower()):\n newbuf.append(colour_start + shade + ';' + \\\n base_colours[char.lower()] + 'm')\n\n # if it was an invalid code, ignore it\n else:\n newbuf.append(base_colour_marker)\n if not char == base_colour_marker:\n i = i - 1\n\n else:\n newbuf.append(buf[i])\n i = i + 1\n\n # replace our outbound buffer with the processed text\n sock.outbound_text = ''.join(newbuf)", "title": "" }, { "docid": "f181676521a4ca3e83cae5c79ef83cce", "score": "0.5784118", "text": "def coloring(w):\n\n if curses.has_colors():\n \n curses.init_pair(1, curses.COLOR_YELLOW, curses.COLOR_RED)\n curses.init_pair(2, curses.COLOR_GREEN, curses.COLOR_GREEN)\n curses.init_pair(3, curses.COLOR_MAGENTA, curses.COLOR_CYAN)\n\n w.addstr(\"Yellow on red\\n\\n\", curses.color_pair(1))\n w.refresh()\n\n else:\n \n w.addstr(\"has_colors() = False\\n\");\n w.refresh()", "title": "" }, { "docid": "daf9c1cf7cca5843142baa541b034514", "score": "0.57838005", "text": "def write_status(self, msg):\n self.txt_image_processing.insert(END, msg)\n self.txt_image_processing.update()", "title": "" }, { "docid": "c1b03123c83afdc59f1a11dc98e8afa4", "score": "0.57826906", "text": "def DisplayMessage(self, message, color=(255,255,255), bgcolor=(0,0,0), boxed=False):\n if self.screen != None:\n self.screen.fill(bgcolor, self.displayRect)\n\n text = textrect.render_textrect(message,self.font,self.displayRect,color,bgcolor,0)\n \n self.screen.blit(text, self.displayRect.topleft)\n\n pygame.display.flip()\n\n if boxed:\n print \"**********************************************************************\"\n print \"\"\n print message\n if boxed:\n print \"\"\n print \"**********************************************************************\"", "title": "" }, { "docid": "7381830c7e48059a39cc6ecdea80d7df", "score": "0.5774853", "text": "def _color(self, coor):\n if self._approve_press(coor):\n return \"Green\"\n else:\n return \"Red\"", "title": "" }, { "docid": 
"754bc4191b810057a4b5e1a6aeee4bc0", "score": "0.5768977", "text": "def _special(self, message: str, with_gap: bool=True):\n if with_gap:\n self._line_break()\n self._log(self._set_color(message, self.GREEN))", "title": "" }, { "docid": "fbd156bc07f08ea0e81590849b7c070a", "score": "0.5749491", "text": "async def orange(self, ctx):\n await ctx.send(\"BLUE!\")", "title": "" }, { "docid": "ad174b516eea72280ff6cb68ad4b7c08", "score": "0.57446223", "text": "def oops(self, message):\n return self.output(message, color=\"red\")", "title": "" }, { "docid": "798695fcff419cc58f6cdfc6f11ffb8f", "score": "0.57431203", "text": "async def eightcolor(self, context):\n q=basics.contentq(context.message.content, split=False)\n m=context.message\n color = ccc.eightcolor()\n image = Image.new('RGBA', (100,100), (0,0,0,0))\n draw = ImageDraw.Draw(image, mode=\"RGBA\")\n draw.ellipse((0,0,99,99),fill=\"#000\")\n border = 8\n draw.ellipse((border,border,99-border,99-border),fill=color)\n image = image.resize((25,25),Image.ANTIALIAS)\n await talking.reply(context,'asked {}, I respond: {}!'.format(basics.spitback(q),ccc.eightcolor()), PIL=image)", "title": "" }, { "docid": "6f0d6352039289ad859f5ea19c74def0", "score": "0.57347554", "text": "def adding_extra_color(msg, extra_spaces=False): \n answer = f\"\\033[48;5;232m\"\n\n if extra_spaces:\n ending = msg.split(\" \")[len(msg.split(\" \")) - 1]\n\n if ending == \"RUNNING\":\n answer += f\"{message_color1(msg.strip(ending))}\\033[48;5;232m\\033[38;5;2m{ending}\\033[0m\"\n\n elif ending == \"NOT RUNNING\":\n answer += f\"{message_color1(msg.strip(ending))}\\033[48;5;232m \\033[5m\\033[91m{ending}\\033[0m\"\n\n else:\n msg_splt = msg.split(\" \")\n\n for i in msg_splt:\n\n if i == \"OPTIONS\":\n answer += f\"{message_color1('OPTIONS')}\"\n\n elif i == \"q-QUIT\":\n answer += f\" {message_color1('q-')}\\033[48;5;232m\\033[38;5;1mQUIT\\033[0m\"\n\n elif i == \"m-MAIN MENU\":\n answer += f\" {message_color1('m-')}\\033[48;5;232m\\033[38;5;191mMENU\\033[0m\"\n \n elif i == \"l-LOG OUT\":\n answer += f\" {message_color1('l-')}\\033[48;5;232m\\033[38;5;27mLOG OUT\\033[0m\"\n\n return answer", "title": "" }, { "docid": "8869d3fdc76361223691327e5f52f721", "score": "0.57035816", "text": "def c(colour: t.Union[str, t.Iterable[str]], message: str) -> str:\n if isinstance(colour, str):\n colour = [colour]\n escape = '\\033[' + ';'.join(colour) + 'm'\n reset = '\\033[0m'\n return escape + message + reset", "title": "" }, { "docid": "71c6eb874e363edda3c3be2adfc66d31", "score": "0.56955117", "text": "def print_failure(msg):\n\n print(\"\\033[91m\" + msg + \"\\033[0m\")", "title": "" }, { "docid": "ca2b81bfb7e34ca028b406028600e588", "score": "0.56561565", "text": "def displayColor(self, r, g, b):\n for i in range(0, self.ledCount):\n self.sendPixel(r, g, b)\n self.show()", "title": "" }, { "docid": "a4f097a2e26eb44bd905469a1624be70", "score": "0.5634987", "text": "def msg_err(message: str):\n eprint(\"\\x1b[1;31mError: %s\\x1b[0m\" % message)", "title": "" }, { "docid": "ba0cae904033f323df8d371dc080b3f2", "score": "0.5634273", "text": "def colorize(message):\n with _PASTEL.colorized():\n return _PASTEL.colorize(message)", "title": "" }, { "docid": "a05e5df7f2360af85f6ab9fb895a4b8d", "score": "0.5632226", "text": "def std(colour, msg):\n return make_text([getattr(Colours, colour), '107'], msg)", "title": "" }, { "docid": "a5aebcad3e3ca47ad2bcb3b5c3c74e70", "score": "0.56108534", "text": "def success(string):\n print(colored('SUCCESS: ' + string, 'green'))", "title": "" }, { "docid": 
"1a9b3cfa5fa7de7f6d150e68bbe3f97a", "score": "0.5610003", "text": "def print_success(msg):\n if VERBOSE:\n print(curTime() + COLOR[\"BOLD\"] + COLOR[\"OKGREEN\"] + msg + COLOR[\"ENDC\"])", "title": "" }, { "docid": "2dd6e263b6e4a3ab0d61abba63f64714", "score": "0.5595013", "text": "def print_end_message(winner, loser):\n # Fill screen\n # TODO: Display game over background screen\n DISPLAYSURF.fill(gv.GREEN)\n\n # Render button texts\n play_again_surf = FONT_MEDIUM.render(\"Play Again\", True, gv.BLACK)\n exit_now_surf = FONT_MEDIUM.render(\"Exit Now\", True, gv.BLACK)\n\n # Render winner string\n who_won_text = f\"{loser} is unable to battle, {winner} wins!\"\n who_won_size = FONT_SMALL.size(who_won_text)\n who_won = FONT_SMALL.render(who_won_text, True, gv.BLACK)\n\n # Display game over mesasges centered on screen\n DISPLAYSURF.blit(game_over, ((gv.SCREEN_WIDTH-game_over_size[0])/2, 50))\n DISPLAYSURF.blit(who_won, ((gv.SCREEN_WIDTH-who_won_size[0])/2, 150))\n\n # end message main loop\n while True:\n mouse = pygame.mouse.get_pos()\n for event in pygame.event.get():\n if event.type == pygame.locals.MOUSEBUTTONDOWN:\n # If hit play again -> exit end message\n if 75 <= mouse[0] <= 285 and 200 <= mouse[1] <= 230:\n return\n # If hit exit now -> quit\n elif 455 <= mouse[0] <= 625 and 200 <= mouse[1] <= 230:\n pygame.quit()\n sys.exit()\n\n # If hit window X -> Exit\n if event.type == pygame.locals.QUIT:\n pygame.quit()\n sys.exit()\n\n # Draw play again button, change color if hovering\n if 75 <= mouse[0] <= 285 and 200 <= mouse[1] <= 230:\n play_again_color = gv.LIGHT_BLUE\n else:\n play_again_color = gv.DARK_BLUE\n\n # Draw exit now button, change color if hovering\n if 455 <= mouse[0] <= 625 and 200 <= mouse[1] <= 230:\n exit_now_color = gv.LIGHT_BLUE\n else:\n exit_now_color = gv.DARK_BLUE\n\n # Draw play again and exit texts and buttons\n pygame.draw.rect(DISPLAYSURF, play_again_color, [75, 200, 210, 30])\n pygame.draw.rect(DISPLAYSURF, exit_now_color, [455, 200, 170, 30])\n DISPLAYSURF.blit(play_again_surf, (80, 205))\n DISPLAYSURF.blit(exit_now_surf, (460, 205))\n\n # Write all changes to screen\n pygame.display.update()\n FramePerSec.tick(gv.FPS)", "title": "" }, { "docid": "5f0d903323e20137cebaeab56661c094", "score": "0.5588348", "text": "def next_color(self):", "title": "" }, { "docid": "0c6a32597a9fecf5a64019f529de3cfd", "score": "0.5585362", "text": "def sendWorldMessage(self, message):\r\n self.queueTask(TASK_WORLDMESSAGE, (255, self.world, COLOUR_YELLOW+message))", "title": "" }, { "docid": "b38ad62e9c40ff87796f571f88025069", "score": "0.55784273", "text": "def show_message_cb(self, greeter, text, type):\n #self.log(\"show_message_cb\")\n self.show_message(text)", "title": "" }, { "docid": "d53fe6b129040027226390fcd1465dd9", "score": "0.55783486", "text": "async def report_color(self, ctx, color: str = '000000'):\n \n server = ctx.message.server\n \n try:\n if server.id not in self.settings:\n await self.init(server)\n except:\n await self.error(ctx)\n \n try:\n color = color.replace(\"#\", \"\").replace(\"0x\", \"\")[:6]\n color = int(color, 16)\n except ValueError:\n color = '000000'\n \n self.settings[server.id]['colour']['report_embed'] = color\n try:\n dataIO.save_json('data/bettermod/settings.json', self.settings)\n except:\n await self.error(ctx)\n return\n await self.bot.say(\"New embed color has been registered. 
If the value is invalid, the color will not change.\")", "title": "" }, { "docid": "b26fee6240ea286b7b3ebfd738ac0d20", "score": "0.5575473", "text": "def showError_(self, msg):\n self.bgBrush = self.errorBrush\n self.showMessage(msg)", "title": "" }, { "docid": "28d931c8b75e5dbb373d0b8bc80cdee2", "score": "0.55746347", "text": "def display_success_message(self, msg):\n self._display_message(msg, self.COLOR_SUCCESS_MESSAGE)", "title": "" }, { "docid": "c63134914d12aa3a94c2764fbb7506e5", "score": "0.5569975", "text": "def info(message: str) -> None:\n\n print(f\"{COLOR.BLUE}{message}{COLOR.END}\")", "title": "" }, { "docid": "23b8c4e5dd03fc1bc38c329fed4600b9", "score": "0.55684537", "text": "def red_on():\n Leds.set_red(1)", "title": "" }, { "docid": "d6001eb722bdf026919071f41f55c021", "score": "0.556241", "text": "def color_on(self, default_colors=False):\n # global program_message\n self.editor.reset_line()\n\n if curses.has_colors():\n curses.start_color()\n else:\n if self.config.os_name == 'Macintosh':\n self.editor.get_confirmation('Color not supported on the OSX terminal!', True)\n else:\n self.editor.get_confirmation('Color not supported on your terminal!', True)\n self.config.set_default_settings(True, True)\n self.config['display_color'] = False\n self.editor.program_message = ' Monochrome display '\n return\n\n curses.init_pair(1, curses.COLOR_WHITE, curses.COLOR_BLACK)\n curses.init_pair(2, curses.COLOR_CYAN, curses.COLOR_BLACK)\n curses.init_pair(3, curses.COLOR_BLUE, curses.COLOR_BLACK)\n curses.init_pair(4, curses.COLOR_GREEN, curses.COLOR_BLACK)\n curses.init_pair(5, curses.COLOR_YELLOW, curses.COLOR_BLACK)\n curses.init_pair(6, curses.COLOR_RED, curses.COLOR_BLACK)\n curses.init_pair(7, curses.COLOR_MAGENTA, curses.COLOR_BLACK)\n\n curses.init_pair(8, curses.COLOR_BLACK, curses.COLOR_WHITE)\n curses.init_pair(9, curses.COLOR_BLACK, curses.COLOR_CYAN)\n curses.init_pair(10, curses.COLOR_BLACK, curses.COLOR_BLUE)\n curses.init_pair(11, curses.COLOR_BLACK, curses.COLOR_GREEN)\n curses.init_pair(12, curses.COLOR_BLACK, curses.COLOR_YELLOW)\n curses.init_pair(13, curses.COLOR_BLACK, curses.COLOR_RED)\n curses.init_pair(14, curses.COLOR_BLACK, curses.COLOR_MAGENTA)\n\n curses.init_pair(15, curses.COLOR_BLUE, curses.COLOR_WHITE)\n curses.init_pair(16, curses.COLOR_GREEN, curses.COLOR_WHITE)\n curses.init_pair(17, curses.COLOR_RED, curses.COLOR_WHITE)\n\n curses.init_pair(18, curses.COLOR_WHITE, curses.COLOR_BLUE)\n curses.init_pair(19, curses.COLOR_WHITE, curses.COLOR_GREEN)\n curses.init_pair(20, curses.COLOR_WHITE, curses.COLOR_RED)\n\n curses.init_pair(21, curses.COLOR_RED, curses.COLOR_BLUE)\n curses.init_pair(22, curses.COLOR_BLUE, curses.COLOR_RED)\n\n curses.init_pair(23, curses.COLOR_MAGENTA, curses.COLOR_GREEN)\n curses.init_pair(24, curses.COLOR_GREEN, curses.COLOR_MAGENTA)\n\n curses.init_pair(25, curses.COLOR_YELLOW, curses.COLOR_GREEN)\n curses.init_pair(26, curses.COLOR_GREEN, curses.COLOR_YELLOW)\n\n curses.init_pair(27, curses.COLOR_WHITE, curses.COLOR_YELLOW)\n curses.init_pair(28, curses.COLOR_WHITE, curses.COLOR_MAGENTA)\n curses.init_pair(29, curses.COLOR_YELLOW, curses.COLOR_BLUE)\n curses.init_pair(30, curses.COLOR_GREEN, curses.COLOR_BLUE)\n curses.init_pair(31, curses.COLOR_MAGENTA, curses.COLOR_BLUE)\n curses.init_pair(32, curses.COLOR_CYAN, curses.COLOR_BLUE)\n\n curses.init_pair(33, curses.COLOR_CYAN, curses.COLOR_WHITE)\n curses.init_pair(34, curses.COLOR_YELLOW, curses.COLOR_WHITE)\n curses.init_pair(35, curses.COLOR_MAGENTA, curses.COLOR_WHITE)\n 
curses.init_pair(36, curses.COLOR_WHITE, curses.COLOR_WHITE)\n curses.init_pair(37, curses.COLOR_YELLOW, curses.COLOR_YELLOW)\n curses.init_pair(38, curses.COLOR_BLACK, curses.COLOR_BLACK)\n curses.init_pair(39, curses.COLOR_GREEN, curses.COLOR_RED)\n curses.init_pair(40, curses.COLOR_YELLOW, curses.COLOR_RED)\n curses.init_pair(41, curses.COLOR_CYAN, curses.COLOR_RED)\n curses.init_pair(42, curses.COLOR_MAGENTA, curses.COLOR_RED)\n curses.init_pair(43, curses.COLOR_BLUE, curses.COLOR_GREEN)\n curses.init_pair(44, curses.COLOR_CYAN, curses.COLOR_GREEN)\n curses.init_pair(45, curses.COLOR_RED, curses.COLOR_GREEN)\n curses.init_pair(46, curses.COLOR_RED, curses.COLOR_YELLOW)\n curses.init_pair(47, curses.COLOR_WHITE, curses.COLOR_CYAN)\n curses.init_pair(48, curses.COLOR_BLUE, curses.COLOR_CYAN)\n curses.init_pair(49, curses.COLOR_RED, curses.COLOR_CYAN)\n curses.init_pair(50, curses.COLOR_YELLOW, curses.COLOR_CYAN)\n curses.init_pair(51, curses.COLOR_MAGENTA, curses.COLOR_CYAN)\n\n curses.init_pair(52, curses.COLOR_BLUE, curses.COLOR_YELLOW)\n\n self.colors.update({\n \"white_on_black\": curses.color_pair(1),\n \"cyan_on_black\": curses.color_pair(2),\n \"blue_on_black\": curses.color_pair(3),\n \"green_on_black\": curses.color_pair(4),\n \"yellow_on_black\": curses.color_pair(5),\n \"red_on_black\": curses.color_pair(6),\n \"magenta_on_black\": curses.color_pair(7),\n\n \"black_on_white\": curses.color_pair(8),\n \"black_on_cyan\": curses.color_pair(9),\n \"black_on_blue\": curses.color_pair(10),\n \"black_on_green\": curses.color_pair(11),\n \"black_on_yellow\": curses.color_pair(12),\n \"black_on_red\": curses.color_pair(13),\n \"black_on_magenta\": curses.color_pair(14),\n\n \"blue_on_white\": curses.color_pair(15),\n \"green_on_white\": curses.color_pair(16),\n \"red_on_white\": curses.color_pair(17),\n\n \"white_on_blue\": curses.color_pair(18),\n \"white_on_green\": curses.color_pair(19),\n \"white_on_red\": curses.color_pair(20),\n\n \"red_on_blue\": curses.color_pair(21),\n \"blue_on_red\": curses.color_pair(22),\n\n \"magenta_on_green\": curses.color_pair(23),\n \"green_on_magenta\": curses.color_pair(24),\n\n \"yellow_on_green\": curses.color_pair(25),\n \"green_on_yellow\": curses.color_pair(26),\n\n \"white_on_yellow\": curses.color_pair(27),\n \"white_on_magenta\": curses.color_pair(28),\n\n \"yellow_on_blue\": curses.color_pair(29),\n \"green_on_blue\": curses.color_pair(30),\n \"magenta_on_blue\": curses.color_pair(31),\n \"cyan_on_blue\": curses.color_pair(32),\n\n \"cyan_on_white\": curses.color_pair(33),\n \"yellow_on_white\": curses.color_pair(34),\n \"magenta_on_white\": curses.color_pair(35),\n \"white_on_white\": curses.color_pair(36),\n \"yellow_on_yellow\": curses.color_pair(37),\n \"black_on_black\": curses.color_pair(38),\n \"green_on_red\": curses.color_pair(39),\n \"yellow_on_red\": curses.color_pair(40),\n \"cyan_on_red\": curses.color_pair(41),\n \"magenta_on_red\": curses.color_pair(42),\n \"blue_on_green\": curses.color_pair(43),\n \"cyan_on_green\": curses.color_pair(44),\n \"red_on_green\": curses.color_pair(45),\n \"red_on_yellow\": curses.color_pair(46),\n \"white_on_cyan\": curses.color_pair(47),\n \"blue_on_cyan\": curses.color_pair(48),\n \"red_on_cyan\": curses.color_pair(49),\n \"yellow_on_cyan\": curses.color_pair(50),\n \"magenta_on_cyan\": curses.color_pair(51),\n\n \"blue_on_yellow\": curses.color_pair(52),\n })\n\n if self.config.no_bold:\n bold = 0\n else:\n bold = curses.A_BOLD\n underline = curses.A_UNDERLINE\n\n # default colors\n\n if 
default_colors or self.config[\"default_colors\"]:\n self.config.settings.update({\n \"color_dim\": self.colors[\"white_on_black\"],\n \"color_line_numbers\": self.colors[\"black_on_yellow\"],\n \"color_line_num_reversed\": self.colors[\"white_on_blue\"] + bold,\n \"color_warning\": self.colors[\"white_on_red\"] + bold,\n \"color_normal\": self.colors[\"white_on_black\"] + bold,\n \"color_background\": self.colors[\"white_on_black\"] + bold,\n \"color_message\": self.colors[\"white_on_magenta\"] + bold,\n \"color_reversed\": self.colors[\"white_on_magenta\"] + bold,\n \"color_underline\": self.colors[\"white_on_black\"] + underline + bold,\n \"color_commands\": self.colors[\"green_on_black\"] + bold,\n \"color_commands_reversed\": self.colors[\"white_on_green\"] + bold,\n \"color_quote_double\": self.colors[\"yellow_on_black\"] + bold,\n \"color_comment\": self.colors[\"yellow_on_black\"],\n \"color_comment_block\": self.colors[\"black_on_yellow\"],\n \"color_comment_separator\": self.colors[\"black_on_red\"],\n \"color_comment_leftjust\": self.colors[\"white_on_magenta\"] + bold,\n \"color_comment_rightjust\": self.colors[\"white_on_red\"] + bold,\n \"color_comment_centered\": self.colors[\"yellow_on_green\"] + bold,\n \"color_number\": self.colors[\"cyan_on_black\"],\n \"color_entry\": self.colors[\"white_on_blue\"] + bold,\n\n \"color_entry_command\": self.colors[\"green_on_blue\"] + bold,\n \"color_entry_quote\": self.colors[\"yellow_on_blue\"] + bold,\n \"color_entry_quote_triple\": self.colors[\"red_on_blue\"] + bold,\n \"color_entry_comment\": self.colors[\"red_on_blue\"] + bold,\n \"color_entry_functions\": self.colors[\"magenta_on_blue\"] + bold,\n \"color_entry_class\": self.colors[\"cyan_on_blue\"] + bold,\n \"color_entry_number\": self.colors[\"cyan_on_blue\"] + bold,\n \"color_entry_dim\": self.colors[\"white_on_blue\"],\n\n \"color_operator\": self.colors[\"white_on_black\"],\n \"color_functions\": self.colors[\"magenta_on_black\"] + bold,\n \"color_functions_reversed\": self.colors[\"white_on_magenta\"] + bold,\n \"color_class\": self.colors[\"blue_on_black\"] + bold,\n \"color_class_reversed\": self.colors[\"white_on_blue\"] + bold,\n \"color_quote_triple\": self.colors[\"red_on_black\"],\n \"color_mark\": self.colors[\"yellow_on_blue\"] + bold + underline,\n \"color_negative\": self.colors[\"red_on_black\"] + bold,\n \"color_entry_negative\": self.colors[\"red_on_blue\"] + bold,\n \"color_positive\": self.colors[\"cyan_on_black\"] + bold,\n \"color_entry_positive\": self.colors[\"cyan_on_blue\"] + bold,\n \"color_tab_odd\": self.colors[\"white_on_black\"],\n \"color_tab_even\": self.colors[\"yellow_on_black\"],\n \"color_whitespace\": self.colors[\"black_on_white\"] + underline,\n \"color_header\": self.colors[\"white_on_black\"] + bold,\n \"color_bar\": self.colors[\"white_on_black\"],\n \"color_constant\": self.colors[\"white_on_black\"] + underline,\n \"color_entry_constant\": self.colors[\"white_on_blue\"] + bold,\n \"color_quote_single\": self.colors[\"yellow_on_black\"] + bold,\n \"color_selection\": self.colors[\"black_on_white\"] + underline,\n \"color_selection_reversed\": self.colors[\"black_on_cyan\"] + underline,\n })\n self.config[\"display_color\"] = True", "title": "" }, { "docid": "31b76d3a064fd382b2f3d0f86fd7af9a", "score": "0.5542477", "text": "def eyes_narrow(self, message):\n self.current_frame = self.draw_custom(self.middle_blink)", "title": "" }, { "docid": "188bff5bd1eae7dc4bc67ef9dea223ee", "score": "0.55343705", "text": "def callbackRGB(data):", 
"title": "" }, { "docid": "f1e58a03ee5353cf87526e8f89fb79ad", "score": "0.5531371", "text": "def emit0(self, record):\n # noinspection PyBroadException\n try:\n msg = self.format(record)\n stream = self.stream\n if record.levelno == 10:\n # msg_color = ('\\033[0;32m%s\\033[0m' % msg) # 绿色\n msg_color = ('\\033[%s;%sm%s\\033[0m' % (self._display_method, 34 if self._is_pycharm_2019 else 32, msg)) # 绿色\n elif record.levelno == 20:\n msg_color = ('\\033[%s;%sm%s\\033[0m' % (self._display_method, self.bule, msg)) # 青蓝色 36 96\n elif record.levelno == 30:\n msg_color = ('\\033[%s;%sm%s\\033[0m' % (self._display_method, self.yellow, msg))\n elif record.levelno == 40:\n msg_color = ('\\033[%s;35m%s\\033[0m' % (self._display_method, msg)) # 紫红色\n elif record.levelno == 50:\n msg_color = ('\\033[%s;31m%s\\033[0m' % (self._display_method, msg)) # 血红色\n else:\n msg_color = msg\n # print(msg_color,'***************')\n stream.write(msg_color)\n stream.write(self.terminator)\n self.flush()\n except Exception:\n self.handleError(record)", "title": "" }, { "docid": "b22cc6df16f0597af38e36baca4a79b2", "score": "0.552513", "text": "def send_pledging_colouring(self, checker):\n # Permute the colours\n perm = np.random.permutation(COLOR.values())\n\n # Permute the colours of the nodes\n self.permuted_colors = [\n COLOR(perm[color-1]) for color in self.graph.colors\n ]\n\n # Generates 128 bits randomly for each node\n self.rand_bytes_array = [\n token_bytes(128) for _ in range(self.graph.size)\n ]\n\n # Send the pledge\n checker.pledging = pledging_colouring(\n self.permuted_colors,\n self.rand_bytes_array,\n )", "title": "" } ]
998ac34f3ca67be64fc68b36be75906c
It allows the use of set literals.
[ { "docid": "f3ce205a5cc66bbc4d76cef40f77b209", "score": "0.6530405", "text": "def test_Set():\n assert restricted_eval('{1, 2, 3}') == set([1, 2, 3])", "title": "" } ]
[ { "docid": "e70be52f1c10ed4811277ad0453827fc", "score": "0.7011735", "text": "def sets(*args, **kwargs):\n\n pass", "title": "" }, { "docid": "2f47632f0dd26d1d75200dd3a942aa2d", "score": "0.691617", "text": "def set(self, s):\r\n self.handle(set, s)", "title": "" }, { "docid": "12c8c627b899675eccaef087bdd68132", "score": "0.6835857", "text": "def _set(*args):\n return set(args)", "title": "" }, { "docid": "43575298fcada55d06435c475d9e7b4f", "score": "0.67724437", "text": "def set(object, value):", "title": "" }, { "docid": "240e7883954dab897cebfe103a0802a3", "score": "0.6633977", "text": "def do_set(parser, token):\n code = token.contents\n firstspace = code.find(' ')\n if firstspace >= 0:\n code = code[firstspace + 1 :]\n return Setter(code)", "title": "" }, { "docid": "dbc123aaeb5e3360333eacbff09e66db", "score": "0.6631852", "text": "def set(*args):\r\n return _idaapi.func_item_iterator_t_set(*args)", "title": "" }, { "docid": "e50a7c1f0ce4f6c32fdfa8e28d756475", "score": "0.6543921", "text": "def testSimpleSet(self):\n local_data = {'userID': 'oui'}\n simple_set(\"userID\", local_data, 'non')\n self.assertEqual(local_data[\"userID\"], 'non')", "title": "" }, { "docid": "658abf840f52cab313fac0c0f35b5edb", "score": "0.6376022", "text": "def set(*args):\r\n return _idaapi.func_parent_iterator_t_set(*args)", "title": "" }, { "docid": "c4dca8a0dc1bf4622b311a7f761dc045", "score": "0.63734686", "text": "def set(self, *args, **kwargs) -> Any:\n pass", "title": "" }, { "docid": "2e39b83b66f5109616f39440408074fb", "score": "0.6363303", "text": "def _set(self):", "title": "" }, { "docid": "0a5951e03cca2ed462391e350aae1b6e", "score": "0.6362125", "text": "def Set(*args):\n return _TFunction.TFunction_Scope_Set(*args)", "title": "" }, { "docid": "c897e45c05aa07772766550ccba0714e", "score": "0.63616896", "text": "def generate_set(self, left: 'parser.Source', right: 'parser.Source', kind: 'dsl.Set.Kind') -> 'parser.Source':", "title": "" }, { "docid": "253034c5f66f6503a0a2ebc616f1eb1d", "score": "0.63432884", "text": "def parse_set(self):\r\n lineno = self.stream.next().lineno\r\n target = self.parse_assign_target()\r\n self.stream.expect('assign')\r\n expr = self.parse_tuple()\r\n return nodes.Assign(target, expr, lineno=lineno)", "title": "" }, { "docid": "2f60239c8a659d4045f2157fa438a6d4", "score": "0.6315535", "text": "def set(self, *args, **kwargs):\n return self._call_with_fallback(\"set\", *args, **kwargs)", "title": "" }, { "docid": "1fd3c145d0ec7354fe8e1380e55dc4a5", "score": "0.63110363", "text": "def set(self, expression, value):\r\n pass", "title": "" }, { "docid": "bbbf79a550ed78d8b18affc7f818288d", "score": "0.63075596", "text": "def demonstrate_sets():\n\n bruce = {'Bruce', 'Springsteen', 'Bruce'}\n print('No duplicates:', bruce)\n print()\n\n bruce = set('Bruce')\n springsteen = set('Springsteen')\n print('bruce:', bruce)\n print('springsteen', springsteen)\n print('bruce & springsteen:', bruce & springsteen)\n print('bruce | springsteen:', bruce | springsteen)\n print('bruce ^ springsteen:', bruce ^ springsteen)\n print('bruce - springsteen:', bruce - springsteen)", "title": "" }, { "docid": "0b02dc017fd63893baffd5382521b74e", "score": "0.630018", "text": "def set_string(self, parm, set):\n self._set_string(parm.encode(), set.encode())", "title": "" }, { "docid": "9e047b5b2b17c3536931bf6fc2fe8652", "score": "0.628869", "text": "def set(attr, *args, **kwargs):\n \n pass", "title": "" }, { "docid": "4b8dc974fc968cd88461f8a66e04ae44", "score": "0.62409467", "text": "def set(self, 
value):", "title": "" }, { "docid": "cf2f794e86ae4f9ca40ed6f90e6b5278", "score": "0.62285244", "text": "def set(self, **kwargs):\n\t\tself._sets.update(self._input_maker(kwargs))", "title": "" }, { "docid": "a1720923ff0716a91e850c8d519233a2", "score": "0.6226446", "text": "def implement_set(self, left: Table, right: Table, kind: framod.Set.Kind) -> Table:", "title": "" }, { "docid": "51854519926bea99901c98e7680875f8", "score": "0.62193567", "text": "def cbwr_set(*args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "e9be2af7d47a05bccf4670cc37276527", "score": "0.6196369", "text": "def test_parse_set(self):\n pass", "title": "" }, { "docid": "fb779b565eb871b0b585446fe3407aad", "score": "0.6118672", "text": "def set(self):\n\n pass", "title": "" }, { "docid": "7be8741d27f749e7ec81418c9719c60c", "score": "0.60930717", "text": "def set(*args):\r\n return _idaapi.func_tail_iterator_t_set(*args)", "title": "" }, { "docid": "4faacb7abddbaf4aaafbd2ecd6647e99", "score": "0.6072043", "text": "def set(self, key, value):", "title": "" }, { "docid": "8d80f365b0021b9287110c9fe82163e9", "score": "0.60182893", "text": "def Set(*args):\n return _propgrid.PGChoices_Set(*args)", "title": "" }, { "docid": "876e9a0bf227d8496f4dc4669b733594", "score": "0.59893864", "text": "def __setitem__(self, key: str, value: str) -> None:\n set_key = key.lower().encode(\"latin-1\")\n set_value = value.encode(\"latin-1\")\n\n found_indexes = []\n for idx, (item_key, item_value) in enumerate(self._list):\n if item_key == set_key:\n found_indexes.append(idx)\n\n for idx in reversed(found_indexes[1:]):\n del self._list[idx]\n\n if found_indexes:\n idx = found_indexes[0]\n self._list[idx] = (set_key, set_value)\n else:\n self._list.append((set_key, set_value))", "title": "" }, { "docid": "49e79cac37226ad3fca5178796a8881e", "score": "0.5925128", "text": "def convert_set(self, set_value_python):\n if self.python_to_instr is None:\n return set_value_python\n else:\n return self.python_to_instr[set_value_python]", "title": "" }, { "docid": "69a5a92e74933704c6bf8a90e32a7a85", "score": "0.58868235", "text": "def add_cmd_set(self, set):\n self.interp.add_cmd_set(set)", "title": "" }, { "docid": "bd6dfa4a40e0c8ae322db0554cde54b1", "score": "0.58835185", "text": "def Set(*args):\n return _TFunction.TFunction_GraphNode_Set(*args)", "title": "" }, { "docid": "4c43fcc8598422b35d3943cc3af5ff46", "score": "0.585236", "text": "def set(self, *args):\n return _fife.Cursor_set(self, *args)", "title": "" }, { "docid": "d5bff84ef48c27f0599fa4f197308be6", "score": "0.5847461", "text": "def _setX(x):\n pass", "title": "" }, { "docid": "53d5c2f3f462143dd6eef73dbd82ea77", "score": "0.58349526", "text": "def set(self, *args, **kwargs):\n return self.cell().set(*args, **kwargs)", "title": "" }, { "docid": "53d5c2f3f462143dd6eef73dbd82ea77", "score": "0.58349526", "text": "def set(self, *args, **kwargs):\n return self.cell().set(*args, **kwargs)", "title": "" }, { "docid": "f31333a3f9133281ed36e2e366386e57", "score": "0.5827125", "text": "def pytype(self):\n return \"%s.set\" % BUILTINS", "title": "" }, { "docid": "42ed5d5ca2eb266355555a0b2c9f1edb", "score": "0.5811691", "text": "def set(self, set_name, set_content):\n result = self.db.hmset(set_name, set_content)\n return result", "title": "" }, { "docid": "03869ca43ccb906e4cc5a4b216af4471", "score": "0.5808372", "text": "def _set(iterable):\n if type(iterable) is str:\n return set() if iterable == '' else {iterable}\n else: return set(iterable)", "title": "" }, { "docid": 
"6448ffb3804eb92292e29c679f304c3b", "score": "0.58068377", "text": "def is_set(self):", "title": "" }, { "docid": "588451015dd2317bce53d1a95c21c20a", "score": "0.5805637", "text": "def str_is_set(string):\n return string", "title": "" }, { "docid": "2f59a14751b1625dce5dbb762acf9ab3", "score": "0.5778102", "text": "def value(sett):\n if 'x' in sett:\n return 'x'\n return 'o'", "title": "" }, { "docid": "39406f1d18a89e1758538de354289fce", "score": "0.57666546", "text": "def __repr__(self):\n return f\"Set({self})\"", "title": "" }, { "docid": "46a5903879a2e6248035cb2ff4fe1cde", "score": "0.57664126", "text": "def test_string_set(self):\n range_open = ph.string_set_from_range(10)\n range_bounded = ph.string_set_from_range(5, 10)\n \n self.assertTrue(\"0\" in range_open)\n self.assertTrue(\"9\" in range_open)\n self.assertFalse(\"22\" in range_open)\n \n self.assertFalse(\"0\" in range_bounded)\n self.assertTrue(\"9\" in range_bounded)\n self.assertFalse(\"22\" in range_bounded)", "title": "" }, { "docid": "8f7ab473cbf9d15eaae705ac722025a8", "score": "0.57663643", "text": "def set(self, x, y, z):\n return bool()", "title": "" }, { "docid": "cea2da1f335b9e0324531b9c2334640e", "score": "0.5764473", "text": "def encode_set(self, values) -> str:\n\n if not all(map(self.is_scalar, values)):\n raise ValueError(\n f\"ODL only allows scalar values in sets: {values}\"\n )\n\n return super().encode_set(values)", "title": "" }, { "docid": "18d36508e46fe5a77b6892cb6aab22d5", "score": "0.5761095", "text": "def serialize(self, **kwargs):\n\n key, value = super(ParseSetType, self).serialize(**kwargs)\n return key, set(value)", "title": "" }, { "docid": "8a0c5b5e3528e59dead8ad721ecf18b0", "score": "0.5746541", "text": "def set_(self, subs, val):\n self._access(subs, _SET_, val)", "title": "" }, { "docid": "cc2684e934642370d206866c79bea287", "score": "0.5742214", "text": "def encode_set(self, value: abc.Set) -> str:\n return \"{\" + self.encode_setseq(value) + \"}\"", "title": "" }, { "docid": "604af8fd482ad8fb99de95087bf115da", "score": "0.5736879", "text": "def setValueTest_SetInValidValues_4 (self):\r\n self.assertRaises (ATOM3BadAssignmentValue, self.atc.setValue, ( [\"e1\", \"e2\"], \"e\" ))", "title": "" }, { "docid": "0b91434e3ed49af121c058ec624eb932", "score": "0.57343715", "text": "def set(self, x, y, val):\n self.repr[y][x] = val", "title": "" }, { "docid": "bab16d312ce6bd21a469ade63c04e035", "score": "0.57192487", "text": "def set(self, input_set: set, input_set_name: str):\n n_values = len(input_set)\n self.log(\"Set {} length : {}\".format(input_set_name, n_values), self.default_level, depth=3)\n if n_values <= MAX_SET:\n self.log(\"Set {} : {}\".format(input_set_name, input_set), self.default_level, depth=3)", "title": "" }, { "docid": "a2370e9a6fe067409cc613bc1de6ad3d", "score": "0.5713149", "text": "def test_set(small_string):\n hashe = HashTable()\n hashe.set(small_string, 12)\n assert len(hashe.buckets[hashe.hash_key(small_string)]) == 1", "title": "" }, { "docid": "763d1266a638300e9f49cad84f7ef937", "score": "0.57083106", "text": "def test_set(self):\n self.p.set(1, 1, 1)\n self.assertEqual(1, self.p.get(1, 1))\n\n # Can over-write already set cell\n self.p.set(1, 1, 1)\n self.assertEqual(1, self.p.get(1, 1))\n\n # Can't write same value to same row or column twice\n self.assertRaises(ValueError, self.p.set, 1, 2, 1)\n self.assertRaises(ValueError, self.p.set, 2, 1, 1)", "title": "" }, { "docid": "30473366a733db927e70db5d389a52ad", "score": "0.5707844", "text": "def set(self) -> None:\n 
raise NotImplementedError", "title": "" }, { "docid": "0295cbe2807277ea2d836aed8d4ff80a", "score": "0.5702998", "text": "def declareSets(self, esM, pyM):\n raise NotImplementedError", "title": "" }, { "docid": "03a8b783096a9f51cdae7f9dc9bf3acc", "score": "0.5699542", "text": "def set(self, key, value):\n pass", "title": "" }, { "docid": "03a8b783096a9f51cdae7f9dc9bf3acc", "score": "0.5699542", "text": "def set(self, key, value):\n pass", "title": "" }, { "docid": "3bc04574f54a234235d858b8ba2f9dfb", "score": "0.56812394", "text": "def demonstrate_sets():\n\n s = {\"Bruce Springsteen\", \"Patti Smith\", \"Bruce Springsteen\", \"Foy Vance\"}\n s1 = {\"Bruce Springsteen\", \"Patti Smith\", \"Eric Clapton\"}\n print(s)\n print(s & s1)\n print(s | s1)\n print(s - s1)\n print(s ^ s1)\n print()\n\n print()", "title": "" }, { "docid": "57d6f9da482dce6fad29163483af7caa", "score": "0.567862", "text": "def test_set_reg_to_literal(self):\n # SET A, 0x10\n self.cpu.ram[0].value = pack_instruction(op_code=OPCODE.SET,\n a=Value.reg(REG.A),\n b=Value.literal(0x10))\n self.emulator.dispatch()\n self.assertTrue(self.cpu.registers[REG.A].value == 0x10, \"Register value error\")", "title": "" }, { "docid": "52f747271b8d399c1af0703c906f8808", "score": "0.56634814", "text": "def process_setarg(arg):\n import pyomo.core.base.set as new_set\n if isinstance(arg, (_SetDataBase, new_set._SetDataBase)):\n # Argument is a non-indexed Set instance\n return arg\n elif isinstance(arg,IndexedSet):\n # Argument is an indexed Set instance\n raise TypeError(\"Cannot index a component with an indexed set\")\n elif isinstance(arg,Component):\n # Argument is some other component\n raise TypeError(\"Cannot index a component with a non-set \"\n \"component: %s\" % (arg.name))\n else:\n try:\n #\n # If the argument has a set_options attribute, then use\n # it to initialize a set\n #\n options = getattr(arg,'set_options')\n options['initialize'] = arg\n return Set(**options)\n except:\n pass\n # Argument is assumed to be an initialization function\n return Set(initialize=arg)", "title": "" }, { "docid": "c032a2f3e6aa4835f760222f6542f1fd", "score": "0.5643932", "text": "def encode_set(self, values) -> str:\n for v in values:\n if not self.is_symbol(v) and not isinstance(v, int):\n raise ValueError(\n \"The PDS only allows integers and symbols \"\n f\"in sets: {values}\"\n )\n\n return super().encode_set(values)", "title": "" }, { "docid": "d7b1dade12f3d99fd76a2ad927151e22", "score": "0.5640344", "text": "def test_set_empty():\n ht = HT()\n assert ht.set('hello', 'world') == {'hello': 'world'}", "title": "" }, { "docid": "da3d4dd0c33b3c7325cc219bee45379d", "score": "0.5638774", "text": "def set_sets(self, setnames):\n\n if isinstance(setnames, str):\n setnames = [setnames]\n\n if isinstance(setnames, float) and pd.isna(setnames):\n self.sets = None\n else:\n self.sets = setnames", "title": "" }, { "docid": "f924699ea1539ee8f4f1b9b0a7598983", "score": "0.56314725", "text": "def TFunction_GraphNode_Set(*args):\n return _TFunction.TFunction_GraphNode_Set(*args)", "title": "" }, { "docid": "1977706a0ba4f78bfc989c0606afab47", "score": "0.5625379", "text": "def setValueTest_SetValidValues_1 (self):\r\n self.atc.setValue(( [\"e1\", \"e2\"], 1 ))", "title": "" }, { "docid": "40bc1a6a880bc9a734bed5503db52013", "score": "0.56204087", "text": "def setValueTest_SetInValidValues_2 (self):\r\n self.assertRaises (ATOM3BadAssignmentValue, self.atc.setValue, ( [\"e1\", \"e2\"], 2 ))", "title": "" }, { "docid": "ad126290cf887bc2d3dea05a8c4cc1ae", 
"score": "0.5615595", "text": "def setify(gen):\n @wraps(gen)\n def patched(*args, **kwargs):\n return set(gen(*args, **kwargs))\n return patched", "title": "" }, { "docid": "c6982d5bc63db03cbb2a4b81cca55399", "score": "0.5607882", "text": "def set(self, id, value):", "title": "" }, { "docid": "3e96424454a167a69d847f1f052602c6", "score": "0.5588573", "text": "def __set__(self, obj, value):\r\n pass", "title": "" }, { "docid": "2eee722f7cc8a8600f8981cc0f910d17", "score": "0.5571473", "text": "def set(self, *args, **kwargs):\n return self.client.set(*args, **kwargs)", "title": "" }, { "docid": "2c872aa4ef63a8376fa74a975caf3b66", "score": "0.55704045", "text": "def _is_set(self, var):\n # The try/except is necessary to fix spyder-ide/spyder#19516.\n try:\n return isinstance(var, set)\n except Exception:\n return False", "title": "" }, { "docid": "993616bab48a0acdb591668d65cabfaa", "score": "0.5569747", "text": "def sets(self):\n raise NotImplementedError", "title": "" }, { "docid": "513021773ba029fb341a3a32e7134d17", "score": "0.5568167", "text": "def setter(self, fset):\n\n # TODO: reorder inheritance - this is stupid!\n raise NotImplementedError()", "title": "" }, { "docid": "513021773ba029fb341a3a32e7134d17", "score": "0.5568167", "text": "def setter(self, fset):\n\n # TODO: reorder inheritance - this is stupid!\n raise NotImplementedError()", "title": "" }, { "docid": "59c0c825451a177112b0d6206a2b8508", "score": "0.5556044", "text": "def set(self, separator=None, **set_dict):\n self.modify(separator=separator, append_if_not_present=True, **set_dict)", "title": "" }, { "docid": "26447714b43eff70842cdd9989f65cac", "score": "0.55527943", "text": "def SetMembers(self, key):\n if key in self.val_dict:\n if type(self.val_dict[key]) != set:\n return Operation(success=False)\n\n val = self.val_dict[key]\n\n else:\n val = set([])\n\n return Operation(success=True, response_value=val)", "title": "" }, { "docid": "a2116b4a280923140c5b5e1880ed6972", "score": "0.5549163", "text": "def SetRendering(self, set: int) -> None:\n ...", "title": "" }, { "docid": "d704cf81dfd4b008dcc2edf2861024e9", "score": "0.5546437", "text": "def setValueTest_SetInValidValues_5 (self):\r\n self.assertRaises (ATOM3BadAssignmentValue, self.atc.setValue, ( \"e1\", 1 ))", "title": "" }, { "docid": "ef36253045ef5dc33403621a30a8569c", "score": "0.55366886", "text": "def test_set_into_existing(hash_test):\n assert hash_test.set('sup', 'bro') == {'sup': 'bro'}", "title": "" }, { "docid": "5c4d3ad159d19f5bc6e660839c39499f", "score": "0.55225056", "text": "def get_datatype(self):\n return \"SET\"", "title": "" }, { "docid": "4d89738be754b70e98c9085b5960ece5", "score": "0.5521528", "text": "def test_051_tuple_set(self):\n \n ts1 = tuple_set( ( 'a', 'b') )\n ts2 = tuple_set( ( 'b', 'a') )\n \n # order matters when comparing tuple_sets\n self.assertReallyNotEqual(ts1, ts2)\n \n \n ts1 = tuple_set( ['a', 'b'] )\n \n self.assertReallyEqual( ts1 , ('a', 'b' ) ) # a tuple_set (used internally for heading) is equal to a tuple.\n\n self.assertNotEqual( ts1 , ['a', 'b'] ,\"tuple_set is not list.\")\n\n ts2 = tuple_set( { 'a':1 , 'b':2 } )\n\n self.assertEqual( ts2 ,tuple_set( ('a', 'b') ) , \"tuple_set is tuple not dictionary.\")", "title": "" }, { "docid": "ff8fd33fa58484ef87bb60f0d8bcb91a", "score": "0.55049753", "text": "def set(self, x):\n print(\"set class D\")\n pass", "title": "" }, { "docid": "c6826614571a1593322d9b852472c8e3", "score": "0.5504579", "text": "def set(key, val, time=0, namespace=None):\n # Note that both key and value 
are encoded before insertion.\n return set_many({key: val}, time, namespace)", "title": "" }, { "docid": "fbb9af290c23719849891bfe7bae170d", "score": "0.5499461", "text": "def must_have_set(self) -> Set[str]:\n return self.must_have.native", "title": "" }, { "docid": "63349258daa0aee355f4f7ae63bae6b5", "score": "0.5498017", "text": "def test_assertIn(self):\n self.assertIn(set([1, 'a', 'b']), 'a')", "title": "" }, { "docid": "ac04e9f5d2c6d7ad9daa65f1bcf83395", "score": "0.54958427", "text": "def __set__(self, instance, value):\n pass", "title": "" }, { "docid": "0e48d69e0554bb01321a48fbe6cf61f6", "score": "0.54877174", "text": "def test_set_with_similar_values(\n assert_errors,\n parse_ast_tree,\n code,\n first,\n second,\n default_options,\n):\n tree = parse_ast_tree(code.format(first, second))\n\n visitor = WrongCollectionVisitor(default_options, tree=tree)\n visitor.run()\n\n assert_errors(visitor, [NonUniqueItemsInHashViolation])", "title": "" }, { "docid": "e20aac15e2d81ae05966cfb561e1c496", "score": "0.548757", "text": "def field_set():", "title": "" }, { "docid": "00054e34b78ee0615632d8b22463e4f0", "score": "0.54788005", "text": "def test_set_resource_set(self):\n pass", "title": "" }, { "docid": "da533a12c241586414e7aee511c9bad0", "score": "0.5473795", "text": "def raw_set(self, sec, key, val):\n return ConfigParser.set(self, sec, key, val)", "title": "" }, { "docid": "77409f8eb5cad5265dfc03241381c7d7", "score": "0.54696107", "text": "def set(categories):", "title": "" }, { "docid": "a875afb2ae03b52b76a842582d8e1a43", "score": "0.5466394", "text": "def set_int(self, parm, set):\n self._set_int(parm.encode(), set)", "title": "" }, { "docid": "90609bf4cb3360d1c91cc406f832a6e6", "score": "0.54589593", "text": "def set(self, value):\n try:\n values = iter(value)\n except TypeError:\n values = iter([value])\n for value in values:\n pass", "title": "" }, { "docid": "f0259e589a8fdfe5325b784604462f17", "score": "0.5453235", "text": "def set(self, *args, **kwargs):\n if len(args) == 2:\n return self._update(**{args[0]: args[1]})\n else:\n return self._update(**kwargs)", "title": "" }, { "docid": "309ccae4dcbc318a0d65f11d6d3afce0", "score": "0.54450244", "text": "def buildSet(args=None):\n if isinstance(args, str):\n return set([args])\n if args is None:\n return set()\n return set(args)", "title": "" }, { "docid": "f28e159b4459de5eb20bbdf995a554b8", "score": "0.5437599", "text": "def test_pset_field_checked_set():\n class Record(PRecord):\n value = pset_field(int)\n record = Record(value=[1, 2])\n with pytest.raises(TypeError):\n record.value.add(\"hello\") # type: ignore", "title": "" }, { "docid": "c65cad7ecf185933de0326288f372785", "score": "0.5435832", "text": "def setValueTest_SetInValidValues_3 (self):\r\n self.assertRaises (ATOM3BadAssignmentValue, self.atc.setValue, ( [\"e1\", \"e2\"], -2 ))", "title": "" }, { "docid": "38b118e30460043cfc6046b3aa222ded", "score": "0.54338396", "text": "def visit_set(self, node):\r\n return '{%s}' % ', '.join([child.accept(self) for child in node.elts])", "title": "" }, { "docid": "9d322d439f3caf644814ba65e5d3824f", "score": "0.54306877", "text": "def _set_repn(self, other):\n if isinstance(other, SimpleSet):\n return other\n if isinstance(other, OrderedSimpleSet):\n return other\n return SetOf(other)", "title": "" }, { "docid": "47864870248ec287b3c7f28ed5dc1af5", "score": "0.5426855", "text": "def set(namespace, name):", "title": "" } ]
eb6bfd65ff86d411b9e914fa5d93df5a
Get movie showtimes for a cinema. Given the cinema name as a lowercase string (e.g. arena, delft, rembrandt), gets all the current movie showtimes for this cinema. Returns a list of two-value tuples representing the showtime date and time as a `datetime.datetime` object and the showtime technology.
[ { "docid": "6d3e5a53c3c54afbc0c3db2b8065824f", "score": "0.7254321", "text": "def get_movie_showtimes_for_cinema(movie_page_doc, cinema):\n cinema_movie_showtimes = []\n showtime_days = movie_page_doc.xpath(SHOWTIME_DAYS.format(cinema))\n for showtime_day in showtime_days:\n day_name = showtime_day.xpath(SHOWTIME_DAY)\n\n # Skip elements that are not day names (i.e. that contain only whitespace)\n if len(day_name) > 1:\n continue\n\n day_name = day_name[0]\n showtime_date = normalize_date(day_name)\n\n showtimes = showtime_day.xpath(SHOWTIMES)\n for showtime in showtimes:\n showtime_url = showtime.xpath(SHOWTIME_URL)\n # We don't need the showtime that has already been sold out\n if showtime_url == '#modal-soldout':\n continue\n if 'javascript:openPopup' in showtime_url:\n showtime_id = re.match(\n \"^javascript:openPopup\\('https://onlinetickets.pathe.nl/ticketweb.php?\"\n \".*&ShowID=(\\d+)&.*'\\)$\",\n showtime_url\n ).group(1)\n else:\n showtime_id = re.match('^/tickets/start/(\\d+)$', showtime_url).group(1)\n showtime_id = int(showtime_id)\n\n showtime_items = showtime.xpath(SHOWTIME_ITEMS)\n showtime_items = [''.join(showtime_item.split()) for showtime_item in showtime_items]\n\n # Pathe does not explicitly indicate 2D technology so append it\n if len(showtime_items) < 2:\n showtime_items.append('2D')\n\n # TODO: Handle date for Nacht22op23mei correctly\n # Some shows have additional info, e.g. 'Nacht22op23mei', 'Grotezaal'. We don't need it\n # so remove it and instead replace it with a technology\n if showtime_items[1] not in PATHE_TECHNOLOGIES:\n showtime_items[1] = '2D'\n\n showtime_time_match = re.match('^(\\d{2}:\\d{2})(.*)$', showtime_items[0])\n showtime_time_groups = showtime_time_match.groups()\n if showtime_time_groups[1]:\n # TODO: Log this and send a rollbar event\n print(\"Something special about this showtime: {0}\".format(showtime_time_groups[1]))\n\n showtime_time = datetime.datetime.strptime(showtime_time_groups[0], '%H:%M').time()\n showtime_datetime = datetime.datetime.combine(showtime_date, showtime_time)\n showtime_datetime = datetime_to_utc(showtime_datetime)\n cinema_movie_showtimes.append((showtime_id, showtime_datetime, showtime_items[1]))\n\n return cinema_movie_showtimes", "title": "" } ]
[ { "docid": "274542cf0da3b9dc0d4ffa7f6c2d9c6d", "score": "0.63900685", "text": "def getShowList(self):\n showXML = self.sendRequestAndReadData(\"core/getShows\")\n showNames = [] \n if showXML:\n root = ET.fromstring(showXML)\n shows = root.findall('Show')\n for show in shows:\n showNames+=[show.get('name')]\n\n return showNames", "title": "" }, { "docid": "0a37170b0bee5350827608d60b301af2", "score": "0.6204213", "text": "def get_shows():\n\n # connect to the database\n db = connect_minerva_db()\n\n # return all the shows as a list\n shows = list(db.shows.find())\n return shows", "title": "" }, { "docid": "7eaf6a0c6d7e76068c93ab454473fe28", "score": "0.6015793", "text": "def showtimes_list():\n showtimes = Showtime.query.all()\n serialized_objects = showtimes_schema.dumps(\n showtimes, sort_keys=True, indent=4)\n\n return Response(\n response=serialized_objects,\n status=http_status.OK,\n mimetype=\"application/json\"\n )", "title": "" }, { "docid": "fcde8c87bd7937892793fc0780268ffe", "score": "0.59471107", "text": "def getShows(self):\n return list(self.__shows.values())", "title": "" }, { "docid": "a70a264794be2483c1c5b88a3c5afe17", "score": "0.59260625", "text": "def __extract_shows(self, content):\n\n bs = BeautifulSoup(content, \"html.parser\")\n shows = []\n lines = bs.find_all('div', class_='clearfix p-v-md bgc-white bb-grey-3')\n for line in lines:\n channel, prime_time, late_show = line.find_all('div', recursive=False)\n channel_name = self.__get_channel_name(channel)\n show_name = self.__get_show_name(prime_time)\n show_start = self.__get_show_start(prime_time)\n rating = self.__get_show_rating(prime_time)\n prog_type = self.__get_show_type(prime_time)\n length = self.__get_show_length(prime_time)\n\n shows.append({\n 'name': show_name,\n 'channel': channel_name,\n 'start': show_start,\n 'rating': rating,\n 'type': prog_type,\n 'length': length\n })\n \n return shows", "title": "" }, { "docid": "df6c772fc14cafafd4bba8b63ccd7010", "score": "0.59105915", "text": "def getShowsList():\n shows_list = []\n try:\n with open('./data/TVshowList.csv', 'r') as f:\n reader = csv.reader(f)\n for row in reader:\n shows_list.append(row[0])\n except IOError as error:\n logger.Error(\"Error in getShowsList function: \", str(error))\n\n return shows_list", "title": "" }, { "docid": "7488bac2bf3b316490f0df1b115c5078", "score": "0.58857316", "text": "def getShowtimes(self):\n return self.__showtimes", "title": "" }, { "docid": "c1b46c678d3fb6cb6edd69de8280a475", "score": "0.58583456", "text": "def getShowNames(self):\n return list(self.__shows.keys())", "title": "" }, { "docid": "d163aeb0bd7151ce4be5165aab3fc0ad", "score": "0.577293", "text": "def get_moods(self):\n movie_details = self.movie_page.find(id='mdp-details')\n moods = movie_details.find('dt', string=re.compile(\"^T\"))\n try:\n moods = moods.parent.dd.get_text().split(',')\n moods = [i.strip() for i in moods]\n except:\n logging.info('No moods found for this movie/show.')\n return moods", "title": "" }, { "docid": "1c1e44405c57141c71756f3ca9525bf0", "score": "0.5760418", "text": "def get_shows(self):\n # Check the cache.\n if self.shows_list is not None:\n return self.shows_list\n\n javascript_source = self._get_shows_dropdown_javascript()\n # Search for the first assignment of a list of things. 
We take that as the shows list.\n shows_list_match = re.search(r'=\\s*(\\[.+?\\])', javascript_source)\n if not shows_list_match:\n raise RuntimeError(\"Cannot find shows list in Javascript source\")\n\n parsed_shows = json.loads(shows_list_match.group(1))\n\n # The shows that we have parsed.\n self.shows_list = {}\n\n for parsed_show in parsed_shows:\n original_name = parsed_show['text']\n try:\n show_id = int(parsed_show['id'])\n except ValueError:\n # Skip this show and move on to the next one.\n continue\n\n # EZTV have a neat trick where they place the \"The \" of a show at\n # the end of the string, but also append the year. So \"The Big\n # Bang Theory\" will become \"Big Bang Theory, The (2007)\". We put\n # it at the beginning in order to normalize it to make it more\n # intuitive to lookup against.\n match = re.search(r'(.+?), (The)\\s*(\\(\\d+\\))?', original_name)\n if match:\n # Only keep the truthy name parts.\n name_parts = filter(None, [match.group(2), match.group(1), match.group(3)])\n normalized_name = ' '.join(name_parts)\n else:\n normalized_name = original_name\n\n self.shows_list[show_id] = normalized_name\n\n return self.shows_list", "title": "" }, { "docid": "20e168a34d9c5686994ee4f145194da1", "score": "0.5758185", "text": "def get_movies() -> List[str]:\n tree = get_tree()\n return [movie.get(key='title').strip() for movie in tree.findall('movie')]", "title": "" }, { "docid": "70519bf42107eb15e2e0e6a08a2f6274", "score": "0.5742863", "text": "def xtractor_list():\n schedule_list_url = 'https://animeschedule.net/'\n list_resp = requests.get(schedule_list_url)\n raw_soup = BeautifulSoup(list_resp.text, 'html.parser')\n days_soup = raw_soup.find_all('div', id='timetable')[0]\n days_soup = days_soup.find_all('div', class_='timetable-column')\n Show = namedtuple('Show', ['show_name', 'episode', 'time'])\n Weekday = namedtuple('Weekday', ['day', 'shows'])\n show_list = []\n for show_item in days_soup:\n day = show_item.find('h1', class_='timetable-column-day').get_text()\n div_soup = show_item.find_all('div', class_=['timetable-column-show', 'compact'])\n\n # Temporary storing the shows for each weekdays and then adding them to single day namedtuple\n temp_show_list = []\n for day_show in div_soup:\n episode = day_show.find('span', class_='show-episode').get_text(strip=True)\n\n # Stripping new lines and stuff from episode number.\n episode = (lambda ep: ' '.join(ep.split()))(episode)\n air_time = day_show.find('span', class_='show-air-time').get_text(strip=True)\n show_title = day_show.find('h2', class_='show-title-bar').get_text(strip=True)\n temp_show_list.append(Show(show_name=show_title, episode=episode, time=air_time))\n\n show_list.append(Weekday(day=day, shows=temp_show_list))\n\n # # TODO: TESTING, prints list schedule.\n # for i in show_list:\n # print(f'Day: {i.day}')\n # print('-------------------')\n # for j in i.shows:\n # print(f'{j}')\n # print('-------------------')\n return show_list", "title": "" }, { "docid": "d00a557616e3c28e3479f593e4d13ce5", "score": "0.5723744", "text": "def shows():\n shows = db.session.query(Show).all()\n result = []\n for show in shows:\n artist = show.artist\n venue = show.venue\n result.append({\n \"venue_id\": venue.id if venue else None,\n \"venue_name\": venue.name if venue else None,\n \"artist_id\": artist.id if artist else None,\n \"artist_name\": artist.name if artist else None,\n \"artist_image_link\": artist.image_link if artist else None,\n \"start_time\": str(show.start_time),\n })\n return 
render_template('pages/shows.html', shows=result)", "title": "" }, { "docid": "cfb3e92aae5f57a13a6e30ecf9623b35", "score": "0.5628923", "text": "def shows():\n\n\t\tshows = Show.query.all()\n\n\t\tresults = [{\n\t\t\"venue_id\": show.venue_id,\n\t\t\"venue_name\": Venue.query.get(show.venue_id).name,\n\t\t\"artist_id\": show.artist_id,\n\t\t\"artist_name\": Artist.query.get(show.artist_id).name,\n\t\t\"artist_image_link\": Artist.query.get(show.artist_id).image_link,\n\t\t\"start_time\": show.start_time,\n\t\t} for show in shows]\n\t\treturn render_template('pages/shows.html', shows=results)", "title": "" }, { "docid": "4f6c1bee5989443b058eac0d49d27952", "score": "0.56287754", "text": "def shows():\n # displays list of shows at /shows\n # TODO: replace with real venues data.\n # num_shows should be aggregated based on number of upcoming shows per venue.\n data = []\n shows_Query = Show.query.order_by('start_time').all()\n for show in shows_Query:\n data.append({\n \"venue_id\": show.venue_id,\n \"venue_name\": show.venue.name,\n \"artist_id\": show.artist_id,\n \"artist_name\": show.artist.name,\n \"artist_image_link\": show.artist.image_link,\n \"start_time\": show.start_time.isoformat()\n })\n return render_template('pages/shows.html', shows=data)", "title": "" }, { "docid": "d4d5a83328c0271c81077f1bea072e10", "score": "0.557879", "text": "def get_shows(self):\n return self.all()", "title": "" }, { "docid": "61eb4959bfa55049d3e723c44dec9880", "score": "0.5552825", "text": "def shows():\n\n data = []\n all_shows = Show.query.all()\n\n for show in all_shows:\n venue = Venue.query.get(show.venue_id)\n artist = Artist.query.get(show.artist_id)\n data.append(\n {\n \"venue_id\": show.venue_id,\n \"venue_name\": venue.name,\n \"artist_id\": show.artist_id,\n \"artist_name\": artist.name,\n \"artist_image_link\": artist.image_link,\n \"start_time\": format_datetime(str(show.start_time)),\n }\n )\n return render_template(\"pages/shows.html\", shows=data)", "title": "" }, { "docid": "a424a738bc0278e9c2db886001cec85d", "score": "0.5551029", "text": "def get_shows(self):\n r = requests.get(self.base_url + '/series')\n soup = BeautifulSoup(r.text, 'html.parser')\n all_series = soup.find('section',\n {'data-context': 'promo group:All Showtime Series'})\n\n self.shows = []\n anchors = all_series.find_all('a', {'class': 'promo__link'})\n for a in anchors:\n title = a.text.strip()\n link = self.base_url + a['href']\n self.shows += [{'title': title, 'link': link}]", "title": "" }, { "docid": "45fb33d7c6587f4b4bea5f37b1c95581", "score": "0.55113804", "text": "def get_ten_shows():\n\n # connect to the database\n db = connect_minerva_db()\n\n # return all the shows as a list\n ten_shows = list(db.shows.find().limit(10))\n return ten_shows", "title": "" }, { "docid": "653c35daf14af1b5b61413d4be9051d0", "score": "0.55047405", "text": "def all_shows(request):\n\n # returns just upcoming dates\n shows = Show.objects.all()\n\n context = {\n 'shows': shows\n }\n\n template = 'shows/shows.html'\n\n return render(request, template, context)", "title": "" }, { "docid": "4b874c54f1c2c1ad98609feeb93fb94f", "score": "0.54851276", "text": "def event_showtimes(request, event_id):\n\n event = get_object_or_404(Event, pk=event_id)\n showtimes = Showtime.objects.select_related().filter(event=event_id).order_by('date')\n\n context = {\n 'showtimes': showtimes,\n 'event': event,\n }\n\n return render(request, 'events/showtimes.html', context)", "title": "" }, { "docid": "c972434dafe2c336596125897d481d30", "score": 
"0.5484561", "text": "def getShows():\n try:\n \tlog(\"DEBUG\", \"Running getShows() function.\")\n \tdata = requests.get(url).json()\n \tshows = {}\n\n \tfor show in data[\"payload\"]:\n showID = data[\"payload\"][show][\"show_id\"]\n showTitle = data[\"payload\"][show][\"title\"]\n if showID != 13031:\n shows[showID] = showTitle\n\n \treturn shows\n except IOError as e:\n \tlog(\"API\",\"Could not acess API.\", str(e))\n \tsys.exit(0)", "title": "" }, { "docid": "d30be8b07761eb314d5acd567b6184d6", "score": "0.54436314", "text": "def tv_schedule():\n logger.info('Incoming request %r' % request)\n \n\n ptv = ProgrammeTv()\n tv_shows = ptv.get_shows()\n \n logger.info('Returning %s' % tv_shows)\n return tv_shows", "title": "" }, { "docid": "8d366dab71afe12a2c5371bf191ae871", "score": "0.5391066", "text": "def get_all_movies(cls):\n return list(map(cls.format_movie, db_session.query(Movie).all()))", "title": "" }, { "docid": "d81eca15dce1dbf50e050ada2eeb5710", "score": "0.5387686", "text": "def movies(channel_name=None, date=None, channel_id=None, movie_title='',\n start_time=None, end_time=None, on_air=False):\n if on_air:\n start_time = end_time = datetime.today()\n\n if channel_name:\n channel_name = channel_name.decode('utf-8')\n if movie_title:\n movie_title = movie_title.decode('utf-8')\n if channel_id:\n channel_id = int(channel_id)\n\n if date is None or date == 'today':\n date = datetime.today()\n else:\n date = datetime.strptime(date, '%Y-%m-%d')\n result = search(\n date,\n channel_name=channel_name, channel_id=channel_id,\n movie_title=movie_title, start_time=start_time,\n end_time=end_time\n )\n\n for index, channel in result.items():\n puts(colored.green(\"[%d] %s\" % (index, channel['name'])))\n\n for movie in channel['movies']:\n with indent(4):\n puts(colored.white(\n '%s - %s %s' % (\n movie['start_time'],\n movie['end_time'],\n movie['title'],\n )))", "title": "" }, { "docid": "ea9f0c3e6a641cc61a90c34b1cd95f7e", "score": "0.53699577", "text": "def get_films(self, showid, filmui):\n if self.conn is None:\n return\n if showid.find(',') == -1:\n # only one channel id\n return self._search_condition(\n condition='( `showid` = %s )',\n params=(int(showid), ),\n filmui=filmui,\n showshows=False,\n showchannels=False,\n maxresults=self.settings.maxresults,\n order='film.aired desc'\n )\n # multiple channel ids\n return self._search_condition(\n condition='( `showid` IN ( {} ) )'.format(showid),\n params=(),\n filmui=filmui,\n showshows=False,\n showchannels=True,\n maxresults=self.settings.maxresults,\n order='film.aired desc'\n )", "title": "" }, { "docid": "1ac56bcb98f7f6191f0a6fefae0b527d", "score": "0.53447974", "text": "def get_tv_show_information(self,name,unravel = False):\n ia = imdb.IMDb()\n content_search = ia.search_movie(name)\n content_type = content_search[0].data.get('kind')\n if (content_type == 'tv series') & unravel:\n return self.get_series(content_search[0].movieID)\n else:\n return self.get_individual(content_search[0].movieID)", "title": "" }, { "docid": "da45a2f343445e91bd3a5821ee452eff", "score": "0.5337343", "text": "def get_all_movies(self):\n return self.val.get_movies()", "title": "" }, { "docid": "2eef2204745eed38573ba6ffc7038bc1", "score": "0.53161335", "text": "def allShows(self):\n shows = config.readJson().keys()\n return shows", "title": "" }, { "docid": "79edae298d8e0a9bde97820cf36c2467", "score": "0.5283835", "text": "def get_shows(self):\n logging.debug(\"Starting get_shows()\")\n self.showlist_url = self.root_url + \"/shows\"\n 
logging.info(f\"Showlist URL is {self.showlist_url}\")\n\n # Opens the connection, grabs the page\n response = requests.get(self.showlist_url)\n showlist_html = response.text\n\n # HTML parsing\n showlist_soup = soup(showlist_html, \"html.parser\")\n\n # Grabs both active and retired shows\n shows = showlist_soup.findAll(\"h3\", {\"class\": \"broadcast__name\"})\n\n for show in shows:\n logging.debug(f\"Processing show: {show}\")\n try:\n Dictionary = {}\n \n if show.a:\n Dictionary['title'] = show.a.text\n Dictionary['url'] = show.a[\"href\"]\n self.show_catalog.append(Dictionary)\n except TypeError: # This was being thrown by Master Feed since there's no URL to show\n logging.error(f\"TypeError occured on show: {show}\")\n pass\n except AttributeError:\n logging.error(f\"AttributeError occured on show: {show}\")\n pass\n\n logging.debug(\"Returning show_catalog\")\n return self.show_catalog", "title": "" }, { "docid": "ad8ce00959dac2a52081a633bb797479", "score": "0.527317", "text": "def parse_movies(self):\n soup = Soup(urllib.urlopen(self.moviePage))\n movies = []\n for link in soup.find_all(\"a\"):\n time = link.previous_sibling\n url = link.get('href')\n title = link.get('title')\n if not \"/peliculas/\" in url: continue\n if int(time[:2]) < self.START_TIME: continue\n if time[:2] == self.END_TIME: break\n (longTitle, verboseInfo) = self.get_movie_verbose_info(title, url)\n movies.append({ 'time': time[0:6], \n 'title':longTitle.encode(encoding='UTF-8',errors='strict'), \n 'url': url.encode(encoding='UTF-8',errors='strict'), \n 'info': verboseInfo.encode(encoding='UTF-8',errors='strict'), \n })\n return movies", "title": "" }, { "docid": "a922f9099d5ee15cb2ad96d6454479f2", "score": "0.52666247", "text": "def tvshows_list(self, request):\n header = os.environ.get('HTTP_AUTHORIZATION', '')\n token = header.split(' ')[-1]\n payload = JWTHelper.validate_jwt_token(token)\n query = TvShow.query_user(payload['email'])\n if request.order == ListRequest.Order.NAME:\n query = query.order(TvShow.name)\n elif request.order == ListRequest.Order.RATE:\n query = query.order(-TvShow.rate)\n elif request.order == ListRequest.Order.DATE:\n query = query.order(-TvShow.date)\n items = [entity.to_message() for entity in query.fetch(request.limit)]\n return ListResponse(items=items)", "title": "" }, { "docid": "f48e189fd8d7a86ebcb864e4723ea404", "score": "0.52654886", "text": "def timelisting(self):\n\n curtimes = self.times\n timestrs = []\n for (i,j) in curtimes:\n curlist = [datetime.utcfromtimestamp(i).__str__(),datetime.utcfromtimestamp(j).__str__()]\n timestrs.append(curlist)\n\n return timestrs", "title": "" }, { "docid": "4c05d51339678b0c9d2983ba167cada3", "score": "0.52631366", "text": "def show_all_movies():\n\n movies = Movie.query.all()\n\n return movies", "title": "" }, { "docid": "738f6db39d58c52f88e6e27b87ca0373", "score": "0.5261529", "text": "def get_all_movies(self):\n\t\ti = 1\n\t\tmovies = []\n\t\ttry:\n\t\t\twhile True:\n\t\t\t\tLOG.info(\"In get all movies\")\n\t\t\t\tele = list(Imdb.EVERY_MOVIE_NAME)\n\t\t\t\tele[1] %= i\n\t\t\t\tmovie_name = self.label.get_text(tuple(ele))\n\t\t\t\tif len(movie_name) == 0:\n\t\t\t\t\tbreak\n\t\t\t\tmovies.append(movie_name)\n\t\t\t\ti += 1\n\t\texcept Exception as e:\n\t\t\tLOG.info(\"Found all movies %s\" % str(e))\n\t\treturn movies", "title": "" }, { "docid": "7d4a930a349aba63ddc436ea146a2f03", "score": "0.5247531", "text": "def movie_list(self, page):\n response = fetch_movie_list(page)\n return response", "title": "" }, { "docid": 
"e3771556b64feba5a0ea5965c44e44b6", "score": "0.5203493", "text": "def show_all_movies():\n movies_map = get_response(api_url_base)\n\n title_list = sorted(list(movies_map.keys()))\n\n print(\"\\nAll Movies List:\\n----------------------------\\n\")\n for index, title in enumerate(title_list):\n movie = movies_map[title][0]\n print(\" Index: {0}\\n Title: {1}\\n Year: {2}\\n Type: {3}\\n Poster: {4}\"\n .format(index + 1, movie['Title'], movie['Year'], movie['Type'], movie['Poster']))\n print(\"\\n-------------------------------------------------------------\\n\")", "title": "" }, { "docid": "c9ccff7658f6b5e3c7008bd29f1bbb9e", "score": "0.5199907", "text": "def get_movies(self):\n\t\t# print('get_movies')\n\t\troot = self.get_tree()\n\t\t# print(root)\n\n\t\tr = []\n\t\tfor c in root.iter(tag='movie'):\n\t\t\tyield movie.attrib['title']", "title": "" }, { "docid": "3f7d8177172bfeed8a429a186683937d", "score": "0.51872987", "text": "def search_shows():\n search_term = request.form.get('search_term', '')\n shows = db.session.query(Show).filter((Show.artist.has(Artist.name.ilike('%{}%'.format(search_term)))) |\n (Show.venue.has(Venue.name.ilike('%{}%'.format(search_term))))).all()\n result = []\n for show in shows:\n artist = show.artist\n venue = show.venue\n result.append({\n \"venue_id\": venue.id if venue else None,\n \"venue_name\": venue.name if venue else None,\n \"artist_id\": artist.id if artist else None,\n \"artist_name\": artist.name if artist else None,\n \"artist_image_link\": artist.image_link if artist else None,\n \"start_time\": str(show.start_time),\n })\n return render_template('pages/search_shows.html', shows=result, count=len(result), search_term=search_term)", "title": "" }, { "docid": "97c04bfd518d8d805e60e0e52d5b3803", "score": "0.5174582", "text": "def timelist(self) -> List[str]:\n return self._timelist", "title": "" }, { "docid": "89b9a09ea4a849a606526d8bd4e78197", "score": "0.5173992", "text": "def get_movies_schedule(html_page):\n movies_list = []\n tree = lxml.html.fromstring(html_page)\n\n movies_node = tree.xpath('//article[@class=\"item item-scheda item-scheda-film cf option-view-list\"]') # Get movies\n for movie_node in movies_node:\n title = movie_node.xpath('.//a[contains(@href, \"http://www.filmtv.it/film/\")]/text()')[0].strip().encode(\n 'utf-8') # Get title\n movie_url = movie_node.xpath('.//a[contains(@href, \"http://www.filmtv.it/film/\")]/@href')[0] # Get FilmTV url\n original_title = movie_node.xpath('.//p[@class=\"titolo-originale\"]/text()') # Get original title\n year = movie_node.xpath('.//ul[@class=\"info cf\"]/li/time/text()')[0] # Get year\n if len(original_title) == 0:\n original_title = None\n else:\n original_title = original_title[0].strip().encode('utf-8')\n channel = movie_node.xpath('.//h3[@class=\"media tv\"]/text()')[0].strip().encode('utf-8') # Get channel\n time = movie_node.xpath('.//time[@class=\"data\"]/text()')[0][2:].strip() # Get time\n director = movie_node.xpath('.//p[@class=\"regia\"]/text()')[0].strip().encode('utf-8') # Get director\n genre = movie_node.xpath('.//ul[@class=\"info cf\"]/li/text()')[0].encode('utf-8') # Get genre\n cast = movie_node.xpath('.//p[@class=\"cast\"]/text()')\n if len(cast) == 0:\n cast = None\n else:\n cast = movie_node.xpath('.//p[@class=\"cast\"]/text()')[0].strip().encode('utf-8') # Get cast\n if channel != \"Rsi La1\" and channel != \"Rsi La2\": # Remove the swiss channels\n value = {\"title\": title, \"originalTitle\": original_title, \"channel\": channel, \"time\": time,\n \"movieUrl\": 
movie_url, \"year\": year, \"director\": director, \"genres\": genre, \"cast\": cast}\n movies_list.append(value) if value not in movies_list else logging.info(\"Movie already in schedule\")\n # Control of doubles in schedule\n\n return movies_list", "title": "" }, { "docid": "8eeb1c502785a8d3179d294b53fc8e6b", "score": "0.51349384", "text": "def fetch_all_anime_names(self) -> list[str]:\r\n return list(self._anime_name_map.keys())", "title": "" }, { "docid": "38121109cf0909d8018d12812c867ca7", "score": "0.5109766", "text": "def extract_shabbos_times():\n with HebCalSession() as session:\n dataset = session.get_shabbos_times().json()[\"items\"]\n return [d for d in dataset if d.get(\"category\") in [\"havdalah\", \"candles\",\"parashat\"]]", "title": "" }, { "docid": "ea88f53e016ca19cf662302438ef4569", "score": "0.51090926", "text": "def show_movies_list():\n\n movies = Movie.query.order_by(Movie.title).order_by(Movie.released_at).all()\n\n return render_template(\"movie_list.html\",\n movies=movies)", "title": "" }, { "docid": "8dbf61d8284caaea08272e02950f3722", "score": "0.51035184", "text": "def get_episodes(self, show_name):\n shows = self.get_shows()\n\n if show_name in shows.values():\n match = show_name\n else:\n # Find the best match that we can using the input as a prefix.\n match = filter(lambda candidate: candidate.lower().startswith(show_name.lower()), shows.values())\n\n # If there is more than one match\n if len(match) > 1:\n raise Exception(\"More than one partial match for \" + show_name)\n\n match = match[0]\n\n if not match:\n raise KeyError(\"Show not found\")\n\n (show_id, ) = [k for k, v in shows.iteritems() if v == match]\n\n return self.get_episodes_by_id(show_id)", "title": "" }, { "docid": "a725cb8a1241708f979c032e5b06073b", "score": "0.509456", "text": "def get_at(self, thetime):\n thetime = utc.normalize(thetime.astimezone(utc))\n shows = sorted(self, lambda a, b: cmp(a.time, b.time))\n \n if thetime < shows[0].time:\n raise KeyError(\"%s is before the first show\" % thetime)\n\n lastshow = shows[0]\n for show in shows:\n if show.time > thetime:\n return lastshow\n lastshow = show\n return lastshow", "title": "" }, { "docid": "f2a58e58f319b21366960f56cb332653", "score": "0.50904524", "text": "def _get_mass_spectrometers(self):\n db = self.get_database()\n cp = os.path.join(paths.setup_dir, \"names\")\n if db:\n if not db.connect():\n self.warning(\"not connected to database\")\n return []\n with db.session_ctx(use_parent_session=False):\n ms = db.get_mass_spectrometer_names()\n names = [mi.capitalize() for mi in ms]\n elif os.path.isfile(cp):\n names = self._get_names_from_config(cp, \"Mass Spectrometers\")\n else:\n names = [\"Jan\", \"Obama\"]\n\n return [\"Spectrometer\", LINE_STR] + names", "title": "" }, { "docid": "7afa1937f85ce35339dae89532212fbb", "score": "0.5074073", "text": "def list_times(day, mouse, cycle, master_tt_dic):\n time_list = []\n for time_temp_tuple in master_tt_dic[day][mouse][cycle]:\n time_list.append(hms_to_secs(time_temp_tuple[0]) / 3600.0)\n return time_list", "title": "" }, { "docid": "2d073680b5f0deda28b6e00d5503be91", "score": "0.5071795", "text": "def personalised_movies_list():\n\n deadpool = media.Movie(title='Deadpool', trailer_youtube_url='https://www.youtube.com/watch?v=gtTfd6tISfw',\n poster_image_url='https://s-media-cache-ak0.pinimg.com/originals/00/e5/93/00e593607694b56314a6e9bd6095986d.jpg') # noqa\n\n pursuit_of_happiness = media.Movie(title='Pursuit of Happiness',\n 
trailer_youtube_url='https://www.youtube.com/watch?v=89Kq8SDyvfg', # noqa\n poster_image_url='https://s-media-cache-ak0.pinimg.com/736x/91/94/bb/9194bbfac3481c4940c6c0c2afdbe16b--pursuit-of-happiness-the-pursuit-of-happyness.jpg') # noqa\n\n inception = media.Movie(title='Inception', trailer_youtube_url='https://www.youtube.com/watch?v=8hP9D6kZseM',\n poster_image_url='https://s.aolcdn.com/dims5/amp:33d46f2e8d0d23c2a537408cab362e84d00af849/r:360,540/q:70/?url=http%3A%2F%2Faolx.tmsimg.com%2Fmovieposters%2Fv8%2FAllPhotos%2F7825626%2Fp7825626_p_v8_af.jpg%3Fw%3D360') # noqa\n\n batman = media.Movie(title='Batman', trailer_youtube_url='https://www.youtube.com/watch?v=1T__uN5xmC0',\n poster_image_url='http://www.impulsegamer.com/articles/wp-content/uploads/2015/04/3001312_press01.jpg') # noqa\n\n wolf_of_wallstreet = media.Movie(title='Wolf of Wallstreet',\n trailer_youtube_url='https://www.youtube.com/watch?v=iszwuX1AK6A', # noqa\n poster_image_url='https://images-na.ssl-images-amazon.com/images/M/MV5BMjIxMjgxNTk0MF5BMl5BanBnXkFtZTgwNjIyOTg2MDE@._V1_UY1200_CR90,0,630,1200_AL_.jpg') # noqa\n\n movie_list = [deadpool, pursuit_of_happiness, inception, batman, wolf_of_wallstreet]\n\n return movie_list", "title": "" }, { "docid": "32c3f9b7a3c644cb2ee5b4fb9b4ec4b9", "score": "0.5034522", "text": "def filtered(self):\n return [m for m in self.movies if m['type'] == \"movie\"]", "title": "" }, { "docid": "4944700cef12cab0da77bc9e8e7a296c", "score": "0.5027359", "text": "def GetRecentShows(self, limit=5):\n self.logger.debug(\"Fetching recently added TV Shows\")\n try:\n kodi = Server(self.url('/jsonrpc', True))\n properties = ['showtitle', 'season', 'episode', 'title', 'runtime',\n 'thumbnail', 'plot', 'fanart', 'file']\n limits = {'start': 0, 'end': int(limit)}\n return kodi.VideoLibrary.GetRecentlyAddedEpisodes(properties=properties, limits=limits)\n except Exception as e:\n self.logger.exception(e)\n self.logger.error(\"Unable to fetch recently added TV Shows\")\n return", "title": "" }, { "docid": "9c8e37e5619a008051caed3d41c9cf62", "score": "0.5023061", "text": "def GetRecentShows(self, limit=5):\r\n try:\r\n plex_host = htpc.settings.get('plex_host', 'localhost')\r\n plex_port = htpc.settings.get('plex_port', '32400')\r\n episodes = []\r\n\r\n for section in self.JsonLoader(urlopen(Request('http://%s:%s/library/sections' % (plex_host, plex_port), headers=self.getHeaders())).read())[\"_children\"]:\r\n if section['type'] == \"show\":\r\n for episode in self.JsonLoader(urlopen(Request('http://%s:%s/library/sections/%s/all?type=4&sort=addedAt:desc&X-Plex-Container-Start=0&X-Plex-Container-Size=%s' % (plex_host, plex_port, section[\"key\"], limit), headers=self.getHeaders())).read())[\"_children\"]:\r\n jepisode = {}\r\n\r\n jepisode['label'] = \"%sx%s. 
%s\" % (episode[\"parentIndex\"], episode[\"index\"], episode[\"title\"])\r\n jepisode['id'] = int(episode[\"ratingKey\"])\r\n\r\n if 'summary'in episode:\r\n jepisode['plot'] = episode[\"summary\"]\r\n\r\n if 'index'in episode:\r\n jepisode['episode'] = episode[\"index\"]\r\n\r\n if 'parentIndex'in episode:\r\n jepisode['season'] = episode[\"parentIndex\"]\r\n\r\n if 'grandparentTitle'in episode:\r\n jepisode['showtitle'] = episode[\"grandparentTitle\"]\r\n\r\n if 'duration'in episode:\r\n jepisode['runtime'] = int(episode['duration']) / 60000\r\n\r\n if 'thumb'in episode:\r\n jepisode['fanart'] = episode[\"thumb\"]\r\n\r\n if 'addedAt'in episode:\r\n jepisode['addedAt'] = episode[\"addedAt\"]\r\n\r\n episodes.append(jepisode)\r\n\r\n\r\n return {'episodes': sorted(episodes, key=lambda k: k['addedAt'], reverse=True)[:int(limit)]}\r\n except Exception, e:\r\n self.logger.error(\"Unable to fetch episodes movies! Exception: \" + str(e))\r\n return", "title": "" }, { "docid": "826c33d5593887a91e0563609852e52c", "score": "0.50147414", "text": "def get_movies(self):\n return self.movies", "title": "" }, { "docid": "26fc48a88f25b4291080ba1a859058fe", "score": "0.50098664", "text": "def GetRecentShows(self, limit=5):\n self.logger.debug(\"Fetching recently added TV Shows\")\n try:\n xbmc = Server(self.url('/jsonrpc', True))\n properties = ['showtitle', 'season', 'episode', 'title', 'runtime',\n 'thumbnail', 'plot', 'fanart', 'file']\n limits = {'start': 0, 'end': int(limit)}\n return xbmc.VideoLibrary.GetRecentlyAddedEpisodes(properties=properties, limits=limits)\n except Exception, e:\n self.logger.exception(e)\n self.logger.error(\"Unable to fetch recently added TV Shows\")\n return", "title": "" }, { "docid": "b5f2d91a5d8b2f98f1e857ebcf9889c1", "score": "0.5003441", "text": "def GetShows(self, start=0, end=0, hidewatched=0):\r\n try:\r\n plex_host = htpc.settings.get('plex_host', '')\r\n plex_port = htpc.settings.get('plex_port', '32400')\r\n tvShows = []\r\n limits = {}\r\n\r\n if hidewatched == '1':\r\n hidewatched = \"unwatched\"\r\n else:\r\n hidewatched = \"all\"\r\n\r\n for section in self.JsonLoader(urlopen(Request('http://%s:%s/library/sections' % (plex_host, plex_port), headers=self.getHeaders())).read())[\"_children\"]:\r\n if section['type'] == \"show\":\r\n\r\n for tvShow in self.JsonLoader(urlopen(Request('http://%s:%s/library/sections/%s/%s' % (plex_host, plex_port, section['key'], hidewatched), headers=self.getHeaders())).read())[\"_children\"]:\r\n jshow = {}\r\n jshow['itemcount'] = 0\r\n jshow['playcount'] = 0\r\n\r\n jshow['title'] = tvShow[\"title\"]\r\n\r\n jshow['id'] = tvShow[\"ratingKey\"]\r\n\r\n if 'thumb'in tvShow:\r\n jshow['thumbnail'] = tvShow[\"thumb\"]\r\n\r\n if 'year'in tvShow:\r\n jshow['year'] = int(tvShow[\"year\"])\r\n\r\n if 'summary'in tvShow:\r\n jshow['plot'] = tvShow[\"summary\"]\r\n\r\n if 'viewedLeafCount'in tvShow:\r\n jshow['playcount'] = int(tvShow[\"viewedLeafCount\"])\r\n\r\n if 'leafCount'in tvShow:\r\n jshow['itemcount'] = int(tvShow[\"leafCount\"])\r\n\r\n tvShows.append(jshow)\r\n\r\n limits['start'] = int(start)\r\n limits['total'] = len(tvShows)\r\n limits['end'] = int(end)\r\n if int(end) >= len(tvShows):\r\n limits['end'] = len(tvShows)\r\n\r\n return {'limits': limits, 'tvShows': sorted(tvShows, key=lambda k: k['title'])[int(start):int(end)] }\r\n except Exception, e:\r\n\r\n self.logger.error(\"Unable to fetch all shows! 
Exception: \" + str(e))\r\n return", "title": "" }, { "docid": "78e4b7f515b89ceddd5ebe632b5387ec", "score": "0.50027233", "text": "def GetRecentShows(self, limit=5):\r\n self.logger.debug(\"Fetching recently added TV Shows\")\r\n try:\r\n xbmc = Server(self.url('/jsonrpc', True))\r\n properties = ['showtitle', 'season', 'episode', 'title', 'runtime',\r\n 'thumbnail', 'plot', 'fanart', 'file']\r\n limits = {'start': 0, 'end': int(limit)}\r\n return xbmc.VideoLibrary.GetRecentlyAddedEpisodes(properties=properties, limits=limits)\r\n except Exception, e:\r\n self.logger.exception(e)\r\n self.logger.error(\"Unable to fetch recently added TV Shows\")\r\n return", "title": "" }, { "docid": "87d440c466927d301908139f907d099c", "score": "0.500139", "text": "def scrape_and_write_medias(self):\n logging.info('SHOWTIME MOVIE SEARCH')\n self.get_movie_pages()\n self.get_movies()\n self.lookup_and_write_medias(medias=self.movies, mtype='movie')\n\n logging.info('SHOWTIME SHOW SEARCH')\n self.get_shows()\n self.lookup_and_write_medias(medias=self.shows, mtype='show')\n\n # remove any sources not just updated: media this provider no longer has\n flaskapp.remove_old_sources('showtime')", "title": "" }, { "docid": "5a11d7384b84c271513bb2e2de9b3a31", "score": "0.499455", "text": "def shows():\n shows = serialize_show(Show.query.all(), many=True)\n return render_template('pages/shows.html', shows=shows)", "title": "" }, { "docid": "ac9ad4db7d546988be7ae57ca92e571a", "score": "0.4946677", "text": "def getMoviesOneDay(url):\n\n response = requests.get(url)\n html = response.text\n\n listOfMovieNames = []\n\n soup = BeautifulSoup(html, 'html.parser')\n movietags = soup.select('td')\n\n i = 1\n\n while i < len(movietags):\n element = movietags[i]\n movieName = element.text.split()\n movieNameString = \"\"\n for element in movieName:\n movieNameString = movieNameString + \" \" + element\n\n movieNameString = movieNameString.strip()\n listOfMovieNames.append(movieNameString)\n i = i + 2\n \n return listOfMovieNames", "title": "" }, { "docid": "d8bb8ab7005b57323a4b97ed3379adcf", "score": "0.4945225", "text": "def get_movies(self):\n return self.__movies", "title": "" }, { "docid": "ef546be1f498f4a6469552b8fc6f67f5", "score": "0.49376014", "text": "def getShows(show=None):\n if show is None:\n params = []\n else:\n params = [['sg_short_name', 'is', show]]\n try:\n return sg.find('Project', params, fields=['name', 'sg_short_name'])\n except gaierror, e:\n raise", "title": "" }, { "docid": "d0148c0f3c6216652733a505e2955721", "score": "0.49340236", "text": "def result_movies_schedule_list(tv_type_list):\n schedule_list = []\n\n if tv_type_list == []:\n logging.info(\"no tv type set\")\n tv_type_list = [\"free\"]\n\n for i in tv_type_list:\n schedule_list = schedule_list + result_movies_schedule(i, \"today\")\n\n return schedule_list", "title": "" }, { "docid": "49bd750049ab17783863da371b89b51d", "score": "0.49321175", "text": "def movie_list(genre, CACHE_FNAME):\n pass", "title": "" }, { "docid": "dde0423a47149b515e136d6bc4c3e8c4", "score": "0.49301162", "text": "def get_all_times(self):\n _times = sorted([strptime(t[\"_id\"], \"%d.%m.%Y %H:%M:%S\")\n for t in self.col_songs.find({}, {\"_id\": True})], reverse=True)\n\n return [strftime(\"%d.%m.%Y %H:%M:%S\", t) for t in _times]", "title": "" }, { "docid": "f7c399f8a082592eeb7222c99173d006", "score": "0.49146712", "text": "def get_list_tv_shows_data(self, content_list,unravel = False):\n output_df = 
pd.DataFrame(columns=['kind','series_name','season','episode','original_air_date','rating','votes'])\n for show_name in content_list:\n row = self.get_tv_show_information(show_name,unravel)\n output_df = output_df.append(row)\n return output_df.reset_index(drop = True)", "title": "" }, { "docid": "0bf8fdc1cca90a3d6b5426edca0e77dc", "score": "0.49056172", "text": "def timelisting(self):\n\n curtimes = self.Time_Vector\n timestrs = []\n for (i,j) in curtimes:\n curlist = [datetime.utcfromtimestamp(i).__str__(),datetime.utcfromtimestamp(j).__str__()]\n timestrs.append(curlist)\n\n return timestrs", "title": "" }, { "docid": "7079bd2765f1ba17e24941071b75d3f2", "score": "0.4900752", "text": "def movie_list() -> list:\n movies = []\n with open('../phase1/resources/final_movies.csv', newline='') as movies_list:\n reader = csv.DictReader(movies_list)\n movies = [movie['Movie_Titles'].strip() for movie in reader]\n \n movies_list.close()\n \n\n return movies", "title": "" }, { "docid": "e215267fb849a2b7d2a407fda6fbddda", "score": "0.49001178", "text": "def showswithout():\n return Show.query.filter_by(episode=None).all()", "title": "" }, { "docid": "221a8dc24a0382817e16d4b9bbf0b871", "score": "0.48881263", "text": "def GetShows(self, start=0, end=0, sortmethod='title', sortorder='ascending', hidewatched=0, filter=''):\n self.logger.debug(\"Fetching TV Shows\")\n try:\n kodi = Server(self.url('/jsonrpc', True))\n sort = {'order': sortorder, 'method': sortmethod, 'ignorearticle': True}\n properties = ['title', 'year', 'plot', 'thumbnail', 'playcount']\n limits = {'start': int(start), 'end': int(end)}\n filter = {'field': 'title', 'operator': 'contains', 'value': filter}\n if hidewatched == \"1\":\n filter = {\"and\": [filter, {'field': 'playcount', 'operator': 'is', 'value': '0'}]}\n shows = kodi.VideoLibrary.GetTVShows(sort=sort, properties=properties, limits=limits, filter=filter)\n return shows\n except Exception as e:\n self.logger.exception(e)\n self.logger.error(\"Unable to fetch TV Shows\")\n return", "title": "" }, { "docid": "b93d60c870b1d944807997ec290b7ef1", "score": "0.48860648", "text": "def get_movies_from_file(f, **kwargs):\n movie_names = get_from_file(f=f)\n movie_times = [''] * len(movie_names)\n\n return movie_names, movie_times # appropriately sized list of empty strings", "title": "" }, { "docid": "9b2e06b8af821baa1104d4e789c124da", "score": "0.48803604", "text": "def fetch_tower_events(today):\n r = requests.get(current_app.config['TOWER_TIX'])\n soup = BeautifulSoup(r.text, 'html.parser')\n\n # The date passed in by the route is in a format different from the date\n # string in the HTML document so the passed in date has to be reformatted\n # to match the HTML document's version.\n new_today = today.strftime('%A %-d, %B')\n\n films = []\n # get all the dates in the document in order to find the desired one\n dates = soup.find_all('h3', 'date-title')\n for date in dates:\n # find the div tag that matches the given date\n if new_today == date.get_text():\n # date_soup points to the div (and corresponding child elements)\n # for all films showing on the given date so we only parse a\n # smaller section of the HTML document.\n date_soup = date.parent\n # film_tags is a list containing every div that contains film info\n film_tags = date_soup.find_all('div', class_='film')\n # we loop over each individual film div\n for film_tag in film_tags:\n # this gets the name of the film\n name_tags = film_tag.find_all('h3', class_='title')\n for name_tag in name_tags:\n name = 
name_tag.get_text().strip()\n\n # this gets the showtimes and corresponding ticketing links\n sessions = film_tag.find('ul', class_='session-times')\n li_tags = sessions.find_all('li')\n showtimes = []\n for li_tag in li_tags:\n # the li tag only has the start time so we build a\n # 'datetime' string and pass it into the dateutil parser in\n # order to create a datetime object with a date\n start_time = dateutil.parser.parse(f'{date.get_text()} {li_tag.a.time.get_text()}')\n buy_link = li_tag.a['href']\n # for every showtime, create an instance of FilmSchedule\n showtime = FilmSchedule(start_time, buy_link)\n showtimes.append(showtime)\n # create a new instance of FilmEvent during every loop of the\n # film divs\n film = FilmEvent(name, showtimes)\n films.append(film)\n\n return films", "title": "" }, { "docid": "ccf65b348700dbc3f392cbd7ec5cd0fd", "score": "0.48754048", "text": "def get_movies(**kwargs):\n now = int(time.time())\n all_movies = Movie.query\n movies = all_movies\n\n if kwargs.get('current'):\n movies = movies.filter(db.or_(Movie.show_end == None, Movie.show_end >= now))\n\n if kwargs.get('starting_within_days'):\n starting_within_seconds = kwargs['starting_within_days'] * 24 * 60 * 60\n movies = movies.filter(Movie.show_start != None)\\\n .filter(Movie.show_start <= now + starting_within_seconds)\n\n return movies.all()", "title": "" }, { "docid": "42633d55cfe9d90766a57e4e0e118aa7", "score": "0.48701417", "text": "def GetShows(self, start=0, end=0, sortmethod='title', sortorder='ascending', hidewatched=0, filter=''):\n self.logger.debug(\"Fetching TV Shows\")\n try:\n xbmc = Server(self.url('/jsonrpc', True))\n sort = {'order': sortorder, 'method': sortmethod, 'ignorearticle': True}\n properties = ['title', 'year', 'plot', 'thumbnail', 'playcount']\n limits = {'start': int(start), 'end': int(end)}\n filter = {'field': 'title', 'operator': 'contains', 'value': filter}\n if hidewatched == \"1\":\n filter = {\"and\": [filter, {'field': 'playcount', 'operator': 'is', 'value': '0'}]}\n shows = xbmc.VideoLibrary.GetTVShows(sort=sort, properties=properties, limits=limits, filter=filter)\n return shows\n except Exception, e:\n self.logger.exception(e)\n self.logger.error(\"Unable to fetch TV Shows\")\n return", "title": "" }, { "docid": "b53005f4ef737ea9c1601d672ba150ad", "score": "0.48671716", "text": "def getSequencesForShow(self, show):\n sequencesXML = self.sendRequestAndReadData(\"core/getSequences/%s\" % show)\n sequenceNames = []\n if sequencesXML:\n root = ET.fromstring(sequencesXML)\n sequences = root.findall('Sequence')\n for sequence in sequences:\n sequenceNames+=[sequence.get('name')]\n\n return sequenceNames", "title": "" }, { "docid": "0d159755ff9a7a9634359130ee8771d8", "score": "0.48553562", "text": "def show_movie_list():\n\n movies = Movie.query.order_by('title').all()\n\n return render_template(\"movies.html\", movies=movies)", "title": "" }, { "docid": "57bfe1dacff673dc7dd4c16af57101ce", "score": "0.4850903", "text": "def get_list_of_movies(self):\n\n WebDriverWait(self.driver, 5).until(\n EC.element_to_be_clickable(\n (By.XPATH, '//*/tbody[@class=\"lister-list\"]/tr/td[1]')\n ))\n\n list_of_movies = self.driver.find_elements_by_xpath(\n '//*/tbody[@class=\"lister-list\"]/tr/td[1]/a'\n )\n list_of_movies = list_of_movies[0:self.n_movies]\n\n return list_of_movies", "title": "" }, { "docid": "e03d57da77dad5349d7a330adc5bcacd", "score": "0.48490927", "text": "def get_movies(theater, date, **kwargs):\n theater = theater.lower()\n\n D_ACTIONS = dict(\n # bos:\n 
brattle_theatre=get_movies_brattle,\n coolidge_corner=get_movies_coolidge,\n harvard_film_archive=get_movies_hfa,\n mfa_boston=get_movies_mfa,\n kendall_cinema=get_movies_landmark,\n somerville_theatre=get_movies_somerville,\n amc_boston_common=get_movies_amc,\n regal_fenway=get_movies_showtimes,\n # nyc:\n alamo_drafthouse_brooklyn=get_movies_alamo,\n angelika_film_center=get_movies_village_east_or_angelika,\n anthology=get_movies_anthology,\n bam_rose_cinemas=get_movies_bam,\n cinema_village=get_movies_cinema_village,\n cobble_hill_cinemas=get_movies_cobble_hill,\n # film_forum=get_movies_film_forum, # custom is slow - have to open headless => fall back to default\n film_noir=get_movies_film_noir,\n ifc=get_movies_ifc,\n loews_jersey_theater=get_movies_loews_theater,\n lincoln_center=get_movies_filmlinc,\n metrograph=get_movies_metrograph,\n moma=get_movies_moma,\n museum_of_the_moving_image=get_movies_momi,\n nitehawk=get_movies_nitehawk,\n nitehawk_prospect_park=get_movies_nitehawk,\n quad_cinema=get_movies_quad,\n syndicated_bk=get_movies_syndicated,\n ua_court_st=get_movies_showtimes,\n village_east_cinema=get_movies_village_east_or_angelika,\n #videology=get_movies_videology, # RIP\n # pgh:\n regent_square_theater=get_movies_pghfilmmakers,\n harris_theater=get_movies_pghfilmmakers,\n melwood_screening_room=get_movies_pghfilmmakers,\n the_manor=get_movies_manor,\n row_house_cinema=get_movies_rowhouse,\n the_waterfront=get_movies_amc\n )\n\n def fallback(*args, **kwargs):\n try:\n return get_movies_google(*args, **kwargs) # default to google search\n except(NoMoviesException):\n return get_movies_showtimes(*args, **kwargs) # or, last ditch effort, showtimes.com search\n\n action = D_ACTIONS.get(theater.replace(' ', '_'), fallback)\n\n return action(theater, date)", "title": "" }, { "docid": "a76875e06c6717fd8d65ae8024e34cb6", "score": "0.48487112", "text": "def LIST_OF_MONSTERS() -> list:\n return [\"Amputator\", \"Bone Breaker\", \"Dark Cultist\", \"Fallen Shaman\", \"Flesh Harvester\", \"Terror Bat\",\n \"Dust Imp\", \"Demonic Hellflyer\"]", "title": "" }, { "docid": "982f22cfdca0b5143943e0aa4c830048", "score": "0.48420092", "text": "def movie_list():\n\n\t# Get list of Movie objects\n\tmovies = Movie.query.order_by(\"title\").all()\n\n\treturn render_template(\"movie_list.html\", movies=movies)", "title": "" }, { "docid": "d409473547afd7efd442ce8206c17e9b", "score": "0.48405024", "text": "def list_timezones():\n return H2OFrame._expr(expr=ExprNode(\"listTimeZones\"))._frame()", "title": "" }, { "docid": "980a1dc0726ccd5b4efce8c820dd80e1", "score": "0.4838276", "text": "def GetShows(self, start=0, end=0, sortmethod='title', sortorder='ascending', hidewatched=0, filter=''):\r\n self.logger.debug(\"Fetching TV Shows\")\r\n try:\r\n xbmc = Server(self.url('/jsonrpc', True))\r\n sort = {'order': sortorder, 'method': sortmethod, 'ignorearticle': True}\r\n properties = ['title', 'year', 'plot', 'thumbnail', 'playcount']\r\n limits = {'start': int(start), 'end': int(end)}\r\n filter = {'field': 'title', 'operator': 'contains', 'value': filter}\r\n if hidewatched == \"1\":\r\n filter = {\"and\": [filter, {'field': 'playcount', 'operator': 'is', 'value': '0'}]}\r\n shows = xbmc.VideoLibrary.GetTVShows(sort=sort, properties=properties, limits=limits, filter=filter)\r\n return shows\r\n except Exception, e:\r\n self.logger.exception(e)\r\n self.logger.error(\"Unable to fetch TV Shows\")\r\n return", "title": "" }, { "docid": "074f4613bf7fa804b32d204f30dfb9ac", "score": "0.48284054", "text": 
"def getCIDsMovieIDsfromProbe(self):\n m = []\n c = self.conn.cursor()\n\n c.execute(\"\"\"SELECT customerID, MovieID FROM probe\n ORDER BY customerID\"\"\")\n\n for row in c:\n m.append(row)\n \n return m", "title": "" }, { "docid": "5c12c5df7222b11912f8a762d94b343d", "score": "0.48281327", "text": "def get_movimientos(self):\r\n return self.__movimientos", "title": "" }, { "docid": "c623daf2e3a2a7c2b9d5289847ef46b1", "score": "0.48253062", "text": "def GetBenchmarkStoryNames(benchmark):\n story_list = []\n for story in GetBenchmarkStorySet(benchmark):\n story_list.append(story.name)\n return story_list", "title": "" }, { "docid": "bddd38640da907ee3981ef6ca18483f3", "score": "0.48232195", "text": "def getTvShow(self, showName):\n for show in self.shows:\n if show.name == showName:\n return show\n return None", "title": "" }, { "docid": "4da82b3d395a89992cafdcd764ec5d4f", "score": "0.48194644", "text": "def get_data_from_movie(self):\n WebDriverWait(self.driver, 5).until(\n EC.presence_of_element_located(\n (By.CLASS_NAME, 'titleBar')\n ))\n title_1 = self.driver.find_element_by_xpath(\n '//*/div[@class=\"originalTitle\"]'\n )\n title_2 = title_1.find_element_by_tag_name('span')\n title = title_1.text.replace(title_2.text, '')\n year = self.driver.find_element_by_id('titleYear').text\n year = year.replace('(', '').replace(')', '')\n rating = self.driver.find_element_by_class_name('ratingValue').text\n rating = rating.split('/')[0]\n director = self.driver.find_element_by_xpath(\n '//*/h4[@class = \"inline\" and contains(text(), \"Director\")]/following-sibling::a'\n ).text\n stars = self.driver.find_elements_by_xpath(\n '//*/h4[@class = \"inline\" and contains(text(), \"Stars\")]/following-sibling::a'\n )\n actors =[]\n for star in stars[0: len(stars)-1]:\n actors.append(star.text)\n cast = ', '.join(actors)\n\n recomendations = self.get_recomendations()\n movie_info = [title, year, rating, director, cast]\n\n return movie_info, recomendations", "title": "" }, { "docid": "a737e0c5809e5ea731f010e0247f846e", "score": "0.4811743", "text": "def scrape(self):\n shows = jsonp_loads(requests.get(self.HOST + self.SHOWS_URI).content)\n artists = jsonp_loads(requests.get(self.HOST + self.ARTIST_URI).content)\n\n return [self.show_json_to_event(show, artists) for show in shows]", "title": "" }, { "docid": "af1c4ff4f8c006346628e6e6ce3b25a0", "score": "0.48098224", "text": "def _fetch_showtimes( self, url, day ):\r\n try:\r\n # add the appropriate number of days to todays date\r\n date = datetime.date.today() + datetime.timedelta( days=day )\r\n # get the current locals long date format and convert it to what strftime() expects\r\n try:\r\n format = xbmc.getRegion( \"datelong\" ).replace( \"DDDD\", \"%A\" ).replace( \"MMMM\", \"%B\" ).replace( \"D\", \"%d\" ).replace( \"YYYY\", \"%Y\" )\r\n except:\r\n format = \"%A, %B %d, %Y\"\r\n # format our date\r\n date = date.strftime( format )\r\n # create our url\r\n url = \"%s&date=%d\" % ( url, day, )\r\n # open url or local file (if debug)\r\n if ( not debug ):\r\n usock = urllib.urlopen( url )\r\n else:\r\n usock = open( os.path.join( os.getcwd().replace( \";\", \"\" ), \"showtimes_source.txt\" ), \"r\" )\r\n # read source\r\n htmlSource = usock.read()\r\n # close socket\r\n usock.close()\r\n # if no showtimes were found raise an error\r\n if ( htmlSource.find( \"No showtimes were found\" ) >= 0 ):\r\n raise\r\n # save htmlSource to a file for testing scraper (if debugWrite)\r\n if ( debugWrite ):\r\n file_object = open( os.path.join( os.getcwd().replace( 
\";\", \"\" ), \"showtimes_source.txt\" ), \"w\" )\r\n file_object.write( htmlSource )\r\n file_object.close()\r\n # parse htmlSource for showtimes\r\n parser = _ShowtimesParser()\r\n parser.feed( htmlSource )\r\n #parser.close()\r\n return date, parser.theaters\r\n except:\r\n # oops print error message\r\n print \"ERROR: %s::%s (%d) - %s\" % ( self.__class__.__name__, sys.exc_info()[ 2 ].tb_frame.f_code.co_name, sys.exc_info()[ 2 ].tb_lineno, sys.exc_info()[ 1 ], )\r\n return date, None", "title": "" }, { "docid": "c7d1c4f8bdd7c34aecc0b610e71fe468", "score": "0.47934124", "text": "def showcoastlines(self):\n return self[\"showcoastlines\"]", "title": "" }, { "docid": "add4861a5017d663b73557adaa4ee183", "score": "0.47894183", "text": "def get_all_times(self):\n return self.times", "title": "" }, { "docid": "15fb92fdea9552703f8dabd6c0881de4", "score": "0.47509992", "text": "def get_corpus_as_shows(path):\n corpus = []\n \n # get all files' and folders' names in the current directory\n filenames= sorted(os.listdir(path)) \n\n # loop through all the files and folders\n for filename in filenames:\n if filename[0] == '.' :\n continue\n # check whether the current object is a folder or not (ie check if it's a show)\n if os.path.isdir(os.path.join(os.path.abspath(path), filename)):\n text =\"\"\n show_path = path+\"/\"+filename\n for season in sorted(os.listdir(show_path)):\n if season[0] == '.' :\n continue\n season_path = show_path+\"/\"+season\n for episode in sorted(os.listdir(season_path)):\n if episode[0] == '.' :\n continue\n episode_path = season_path+\"/\"+episode\n \n f = open(episode_path, 'r',encoding='utf-8', errors='ignore')\n lines = f.readlines()\n f.close()\n for line in lines :\n text += line\n corpus.append(text)\n \n return corpus", "title": "" }, { "docid": "b36be06b92c5194e5e8389d89e89dd77", "score": "0.47355938", "text": "def get_alltvshows():\n\n conn = database_connect()\n if(conn is None):\n return None\n cur = conn.cursor()\n try:\n #########\n # TODO # \n #########\n\n #############################################################################\n # Fill in the SQL below with a query to get all tv shows and episode counts #\n #############################################################################\n sql = \"\"\"\n SELECT tvshow_id , tvshow_title, COUNT(media_id)\n FROM mediaserver.TVShow NATURAL JOIN mediaserver.TVEpisode\n GROUP BY tvshow_id\n ORDER BY tvshow_id, tvshow_title\n \"\"\"\n #KARLO: Whenever I remove the comments, the 'tv shows' button seems to work better. 
But when the above doesn't have comments, it doesn't work\n\n\n\n r = dictfetchall(cur,sql)\n print(\"return val is:\")\n print(r)\n cur.close() # Close the cursor\n conn.close() # Close the connection to the db\n return r\n except:\n # If there were any errors, return a NULL row printing an error to the debug\n print(\"Unexpected error getting All TV Shows:\", sys.exc_info()[0])\n raise\n cur.close() # Close the cursor\n conn.close() # Close the connection to the db\n return None", "title": "" }, { "docid": "7176042fd287b56414dae67d65706999", "score": "0.47214702", "text": "def get(self, name, session):\n name = series.normalize_series_name(name)\n matches = series.shows_by_name(name, session=session)\n\n shows = []\n for match in matches:\n shows.append(get_series_details(match))\n\n return jsonify({\n 'shows': shows,\n 'number_of_shows': len(shows)\n })", "title": "" }, { "docid": "b5ef4267d60662ee8267110e065b921c", "score": "0.47178975", "text": "def get_queryset(self):\n # get the cast using the actor_slug value in the url\n self.cast = MovieCast.objects.get(actor_slug=self.kwargs.get('actor_slug'))\n # return all of the movies that related to the cast instance\n return Movie.objects.filter(cast=self.cast)", "title": "" }, { "docid": "e73d672cdd862f9a7f791c1eac897f0a", "score": "0.47115692", "text": "def list_movies(): # Finished\n global movie_library\n for movie in movie_library:\n print(\"\")\n for key in movie.keys():\n print('{}: {}'.format(key, movie[key]))\n print(\"\")\n return None", "title": "" }, { "docid": "997ec17d4887617436c65addb8e6b86c", "score": "0.4711219", "text": "def get_shows():\n shows = postgres.get_shows()\n\n if not shows:\n message = {\"message\": \"No active shows in DB\"}\n return message, 500\n\n message = {\"shows\": shows}\n return message, 200", "title": "" }, { "docid": "c7a1898b71fecdf0283175432f62b54b", "score": "0.47075906", "text": "def get_dreams(self):\r\n\r\n return self.dreams", "title": "" }, { "docid": "242573ff8d51062b7b5a221a4c8a51bb", "score": "0.47006235", "text": "def getMovies(category, location):\n movies = []\n movie_url = getMovieUrl(category, location)\n html = MoviePractice.expanddouban.getHtml(movie_url, True)\n soup = BeautifulSoup(html, \"html.parser\")\n all_list = soup.find_all(\"a\", class_=\"item\")\n for item in all_list:\n name = item.find(\"span\", class_=\"title\").get_text()\n rate = item.find(\"span\", class_=\"rate\").get_text()\n info_link = item[\"href\"]\n cover_link = item.find(\"img\")[\"src\"]\n m = Movie(name, rate, location, category, info_link, cover_link)\n movies.append(m)\n return movies", "title": "" }, { "docid": "d48ffb6cc923344063d39c8bd04d82ba", "score": "0.46918076", "text": "def get_magazines():\n # return pd.read_csv(resource_path('data/magazines.csv'), dtype=str, header=None)[0].tolist()\n columns = np.genfromtxt(resource_path('data/magazines.csv'), delimiter=',', dtype=str)\n return list(columns.T[1])", "title": "" }, { "docid": "beae576718624709f5622989df6a0579", "score": "0.46782947", "text": "def get_all_tvshoweps_for_tvshow(tvshow_id):\n\n conn = database_connect()\n if(conn is None):\n return None\n cur = conn.cursor()\n try:\n #########\n # TODO # \n #########\n\n #############################################################################\n # Fill in the SQL below with a query to get all information about all #\n # tv episodes in a tv show #\n #############################################################################\n sql = \"\"\"\n select media_id, tvshow_episode_title, season, 
episode, air_date\n from mediaserver.TVEpisode\n where tvshow_id = %s\n order by season, episode\n \"\"\"\n\n r = dictfetchall(cur,sql,(tvshow_id,))\n print(\"return val is:\")\n print(r)\n cur.close() # Close the cursor\n conn.close() # Close the connection to the db\n return r\n except:\n # If there were any errors, return a NULL row printing an error to the debug\n print(\"Unexpected error getting All TV Shows:\", sys.exc_info()[0])\n raise\n cur.close() # Close the cursor\n conn.close() # Close the connection to the db\n return None", "title": "" } ]
a594995c748a30819fd0e6d94b30a20b
We discard the first 0.5 seconds of each trial as it does not contain any information.
[ { "docid": "ee29ece24dd63cfd2b7b24d1d030d35e", "score": "0.51472926", "text": "def discard_irrelevant_measurements(train_x, sfreq):\n tmin = 0\n tmax = 1\n tmin_original = -0.5\n\n beginning = np.round((tmin - tmin_original) * sfreq).astype(np.int)\n end = np.round((tmax - tmin_original) * sfreq).astype(np.int)\n train_x = train_x[:, :, beginning:end]\n\n return train_x", "title": "" } ]
[ { "docid": "510154af90407dcb2af1c70437e247e3", "score": "0.5970883", "text": "def trimOut(self, time):\n ...", "title": "" }, { "docid": "00b793f9cf69953663cf316d57150db6", "score": "0.58393484", "text": "def _timeskip(self):\n tm = time.time()\n bundy.bundy.component.time.time = lambda: tm + 30", "title": "" }, { "docid": "54dab35a8e5c89367f6bd88cc8f5bd52", "score": "0.57321686", "text": "def trialTable_nonr():\n\n trial = 2\n while trial <= 2048:\n result = compareTrials_nonr(trial)\n print trial, '\\t', result[0], '\\t', result[1]\n\n trial *= 2", "title": "" }, { "docid": "ca3db74b7a9d1ec52a2aa94f4d81d164", "score": "0.56972253", "text": "def dsleep(self):\n now = time.time()\n if now < self.next_sample:\n yield dsleep(self.next_sample - now)\n self._set_next_sample()", "title": "" }, { "docid": "7b14b9ef1859e7f926f5557ab188dee1", "score": "0.56707937", "text": "def singletrial(t, num_trials):\n trainset = [t]\n testset = [i for i in range(trainset[0])] + [i for i in range(trainset[-1] + 1, num_trials)]\n\n testset = sort([t % num_trials for t in testset])\n\n return trainset, testset", "title": "" }, { "docid": "703215fe4377ab11d69120290d2f1aa8", "score": "0.5635139", "text": "def run_trials(self):\n self.reporter.writeln(self.get_trial_header())\n while not self.terminate_now():\n # Prelims\n self.trial_iter += 1\n iter_header = ('\\nEXP %d/%d:: '%(self.trial_iter, self.num_trials)\n + self.get_iteration_header())\n iter_header += '\\n' + '=' * len(iter_header) + '\\n'\n self.reporter.writeln(iter_header)\n # R trial iteration.\n self.run_trial_iteration()\n # Save results\n self.save_results()\n # Wrap up the trials\n self.wrapup_trials()", "title": "" }, { "docid": "b8b88fe35f2825a7e353554d99e36367", "score": "0.563226", "text": "def sleep(self):\n now = time.time()\n if now < self.next_sample:\n time.sleep(self.next_sample - now)\n self._set_next_sample()", "title": "" }, { "docid": "c38b2cca74212cadf9e7a907f8e13a3e", "score": "0.56277597", "text": "def test_reset_statistics(self):\n sampler = ArgonTemperingSampler(100, np.linspace(300.0, 400.0, 20))\n sampler.sample(nsteps=1, niterations=5, save_freq=1)\n sampler.reset_statistics()\n assert np.sum(sampler.histogram) == 0 and sampler.nmoves == 0", "title": "" }, { "docid": "986ce3b3d717a2a99da8cf9773210720", "score": "0.5626628", "text": "def trial_missed(self, trial):\n if trial[2] == 0:\n self.no_gvs_missed.append(trial)\n else:\n self.gvs_missed.append(trial)", "title": "" }, { "docid": "9de19cd7e9bb5337a7f39df1d1cc6d50", "score": "0.55941314", "text": "def skip(self):\n self.seek(int(self.status.duration) - 5)", "title": "" }, { "docid": "4445bffc4e5154047a5ba96bbea7fc38", "score": "0.5560629", "text": "def wrapup_trials(self):\n # pylint: disable=no-self-use\n pass", "title": "" }, { "docid": "db15cd88ebde162f3ce4e8a2dc3cc37a", "score": "0.55389476", "text": "def time_step():\n return 0.25", "title": "" }, { "docid": "f3edf29e868fd471dee95561489484d9", "score": "0.5520545", "text": "def terminate_now(self):\n return self.trial_iter >= self.num_trials", "title": "" }, { "docid": "9d93ba76155272ee3eb9dc519f4e160e", "score": "0.55078906", "text": "def finish_trial(self):\n return None", "title": "" }, { "docid": "5c12039cf13c0f1fcd57a59316b94bd9", "score": "0.5496972", "text": "def simulationTwoDrugsDelayedTreatment(numTrials):\n # TODO", "title": "" }, { "docid": "5d7a24104abad7ca8f33f3ed7b1eb8f8", "score": "0.54622316", "text": "def run_experiment():\n return[random.random() < 0.5 for _ in range(1000)]", "title": "" }, { 
"docid": "f7e19e3c2fc2c99e23ecdb874f5b1bed", "score": "0.5439285", "text": "def time_to_failure():\r\n return random.expovariate(BREAK_MEAN)", "title": "" }, { "docid": "6d85bbee242c005a34c7065f63c9bd24", "score": "0.54390997", "text": "def show_next_trial(self):\n if self.levitt_exp.run_number > self.max_runs:\n self.test_level = self.levitt_exp.calculate_threshold()\n self.show_stimulus('All done, with a threshold of %g.' % self.test_level)\n return\n super().show_next_trial()\n msg = f'Last result was {self.last_result.lower()}. '\n msg += f'Now showing run #{self.levitt_exp.run_number}, '\n msg += f'trial #{self.levitt_exp.trial_number}. '\n msg += f'This test is {self.test_description}.'\n self.update_debug(msg)", "title": "" }, { "docid": "5816a94f7925187ee2e5c9658eb575d9", "score": "0.5389274", "text": "def limitTestRunningTime():\n return 300", "title": "" }, { "docid": "0cf44c32f17a5a779816eb5c6d82a1a1", "score": "0.53688323", "text": "def trimIn(self, time):\n ...", "title": "" }, { "docid": "076fe525f8401ee719bd18a5e6676c34", "score": "0.53506494", "text": "def remove_trials_end_early(df):\n num_total_trials = df['trial_params']['trial_num'].max()\n num_completed_trials = df['traces']['trial_num'].max()\n\n temp_params = df['trial_params'].iloc[:num_completed_trials].copy()\n df['trial_params'] = temp_params\n\n return df", "title": "" }, { "docid": "40d9120d5ca4c20e4d7280ef03643c71", "score": "0.53419656", "text": "def _time_series_drop(self):\n time_series_drop_rate = 0.0 # Currently all data is used.\n\n n_background_series = len(self.file_groups['background_series'])\n n_foreground_series = len(self.file_groups['foreground_series'])\n\n if n_background_series > 0:\n n_background_to_keep = int(n_background_series * (1 - time_series_drop_rate))\n n_foreground_to_keep = int(n_foreground_series * (1 - time_series_drop_rate))\n\n background_indices = np.array(random.sample(range(n_background_series), n_background_to_keep))\n foreground_indices = np.array(random.sample(range(n_foreground_series), n_foreground_to_keep))\n\n self.file_groups['background_series'] = self.file_groups['background_series'][background_indices]\n self.file_groups['foreground_series'] = self.file_groups['foreground_series'][foreground_indices]\n\n print(\"Background files after dropping:\", len(self.file_groups['background_singles']), \"singles +\",\n len(self.file_groups['background_series']), \"series\")\n print(\"Foreground files after dropping:\", len(self.file_groups['foreground_singles']), \"singles +\",\n len(self.file_groups['foreground_series']), \"series\")", "title": "" }, { "docid": "5ce286635dda103b14df8a7fab6d9703", "score": "0.5335113", "text": "def makeTrialList():\r\n trialList = [] \r\n trial_count = 1\r\n curr_onset = 2 #initial onset\r\n stims = r.sample(stim_ids*int(exp_len * .25),exp_len)\r\n \r\n trial_states = [1] #start off the function\r\n #creates the task-set trial list. Task-sets alternate based on recusive_p\r\n #with a maximum repetition of 25 trials. This function also makes sure\r\n #that each task-set composes at least 40% of trials\r\n while abs(np.mean(trial_states)-.5) > .1:\r\n curr_state = r.choice(states.keys())\r\n trial_states = []\r\n state_reps = 0\r\n for trial in range(exp_len):\r\n trial_states.append(curr_state)\r\n if r.random() > trans_probs[curr_state,curr_state] or state_reps > 25:\r\n curr_state = 1-curr_state\r\n state_reps = 0\r\n else:\r\n state_reps += 1\r\n \r\n #define bins. 
Will set context to center point of each bin\r\n bin_boundaries = np.linspace(-1,1,11)\r\n \r\n \r\n for trial in range(exp_len):\r\n state = states[trial_states[trial]]\r\n dis = norm(state['c_mean'],state['c_sd'])\r\n binned = -1.1 + np.digitize([dis.rvs()],bin_boundaries)*.2\r\n context_sample = round(max(-1, min(1, binned[0])),2)\r\n\r\n \r\n trialList += [{\r\n 'trial_count': trial_count,\r\n 'state': trial_states[trial],\r\n 'ts': state['ts'],\r\n 'c_dis': {'mean': dis.mean().item(), 'sd': dis.std().item()},\r\n 'context': context_sample,\r\n 'stim': stims[trial],\r\n 'onset': curr_onset,\r\n 'FBDuration': FBDuration,\r\n 'FBonset': FBonset,\r\n #option to change based on state and stim\r\n 'reward': 1,\r\n 'punishment': 0\r\n }]\r\n\r\n \r\n trial_count += 1\r\n curr_onset += stimulusDuration+FBDuration+FBonset+intertrial+r.random()*.5\r\n \r\n \r\n \r\n \r\n return trialList", "title": "" }, { "docid": "924ceaef93dae47c01ea2aee60a0fb3d", "score": "0.5307701", "text": "def control_time(n_trials,stim_length,pause_length,len_act_rew):\n # initialize the dictionary\n\tcur_dic = {0:1}\n\tcur_time = 0\n\t\t\n\t# for every trial create the stimulation\n\tfor ii in range(n_trials):\n\t\tcur_stim_length = stim_length # add jitter here later\n\t\tcur_pause_length = pause_length # add jitter here later\n\t\tcur_dic[cur_time+0.05] = 1\n\t\tcur_time = cur_time + cur_pause_length # first add the pause\n\t\tcur_dic[cur_time+0.05] = 0\n\t\tcur_time = cur_time + cur_stim_length # first add the pause\n\t\tcur_time = cur_time + len_act_rew\n\treturn cur_dic", "title": "" }, { "docid": "c39987da6bc91285ef21ca4115fada9b", "score": "0.5269008", "text": "def test_simple_case(self):\n tag = \"test.timewith.{0}.simple\".format(self.run_id)\n with timewith(tag):\n time.sleep(1.2)\n results = influx.query(\"SELECT sum(value) FROM \\\"{0}\\\" WHERE time > now() - 1m\".format(tag), database=\"themis_tests\")\n points = list(results.get_points())\n self.assertEqual(len(points), 1)\n self.assertGreater(points[0][\"sum\"], 1200)\n # clean up the measurement we just made\n influx.query(\"DROP MEASUREMENT \\\"{0}\\\"\".format(tag), database=\"themis_tests\")", "title": "" }, { "docid": "0ee551b86f576f79c1e14ab0fc934f47", "score": "0.52456725", "text": "def random_skip(self):\n return True", "title": "" }, { "docid": "1e8a9e0560f5173514858a36c75a6421", "score": "0.5243512", "text": "def NOTFINISHED_spiketimes_subset(spikeTimesFromEventOnset, trialIndexForEachSpike,\n indexLimitsEachTrial, trials):\n trialsList = np.flatnonzero(trials)\n nSpikesEachTrial = np.diff(indexLimitsEachTrial, axis=0)[0]\n indexLimitsEachTrialNew = indexLimitsEachTrial[:, trials]\n spikeTimesFromEventOnsetNew = np.empty(0, dtype='float64')\n trialIndexForEachSpikeNew = np.empty(0, dtype='int')\n for indtrial, thisTrial in enumerate(trialsList):\n indsThisTrial = slice(indexLimitsEachTrial[0, thisTrial],\n indexLimitsEachTrial[1, thisTrial])\n spikeTimesFromEventOnsetNew = np.concatenate((spikeTimesFromEventOnsetNew,\n spikeTimesFromEventOnset[indsThisTrial]))\n trialIndexForEachSpikeNew = np.concatenate((trialIndexForEachSpikeNew,\n np.repeat(indtrial, nSpikesEachTrial[thisTrial])))\n return spikeTimesFromEventOnsetNew, trialIndexForEachSpikeNew, indexLimitsEachTrialNew", "title": "" }, { "docid": "dad7e2ba902530c5416a39970020e280", "score": "0.524302", "text": "def resetTimer(self, time=0):", "title": "" }, { "docid": "180ef0cbbb449d0ecb9b7b74d9c1fa85", "score": "0.5231563", "text": "def _short_sample_time(self, min_time=100):\n return 
max(min_time,self._get_t_lande())", "title": "" }, { "docid": "192b21aeb6b7545dc74fa795a1c279cb", "score": "0.5229092", "text": "def trialTable():\n\n trial = 2\n while trial <= 2048:\n result = compareTrials(trial)\n print trial, '\\t', result[0], '\\t', result[1]\n\n trial *= 2", "title": "" }, { "docid": "b504f2e83f168d5423bc752436255dd3", "score": "0.5225699", "text": "def _waitForHe3PotToStartCooling(self):\r\n startTime = downTime = currTime = time()\r\n timeout = 1800.0\r\n duration = 120.0\r\n times = []\r\n vals = []\r\n while currTime - downTime < duration and currTime - startTime < timeout:\r\n currTime = time()\r\n times.append(currTime)\r\n currTemp = self.directGetTemperatureHe3()\r\n vals.append(currTemp)\r\n if simpleLinearRegression(times, vals)[0] > -0.00001:\r\n downTime = currTime\r\n times = [currTime]\r\n vals = [currTemp]\r\n sleep(1.0)", "title": "" }, { "docid": "d9101ea8c1522f18bb7468aef005a65f", "score": "0.5221261", "text": "def updateTrial(now):\n\tif rt['trialIsRunning']:\n\t\ttimeSinceStart = time.time() - rt['startTrialSeconds']\n\t\tif timeSinceStart > rt['trialDurationSeconds']:\n\t\t\tstopTrial()\n\t\t\treturn False\n\telse:\n\t\treturn True", "title": "" }, { "docid": "58d0c7a04fe9e8eac45912d3f046fec1", "score": "0.5212814", "text": "def cut_waves(sound_list, time, name):\n\n for i in range(len(sound_list)):\n\n sound = AudioSegment.from_file(\"files/\" +name+ \"/\" + sound_list[i])\n\n if time == 3:\n \"\"\" When we want it to last 3 seconds. \"\"\"\n\n t1 = random.randint(0, 13000)\n t2 = t1 + 3000\n cropped = sound[t1:t2]\n\n \"\"\" Saving cropped waves in appropriate folder \"\"\"\n cropped.export(\"files/\"+name+\"_3s/\" + sound_list[i], format='wav')\n\n elif time == 6:\n \"\"\" When we want it to last 6 seconds. \"\"\"\n\n t1 = random.randint(0, 10000)\n t2 = t1 + 6000\n cropped = sound[t1:t2]\n\n \"\"\" Saving cropped waves in appropriate folder \"\"\"\n cropped.export(\"files/\"+name+\"_6s/\" + sound_list[i], format='wav')\n\n elif time == 10:\n \"\"\" When we want it to last 10 seconds. 
\"\"\"\n\n t1 = random.randint(0, 6000)\n t2 = t1 + 10000\n cropped = sound[t1:t2]\n\n \"\"\" Saving cropped waves in appropriate folder \"\"\"\n cropped.export(\"files/\"+name+\"_10s/\" + sound_list[i], format='wav')\n\n else:\n print(\"Unhandled case.\")", "title": "" }, { "docid": "c107cae2f0e5b7a78726def70a172d85", "score": "0.5211525", "text": "def throw(self,number = 1000):\n self.sample = []\n while number >= 0:\n self.sample.append(self.roll())\n number -= 1\n\n return self.sample", "title": "" }, { "docid": "1f02a5bd53935c7783378ebf2397d01f", "score": "0.52026284", "text": "def clear_times_rolled(self):\n self.times_rolled = 0", "title": "" }, { "docid": "f0d6f32eded32529a92956718fce0933", "score": "0.52025586", "text": "def dummy(self, sleep=0):\n if sleep:\n time.sleep(sleep)", "title": "" }, { "docid": "dbb18d32c485547eee5f6ecfdaa6e83f", "score": "0.5200143", "text": "def quinn_shuffle(self):\n # Code taken from Quinn via discord, thanks Quinn.\n for x in range(12):\n self.right(primary=-60, counter=0)\n time.sleep(.1)\n self.left(primary=-60, counter=0)\n time.sleep(.1)\n self.stop() \n\n # SEVENTH DANCE", "title": "" }, { "docid": "b36afaea1e675d151fc8c16f36ae0b4a", "score": "0.5190828", "text": "def __aggregate_next_second(self):\r\n parsed_sec = AbstractReader.pop_second(self)\r\n if parsed_sec:\r\n self.pending_second_data_queue.append(parsed_sec)\r\n timestamp = int(time.mktime(parsed_sec.time.timetuple()))\r\n if timestamp in self.stat_data.keys():\r\n del self.stat_data[timestamp]\r\n else:\r\n self.log.debug(\"No new seconds present\")", "title": "" }, { "docid": "6649fae7da64dd6385e274724fdb5298", "score": "0.5190469", "text": "def split_by_trial(lines, LT):\n if len(lines) == 0:\n return [[]]\n \n # Save setup info\n trial_starts = [0]\n\n # Remove 2990 and no-command events\n # What are these? 
Would be more efficient to run in one loop for all events\n lines = [line for line in lines if (len(line.split())>1 and line.split()[0] != '2990')]\n\n # FA and normal files\n if not LT:\n # Find start indices\n trial_s = [i for i, x in enumerate(lines) if x.split()[1] == start_trial_token]\n # Find end indices\n trial_ends = [i for i, x in enumerate(lines) if x.split()[1] == trial_released_token]\n # Include zero index\n trial_starts = trial_starts + trial_s\n # Find nogo trial indices (lines with OUTC 3)\n nogos = set([x for i, x in enumerate(lines) if (x.split()[1] == trial_result_token and trial_outcome_token in x.split()[2] and str(NOGO) in x.split()[3])])\n # LT only\n else:\n # Find start indices (start defined as OUTC of previous trial [n-1])\n # Water rewards are given ~500ms prior to OUTC printing\n # Measuring from OUTC discards all immediate post-reward (non-learning)\n # type licks and appreciates quick (pre-trial-change) behavioral changes\n trial_starts = [i for i, x in enumerate(lines) if (x.split()[1] == trial_result_token and trial_outcome_token in x.split()[2])]\n # Find end indices (end defined as EV reward of current trial [n])\n # Ignore manual water rewards (demarcated EV AAR_R/L)\n trial_ends = [i for i, x in enumerate(lines) if (x.split()[1] == trial_reward_token and 'AA' not in x.split()[2])] \n # Find nogo trial indices (lines with OUTC 3)\n nogos = [i for i, x in enumerate(lines) if (x.split()[1] == trial_result_token and trial_outcome_token in x.split()[2] and str(NOGO) in x.split()[3])]\n # Simply replace all NOGO trial entries (OUTC 3) with DBG entries\n # Result will be an outlier insignifanct when using median measurements\n # Transforms output from EV --> OUTC 3 (nogo) --> OUTC 1 (correct) to \n # EV --> OUTC 1 (and can remove consecutive OUTC 3 entries)\n # This works because EV is not an output in NOGO trials\n # Sort to restore trial order (sets are unordered iterables in Python)\n for nogo in nogos:\n curr_line = lines[nogo]\n replacement_line = curr_line.split()[0] + ' DBG\\n'\n lines[nogo] = replacement_line\n trial_starts_corrected = list(set(trial_starts).symmetric_difference(nogos))\n trial_starts = trial_starts_corrected\n trial_starts.sort()\n # Remove first trial start (OUTC) (always right trial)\n trial_starts = trial_starts[1:]\n # Remove first two trial ends (EV) (always right trial)\n trial_ends = trial_ends[2:]\n # Remove last trial start (OUTC) (unmatched; no next EV)\n trial_starts = trial_starts[:-1]\n\n # If unfinished last trial\n if len(trial_starts) > len(trial_ends):\n last_chunk = lines[trial_starts[-1]:]\n # If outcome recorded, add outcome as trial end\n if len(list(filter(lambda x: trial_outcome_token in x, last_chunk))) > 0:\n outc = list(filter(lambda x: trial_outcome_token in x, last_chunk))[-1]\n trial_ends.append(lines.index(outc))\n # If no outcome, remove last trial\n else:\n trial_starts = trial_starts[:(len(trial_starts)-1)]\n\n # Find commands designating manual water rewards\n man_rewards = set([x for i, x in enumerate(lines) if (x.split()[1] == trial_reward_token and manual_reward_token in x.split()[2])])\n\n # Now iterate over trial_starts and append the chunks\n splines = []\n\n # Initiate nogo counters\n on_nogos = 0\n off_nogos = 0\n\n for i in range(len(trial_starts)):\n # check if manual reward in trial\n # if manual reward found in trial lines, do not append trial\n spline = lines[trial_starts[i]:trial_ends[i]+1] if not man_rewards.intersection(set(lines[trial_starts[i]:trial_ends[i]+1])) else None\n # if 
nogo trial, do not append trial (already discarded LT nogos)\n if not LT and spline is not None:\n parsed_lines = parse_lines_into_df(spline)\n spline = spline if not nogos.intersection(set(lines[trial_starts[i]:trial_ends[i]+1])) else None\n if spline is None:\n # record laser status for nogo trials only if random trial\n is_rand = parsed_lines[(parsed_lines['command'] == trial_param_token) & (parsed_lines['argument'].str.contains('ISRND'))]['argument'].item()\n is_rand = True if str(YES) in is_rand else False\n if is_rand:\n opto = parsed_lines[parsed_lines[\"command\"] == trial_param_token]['argument'].to_list()\n idx = [i for i, o in enumerate(opto) if 'OPTO' in o][0]\n opto = opto[idx]\n opto = 'ON' if '3' in opto else 'OFF'\n if opto is 'OFF':\n off_nogos += 1\n else:\n on_nogos += 1\n if spline is not None: \n splines.append(spline)\n\n # return lines split by trial and number of nogo trials by laser status\n return splines, on_nogos, off_nogos", "title": "" }, { "docid": "89c7e994e1243cc42d84f66741d0885e", "score": "0.5181141", "text": "def trial(self) -> Any:\n return self._trial", "title": "" }, { "docid": "89c7e994e1243cc42d84f66741d0885e", "score": "0.5181141", "text": "def trial(self) -> Any:\n return self._trial", "title": "" }, { "docid": "ab773b681e9f1361366b060ea4c35608", "score": "0.51728225", "text": "def updater(self): # called regularly by QTimer\n for i in range(len(self.arduinos)):\n if time.time() - self.prevs[i] > 1.0 / self.arduinos[i].sampling_rate:\n self.arduinos[i].sample()\n self.prevs[i] = time.time()", "title": "" }, { "docid": "bea67ae9acf239bc6276fd9350003c20", "score": "0.5163921", "text": "def increment_time(self):\n self.spikes[self.spikes > -1] -= 1\n self.neuron_sums[self.neuron_sums > 5] -= .5", "title": "" }, { "docid": "37820f6d87b009bfa189b731c9a3da88", "score": "0.5155768", "text": "def integrated_profile(data_trial, times):\n import numpy as np\n cs = np.abs(data_trial).cumsum()\n a = cs[-1] / (times[-1] - times[0])\n l = a * times\n d = cs - l\n return d", "title": "" }, { "docid": "ca6f2d9daa48766b0022c193c16d34e0", "score": "0.5148597", "text": "def clear_inlet(self):\n while self.inlet.pull_sample(timeout=0.0)[0]:()", "title": "" }, { "docid": "acb187535f7d608cf69708c493e40433", "score": "0.51457196", "text": "def test_spontaneous(self):\n protocol = protocols.SpontaneousProtocol(4000)\n \n baseline = kernik.KernikModel()\n tr = baseline.generate_response(protocol)\n data_baseline = tr.get_last_ap(is_peak=True)\n data_baseline.t = data_baseline.t - min(data_baseline.t)\n\n for current in [\"G_Na\", \"P_CaL\", \"G_Kr\", \"G_Ks\", \"G_K1\", \"G_to\"]:\n model = kernik.KernikModel(updated_parameters={current: 1.4})\n tr = model.generate_response(protocol)\n try:\n data = tr.get_last_ap(is_peak=True)\n data.t = data.t - min(data.t)\n plt.plot(data.t, data.V, label=current)\n plt.plot(data_baseline.t, data_baseline.V, label=\"Baseline\")\n plt.legend()\n plt.show()\n except:\n import pdb\n pdb.set_trace()\n \n \n plt.legend()", "title": "" }, { "docid": "6a35633682959c15a1a30f07d0714594", "score": "0.51407635", "text": "def setCountTime(self, t): \n sleep(t)", "title": "" }, { "docid": "bc0215731f245103df1163decd226f46", "score": "0.513748", "text": "def PostEachRunProcess(self, run_counter):\n if not self.remove_first_result or run_counter > 0:\n self.times.append(time.time() - self.start)", "title": "" }, { "docid": "3d94da688de514fddec94a267482dffd", "score": "0.51259923", "text": "def take(self):\n\n pass", "title": "" }, { "docid": 
"9a1d446248ed6d625197cbde603ee372", "score": "0.51207715", "text": "def delay(t):\n time.sleep(t / 1000.0)", "title": "" }, { "docid": "a16f452790895ee36a63248e261e8207", "score": "0.51206684", "text": "def skip(t, n):\n t.pu\n t.fd(n*2)\n t.pu", "title": "" }, { "docid": "1afa51a268f1354cf7b895903c72258b", "score": "0.51147", "text": "def unsubsample_tracts(self):\n self._subsampled_tracts = None\n self._subsampled_data = None", "title": "" }, { "docid": "f75e835f8c1b1542d443c171bc20ac87", "score": "0.5114305", "text": "def test_drop_zero_timepoints(self):\n exp_timepoints = np.array([0. , 11.11111111, 22.22222222, 33.33333333,\n 44.44444444, 55.55555556, 66.66666667, 77.77777778,\n 88.88888889, 100.])\n exp_measures = np.array([0.74524402, 1.53583955, 2.52502335, 3.92107899, 4.58210253,\n 5.45036258, 7.03185055, 7.75907324, 9.30805318, 9.751119])\n measure = TimecourseMeasurement('Variable_1', np.log(exp_measures), exp_timepoints)\n\n measure.drop_timepoint_zero()\n assert (measure.timepoints[0] == 11.11111111)", "title": "" }, { "docid": "597f37930f694c53026aec0ef01a0a37", "score": "0.51066184", "text": "def rand_sleep(self):\n sleep_sec = random.uniform(1, 3)\n print('Sleeping for {} seconds'.format(str(sleep_sec)))\n sleep(sleep_sec)", "title": "" }, { "docid": "37c499e960136a9714739af5b93adcc3", "score": "0.51043636", "text": "def challenge_one():\n\n import time\n for number in range(10):\n time.sleep(1) #Waits one second before continuing.", "title": "" }, { "docid": "2f50857ae26bad0cd15fcbb83dc18218", "score": "0.51012385", "text": "def initial_consumption(model, t):\n return 0.5", "title": "" }, { "docid": "2752854a6a4eae7c2acbd776edc269ea", "score": "0.5095934", "text": "def _cullResults(self, limit = 60*60):\n \n count = 0\n \n for k,v in self.signalHistory.iteritems():\n for i in v:\n if i[0] < time.time() - limit:\n self.signalHistory[k].remove(i)\n count += 1\n \n print(\"$s removed %d old results.\" % (self.name, count))\n reactor.callLater(60*60, self._cullResults, limit)", "title": "" }, { "docid": "868a3e958de952a03ab01785736964ff", "score": "0.5091106", "text": "def __process_pending_second(self):\r\n next_time = int(\r\n time.mktime(self.pending_second_data_queue[0].time.timetuple()))\r\n if self.last_sample_time and (next_time - self.last_sample_time) > 1:\r\n self.last_sample_time += 1\r\n self.log.debug(\r\n \"Adding phantom zero sample: %s\", self.last_sample_time)\r\n res = self.get_zero_sample(\r\n datetime.datetime.fromtimestamp(self.last_sample_time))\r\n else:\r\n res = self.pending_second_data_queue.pop(0)\r\n self.last_sample_time = int(time.mktime(res.time.timetuple()))\r\n res.overall.planned_requests = self.__get_expected_rps()\r\n self.log.debug(\"Pop result: %s\", res)\r\n return res", "title": "" }, { "docid": "f6146cd7710a109ba0b69208a5b444d8", "score": "0.5086104", "text": "def nap(self):\n time.sleep(self.callspersecond / 1000)", "title": "" }, { "docid": "9b82c082e92cbf29ae73014f252ea78c", "score": "0.50726163", "text": "def start_trial(self, sequence_type):\n pass", "title": "" }, { "docid": "fe20a957011a2007365dea6994d5bec3", "score": "0.50724715", "text": "def requestTotalTime(self):\n print(self.timesRecorder[-1])", "title": "" }, { "docid": "da87f850dd8c53c427eb3799a1d4d0df", "score": "0.50710356", "text": "def step(t, tb=1*min):\n # generate half time\n tibeep = coacher.duration(ibeep)\n channel = []\n for ti in range(0, int(t-tb-tb/2), tb):\n channel += silence(tb-tibeep)\n channel += ibeep\n channel += silence(t-coacher.duration(channel))\n 
return channel", "title": "" }, { "docid": "efeef2cacd5132235efd6df14ed78d89", "score": "0.50632256", "text": "def purge(cand_times, test_times):\n # Remove \"for loop\" by forming \"test period\"\n test_period0 = test_times.index[0] # test period start\n test_period1 = test_times.max() # test period end\n\n # training starts within test\n case_1_idx = cand_times[\n (test_period0 <= cand_times.index) & (cand_times.index <= test_period1)].index\n # training ends within test\n case_2_idx = cand_times[\n (test_period0 <= cand_times) & (cand_times <= test_period1)].index\n # training envelops test\n case_3_idx = cand_times[\n (cand_times.index <= test_period0) & (test_period1 <= cand_times)].index\n\n purged_train_times = cand_times.drop(\n case_1_idx.union(case_2_idx).union(case_3_idx))\n\n return purged_train_times", "title": "" }, { "docid": "ce803dc2876b8554aa9c95a2a52a451f", "score": "0.5058805", "text": "def _sleep(self):\n if self.sleep:\n time.sleep(random.uniform(0.25, 2.0))", "title": "" }, { "docid": "5862b51d3da351a56169113f40ed36dd", "score": "0.50543284", "text": "def initial_time():\n return 0", "title": "" }, { "docid": "dc5abffca1a0c9db49c7e98b24c7b250", "score": "0.50479996", "text": "def delay(self):\n if self.next_request_timestamp is None:\n return\n sleep_seconds = self.next_request_timestamp - time.time()\n if sleep_seconds <= 0:\n return\n message = f\"Sleeping: {sleep_seconds:0.2f} seconds prior to call\"\n log.debug(message)\n time.sleep(sleep_seconds)", "title": "" }, { "docid": "d41badc8984e799f5728f60441a0a107", "score": "0.50473464", "text": "def SampleStop(self):\n if self._cpu_time is None or self._system_time is None:\n return\n\n self.total_cpu_time += time.clock() - self._cpu_time\n self.total_system_time += time.time() - self._system_time\n self.number_of_samples += 1\n\n self._cpu_time = None\n self._system_time = None", "title": "" }, { "docid": "dc8cc90a5163a821bca8e200ccd0c2a0", "score": "0.5039631", "text": "def mix():\n\n sleep_time = random.randint(1, 10000)\n time.sleep(sleep_time/100000.0)", "title": "" }, { "docid": "4b3ddcf89d1933e113174c1d07dc52a9", "score": "0.5039555", "text": "def print_every_five_min():\n timeout = 300\n time.sleep(timeout)", "title": "" }, { "docid": "21e60052542f0a5e7e3959016a8ef282", "score": "0.50391614", "text": "def early_second(self):\n now = time.time()\n while now - int(now) > 0.99:\n time.sleep(0.01)\n now = time.time()\n return int(now)", "title": "" }, { "docid": "6190351e4e7af563c8332691f3ca2e72", "score": "0.50358784", "text": "def sampleWhiteNoise(self):\n M = self.M\n alpha = self.alpha\n tArray = self.tArray\n\n # Compute truncated random series in the time domain\n print(\"### Computing truncated random series in the time domain.\")\n sd = npr.seed()\n self.signal = np.sum([1/np.sqrt(2)*(npr.randn()+1J*npr.randn())*s\n for s in laguerre.IFLaguerreFunctions(M,alpha,tArray)], 0)\n\n # Compute the same series in the frequency domain, and avoid Fourier\n print(\"### Computing spectrum.\")\n npr.seed(sd)\n freqs, _ = utils.fourier(self.signal, self.A)\n self.wArray = np.linspace(0, freqs[-1], len(freqs))\n self.spectrum = np.sum([1/np.sqrt(2)*(npr.randn()+1J*npr.randn())*l*np.sqrt(laguerre.mu(alpha,self.wArray))\n for l in laguerre.LaguerrePolynomials(M,alpha,self.wArray)], 0)", "title": "" }, { "docid": "b46d25f2c864d3ae510e6b2f81ff3ee2", "score": "0.5035155", "text": "def get_next_sample(self, force):\r\n pass", "title": "" }, { "docid": "57e2f4c5d67c8bef6948ef750af0f053", "score": "0.50337017", "text": "def 
reset_timeout(self):\n self.t0 = time.time()", "title": "" }, { "docid": "ab718435c5836e4df13ddaac36138824", "score": "0.5030673", "text": "def to_trial(self):\n self.manager.scene = self\n self.t0 = self.clock.getTime()\n self.next_question()", "title": "" }, { "docid": "aaad85279bc1beb916b3c52789728af4", "score": "0.5024584", "text": "def test_list(timer, iterations):\n timer.start()\n a = list()\n for i in range(iterations):\n a.append(i)\n a.append(i)\n a.pop(0)\n return timer.end()", "title": "" }, { "docid": "7b6f4df7d0507a55471a9979827c2304", "score": "0.5017004", "text": "def tick(self):\n if self.backoff is None:\n self.backoff = random.randrange(self.medium.cw_size + 1)\n\n self.backoff -= 1", "title": "" }, { "docid": "3ca82d32d46873fdde0f73d84f13dac9", "score": "0.5011627", "text": "def time_to_next_tide():", "title": "" }, { "docid": "eac77a49cf8a642b0d71e56fe1846d29", "score": "0.50096107", "text": "def step(self, time):\n pass", "title": "" }, { "docid": "70ccb3760d7efa99776ee26e5b74ed88", "score": "0.5005852", "text": "def reset():\n global time, attempts, success\n timer.stop()\n time = 0\n attempts = 0\n success = 0\n run = True", "title": "" }, { "docid": "c3c3b7af29d8be9633ce4dba488705b1", "score": "0.5004403", "text": "def final_time():\n return 45", "title": "" }, { "docid": "1172ed1179eda743aeab7668fed94f6a", "score": "0.49967065", "text": "def time_step():\n return TimeStepUniform()", "title": "" }, { "docid": "8d99d58783ecc5586415afc8ea950c41", "score": "0.499378", "text": "def pytestcase_rawiterator_dont_do_time_shifting(tmpdir, dataset_dir):\n # GIVEN\n timeslice = 100000\n filename = os.path.join(dataset_dir,\n \"metavision_core\", \"event_io\", \"recording.raw\")\n mv_iterator = EventsIterator(filename, start_ts=0, delta_t=timeslice,\n max_duration=1e6, relative_timestamps=False, do_time_shifting=False)\n # WHEN\n current_time = 0\n for evs in mv_iterator:\n current_time += 1\n # THEN\n assert current_time == 10", "title": "" }, { "docid": "a5881be0f499edd336dba1e9c2db5ae7", "score": "0.49903196", "text": "def should_stop_trials_early(\n self,\n trial_indices: Set[int],\n experiment: Experiment,\n **kwargs: Dict[str, Any],\n ) -> Dict[int, Optional[str]]:\n pass # pragma: nocover", "title": "" }, { "docid": "afb6e2f7deb71717bf729d9afde4b51c", "score": "0.49902385", "text": "def time_per_part():\r\n time = random.normalvariate(PT_MEAN, PT_SIGMA)\r\n return time", "title": "" }, { "docid": "a488602357ef7c16804d830b354bf2e9", "score": "0.49883264", "text": "def reset(self):\n self._previous_time = time.time()\n self._last_sleep = 0", "title": "" }, { "docid": "2271e0a7d27d0527eb4f4ec7cde07385", "score": "0.49854028", "text": "def time_update(estimate, noise):\r\n return estimate[-1], noise[-1]", "title": "" }, { "docid": "9b843ea130f9d43d1e7a8f2e1e4c40ca", "score": "0.49838075", "text": "def _make_sampling_times(self):\n sample_times_after_shift_1 = self._get_sample_times_after_shift()\n for elem in list(sample_times_after_shift_1):\n if elem > self._MAX_SAMPLE_TIME:\n sample_times_after_shift_1.discard(elem)\n self._SET_OF_SAMPLE_TIMES_AFTER_SHIFT = sample_times_after_shift_1\n self._MAX_SAMPLE_TIME = max(self._SET_OF_SAMPLE_TIMES_AFTER_SHIFT) +1\n self._MAX_SAMPLE_TIME_UNITS_N = float(self._MAX_SAMPLE_TIME)/float(self.N)\n self._param_dict['T_max'] = self._MAX_SAMPLE_TIME\n\n self._SET_OF_SAMPLE_TIMES_BEFORE_SHIFT = self._get_sample_times_before_shift()", "title": "" }, { "docid": "34f5f3c4d3c996c291282806e2afc51e", "score": "0.49823707", "text": "def 
show_trial(data_trial, times, \n use_raw=True, th_raw=3.5, time_limit_raw=.025, min_samples_raw=3, \n varying_min_raw=1, mbsl_raw=None, stbsl_raw=None, \n use_tkeo=True, th_tkeo=8, time_limit_tkeo=.025, min_samples_tkeo=10, \n varying_min_tkeo=0, mbsl_tkeo=None, stbsl_tkeo=None, \n sf=None, ip_search=[-.050,.050], \n moving_avg_window=.015):\n import numpy as np\n import pylab as plt\n \n onsets,offsets = get_onsets(data_trial, times, \n use_raw=use_raw, th_raw=th_raw, time_limit_raw=time_limit_raw, min_samples_raw=min_samples_raw,\n varying_min_raw=varying_min_raw, mbsl_raw=mbsl_raw, stbsl_raw=stbsl_raw, \n use_tkeo=use_tkeo, th_tkeo=th_tkeo, time_limit_tkeo=time_limit_tkeo, min_samples_tkeo=min_samples_tkeo,\n varying_min_tkeo=varying_min_tkeo, mbsl_tkeo=mbsl_tkeo, stbsl_tkeo=stbsl_tkeo, \n sf=sf, ip_search=ip_search,\n moving_avg_window=moving_avg_window)\n\n if use_raw:\n emg_sections = detector_var(data_trial, times, th=th_raw, time_limit=time_limit_raw, min_samples=min_samples_raw, varying_min=varying_min_raw, mbsl=mbsl_raw, stbsl=stbsl_raw, sf=sf)\n else:\n emg_sections = np.transpose(([],[]))\n if use_tkeo:\n tkeo_sections = detector_var(tkeo(data_trial), times, th=th_tkeo, time_limit=time_limit_tkeo, min_samples=min_samples_tkeo, varying_min=varying_min_tkeo, mbsl=mbsl_tkeo, stbsl=stbsl_tkeo, sf=sf)\n else:\n tkeo_sections = np.transpose(([],[]))\n \n for tk in tkeo_sections:\n non_overlap = [b for b in range(len(emg_sections)) if ((emg_sections[b][1] < tk[0]) | (tk[1] < emg_sections[b][0]))]\n emg_sections = emg_sections[non_overlap]\n\n emg_sections = np.vstack((emg_sections,tkeo_sections))\n\n \n plt.figure()\n plt.plot(times, data_trial, 'b', linewidth = .75)\n for section in emg_sections:\n plt.vlines(times[section[0]], data_trial.min()*1.1, data_trial.max()*1.1, linestyles='dotted')\n plt.vlines(times[section[1]], data_trial.min()*1.1, data_trial.max()*1.1, linestyles='dotted')\n\n for o in onsets:\n plt.plot(times[o],data_trial[o],'rx', markersize = 5)\n\n for o in offsets:\n plt.plot(times[o],data_trial[o],'rx', markersize = 5)", "title": "" }, { "docid": "4c57f46fe23d46d06a89c99d67552e33", "score": "0.49813274", "text": "def remove_infrequent_samples(csv_data, sample_threshold):\n\n sorted_by_sample = sorted(csv_data, key=lambda k: k['sample'])\n\n counter = 0\n testing_sample = sorted_by_sample[0]['sample']\n marked_for_removal = []\n for dict in sorted_by_sample:\n # print dict['sample']\n if dict['sample'] == testing_sample:\n counter += 1\n # print(counter)\n else:\n if counter < sample_threshold:\n marked_for_removal.append(testing_sample)\n testing_sample = dict['sample']\n counter = 1\n # print(counter)\n # For last sample\n if counter < sample_threshold:\n marked_for_removal.append(testing_sample)\n\n new_dict_list = []\n for dict in csv_data:\n if dict['sample'] not in marked_for_removal:\n new_dict_list.append(dict['sample'])\n\n return new_dict_list\n\n # print(marked_for_removal)\n # print(len(marked_for_removal))", "title": "" }, { "docid": "e2832d2b3c65247c17ecb0df699fb805", "score": "0.4980719", "text": "def quit_trial(self):\r\n responses = []\r\n for stimulusObject in self.stimulusObjects:\r\n responses.append(stimulusObject.response)\r\n assert len(responses) == len(self.stimulusObjects), \"\"\"the number of responses does not match\r\n the number of stimulus objects\"\"\"\r\n if not None in responses:\r\n self.setupData.responseList = responses\r\n self.recorder.add_data(self.setupData)\r\n self.destroy()\r\n else:\r\n pass", "title": "" }, { "docid": 
"262a146fb2ab02061b84815abd630063", "score": "0.49806955", "text": "def h_timer_minute(self, count):\n if len(self.todo) == 0:\n return\n \n if not self.db_ok:\n return\n \n count=self.MAX_BURST_SIZE\n while count!=0 :\n try: ts, deviceId, sensorId, value=self.todo.pop(0)\n except IndexError: break\n r=self._record(deviceId, sensorId, value, ts, retry=False)\n if r:\n self.dprint(\"Retry successful: deviceId(%s) sensorId(%s) value(%s)\" % (deviceId, sensorId, value))\n count -= 1", "title": "" }, { "docid": "43f6e5b3c7db0bb99053bf54a010c730", "score": "0.49774137", "text": "def saveper():\n return time_step()", "title": "" }, { "docid": "6b7a0c7243f4918d3f51631930cf0e2b", "score": "0.49643567", "text": "def get_trial_header(self):\n # pylint: disable=no-self-use\n return ''", "title": "" }, { "docid": "b988900373ee7c1c4ce318d4d8a4056c", "score": "0.49639857", "text": "def use_layoff(self):\n self.skipped_turns_remaining -= 1", "title": "" }, { "docid": "2d701f0ef0241dd1a081bd8e9b9aa831", "score": "0.49562633", "text": "def restart(self):\n self.target_time = time.monotonic() + self.duration", "title": "" }, { "docid": "bd08df8721dde2b28ce9d4ab4ea41fe6", "score": "0.4956118", "text": "def sampling(array):\n\ttime = array[len(array)-1,2] - array[0,2]\n\tsamples = len(array)\n\treturn samples/time*1e6", "title": "" }, { "docid": "9bcd7c42ebdec705f921e2af9e4e3fc7", "score": "0.49523595", "text": "def test_add_downsampled_smp(session):", "title": "" }, { "docid": "40a2314b52f96f4ccf476e38f7d147cf", "score": "0.49502906", "text": "def test_timeouts(self):\n pass", "title": "" }, { "docid": "31838136e788b2f3fba71bac7599a0cc", "score": "0.4947779", "text": "def trim_utts(corpus_name, corpus, spk, target_dur, current_dur):\n print((\"Trimming corpus {} speaker {} \"\n \"from {} to {} seconds\").format(corpus_name, spk,\n current_dur, target_dur))\n assert target_dur <= current_dur\n spk_utts = [utt for utt in corpus.utt2spk if corpus.utt2spk[utt] == spk]\n # get speaker utterances by recording \n spk_segs = {}\n for utt in spk_utts:\n seg = corpus.segments[utt][0]\n if seg in spk_segs:\n spk_segs[seg].append(utt) \n else:\n spk_segs[seg] = [utt]\n # get utterances in reverse order of occurrence (last to first)\n spk_segs = {seg: np.array(spk_segs[seg]) for seg in spk_segs}\n for seg in spk_segs:\n if len(spk_segs[seg]) > 1:\n utt_starts = np.array([corpus.segments[utt][1]\n for utt in spk_segs[seg]])\n utt_order = np.argsort(utt_starts)[::-1] # decreasing order\n spk_segs[seg] = spk_segs[seg][utt_order]\n # get recordings in increasing order of duration\n durations = corpus.utt2duration()\n seg_ids = np.array([seg for seg in spk_segs]) \n seg_durs = np.array([sum([durations[utt] for utt in spk_segs[seg]])\n for seg in seg_ids])\n order = np.argsort(seg_durs)\n sorted_seg_ids = seg_ids[order]\n # get utt_ids to exclude\n spurious_utts = []\n assert abs(current_dur - sum(seg_durs)) < 1e-8 # handle rounding errors\n stop = False\n for seg in sorted_seg_ids:\n if stop:\n break\n for utt in spk_segs[seg]:\n dur = durations[utt]\n if current_dur - dur > target_dur:\n spurious_utts.append(utt)\n current_dur -= durations[utt]\n else:\n stop = True\n # decide what to do with current utt\n err1 = abs(target_dur - current_dur) \n err2 = abs(target_dur - current_dur + dur)\n if err1 > err2:\n spurious_utts.append(utt) # remove current uttt\n current_dur -= durations[utt]\n break \n return spurious_utts", "title": "" }, { "docid": "f164562c6460cf4e37c46573be4cc922", "score": "0.49436563", "text": "def 
fixed_timer(x, y, z, initial_pos, count=5, t=1):\n rast = raster(x, y, z, initial_pos)\n while True:\n next_pos = next(rast)\n for i in range(count):\n try:\n yield next_pos\n time.sleep(t)\n except StopIteration:\n break", "title": "" } ]
981994ac7d1b428ccae299d8115d26e2
Create an empty list, append an empty list for every row (height) and append a 0 to every sublist for each column (width). For any element cells[i][j], i and j represent x and y coordinates respectively on the grid.
[ { "docid": "17e5bce32d632431826aa3ff78ad80a0", "score": "0.8121759", "text": "def init_empty_cells(width, height):\n cells = []\n if width <= 0 or height <= 0:\n return(cells)\n else:\n for i in range(height):\n cells.append([])\n for i in cells:\n for j in range(width):\n i.append(0)\n return(cells)", "title": "" } ]
[ { "docid": "e96b05476685bbbae7d04a7e8291668d", "score": "0.7523356", "text": "def create_empty_grid(width: int, height: int, value=tile_empty) -> List:\n\n grid = []\n for row in range(height):\n grid.append([])\n for column in range(width):\n grid[row].append(value)\n return grid", "title": "" }, { "docid": "6ae0da4c8d0284caf30f9fb80deb94a4", "score": "0.75167936", "text": "def init_cells(num_rows: int, num_columns: int) -> list[list[list[int]]]:\n cells = []\n for i in range(num_rows + 2):\n row = []\n for j in range(num_columns + 2):\n cell = [] # type: list[int]\n row.append(cell)\n cells.append(row)\n # link border cells to opposite cells\n for i in range(num_rows + 2):\n if i == 0:\n map_i = num_rows\n elif i == num_rows + 1:\n map_i = 1\n else:\n map_i = i\n for j in range(num_columns + 2):\n if j == 0:\n map_j = num_columns\n elif j == num_columns + 1:\n map_j = 1\n else:\n map_j = j\n\n cells[i][j] = cells[map_i][map_j]\n return cells", "title": "" }, { "docid": "d1105509614153ce183673b19569fcc2", "score": "0.74226546", "text": "def create_lists():\r\n l = list(range(WIDTH // COTE))\r\n for i in l:\r\n l[i] = list(range(WIDTH // COTE))\r\n for j in range(WIDTH // COTE):\r\n l[i][j] = 0\r\n return l", "title": "" }, { "docid": "193ecffcad2043c7a16c189548704207", "score": "0.7345675", "text": "def create_grid(height, width):\n grid = []\n\n for r in range(height):\n row = [0] * width # a row containing width 0s\n grid += [row]\n\n return grid", "title": "" }, { "docid": "95c81fe47066768c5cd7ca7118cf5fa1", "score": "0.7306515", "text": "def _cellify(self, grid):\n newgrid = []\n for row_idx, row_val in enumerate(grid):\n newrow = []\n for cell_idx, cell_val in enumerate(row_val):\n newrow.append(Cell(row_idx, cell_idx, isWall=cell_val))\n newgrid.append(newrow)\n return newgrid", "title": "" }, { "docid": "a0dfd335768cf5968fe0c152efba6594", "score": "0.7254954", "text": "def build_grid():\n return [[None] * GRID_WIDTH for line in xrange(GRID_HEIGHT)]", "title": "" }, { "docid": "7154a9f83aa579b3d8985c5fdc2e4b97", "score": "0.7173941", "text": "def create_grid():\r\n rows = []\r\n for row_num in range(self.num_rows):\r\n list_of_columns = [0]*self.num_columns\r\n rows.append(list_of_columns)\r\n return rows", "title": "" }, { "docid": "c447a514a9ec6d04ef10f833659176c0", "score": "0.71669126", "text": "def create_empty_grid(width, height, default_value=TILE_EMPTY):\n grid = []\n for row in range(height):\n grid.append([])\n for column in range(width):\n grid[row].append(default_value)\n return grid", "title": "" }, { "docid": "7e6aca924001929739af223fff826059", "score": "0.7135744", "text": "def createEmptyGrid(size) :\n grid=[]\n for i in range(size) :\n grid.append([])\n for j in range(size) :\n grid[i].append([None, None])\n return grid", "title": "" }, { "docid": "94aeae3c3a843bf453061960e1adae2a", "score": "0.7097644", "text": "def init_nested_list(rows, cols):\r\n out_list = [None] * rows\r\n for i in range(0, rows):\r\n out_list[i] = [None] * cols #in_list\r\n return out_list", "title": "" }, { "docid": "71bb9ccfce078dbccedc4360ed898c5d", "score": "0.7083556", "text": "def create_grid(grid):\r\n \r\n for i in range(4):\r\n grid.append([0]*4)\r\n return grid", "title": "" }, { "docid": "9f31e583e061b0ad069f20b3fa9b7f73", "score": "0.7055197", "text": "def zeros(self):\n self.grid = [[0 for j in range(self.n)] for i in range(self.n)]", "title": "" }, { "docid": "e66ff28564939e0765ba98612b65d34a", "score": "0.7018065", "text": "def create_grid(self):\n board = []\n for row in 
range(self.max_length):\n board.append([])\n for column in range(int(self.max_width / 5)):\n board[row].extend([\" \", \" \", \" \"])\n return board", "title": "" }, { "docid": "54641f6cace5c489ca8d92648c56ef7e", "score": "0.7007149", "text": "def createBoard(width, height): \n A = []\n for row in range(height):\n A += [int(width) * [0]]\n return A", "title": "" }, { "docid": "2976257967266a5456fbbe5effa759fe", "score": "0.6957916", "text": "def printable_grid(grid) -> list:\n square_grid = []\n for i in range(9):\n line = [x if x else 0 for x in grid[row(i)]]\n square_grid.append(line)\n\n return square_grid", "title": "" }, { "docid": "d69d0af32c900ce327d1a537885412db", "score": "0.68997633", "text": "def extend_borders(cells):\n I, J = len(cells), len(cells[0])\n for i in range(I):\n cells[i][0:0] = [0]\n cells[i].append(0)\n J = J+2\n I = I+2\n cells[0:0] = [[0] * J]\n cells.append([0] * J)\n return cells, I, J", "title": "" }, { "docid": "f4a3be12cd4fd8c841c151b366874f7e", "score": "0.68563825", "text": "def create_grid(grid):\r\n for i in range(4):\r\n grid.append([0,0,0,0])\r\n return grid", "title": "" }, { "docid": "c60b464d1793847e0a7c9a8d11a0b20a", "score": "0.68449026", "text": "def make_cells(self):\r\n\r\n\t\t# Make the list\r\n\t\tself.cell_list = []\r\n\r\n\t\t# Iterate over all cell ids\r\n\t\tfor i in self.cell_ids:\r\n\r\n\t\t\t# Get the row and column of this cell\r\n\t\t\trow = i//self.width\r\n\t\t\tcol = i%self.width\r\n\r\n\t\t\t# Collect adjacent cells\r\n\t\t\tadj = []\r\n\r\n\t\t\t# Based on the column, include the requisite adjacent cells\r\n\t\t\tif col == 0:\r\n\t\t\t\tadj.append(i+1)\r\n\t\t\telif col == self.width-1:\r\n\t\t\t\tadj.append(i-1)\r\n\t\t\telse:\r\n\t\t\t\tadj.extend([i-1, i+1])\r\n\r\n\t\t\t# Based on the row, include the requisite adjacent cells\r\n\t\t\tif row == 0:\r\n\t\t\t\tadj.append(i+self.width)\r\n\t\t\telif row == self.height-1:\r\n\t\t\t\tadj.append(i-self.width)\r\n\t\t\telse:\r\n\t\t\t\tadj.extend([i-self.width, i+self.width])\r\n\r\n\t\t\t# When initializing the cells, make the accessible cells equal\r\n\t\t\t# to the adjacent cells\r\n\t\t\tacc = copy.copy(adj)\r\n\r\n\t\t\t# Append a Cell object to the cell list\r\n\t\t\tself.cell_list.append(Cell(i, adj, acc))\r\n\r\n\t\treturn 0", "title": "" }, { "docid": "a19a04c66944e34b594f2aa4a25613c4", "score": "0.6825201", "text": "def zero_matrix(width, height) -> list:\n return [[0] * width for _ in range(height)]", "title": "" }, { "docid": "39d7e094e7e45deb9323ea3fbd089563", "score": "0.6818011", "text": "def create_grid(grid):\r\n for i in range (4):\r\n grid.append([0,0,0,0])", "title": "" }, { "docid": "87c6cfc343d2d772499c52b136ac0489", "score": "0.67997706", "text": "def cell_view(self)-> list:\n\n return [[\"cell\" for x in range(self.width)] \n for y in range(self.height)]", "title": "" }, { "docid": "993269c6e47a33c6159261104c23d565", "score": "0.67744595", "text": "def reset(self):\n self.cells = [ [0 for dummy_col in range(self.grid_width)] for dummy_row in range(self.grid_height)]", "title": "" }, { "docid": "31ead531dd43d6f3e0ce27da16887c06", "score": "0.67467433", "text": "def create_grid(self):\n\n grid = []\n for rowNumber in range(self.__rows):\n row = []\n for columnNumber in range(self.__columns):\n row.append(Cell(rowNumber, columnNumber))\n grid.append(row)\n return grid", "title": "" }, { "docid": "6a43d33452bbb442126514cd8affad51", "score": "0.6675033", "text": "def copy(cells):\n l = []\n for i in range(len(cells)):\n l.append([])\n for j in 
range(len(cells[i])):\n l[i].append(cells[i][j])\n return(l)", "title": "" }, { "docid": "e73619c1ac4a67a421fe8bc049f0ca04", "score": "0.661965", "text": "def create_score_grid(board):\r\n scoregrid = []\r\n for dummy_index in range(board.get_dim()):\r\n onerow = []\r\n for dummy_index2 in range(board.get_dim()):\r\n onerow.append(0)\r\n scoregrid.append(onerow)\r\n return scoregrid", "title": "" }, { "docid": "1341a6f27c9613fbeeb73896f4271b41", "score": "0.66073817", "text": "def EmptyCells(CurrentBoard):\n Cells = []\n\n for x, Row in enumerate(CurrentBoard):\n for y, Cell in enumerate(Row):\n if Cell == 0:\n Cells.append([x, y])\n\n return Cells", "title": "" }, { "docid": "1c82ab9052d374ba12f53f166c8fda31", "score": "0.6583501", "text": "def make_grid():\r\n width = 900\r\n rows = 9\r\n grid = []\r\n for row1 in range(rows):\r\n grid.append([])\r\n for column1 in range(rows):\r\n node1 = Node(row1, column1)\r\n grid[row1].append(node1)\r\n\r\n return grid", "title": "" }, { "docid": "b4af3e298358ede0fd33a74724efe514", "score": "0.6563564", "text": "def reset(self):\n # replace with your code\n grid_height = self.get_grid_height()\n grid_width = self.get_grid_width()\n grid = [[] for i in range(grid_height)]\n for i in range(grid_height):\n grid[i] = [0 for j in range(grid_width)]\n # print grid\n self.grid = grid\n return grid", "title": "" }, { "docid": "b8a4974ba0ef497adaed11eb59c74c4e", "score": "0.65498054", "text": "def mkcells(self, dimensions):\n if dimensions > 0:\n # generate all values in the lower dimension\n pts = self.mkcells(dimensions - 1)\n # prepent the current dimension's values to each lower value\n for pt in pts:\n for i in xrange(self.size):\n yield [i] + pt \n else:\n yield []", "title": "" }, { "docid": "a40b4926b645ee93ba099ae1146641c2", "score": "0.65284723", "text": "def get_empty(self):\n self._empty = []\n for row in range(len(self._grid)):\n for col in range(len(self._grid[row])):\n if self._grid[row][col] == 0:\n self._empty.append([row, col])\n return self._empty", "title": "" }, { "docid": "2c6e7370bd488dd66acaf8c2d4b60588", "score": "0.65027374", "text": "def _init_grid(self, fill_value):\n # Get the height and width from the shape\n height, width = self.shape\n # Create the grid\n values = []\n for i in range(height):\n row = []\n for j in range(width):\n coord = (i, j)\n # Create an empty cell\n cell = Cell(coord, fill_value)\n row.append(cell)\n values.append(row)\n \n return values", "title": "" }, { "docid": "82808120810fdcb693e463bee50caa98", "score": "0.6485312", "text": "def empty_cells(self,state):\n cells = []\n for i in range(self.N):\n for j in range(self.N):\n if state[i][j] == 0:\n cells.append([i, j])\n return cells", "title": "" }, { "docid": "791c51f5be4813cd6f80ca353e37d51d", "score": "0.64579856", "text": "def InitializeBoard(self):\n self.board = []\n for y in range(self.width):\n self.board.append([0] * self.height)", "title": "" }, { "docid": "4300489e4c0402d86cc3f275c233e7b1", "score": "0.644287", "text": "def cell_list(self):\n cell_list = []\n for i in range(self.__size):\n for j in range(self.__size):\n cell_list.append((i, j))\n cell_list.append(self.target_location())\n return cell_list", "title": "" }, { "docid": "499cc669f5d019adb97f1638c9d9bc30", "score": "0.6426016", "text": "def init_board(self):\n self.new_board = []\n for _ in range(self.board_height):\n row = []\n for _ in range(self.board_width):\n row.append(\"0\")\n self.new_board.append(row)", "title": "" }, { "docid": "90bcf4c789e47a78335a676c30eae6dd", "score": 
"0.6413139", "text": "def list(self):\n\n self.cell_list= []\n self.cell_list.append(self.indivcell * self.num_layers)\n\n return self.cell_list", "title": "" }, { "docid": "97eb9335bef32ce722c71e16ff3038af", "score": "0.6402879", "text": "def _create_board(size):\r\n\r\n board = []\r\n\r\n for x in range(size):\r\n row = []\r\n\r\n for y in range(size):\r\n row.append(0)\r\n board.append(row)\r\n\r\n return board", "title": "" }, { "docid": "256cce833d8b518022a2a715c74e9365", "score": "0.63626826", "text": "def _create_empty_board(self):\n return [[(255, 255, 255) for _ in range(self.COLS)] for _ in range(self.ROWS)]", "title": "" }, { "docid": "60ed5b740e7ab19ad4684e94bb499da9", "score": "0.63561285", "text": "def generate_arrays(rows, cols):\n\n board = []\n walls = []\n\n for y in range(cols):\n board.append([])\n walls.append([])\n for x in range(rows):\n board[y].append(0)\n walls[y].append([])\n for i in range(2):\n walls[y][x].append(1)\n\n return board, walls", "title": "" }, { "docid": "60fe7484f0cad268f583cc8ae8f9115f", "score": "0.6329085", "text": "def init_grid():\r\n grid = [[ey, bp, ey, bp, ey, bp, ey, bp],\r\n [bp, ey, bp, ey, bp, ey, bp, ey],\r\n [ey, bp, ey, bp, ey, bp, ey, bp],\r\n [ey, ey, ey, ey, ey, ey, ey, ey],\r\n [ey, ey, ey, ey, ey, ey, ey, ey],\r\n [wp, ey, wp, ey, wp, ey, wp, ey],\r\n [ey, wp, ey, wp, ey, wp, ey, wp],\r\n [wp, ey, wp, ey, wp, ey, wp, ey]]\r\n return grid", "title": "" }, { "docid": "266f1082b96d89152e2d6ff7c6b4e6ac", "score": "0.63140285", "text": "def __init__(self, width = 7, height = 6):\n self.__gameBoard = []\n self.__width = width\n self.__height = height\n for col in range(width):\n self.__gameBoard += [[]]\n #print(self.__gameBoard)\n for row in range(height):\n self.__gameBoard[col] += \" \"", "title": "" }, { "docid": "c9f7265b03097a17cf411318b934399a", "score": "0.6311199", "text": "def init_grids(self):\r\n def create_grid():\r\n \"\"\"\r\n Creates an empty 2D grid\r\n \"\"\"\r\n rows = []\r\n for row_num in range(self.num_rows):\r\n list_of_columns = [0]*self.num_columns\r\n rows.append(list_of_columns)\r\n return rows\r\n\r\n self.grids.append(create_grid())\r\n self.grids.append(create_grid())\r\n self.set_grid()\r\n print(self.grids[0])", "title": "" }, { "docid": "f870f968107292df2d725412922966a6", "score": "0.63029796", "text": "def initialise_grid(pos, box_size, radius): \n\n\n grid_list = np.zeros((len(pos), len(box_size)), dtype='int')\n for i in range(len(grid_list)):\n grid_list[i] = grid_from_pos(pos[i], box_size, radius) \n return grid_list", "title": "" }, { "docid": "320c746b02793fa69af31b526767b3ba", "score": "0.63024455", "text": "def emptyBoard():\n \n return [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]", "title": "" }, { "docid": "4a7288d3abbfe7d48f34c7189051c8c3", "score": "0.62559974", "text": "def generate_empty_table():\n row_len = table_left_margin + 1 + (1+cell_inner_width)*n_cell_x\n count_rows = table_up_margin + 1 + (1+cell_inner_height)*n_cell_y\n\n table = [[\"\"] for y in range(table_up_margin)] \n for y in range(count_rows-table_up_margin):\n now_row = [\" \"] * table_left_margin\n if y % (cell_inner_height+1) == 0:\n now_row += [\"+\"] + ([\"-\"] * cell_inner_width + [\"+\"]) * n_cell_x\n else:\n now_row += [\"|\"] + ([\" \"] * cell_inner_width + [\"|\"]) * n_cell_x\n table.append(now_row)\n\n return table", "title": "" }, { "docid": "0a8b2d91026272a059be54f2363a7f33", "score": "0.6254948", "text": "def new_board(width=10, height=None):\r\n if height is None:\r\n height = width\r\n if 
height <= 0 or width <= 0:\r\n return\r\n ls = []\r\n for ind in range(height):\r\n ls.append(list(None for leng in range(width)))\r\n return ls", "title": "" }, { "docid": "131b78e86a783fde3b00740036512916", "score": "0.62548244", "text": "def get_empty_tiles(self):\n return [(row, col) for col in xrange(self.get_grid_width())\n for row in xrange(self.get_grid_height())\n\n if self.get_tile(row,col) == 0]", "title": "" }, { "docid": "3b08276f515a4c8008732858e6dcacfa", "score": "0.6248108", "text": "def create_cell_list() -> list:\n\n list_representation = []\n\n for _ in range(ROW_COUNT):\n column_repr = []\n for _ in range(COLUMN_COUNT):\n random_int = random.randrange(2)\n label = 1 if random_int >= 1 else 0\n column_repr.append(label)\n list_representation.append(column_repr)\n\n return list_representation", "title": "" }, { "docid": "3317b1203fe50183228cd23c9979fddc", "score": "0.62307674", "text": "def make_grid(rows, size):\n\n grid = []\n gap = size // rows\n\n for i in range(rows):\n grid.append([])\n # Same number of columns as rows so rows can be used again\n for j in range(rows):\n node = Node(i, j, gap, rows)\n grid[i].append(node)\n\n set_hard_barriers(grid)\n\n return grid", "title": "" }, { "docid": "8a93a04c232930f36dfbe2612cb04d31", "score": "0.6212587", "text": "def make_cells_matrix(cells_matrix, lines, columns):\n for i in range(0, len(lines)):\n col = []\n for j in range(0, len(columns)):\n col.append(0)\n\n cells_matrix.append(col)", "title": "" }, { "docid": "70886a3fd71be7e53bd9789bb5790e1f", "score": "0.62049663", "text": "def to_squares(grid):\n squares = []\n for _ in range(9):\n squares.append([])\n for rownum, row in enumerate(grid):\n for colnum, cell in enumerate(row):\n squareno = colnum // 3 + (rownum // 3) * 3\n squares[squareno].append(cell)\n return squares", "title": "" }, { "docid": "fc3d428513c0eccc1269011d0abe0925", "score": "0.61839396", "text": "def init_grid(n):\n\t\n\tnum = [] \n\tgrid = []\n\n\tfor i in range(n * n): # Creates a list of numbers that will be added into the grid\n\t\tnum.append(i)\n\t\n\tfor i in range(n): \n\t\tgrid.append(num[:n])\n\t\tnum = num[n:] # This splices the list of numbers, so that the used numbers aren't reused\n\treturn grid", "title": "" }, { "docid": "a396bf83fd644ec399962720ca662aa3", "score": "0.6178469", "text": "def get_clear_board(self) -> List[List[GameTile]]:\n return [\n [\n GameTile(x_position=column, y_position=row, ship_id=EMPTY_TILE_ID)\n for column in range(self.size)\n ]\n for row in range(self.size)\n ]", "title": "" }, { "docid": "1674f4c0b664d1c7e6a25c9eb4c89393", "score": "0.6142199", "text": "def create_knots_grid(cell, image_original, cell_height, cell_width, input_knot_size):\n\n row_index = 0 # ro create the 2 dimensional list\n img_grid = []\n\n # Iterate through the image pixel list, in cell jumps each iteration\n for x in range(0, image_original.height, cell_height):\n if x + cell_height >= image_original.height:break\n img_grid.append([])\n for y in range(0, image_original.width, cell_width):\n if y + cell_width >= image_original.width:break\n # set the index variables and get the small cell pixels and the new averege color\n cell.set_height_index(y)\n cell.set_width_index(x)\n\n # Calculate the color of the cell\n cell.set_cell_color()\n\n # From the cell color we calculated contruct a new iamge\n image_constructed = Image_constructed(input_knot_size, input_knot_size, cell.color)\n\n # Add the images to the image array so we can show them as a collection af one picture\n 
img_grid[row_index].append(image_constructed.img)\n\n row_index += 1\n\n return img_grid", "title": "" }, { "docid": "c7ba3e8ab44d4c731891c156a307c8fd", "score": "0.61253613", "text": "def create_grid(n=4):\n game_grid = []\n # Grid of size n x n\n for i in range(0, n):\n line = [0] * n\n game_grid.append(line)\n return game_grid", "title": "" }, { "docid": "3bdb4677d219838aa5868c5e74d046b0", "score": "0.6120114", "text": "def cell_list(self):\n\n result = list()\n\n for i in range(BOARD_LEN):\n for j in range(BOARD_LEN):\n result.append((i, j))\n\n result.append(self.target_location())\n\n return result", "title": "" }, { "docid": "94875e7fdb68ba7a81ca7c18933cabca", "score": "0.61153424", "text": "def zeros_matrix(rows, cols):\n M = []\n while len(M) < rows:\n M.append([])\n while len(M[-1]) < cols:\n M[-1].append(Fraction(0))\n\n return M", "title": "" }, { "docid": "51ba6cdc6b36011f820a993874652773", "score": "0.61031294", "text": "def make_grid(self, width, height, reveal_callback, flag_callback):\n width = int(width)\n height = int(height)\n grid = [[0 for y in range(height)] for x in range(width)]\n for i in range(0, width):\n for j in range(0, height):\n grid[i][j] = Square(self.board_window, i, j, reveal_callback, flag_callback)\n return grid", "title": "" }, { "docid": "c845595d79b05cc5d5b428864c107461", "score": "0.6102652", "text": "def create_matrix(seq):\n lst = []\n for row in range(len(seq)):\n lst.append([])\n for col in range(len(seq)):\n lst[row].append(0)\n return lst", "title": "" }, { "docid": "2c5c47df2c061c7978b7e0f9e4cb002f", "score": "0.609848", "text": "def _set_cells(self, size_width, size_height):\n\n i = 0\n \n while i < size_height:\n rowCells = list()\n j = 0\n\n while j < size_width:\n rowCells.append(Cell(j,i))\n j = j + 1\n\n i = i + 1\n self._cells.append(rowCells)", "title": "" }, { "docid": "66d79d1f78dc5f680d2349d82a258523", "score": "0.60910124", "text": "def generate_empty_board(x,y):\n board = [[0]*y for i in range(x)] \n \n return board", "title": "" }, { "docid": "5ed651ca62eda806782ca2394f300223", "score": "0.6089305", "text": "def convert_grid(grid: list) -> list:\n dims = len(grid)\n converted_grid = [['' for dim in range(dims)] for dim in range(dims)]\n for y, line in enumerate(grid):\n for x, cell in enumerate(line):\n if cell in ['🏡', '👦', '👹', '👾', '⬛']:\n converted_grid[y][x] = 0\n else:\n converted_grid[y][x] = 1\n # print(grid)\n return converted_grid", "title": "" }, { "docid": "5ed651ca62eda806782ca2394f300223", "score": "0.6089305", "text": "def convert_grid(grid: list) -> list:\n dims = len(grid)\n converted_grid = [['' for dim in range(dims)] for dim in range(dims)]\n for y, line in enumerate(grid):\n for x, cell in enumerate(line):\n if cell in ['🏡', '👦', '👹', '👾', '⬛']:\n converted_grid[y][x] = 0\n else:\n converted_grid[y][x] = 1\n # print(grid)\n return converted_grid", "title": "" }, { "docid": "9c5441db7b6c1059a06422e632798d8f", "score": "0.60852504", "text": "def autogrid(layoutBB, cell_bbs, netlist, meshsize):\n xmin, xmax, ymin, ymax = layoutBB[0][0], layoutBB[1][0], layoutBB[0][1], layoutBB[1][1]\n xlist = np.arange(xmin, xmax+meshsize, meshsize)\n ylist = np.arange(ymin, ymax+meshsize, meshsize)\n grid = []\n \"\"\" Make a copy of netlist that isn't a tuple \"\"\"\n netlist_grid = [None, None]\n \n \n for j in xrange(len(ylist)-1):\n row = []\n for i in xrange(len(xlist)-1):\n cornerBB = [[xlist[i], ylist[j]], [xlist[i]+meshsize, ylist[j]+meshsize]]\n \n occupied = 0 #not occupied\n for bb in cell_bbs:\n if 
doBoxesIntersect(bb, cornerBB):\n occupied=1\n \n if isInside(netlist[0], cornerBB):\n netlist_grid[0] = [i, j]\n occupied = 2\n if isInside(netlist[1], cornerBB):\n netlist_grid[1] = [i, j] \n occupied = 2\n \n row.append(occupied)\n \n grid.append(row)\n return grid, netlist_grid", "title": "" }, { "docid": "06e2f3724c5415e37bbd8e171df7ea10", "score": "0.6082912", "text": "def _new_game_board(columns: int, rows: int, topleft) -> [list]:\r\n board = []\r\n top_left = topleft\r\n for col in range(columns):\r\n board.append([])\r\n for row in range(rows):\r\n board[-1].append(\"{:3}\".format(EMPTY))\r\n if top_left == BLACK:\r\n color1 = BLACK\r\n color2 = WHITE\r\n elif top_left == WHITE:\r\n color1 = WHITE\r\n color2 = BLACK\r\n board[int(columns/2)-1][int(rows/2)-1] = \"{:3}\".format(color1)\r\n board[int(columns/2)][int(rows/2)] = \"{:3}\".format(color1)\r\n board[int(columns/2)][int(rows/2)-1] = \"{:3}\".format(color2)\r\n board[int(columns/2)-1][int(rows/2)] = \"{:3}\".format(color2)\r\n return board", "title": "" }, { "docid": "1cbc3eff7c4a19142ff253664f96c270", "score": "0.60653996", "text": "def __init__(self, grid=None, width=10, height=10):\n cell_null = Cell(1)\n if grid:\n self._width = len(grid)\n self._height = len(grid[0])\n self._grid = grid\n for i in range(0, self._width):\n for j in range(0, self._height):\n self._grid[i][j] = Cell(self._grid[i][j])\n else:\n self._width = width\n self._height = height\n self._grid = [cell_null] * width\n for i in range(0, height):\n self._grid[i] = [cell_null] * height", "title": "" }, { "docid": "46cc682d5e6873e2516b4196c91bdb39", "score": "0.6055889", "text": "def cells(self):\n res = []\n for i in range(len(self)):\n for j in range(len(self[i])):\n if self[i][j] is not None:\n res.append( (i,j) )\n return res", "title": "" }, { "docid": "3b318674ef59cfa8cf300617f6a37435", "score": "0.6055842", "text": "def inner_grid(height, width):\n grid = create_grid(height, width)\n for r in range(1, height - 1):\n for c in range(1, width - 1):\n if r > 0 and r < height - 1:\n grid[r][c] = 1\n return grid", "title": "" }, { "docid": "b53a7a22baa1db9e499dbe94708b79fc", "score": "0.60317385", "text": "def __init__(self, n):\n self.grid = [[0 for i in range(n)] for j in range(n)]", "title": "" }, { "docid": "b3a59f7f87ece9e4fabb6b955d47be7a", "score": "0.6031271", "text": "def init_hw_matrix(height, width):\r\n return [[None for _ in xrange(width)] for _ in xrange(height)]", "title": "" }, { "docid": "960925f137f22e313a53bcafa61504a2", "score": "0.6027164", "text": "def zeroMatrix(rows, cols):\n matrixZ = []\n while len(matrixZ) < rows:\n matrixZ.append([])\n while len(matrixZ[-1]) < cols:\n matrixZ[-1].append(0.0)\n return matrixZ", "title": "" }, { "docid": "801e13ef13e56eb924c73d996fbc9c5b", "score": "0.6005218", "text": "def expand(grid):\n\n index_x = 0\n index_y = 0\n min_zeros = 82\n\n # check through all 0's\n for i in range(len(grid)):\n zeros = 0\n for j in range(len(grid[i])):\n if grid[i][j] == 0:\n\n # Count for surrounding 0's\n for k in range(len(grid)):\n if grid[i][k] == 0:\n zeros += 1\n for k in range(len(grid)):\n if grid[k][j] == 0:\n zeros += 1\n start_x, start_y = quadrant(i,j)\n for k in range(3):\n x = start_x + k\n for l in range(3):\n y = start_y + l\n if grid[x][y] == 0:\n zeros += 1\n zeros -= 3 # Counted ourself 3 times\n if zeros < min_zeros:\n min_zeros = zeros\n index_x = i\n index_y = j\n\n expanded = []\n for i in range(1,10):\n grid[index_x][index_y] = i\n expanded.append(deepcopy(grid))\n\n return expanded", 
"title": "" }, { "docid": "da43b409a52a7ea149ff10acfeb7d994", "score": "0.5999655", "text": "def _cell_vert_list(self, i, j):\n self._copy_cache = False\n pts = []\n xgrid, ygrid = self.xvertices, self.yvertices\n pts.append([xgrid[i, j], ygrid[i, j]])\n pts.append([xgrid[i + 1, j], ygrid[i + 1, j]])\n pts.append([xgrid[i + 1, j + 1], ygrid[i + 1, j + 1]])\n pts.append([xgrid[i, j + 1], ygrid[i, j + 1]])\n pts.append([xgrid[i, j], ygrid[i, j]])\n self._copy_cache = True\n if np.isscalar(i):\n return pts\n else:\n vrts = np.array(pts).transpose([2, 0, 1])\n return [v.tolist() for v in vrts]", "title": "" }, { "docid": "50508b325658b11386977ee0360c17a6", "score": "0.59924656", "text": "def getEmptyCells(self):\n \n return [i for i,x in enumerate(self.tiles) if x==0]", "title": "" }, { "docid": "c7cab1f9ca9cddee3c58932280d6978a", "score": "0.59924215", "text": "def getEmptyCells(self):\n return [(x, y) for x in self.__size_range\n for y in self.__size_range if self.getCell(x, y) == 0]", "title": "" }, { "docid": "f5ac1c34f2a454d34ae46dab39c703a5", "score": "0.5988802", "text": "def make_grid(area):\n height = 3\n width = (area // height) + 1\n return [[1] * width for _ in range(height)]", "title": "" }, { "docid": "37e55f540b5b2ab3c8c61c8e95ca31b0", "score": "0.5986159", "text": "def __init__(self):\n self.contents = []\n for i in range(9):\n self.contents.append([])\n for x in range(9):\n self.contents[len(self.contents) - 1].append(0)", "title": "" }, { "docid": "6c087c091cc3be7c1a55749dbf707919", "score": "0.59751743", "text": "def init_board(size=5):\n board = []\n row = []\n while len(row) < size:\n row.append(\"0\")\n while len(board) < size:\n copy_row = row.copy()\n board.append(copy_row)\n return board", "title": "" }, { "docid": "e8428c9915b8c6fce49ad0c968d72bf7", "score": "0.5974593", "text": "def reset(self):\n self._cells = [[0 for dummy_col in range(self._grid_width)] for dummy_row in range(self._grid_height)]\n self.new_tile()\n self.new_tile()", "title": "" }, { "docid": "768bdb50b38cedb8fcedfc2b57d4c922", "score": "0.59745777", "text": "def __init__(self, rows, collumns, start=(0, 0), end=None):\n\n if end is None:\n self.end = (rows - 1, collumns - 1)\n else:\n self.end = end\n\n self.start = start\n\n self._grid = []\n self.rows = rows\n self.collumns = collumns\n\n for row in range(rows):\n self._grid.append([])\n for col in range(collumns):\n if (row, col) == self.start:\n node_type = NodeType.START\n elif (row, col) == self.end:\n node_type = NodeType.END\n else:\n node_type = NodeType.EMPTY\n\n self._grid[row].append(Node(row, col, node_type))", "title": "" }, { "docid": "59ed4890a71abdfae3295b403b763e8d", "score": "0.5972279", "text": "def zero_matrix(rows, cols):\n return [([0]*cols) for _ in range(rows)]", "title": "" }, { "docid": "b705406195913826eec744efc27fb6f2", "score": "0.5970957", "text": "def fill_initial_board(self):\n new_board = []\n for row in range(self.NUM_SQ):\n row = []\n for col in range(self.NUM_SQ):\n row.append(False)\n new_board.append(row)\n return new_board", "title": "" }, { "docid": "b579730cc1acd737b13213b970388984", "score": "0.59562445", "text": "def square_2d_list_w_list_comp(input_grid=PYTHON_GRID):\n\t# Must do this since Lists are mutable\n\tpower_grid = copy.deepcopy(input_grid)\n\n\tgrid = [[pow(y,2) for y in x] for x in power_grid]\n\n\treturn grid", "title": "" }, { "docid": "c7bbb639035971c70f12bc6305a3c52e", "score": "0.5944123", "text": "def create_maze(width: int, height: int) -> List[List[str]]:\n horizontal, vertical = 
MazeAlgorithm.generate_maze(width, height)\n maze = []\n for i in range(len(horizontal)):\n maze.append(horizontal[i])\n maze.append(vertical[i])\n\n # Mark start and finish point\n maze[1][0] = '| S '\n maze[-3][-2] = ' F '\n cell_string_as_list = list(maze[-3][-2])\n cell_string_as_list[2] = 'F'\n maze[-3][-2] = ''.join(cell_string_as_list)\n\n return maze", "title": "" }, { "docid": "b65f8a986a174d2e86be6e1857de0b23", "score": "0.5939332", "text": "def basic_init(self):\n self.rows = self.init_2d_array(self.n ** 2)\n self.cols = self.init_2d_array(self.n ** 2)\n self.boxes = self.init_2d_array(self.n ** 2)\n self.uncompleted = [0 for _ in range(self.n ** 4)]\n\n for r in range(self.n ** 2):\n for c in range(self.n ** 2):\n cell = Cell(n=self.n, row=r, col=c)\n self.cells.append(cell)\n self.uncompleted[cell.get_pos_in_sudoku()] = cell\n self.rows[r][c] = cell\n self.cols[c][r] = cell\n self.boxes[cell.get_box()][cell.get_pos_in_box()] = cell", "title": "" }, { "docid": "e7d06adef113d8c800344175bb71ac85", "score": "0.59277004", "text": "def get_all_empty_squares(self):\r\n empty = []\r\n for boxrow in range(self._dim):\r\n for boxcol in range(self._dim):\r\n for row in range(self._dim):\r\n for col in range(self._dim):\r\n if self._board[boxrow][boxcol][row][col] == EMPTY:\r\n empty.append((boxrow, boxcol, row, col))\r\n return empty", "title": "" }, { "docid": "3c135d9147103544a96ac8831c8205fd", "score": "0.59232134", "text": "def init_board(self, size):\n self.board = []\n for _ in range(size):\n self.board.append([0] * size)", "title": "" }, { "docid": "f63f8db500b8aab0e859efa4a19237fb", "score": "0.5918179", "text": "def __init__(self, cells: Tuple[int, int] = PIXEL_SIZE,\r\n cell_size: int = CELL_SIZE):\r\n self.cells = cells\r\n self.cell_size = cell_size\r\n\r\n # convert grid dimensions to pixel dimensions\r\n self.pixels = tuple(i * cell_size for i in cells)", "title": "" }, { "docid": "d67bb15b44738264f3b79e2ddd04d934", "score": "0.58859825", "text": "def copy_grid (grid):\r\n grid2=[]\r\n grid2 = create_grid(grid2)\r\n for i in range (len(grid)):\r\n for j in range(4):\r\n grid2[i][j]=(grid[i][j])\r\n return grid2", "title": "" }, { "docid": "dde48e8da2df33109d74e8c4d4299c9b", "score": "0.588322", "text": "def reset(self):\n for index in range(0, self.grid_height):\n self.grid.append([index - index] * self.grid_width)\n return self.grid", "title": "" }, { "docid": "1346a81030e98428785031e38006e377", "score": "0.5877573", "text": "def get_empty_squares(self):\n empty_squares = []\n for i in range(self.size):\n for j in range(self.size):\n if self.get_status(i, j) == \"empty\":\n empty_squares.append((i, j))\n return empty_squares", "title": "" }, { "docid": "f68d49b4f7a7bcb58685f9aa463cbeea", "score": "0.5874871", "text": "def init_grid(grid, cell_indexes):\n# reviewer edited frm to from in above description\n\n for active in cell_indexes:\n i, j = active.split(\":\") # splits any numbers with a colon in between them\n grid[int(i) - 1][int(j) - 1] = 1", "title": "" }, { "docid": "151b68a6de889bb780d008d199e66ae0", "score": "0.58727556", "text": "def new_tile(self):\n empty_square_list = [] \n for row in range(self.grid_height):\n for col in range(self.grid_width):\n if self.cells[row][col] == 0:\n empty_square_list.append((row, col))\n \n index = random.choice(empty_square_list)\n row = index[0]\n col = index[1]\n self.cells[row][col] = random.choice([2, 2, 2, 2, 2, 2, 2, 2, 2, 4])", "title": "" }, { "docid": "3e347b49b61350a6187488a6bbc45bdb", "score": "0.58723783", "text": 
"def empty_tiles(self) -> List[Tuple[int, int]]:\n empty_tiles = []\n for row in range(3):\n for column in range(3):\n if self._board[row][column] == TileState.EMPTY:\n empty_tiles.append([row, column])\n return empty_tiles", "title": "" }, { "docid": "59068c53cd57c3ce5609a7118dbff036", "score": "0.58722585", "text": "def init_board(self):\n if not self.board_initialized:\n self.board_initialized = True\n xrng, yrng = range(self.width), range(self.height)\n self.board = [ [self.make_tile(Loc(x, y)) for x in xrng] for y in yrng ]", "title": "" }, { "docid": "beb35ac8d31b4fd2f5456d0b662bcea2", "score": "0.5866986", "text": "def createOneRow(width):\n row = []\n for col in range(width):\n row += [0]\n return row", "title": "" }, { "docid": "afc5899a47c6f01049cc39d7e15c3816", "score": "0.58668464", "text": "def initial_board(self):\n board = [EMPTY] * 80\n for i in self.squares():\n board[i] = EMPTY\n return board", "title": "" }, { "docid": "0d410f9c3cf5c8b40b7322821033cb13", "score": "0.5863817", "text": "def __init__(self, wall_length, wall_width):\n self.length = wall_length\n self.width = wall_width\n self.matrix = []\n for i in range(0, self.length):\n self.matrix.append([])\n for j in range(0, self.width):\n self.matrix[i].append('X')", "title": "" }, { "docid": "6ee5f49d2111782a9fbf21831ac50ab6", "score": "0.58515555", "text": "def __init__(self, size) :\n \n self._W = list()\n for i in range(size):\n self._W.append(list())\n for j in range(size):\n self._W[i].append(0 if i == j else math.inf)", "title": "" }, { "docid": "89dd3d1d98f5c32fad0460af2803e9b2", "score": "0.58417773", "text": "def create_grid(list_representation: list) -> arcade.SpriteList:\n\n grid = arcade.SpriteList()\n\n for row_index, row in enumerate(list_representation):\n for value_index, value in enumerate(row):\n x_coord = (MARGIN + WIDTH) * value_index + MARGIN + WIDTH // 2\n y_coord = (MARGIN + HEIGHT) * row_index + MARGIN + HEIGHT // 2\n color = FILLED if value else EMPTY\n\n circle = arcade.SpriteCircle(WIDTH // 2, color)\n circle.center_x = x_coord\n circle.center_y = y_coord\n\n grid.append(circle)\n\n return grid", "title": "" } ]
a24db6b35b2e15cbd0ffa24ee0106685
Convert a list of ranges to a set of numbers
[ { "docid": "07869e22c41238d7506ad869c60bc49a", "score": "0.78628695", "text": "def range_to_set(ranges):\n return set().union(*[set(range(l, h + 1)) for l, h in ranges])", "title": "" } ]
[ { "docid": "e034e9ef47574a444309c76e9a1af6e0", "score": "0.7111665", "text": "def parse_ranges(ccdlist):\n s = set()\n for r1 in ccdlist.split(','):\n r2 = r1.split('-')\n if len(r2) == 1:\n s.add(int(r2[0]))\n elif len(r2) == 2:\n for j in range(int(r2[0]), int(r2[1]) + 1):\n s.add(j)\n else:\n raise ValueError('Bad integer range expression in parse_ranges: ' + r1)\n return sorted(s)", "title": "" }, { "docid": "7a381674d817c100781dc2e2101507b9", "score": "0.70323634", "text": "def _run_ranges_to_set(self, ranges):\n return range_to_set(self._run_ranges(ranges))", "title": "" }, { "docid": "6d48b0948295af60226a80592a3d744e", "score": "0.6843297", "text": "def parse_range(rangelist):\n\n oklist = set([])\n excludelist = set([])\n\n rangelist = rangelist.replace(' ', '')\n rangelist = rangelist.split(',')\n\n # item is single value or range\n for item in rangelist:\n item = item.split(':')\n\n # change to ints\n try:\n int_item = [int(ii) for ii in item]\n except(ValueError):\n print repr(':'.join(item)), 'not convertable to integer'\n raise\n\n if 1 == len(int_item):\n # single inclusive or exclusive item\n if int_item[0] < 0:\n excludelist.add(abs(int_item[0]))\n else:\n oklist.add(int_item[0])\n\n elif 2 == len(int_item):\n # range\n if int_item[0] <= int_item[1]:\n if int_item[0] < 0:\n print item[0], ',', item[1], 'must start with a '\n 'non-negative number'\n return []\n\n if int_item[0] == int_item[1]:\n thisrange = [int_item[0]]\n else:\n thisrange = range(int_item[0], int_item[1]+1)\n\n for ii in thisrange:\n oklist.add(ii)\n else:\n print item[0], ',', item[1], 'needs to be in increasing '\n 'order'\n raise\n else:\n print item, 'has more than 2 values'\n\n for exitem in excludelist:\n try:\n oklist.remove(exitem)\n except(KeyError):\n oklist = [str(item) for item in oklist]\n print 'ERROR: excluded item', exitem, 'does not exist in '\n 'inclusive range'\n raise\n\n return sorted(list(oklist))", "title": "" }, { "docid": "9f02aa0823ace47455ad6457305f4e2a", "score": "0.6772059", "text": "def to_ranges(iterable):\n iterable = sorted(set(iterable))\n for key, group in groupby(enumerate(iterable), lambda t: t[1] - t[0]):\n group = list(group)\n if sys.version_info.major == 3:\n yield range(group[0][1], group[-1][1]+1)\n else:\n yield xrange(group[0][1], group[-1][1]+1)", "title": "" }, { "docid": "6c945e902b69a22f790cc6c3fc040c18", "score": "0.67651635", "text": "def get_residue_ranges(numbers):\n nums = sorted(set(numbers))\n gaps = [[s, e] for s, e in zip(nums, nums[1:]) if s + 3 < e]\n edges = iter(nums[:1] + sum(gaps, []) + nums[-1:])\n return list(zip(edges, edges))", "title": "" }, { "docid": "3cf84e855d49d655a76918a3a5bdc5f3", "score": "0.66908693", "text": "def all_ranges(self):\n ret = []\n for i in self.all_numb:\n ret.append(EnrichedTuple(*[range(min(j), max(j) + 1) for j in i],\n getters=self.dimensions))\n return tuple(ret)", "title": "" }, { "docid": "7550114ec0a7b34aa8b13c317905de5b", "score": "0.6619967", "text": "def range_to_numbers(start, end):\n\n def RepresentsInt(s):\n try:\n int(s)\n return True\n except ValueError:\n return False\n\n if RepresentsInt(start) and RepresentsInt(end):\n # we have a range\n return range(int(start), int(end) + 1)\n\n # we have a complex range, so we need to be a bit more creative\n\n start_int = int(\"\".join(c for c in start if c in \"0123456789\"))\n end_int = int(\"\".join(c for c in end if c in \"0123456789\"))\n numeric_range = range(start_int, end_int + 1)\n numbers = []\n for x in numeric_range:\n number = str(x)\n pos = 0\n 
exten = \"\"\n for character in start:\n if character in \"0123456789\":\n # we are eligable to replace this with a digit from our range\n exten += number[pos]\n pos = pos + 1\n else:\n exten += character\n numbers.append(exten)\n\n return numbers", "title": "" }, { "docid": "7aadf7c29463f218558de533bfafb723", "score": "0.6518725", "text": "def parseRangeString(s, convertToZeroBased=False):\n\n result = set()\n for _range in s.split(','):\n match = _rangeRegex.match(_range)\n if match:\n start, end = match.groups()\n start = int(start)\n if end is None:\n end = start\n else:\n end = int(end)\n if start > end:\n start, end = end, start\n if convertToZeroBased:\n result.update(range(start - 1, end))\n else:\n result.update(range(start, end + 1))\n else:\n raise ValueError(\n 'Illegal range %r. Ranges must single numbers or '\n 'number-number.' % _range)\n\n return result", "title": "" }, { "docid": "33c2bca295af7213f5126a2f404c6b6f", "score": "0.6445233", "text": "def _irregular_frame_set(spec):\n result = set()\n val = spec.strip(',')\n for part in [x.strip() for x in val.split(',')]:\n number_matches = NUMBER_RE.match(part)\n range_matches = RANGE_RE.match(part)\n if number_matches:\n vals = number_matches.groups()\n result = result.union([int(vals[0])])\n elif range_matches:\n tup = _to_xrange(range_matches.groups())\n result = result.union(tup)\n return result", "title": "" }, { "docid": "791db63a6fbc4ab7620599e7e40a2506", "score": "0.6395762", "text": "def to_indexes(input_range: str) -> List[int]:\n input_range = input_range.replace(' ', '')\n if not set(input_range).issubset(set('1234567890,-')):\n raise SyntaxError(f\"The following characters are not allowed: {set(input_range) - set('1234567890,-')}\\n\"\n f\"Only use numbers(0-9), comma(,) or dash(-)!\")\n result = []\n for part in input_range.split(','):\n if '-' in part:\n a, b = part.split('-')\n result.extend(range(int(a), int(b) + 1))\n else:\n result.append(int(part))\n return result", "title": "" }, { "docid": "b3e2007b2f4f623b5034e0a74715f9b0", "score": "0.62789947", "text": "def _process_ranges(self, nb_samples):\n marks = np.linspace(0, nb_samples, self._nb_processes + 1, dtype=int)\n return [range(marks[i], marks[i + 1]) for i in range(self._nb_processes)]", "title": "" }, { "docid": "c96bd9f01e226c2ad41bc3de3154ab01", "score": "0.6253015", "text": "def find_range(nums):\n lowest = min(nums)\n highest = max(nums)\n r = highest - lowest\n return lowest, highest, r", "title": "" }, { "docid": "1b7d21b36889a8a2c119c7a47a6b0ac1", "score": "0.6239611", "text": "def range(self):\n lows, highs = [], []\n for feature in self.features.values():\n for start, end in feature.locations:\n lows.append(start)\n highs.append(end)\n if len(lows) != 0 and len(highs) != 0: # Default in case there is\n return (min(lows), max(highs)) # nothing in the set\n return 0, 0", "title": "" }, { "docid": "ea3f2f1cb41acaca6b83ae187bc34d6b", "score": "0.62151736", "text": "def convert_arg_range(arg):\n arg = arg.split(',')\n init = [map(int, option.split('_')) for option in arg]\n rv = []\n for i in init:\n if len(i) == 1:\n rv.append(i[0])\n elif len(i) == 2:\n rv.extend(range(i[0],i[1]+1))\n return rv", "title": "" }, { "docid": "2614cc46b735c97bba6ec69be85a8be7", "score": "0.62118465", "text": "def generateRange(value):\n valuesList = []\n disjointList = [x.strip() for x in value.split(\",\")]\n for r in disjointList:\n rLimits = r.split(\"-\")\n if len(rLimits)>1:\n valuesList.extend(range(XMLDoc.tryint(rLimits[0]), XMLDoc.tryint(rLimits[-1])+1))\n 
else:\n valuesList.append(XMLDoc.tryint(rLimits[0]))\n \n return valuesList", "title": "" }, { "docid": "717874eb74dbf27c4464d084fc9f30f1", "score": "0.6207504", "text": "def range(self):\r\n lows, highs = [], []\r\n for feature in self.features.values():\r\n for start, end in feature.locations:\r\n lows.append(start)\r\n highs.append(end)\r\n if len(lows) != 0 and len(highs) != 0: # Default in case there is\r\n return (min(lows), max(highs)) # nothing in the set\r\n return 0, 0", "title": "" }, { "docid": "1bda7b7467c2b7c572ab5132a45c091d", "score": "0.61758405", "text": "def ranges(self):\n ranges = []\n for grouped_range in self.grouped_ranges.values():\n for commit_range in grouped_range:\n if commit_range not in ranges:\n ranges.append(commit_range)\n\n return ranges", "title": "" }, { "docid": "1bda7b7467c2b7c572ab5132a45c091d", "score": "0.61758405", "text": "def ranges(self):\n ranges = []\n for grouped_range in self.grouped_ranges.values():\n for commit_range in grouped_range:\n if commit_range not in ranges:\n ranges.append(commit_range)\n\n return ranges", "title": "" }, { "docid": "5d6b901baa3471c5aa077cf4cd311c03", "score": "0.6169456", "text": "def parse_int_set(nputstr = \"\"):\n selection = set()\n invalid = set()\n # tokens are comma seperated values\n\n tokens = [x.strip() for x in nputstr.split(',')]\n for i in tokens:\n try:\n # typically tokens are plain old integers\n\n selection.add(int(i))\n except:\n # if not, then it might be range\n\n try:\n token = [int(k.strip()) for k in i.split('-')]\n if len(token) > 1:\n token.sort()\n # we have items seperated by dash\n # try to build valid range\n first = token[0]\n last = token[len(token)-1]\n for x in range(first, last+1):\n selection.add(x)\n except:\n # not an int and not range...\n\n invalid.add(i)\n return selection", "title": "" }, { "docid": "8728c91ed9a3e48738b6ffae497749ce", "score": "0.6166399", "text": "def ranges(self):\n return self.__ranges()", "title": "" }, { "docid": "f59cda806eb012b5553a9e2e5b2681f8", "score": "0.616627", "text": "def seq2set(xs):\n rs = []\n s = -1\n for x in xs:\n sx = x + 1\n s += sx\n rs.append(s)\n return rs", "title": "" }, { "docid": "22d45da8b51495088a5c9d26ef76d19f", "score": "0.6163825", "text": "def make_list_of_ranges_from_nums(nums, prefix=None):\n\n # Make sure they are sorted\n if not nums:\n return []\n nums = sorted(nums)\n ranges = []\n # The first range_start will be the first element of nums\n range_start = None\n for num, next_num in itemAndNext(nums):\n if not range_start:\n range_start = num\n\n if next_num is None or num + 1 != next_num:\n if prefix is not None:\n ranges.append((f\"{prefix}{range_start}\", f\"{prefix}{num}\"))\n else:\n ranges.append((range_start, num))\n range_start = None\n\n return ranges", "title": "" }, { "docid": "0ce37f20165d9253f0316857db9b575e", "score": "0.61355066", "text": "def get_range(arr: List[List[int]]):\n a_min, a_max, a_len = 0, 0, 0\n for i in range(0, len(arr)):\n a_min = min(arr[i]) if a_min > min(arr[i]) else a_min\n a_max = max(arr[i]) if a_max < max(arr[i]) else a_max\n a_len = len(arr[i]) if a_len < len(arr[i]) else a_len\n return a_min, a_max, a_len", "title": "" }, { "docid": "f8fde4f6b7394a9856b301e30988b44a", "score": "0.6121884", "text": "def test_get_cut_ranges(self):\n data = np.array([[1e12, -5.0],\n [1e12 + 1000, 6.2],\n [1e12 + 2000, 6.0],\n [1e12 + 3600, 8.0],\n [1e12 + 4000, -15.0],\n [1e12 + 5000, 2.0],\n [1e12 + 6000, 6.0],\n [1e12 + 7000, 3.0],\n [1e12 + 8000, -2.0],\n [1e12 + 9000, -42.0],\n [1e12 + 
10000, 8.0],\n [1e12 + 11000, 8.0],\n [1e12 + 12000, 8.0],\n [1e12 + 13000, -8.0]])\n\n result = _spark_get_cut_ranges(data=data, lambda_expr=lambda x: x > 0)\n\n self.assertEqual(result, [\n [1e12 + 1000, 1e12 + 3600],\n [1e12 + 5000, 1e12 + 7000],\n [1e12 + 10000, 1e12 + 12000]\n ])", "title": "" }, { "docid": "2142f6cb27d8fab51f2c0f654cb9eeb4", "score": "0.61176085", "text": "def ranges(self):\r\n return self._ranges", "title": "" }, { "docid": "da6550529e54f7fbbff4eb837efe950c", "score": "0.6106617", "text": "def get_ranges(self):\n if self._ranged_based:\n return list(self._ranges)\n return list(self.iter_ranges())", "title": "" }, { "docid": "d0ed8a6757aad21596cd3582e5df74d2", "score": "0.60885954", "text": "def calc_ranges(number, n_range):\r\n step = number / n_range\r\n ranges = {}\r\n for i in range(n_range):\r\n ranges[i+1] = [round(step*i), round(step*(i+1))]\r\n\r\n return ranges", "title": "" }, { "docid": "6ba8e223ff9e89853b645efc77a9d7f2", "score": "0.6079344", "text": "def array_conv(arr):\n comb = [] ### list of all created integers\n for item in arr:\n lst = []\n for ind in item.split(','):\n try:\n ind = list( map(int, re.split('-|:', ind) ) )\n except ValueError:\n assert False, \"Example of csv_ind: ['1-10,15', '1-5']\"\n\n if len(ind) > 1: #range goes from start value to include ending\n lst += range(ind[0], ind[1]+1)\n else:\n lst += ind\n comb.append(set(lst))\n return comb", "title": "" }, { "docid": "94af4e70a07eb9f0bbf4a37e2ed7a37c", "score": "0.6078123", "text": "def range(self):\r\n lows, highs = [], [] # Holds set of low and high values from sets\r\n if self.start is not None:\r\n lows.append(self.start)\r\n if self.end is not None:\r\n highs.append(self.end)\r\n for set in self._sets.values():\r\n low, high = set.range() # Get each set range\r\n lows.append(low)\r\n highs.append(high)\r\n if lows:\r\n low = min(lows)\r\n else:\r\n low = None\r\n if highs:\r\n high = max(highs)\r\n else:\r\n high = None\r\n return low, high # Return lowest and highest values\r", "title": "" }, { "docid": "64513b3808efed38f01f952946885c90", "score": "0.6075116", "text": "def detect_range(L):\n for i, j in itertools.groupby(enumerate(L), lambda x: x[1] - x[0]): \n j = list(j) \n yield j[0][1], j[-1][1]", "title": "" }, { "docid": "62495d145da6986af569560ca733985b", "score": "0.6060334", "text": "def _get_ranges(bounds: np.ndarray) -> Tuple[int, int]:\n x_range = np.max(bounds[:, 0]) - np.min(bounds[:, 0])\n y_range = np.max(bounds[:, 1]) - np.min(bounds[:, 1])\n return (x_range, y_range)", "title": "" }, { "docid": "fcf99e7773f7f5d2402c5573ba6faac0", "score": "0.6042965", "text": "def get_ranges(arr: np.ndarray) -> List[Tuple[int, int]]:\n ranges = []\n start = None\n for i, val in enumerate(arr):\n if (val and start) is None:\n start = i\n elif not val and start is not None:\n ranges.append((start, i))\n start = None\n if start is not None:\n ranges.append((start, len(arr)))\n return ranges", "title": "" }, { "docid": "6234eab17cebe89317837bfe428f27c0", "score": "0.59886456", "text": "def get_ranges(self, epsilon=0.01):\n return np.array([[r[0] + epsilon, r[1] - epsilon]\n for r in self.ranges])", "title": "" }, { "docid": "9ce5a90ec00c69352fbac130cb9cdff2", "score": "0.5988107", "text": "def reduce_ranges(ranges_s: Set[Daterange]) -> Set[Daterange]: \n if len(ranges_s) == 0:\n return ranges_s\n \n ranges_l: List[Daterange] = list(ranges_s)\n ranges_l.sort()\n new_ranges = set()\n\n cur_range = ranges_l[0]\n i = 1\n while i < len(ranges_l):\n if cur_range.end_date >= 
ranges_l[i].start_date:\n cur_range = Daterange(cur_range.start_date,\n max(cur_range.end_date,\n ranges_l[i].end_date))\n else:\n new_ranges.add(cur_range)\n cur_range = ranges_l[i]\n i = i + 1\n\n new_ranges.add(cur_range)\n return new_ranges", "title": "" }, { "docid": "d2bafa6b646c0ebbacbdbbfa7d5905f8", "score": "0.59782445", "text": "def intervals_to_boundaries(intervals, q=5):\n\n return np.unique(np.ravel(np.round(intervals, decimals=q)))", "title": "" }, { "docid": "aa766b2aeade5dc704b8e5e996dfd969", "score": "0.5977401", "text": "def intRange(low, high):\n if low > high:\n return []\n else:\n return [low] + intRange(low+1, high)", "title": "" }, { "docid": "1374829af65d09678a214bdaa914b074", "score": "0.5957816", "text": "def findMissingRanges(self, nums: List[int], lower: int, upper: int) -> List[str]:\n out = []\n cur_low = lower\n for num in nums:\n if num == cur_low:\n cur_low = num + 1\n continue\n \n if num - 1 == cur_low:\n out.append(str(cur_low))\n else:\n out.append(str(cur_low) + '->' + str(num - 1))\n cur_low = num + 1\n \n if upper == cur_low:\n out.append(str(upper))\n elif upper > cur_low:\n out.append(str(cur_low) + '->' + str(upper))\n \n return out", "title": "" }, { "docid": "de0118ecd61d0936057227727fe4ff72", "score": "0.59239763", "text": "def range(self):\r\n lows, highs = [], []\r\n for graph in self._graphs.values():\r\n low, high = graph.range()\r\n lows.append(low)\r\n highs.append(high)\r\n return (min(lows), max(highs))", "title": "" }, { "docid": "9aa8543907b571c000ed159944cbea6f", "score": "0.589923", "text": "def ranges(i: List[int]) -> Iterator[Tuple[int, int]]:\n for a, b in itertools.groupby(enumerate(i), lambda x: x[1] - x[0]):\n b = list(b)\n yield b[0][1], b[-1][1]", "title": "" }, { "docid": "942a51ad145538aa51b6c1d8d1e3fb77", "score": "0.58975327", "text": "def parseset(self, arg):\n\n if arg is None or len(arg) == 0:\n return\n argparts = string.split(arg, ',')\n d = copy.copy(self.rangeset)\n for a in argparts:\n r = self.rangelist.getrange(a)\n d[a] = r\n self.rangeset = d", "title": "" }, { "docid": "83d993a9b0bc034e53ec89c3c8df1cf2", "score": "0.5875789", "text": "def ranges(i):\n\n for a, b in itertools.groupby(enumerate(i), lambda (x, y): y - x):\n b = list(b)\n yield b[0][1], b[-1][1]", "title": "" }, { "docid": "b2c2c8d4d0f4272e487bb48d2eed71b2", "score": "0.58637774", "text": "def filter_valid_range(points, rect):\n ret = []\n for x, y in points:\n if x >= rect[0] and x <= rect[1] and y >= rect[2] and y <= rect[3]:\n ret.append((x, y))\n if len(ret) == 0:\n ret.append(points[0])\n return ret", "title": "" }, { "docid": "0e290cae407a7e34bdc7d550bef7e26f", "score": "0.58614886", "text": "def integers(start: Optional[int] = None, stop: Optional[int] = None) -> Iterable[int]:\n\n if start is not None and stop is not None:\n return range(start, stop)\n elif start is None and stop is not None:\n return naturals(stop, -1)\n elif start is not None and stop is None:\n return naturals(start)\n\n return (z for n in naturals() for z in (n, ~n))", "title": "" }, { "docid": "e375229243488ed2a266e3906fc2dfc6", "score": "0.5857149", "text": "def parseRangeExpression(s, convertToZeroBased=False):\n if not s:\n return set()\n\n pos = 0\n expr = ''\n for match in _rangeExpressionRegex.finditer(s):\n start, end = match.span()\n if start > pos:\n expr += s[pos:start]\n expr += '{%s}' % ','.join(\n map(str, parseRangeString(match.group(0), convertToZeroBased)))\n pos = end\n\n if pos < len(s):\n expr += s[pos:]\n\n try:\n return eval(expr)\n except 
Exception:\n raise ValueError(expr)", "title": "" }, { "docid": "748351df91a9a04ace05712e7579cdc1", "score": "0.58427954", "text": "def hyphen_range(s):\n s = \"\".join(s.split())\n r = set()\n for x in s.split(','):\n t = x.split('-')\n if len(t) not in [1,2]: raise SyntaxError(\"hash_range is given its arguement as \"+s+\" which seems not correctly formated.\")\n r.add(int(t[0])) if len(t) == 1 else r.update(set(range(int(t[0]),int(t[1])+1)))\n l = list(r)\n l.sort()\n return l", "title": "" }, { "docid": "1926f8b19153a318ab8f3e949829c155", "score": "0.58423245", "text": "def require_array_ranges(ranges):\n if ranges is None:\n return None\n elif isinstance(ranges, int):\n return [(0, ranges)]\n elif isinstance(ranges, tuple):\n return [ranges]\n elif isinstance(ranges, list):\n return ranges\n else:\n raise Exception(\"Not a valid ranges object.\")", "title": "" }, { "docid": "72ffd7780e3c9eb261a3778786b37080", "score": "0.5822808", "text": "def get_data_range(data, train_ids):\n min_d, max_d = get_data_min_max(data, train_ids)\n range_d = max_d - min_d\n sum_range_d = float(range_d.sum())\n return (min_d, max_d, range_d, sum_range_d)", "title": "" }, { "docid": "a0b2f64f22a2690e2b2282b30a69faa7", "score": "0.58200485", "text": "def minmax(ListofNumbers: Sequence[int]):\n return [min(ListofNumbers), max(ListofNumbers)]", "title": "" }, { "docid": "89d4dffaf3214de07ff4c5bc2760b031", "score": "0.581568", "text": "def build_closed_intervals_from_list(bounds_list):\n return [ClosedInterval(min, max) for (min, max) in bounds_list]", "title": "" }, { "docid": "986d6a19d900a4a009924a1bf6f662ca", "score": "0.58115256", "text": "def nice_range(lower, upper):\n\n flipped = 1 # set to -1 for inverted\n\n # Case for validation where nan is passed in\n if np.isnan(lower):\n lower = 0\n if np.isnan(upper):\n upper = 0.1\n\n if upper < lower:\n upper, lower = lower, upper\n flipped = -1\n return [_int_if_int(x) for x in _nice_range_helper(lower, upper)][::flipped]", "title": "" }, { "docid": "fca68e32497da947ce816c4e3232ec66", "score": "0.5779956", "text": "def _run_ranges(self, ranges):\n input_data = [self.MockExperiment(x) for x in ranges]\n return offset_ranges(calculate_batch_offsets(input_data), ranges)", "title": "" }, { "docid": "52fff85c88fd6b455cfab1041b767943", "score": "0.5744838", "text": "def _from_literals(literals, contig_map=None):\n return ranges.RangeSet.from_regions(literals, contig_map)", "title": "" }, { "docid": "c1c7a26e29b855b6bebaab168c7cb0bb", "score": "0.57353127", "text": "def as_set(self):\n return set(range(self[0], self[1] + 1))", "title": "" }, { "docid": "5553a54a9d29d709055d03f88ded7569", "score": "0.57078683", "text": "def get_ranges(num_reads, num_total_chunks):\n\n chunks_per_file = get_chunks_per_file(num_total_chunks)\n\n bounds = [x*(num_reads/chunks_per_file) for x in xrange(chunks_per_file)]\n bounds += [num_reads]\n starts = bounds[:-1]\n ends = bounds[1:]\n\n intervals = zip(starts, ends)\n \n ovl_ranges = []\n\n for hash_int in intervals:\n for ref_int in intervals:\n if ref_int[0] >= hash_int[1]:\n continue\n\n ovl_ranges.append((hash_int, ref_int))\n\n return ovl_ranges", "title": "" }, { "docid": "8d029cd4203f5b469aa5a3d7365a427d", "score": "0.5707772", "text": "def get_field_ranges(s: str) -> List[Iterable[int]]:\n # List of pairs of ints. 
Each pair defines inclusive bounds for one range.\n range_bounds: List[List[str]] = [\n map(int, pair.split('-')) for pair in re.findall(r'\\d+-\\d+', s)\n ]\n # The range's upper bound is `b + 1` because we want `b` to be in the range.\n return [range(a, b + 1) for a, b in range_bounds]", "title": "" }, { "docid": "09dd8ab140e75e1f6827955734a93f3f", "score": "0.5704282", "text": "def __parse_range(self, r):\n if not r:\n return []\n parts = r.split(\"-\")\n if len(parts) == 1: # if given 1 value, take range from 0->value\n return 0, int(r)\n elif len(parts) == 2:\n return int(parts[0]), int(parts[1])\n if len(parts) > 2:\n raise ValueError(\"Invalid range: {}\".format(r))", "title": "" }, { "docid": "5032017f5bef227d63e48b17268f2a3f", "score": "0.5677674", "text": "def group_contiguous_integers(data):\n ranges = []\n for key, group in groupby(enumerate(data), lambda i: i[0] - i[1]):\n group = list(map(itemgetter(1), group))\n if len(group) > 1:\n ranges.append((group[0], group[-1]))\n return ranges", "title": "" }, { "docid": "3932a971dd94be8fddb2fc690d581729", "score": "0.5674482", "text": "def parse_ranges(range_string):\n\n range_string = range_string.strip()\n if not range_string:\n return []\n\n if \"inf\" in range_string:\n range_string = re.sub(r\"inf\", repr(sys.float_info.max), range_string)\n\n ranges = ast.literal_eval(range_string)\n if isinstance(ranges, list) and not isinstance(ranges[0], list):\n ranges = [ranges]\n\n # Verify that ranges is a list of list of numbers.\n for item in ranges:\n if len(item) != 2:\n raise ValueError(\"Incorrect number of elements in range\")\n elif not isinstance(item[0], (int, float)):\n raise ValueError(\"Incorrect type in the 1st element of range: %s\" %\n type(item[0]))\n elif not isinstance(item[1], (int, float)):\n raise ValueError(\"Incorrect type in the 2nd element of range: %s\" %\n type(item[0]))\n\n return ranges", "title": "" }, { "docid": "f2b33150a90ca2f76f89e0ae4880657a", "score": "0.5670956", "text": "def test_prange():\n start = 3\n stop = 9\n step = 2\n\n expected = list(range(start, stop, step))\n output = list(_compat.prange(start, stop, step))\n\n assert expected == output", "title": "" }, { "docid": "b21cb542b19e1b2acc602c7fd7cacb65", "score": "0.5652219", "text": "def get_split_set(args_set, args_range, set_file=None):\n split_set = None\n\n def add_to(split_set, iterable):\n if split_set is None:\n return iterable\n\n if iterable is not None:\n split_set.update(iterable)\n\n return split_set\n\n split_range = None\n if args_range is not None and len(args_range) != 2:\n split_range = None\n logger.error(\"Expected split range of length 2. 
Got range size of %d\" % len(args_range))\n exit(1) # not going to continue with this error\n elif args_range is not None:\n split_range = sorted(args_range)\n split_range = set(range(split_range[0], split_range[1] + 1))\n\n split_set = add_to(split_set, split_range)\n split_set = add_to(split_set, args_set)\n\n if set_file is not None:\n with open(set_file, 'r') as f:\n try:\n numbers = set(map(int, f.read().split()))\n split_set = add_to(split_set, numbers)\n except FileNotFoundError:\n logger.error(\"Split set file: %s not found.\" % set_file)\n raise\n except ValueError:\n logger.error(\"Malformed split set file: %s\" % set_file)\n raise\n except:\n logger.error(\"Split set file: %s\" % set_file)\n raise\n return split_set", "title": "" }, { "docid": "b794f8709023a06da206d9bb92b60156", "score": "0.5645277", "text": "def parse_int_ranges_from_number_string(input_string):\n # Assign helper variable\n parsed_input = []\n # Construct a list of integers from given number string,range\n for cell in input_string.split(','):\n if '-' in cell:\n start, end = cell.split('-')\n parsed_range = list(range(int(start), int(end) + 1))\n parsed_input.extend(parsed_range)\n else:\n parsed_input.append(int(cell))\n return parsed_input", "title": "" }, { "docid": "f318685e08c22c74ef023db9f9a1165a", "score": "0.5640551", "text": "def get_range(cls, start: int, end: int) -> List[Build]:\n return [cls.get(build_id) for build_id in range(start + 1, end + 1)]", "title": "" }, { "docid": "4ffd7f5819ab777a9cdf01f5b0fe1fdb", "score": "0.5640301", "text": "def get_range(items):\n date_list = []\n for a in items:\n for item in a:\n date_list.append(item)\n return date_list", "title": "" }, { "docid": "db79b6f10d466fe4d27fa36d0d3ba021", "score": "0.5636271", "text": "def monotone_map(cls, x, fn):\n x = cls.wrap(x)\n l = fn(x.lower)\n u = fn(x.upper)\n return ValueRanges(min(l, u), max(l, u))", "title": "" }, { "docid": "421327e4c14405df0de226c7ab7c159f", "score": "0.56306505", "text": "def test_ranges(self):\r\n self.assertEqual([(15, 20), (158, 161)], self.hsp.hit_range_all)\r\n self.assertEqual([(0, 6), (10, 13)], self.hsp.query_range_all)", "title": "" }, { "docid": "2663a95d637ba153203111aed74555c1", "score": "0.5624696", "text": "def linear_ranges(values):\n ranges = []\n def close(x,y): return abs(y-x) < 1e-6\n for i in range(0,len(values)):\n is_linear_before = i >= 2 and \\\n close(values[i]-values[i-1],values[i-1]-values[i-2])\n is_linear_after = 1 <= i <= len(values)-2 and \\\n close(values[i]-values[i-1],values[i+1]-values[i])\n if is_linear_before or \\\n (len(ranges) > 0 and len(ranges[-1]) == 1 and is_linear_after):\n ranges[-1] += [values[i]]\n else: ranges += [[values[i]]]\n return ranges", "title": "" }, { "docid": "db3dadd4763767ef088822580a24e6b1", "score": "0.56204194", "text": "def _new_value_range(rng):\n range_values = []\n for row in rng:\n range_values.append(_new_row(row))\n return range_values", "title": "" }, { "docid": "2be0ea5f77ae4d571bad003395e5d2b4", "score": "0.5618263", "text": "def construct_ranges(self, dimensionality, minimum, maximum):\n ranges = []\n for _ in range(dimensionality):\n ranges.append([minimum, maximum])\n return ranges", "title": "" }, { "docid": "9e7451db230c556c05541a30ee10b25d", "score": "0.56181365", "text": "def geospace(start, stop, num=7):\n\n answer = {start, stop}\n start = max(start, 1)\n for i in range(1, num-1):\n answer.add(int(np.rint(start*(stop/start)**(i/(num-1)))))\n return sorted(answer)", "title": "" }, { "docid": 
"934f80d7ac1cd82f1cf523faf606c5e1", "score": "0.56178653", "text": "def iprange_to_globs(start: CIDR, end: CIDR) -> List[IPGlob]:\n ...", "title": "" }, { "docid": "d2a27d80b84f335b9a08762e5c1a2ed8", "score": "0.5613653", "text": "def get_xs(min_value, max_value, precision=1.0) -> list:\n multiplier = precision ** (-1)\n return [x / multiplier\n for x in range(int(int(floor(min_value)) * multiplier), int(multiplier * (int(ceil(max_value)) + 1)))]", "title": "" }, { "docid": "1cb92346ca8996fd19f659b0b7759bab", "score": "0.55948526", "text": "def get_range(periods): \n try:\n lower, higher = int(periods[0][0]), int(periods[0][1])\n for nr in periods[1:]:\n low, high = int(nr[0]), int(nr[1])\n lower = min(low,lower)\n higher = max(high, higher)\n # to keep into account the open interval\n higher = higher\n except:\n return None, None\n return str(lower), str(higher)", "title": "" }, { "docid": "223390cd4a1ee14873f22092c8a426f1", "score": "0.5589736", "text": "def computeTargetValues(data,lowerBound,upperBound):\n \n data = sorted(set(data)) #sort data in O(nlog(n)). Also it removes duplicates\n \n minVal = min(data)\n maxVal = max(data)\n\n targetSet = set() #here I'll store unique values (x + y) between lowerBound\n #and upperBound such that x != y and both of them are in 'data'.\n \n for val in data:\n minRange = lowerBound - val #obtain the range of possible values\n maxRange = upperBound - val\n \n if (maxRange < minVal) or (minRange > maxVal):\n continue\n \n i = bi.bisect_left(data,minRange) #find index of closest match in O(log(n))\n j = bi.bisect_left(data,maxRange) #find index of closest match in O(log(n))\n \n if (j - i) != 0: #if there exist values that sum a target value\n candidates = data[i:j]\n candidates = [x + val for x in candidates]\n targetSet.update(candidates)\n \n return len(targetSet) #cardinality of unique target values", "title": "" }, { "docid": "824fe7e89a3b4555809d5f7b586f4209", "score": "0.5588334", "text": "def range(self):\r\n lows, highs = [], []\r\n for track in self.tracks.values(): # Get ranges for each track\r\n low, high = track.range()\r\n lows.append(low)\r\n highs.append(high)\r\n return (min(lows), max(highs)) # Return extremes from all tracks\r", "title": "" }, { "docid": "60185b5026ff53cef6c6287e79a8bd11", "score": "0.55876213", "text": "def extract_ranges(index_list, range_size_limit=32):\n if not index_list:\n return [], []\n first = index_list[0]\n last = first\n ranges = []\n singles = []\n for i in index_list[1:]:\n if i == last + 1 and (last - first) <= range_size_limit:\n last = i\n else:\n if last > first:\n ranges.append([first, last])\n else:\n singles.append(first)\n first = i\n last = i\n if last > first:\n ranges.append([first, last])\n else:\n singles.append(first)\n return ranges, singles", "title": "" }, { "docid": "deca7a5634ed2acae2286cb1c2d8d289", "score": "0.55786026", "text": "def get_rangeset(self) :\r\n return self._rangeset", "title": "" }, { "docid": "faf8e7f54b6f47a1bdd98b6f3355381c", "score": "0.5569707", "text": "def _extract_ranges(self):\n dataset_progress = ProgressPrinter(self.dataset.num_vertices(),\n desc=\"Extract frequency of reference\")\n range_set = RangeSet()\n for vertex in self.dataset.vertices():\n node = self.dataset.vp.data[vertex]\n logger.debug(\"Inspect node %s\", node)\n r_node = self.DataRange(node.cap.base,\n node.cap.base + node.cap.length)\n node_set = RangeSet([r_node])\n # erode r_node until it is fully merged in the range_set\n # the node_set holds intermediate ranges remaining to merge\n while 
len(node_set):\n logger.debug(\"merging node\")\n # pop first range from rangeset and try to merge it\n r_current = node_set.pop(0)\n # get first overlapping range\n r_overlap = range_set.pop_overlap_range(r_current)\n if r_overlap == None:\n # no overlap occurred, just add it to the rangeset\n range_set.append(r_current)\n logger.debug(\"-> no overlap\")\n continue\n logger.debug(\"picked current %s\", r_current)\n logger.debug(\"picked overlap %s\", r_overlap)\n # merge r_current and r_overlap data and push any remaining\n # part of r_current back in node_set\n #\n # r_same: referenced count does not change\n # r_inc: referenced count incremented\n # r_rest: pushed back to node_set for later evaluation\n if r_overlap.start <= r_current.start:\n logger.debug(\"overlap before current\")\n # 2 possible layouts:\n # |------ r_current -------|\n # |------ r_overlap -----|\n # |-r_same-|-- r_inc ----|- r_rest -|\n #\n # |--------------- r_overlap --------------|\n # |-r_same-|-------- r_inc ---------|r_same|\n r_same, other = r_overlap.split(r_current.start)\n if r_same.size > 0:\n range_set.append(r_same)\n\n if r_current.end >= r_overlap.end:\n # other is the remaining part of r_overlap\n # which falls all in r_current, so\n # r_inc = other\n other.num_references += 1\n range_set.append(other)\n # r_rest must be computed from the end\n # of r_overlap\n _, r_rest = r_current.split(r_overlap.end)\n if r_rest.size > 0:\n node_set.append(r_rest)\n else:\n # other does not fall all in r_current so\n # split other in r_inc and r_same\n # r_current is not pushed back because it\n # was fully covered by r_overlap\n r_inc, r_same = other.split(r_current.end)\n r_inc.num_references += 1\n range_set.append(r_inc)\n range_set.append(r_same)\n else:\n logger.debug(\"current before overlap\")\n # 2 possible layouts:\n # |------ r_current ---------|\n # |------ r_overlap ---------|\n # |-r_rest-|-- r_inc --------| r_same |\n #\n # |------ r_current --------------|\n # |--- r_overlap ---|\n # |r_rest|----- r_inc -----|r_rest|\n r_rest, other = r_current.split(r_overlap.start)\n if r_rest.size > 0:\n node_set.append(r_rest)\n\n if r_current.end >= r_overlap.end:\n # other is the remaining part of r_current\n # which completely covers r_overlap so\n # split other in r_inc and r_rest\n r_inc, r_rest = other.split(r_overlap.end)\n r_inc.num_references += r_overlap.num_references\n range_set.append(r_inc)\n if r_rest.size > 0:\n node_set.append(r_rest)\n else:\n # other does not cover all r_overlap\n # so r_inc = other and the remaining\n # part of r_overlap is r_same\n other.num_references += r_overlap.num_references\n range_set.append(other)\n _, r_same = r_overlap.split(r_current.end)\n range_set.append(r_same)\n logger.debug(\"merge loop out Range set step %s\", range_set)\n logger.debug(\"merge loop out Node set step %s\", node_set)\n logger.debug(\"Range set step %s\", range_set)\n logger.debug(\"Node set step %s\", node_set)\n dataset_progress.advance()\n dataset_progress.finish()\n logger.debug(\"Range set %s\", range_set)\n self.range_set = range_set", "title": "" }, { "docid": "370c33675902b0de9965a91d036c1da6", "score": "0.55696684", "text": "def get_range(value):\n return range(int(value))", "title": "" }, { "docid": "402341077ffe2d0ef2cd6edd87df4718", "score": "0.55526006", "text": "def allowedValues(self):\n nones = len(filter(lambda r: r is None, self.range))\n if nones == 1:\n raise RuntimeError(\"identifier set (%s) is not closed\" % self.name)\n\n if nones == 0:\n out = range(self.range[0], 
self.range[1])\n else:\n out = []\n if self.values:\n out.extend(self.values)\n out.sort()\n\n return out", "title": "" }, { "docid": "c7441b95553a01948f33b1f9aabd4939", "score": "0.55448884", "text": "def range_simple(start,end,step, decimal=1):\n\tlst = list(np.arange(start,end,step))\n\tmp = map(round_single, lst) if decimal==1 else map(round_double, lst) # Python's Ternary Operator\n\tlst = list(mp)\n\treturn lst", "title": "" }, { "docid": "8b75e3cb9baf93a7f81e75edad8c11e3", "score": "0.55374295", "text": "def allvalues(self):\n result = []\n\n if self.values is not None:\n result = self.values.split('|')\n\n if len(result) == 1:\n result = result[0].split('-')\n\n minval = result[0]\n maxval = result[1]\n\n minplus = '+' in minval\n maxplus = '+' in maxval\n\n result = [\n str(val) for val in range(int(minval), int(maxval) + 1)\n ]\n\n if minplus:\n result[0] = minval\n\n if maxplus:\n result[-1] = maxval\n\n return result", "title": "" }, { "docid": "94a8fe918e77aecf427bfc01841da138", "score": "0.55230796", "text": "def _simplify_range(my_range: range) -> Tuple[int, int, int]:\n\n return (my_range.start, my_range.stop, my_range.step)", "title": "" }, { "docid": "6e542f8ebb869372682e88cdeefa2070", "score": "0.55222464", "text": "def to_range(s):\n if IFACE_RANGE in s:\n x, y = map(int, s.split(IFACE_RANGE))\n return range(x, y+1)\n else:\n return [int(s)]", "title": "" }, { "docid": "d28aab8a75c73f6870e6438c1033124e", "score": "0.5515701", "text": "def iprange_to_cidrs(start: CIDR, end: CIDR) -> List[CIDR]:\n ...", "title": "" }, { "docid": "72838bd1a52e0b7968815337c94a274c", "score": "0.550896", "text": "def test_invert_ranges_many_ranges(self):\n ranges = [DataRange(max=2, max_inclusive=False),\n DataRange(min=4, min_inclusive=True)]\n opposite_range = DataRange(min=2, max=4)\n self.assertEqual(sina.utils.invert_ranges(ranges), [opposite_range])", "title": "" }, { "docid": "ee411ee730321672915c7bdd0a54e04b", "score": "0.5506704", "text": "def test_frange_int_three(start: int, stop: int, step: int):\n fr_result = list(frange(start, stop, step))\n r_result = list(range(start, stop, step))\n assert fr_result == r_result", "title": "" }, { "docid": "df281ca3e4232c103c2e5481d356056c", "score": "0.55053014", "text": "def has_consecutive_ranges(ranges):\n total_entries = sum(h - l + 1 + 1 for l, h in ranges)\n union = set().union(*[set(range(l, h + 2)) for l, h in ranges])\n return len(union) < total_entries", "title": "" }, { "docid": "ae7c7d96a71b1030242dcc8b73fadf2f", "score": "0.5502856", "text": "def parse_nucs(s):\n nset = set()\n nucs = s.split(',')\n\n for nuc in nucs:\n if len(nuc) == 0:\n continue\n elif '-' in nuc:\n nsplit = nuc.split()\n nlower = nucname.zzaaam(nsplit[0])\n nupper = nucname.zzaaam(nsplit[1])\n if 0 == nupper%10000:\n nupper += 10000\n else:\n nupper += 1\n tmpset = set(range(nlower, nupper))\n else:\n n = nucname.zzaaam(nuc)\n if 0 == n%10000:\n nrange = range(n, n + 10000)\n else:\n nrange = [n]\n tmpset = set(nrange)\n\n # Add the union \n nset = (nset | tmpset)\n\n return nset", "title": "" }, { "docid": "a118888d9da036c2077cfdd084ecd8d8", "score": "0.5501115", "text": "def allinrange(x, x_range):\n if isinstance(x, (int, float, np.float, np.int)):\n x = np.array([x])\n return np.where(np.logical_or(x < x_range[0], x > x_range[1]))[0].size == 0", "title": "" }, { "docid": "ea60bc8c76cc9698580576f9f76917e7", "score": "0.54971147", "text": "def expandrange(modestr):\n\n ranges = (x.split(\"-\") for x in modestr.split(\",\"))\n return [i for r in ranges for i 
in range(int(r[0]), int(r[-1]) + 1)]", "title": "" }, { "docid": "bfe2d61c2e35cd96d3f62516803e8f9a", "score": "0.5490465", "text": "def validate_range(self, input_list):\n\t\tvalidation_result = False if len(input_list) != 2 else True\n\t\thas_range = True if len(input_list[1].split('-')) == 2 else False\n\t\treturn [has_range, validation_result]", "title": "" }, { "docid": "0eb2b23810d79335e233d8409742d2d5", "score": "0.5484017", "text": "def ranges_from_iterable(iterable: Iterable[float], step: float = 1) -> Iterable[Tuple[float, float]]:\n\n\tfor group in groupfloats(iterable, step):\n\t\tyield group[0], group[-1]", "title": "" }, { "docid": "675bbda50c844cfb542af971a045de56", "score": "0.5478938", "text": "def regex_for_range(min_: int, max_: int) -> str: # noqa\n\n def split_to_patterns(min_, max_):\n subpatterns = []\n start = min_\n for stop in split_to_ranges(min_, max_):\n subpatterns.append(range_to_pattern(start, stop))\n start = stop + 1\n return subpatterns\n\n def split_to_ranges(min_, max_):\n stops = {max_}\n nines_count = 1\n stop = fill_by_nines(min_, nines_count)\n while min_ <= stop < max_:\n stops.add(stop)\n nines_count += 1\n stop = fill_by_nines(min_, nines_count)\n zeros_count = 1\n stop = fill_by_zeros(max_ + 1, zeros_count) - 1\n while min_ < stop <= max_:\n stops.add(stop)\n zeros_count += 1\n stop = fill_by_zeros(max_ + 1, zeros_count) - 1\n stops = list(stops)\n stops.sort()\n return stops\n\n def fill_by_nines(integer, nines_count):\n return int(str(integer)[:-nines_count] + \"9\" * nines_count)\n\n def fill_by_zeros(integer, zeros_count):\n return integer - integer % 10**zeros_count\n\n def range_to_pattern(start, stop):\n pattern = \"\"\n any_digit_count = 0\n for start_digit, stop_digit in zip(str(start), str(stop)):\n if start_digit == stop_digit:\n pattern += start_digit\n elif start_digit != \"0\" or stop_digit != \"9\":\n pattern += \"[{}-{}]\".format(start_digit, stop_digit)\n else:\n any_digit_count += 1\n if any_digit_count:\n pattern += r\"\\d\"\n if any_digit_count > 1:\n pattern += \"{{{}}}\".format(any_digit_count)\n return pattern\n\n positive_subpatterns = []\n negative_subpatterns = []\n\n if min_ < 0:\n min__ = 1\n if max_ < 0:\n min__ = abs(max_)\n max__ = abs(min_)\n negative_subpatterns = split_to_patterns(min__, max__)\n min_ = 0\n if max_ >= 0:\n positive_subpatterns = split_to_patterns(min_, max_)\n\n negative_only_subpatterns = [\"-\" + val for val in negative_subpatterns if val not in positive_subpatterns]\n positive_only_subpatterns = [val for val in positive_subpatterns if val not in negative_subpatterns]\n intersected_subpatterns = [\"-?\" + val for val in negative_subpatterns if val in positive_subpatterns]\n subpatterns = negative_only_subpatterns + intersected_subpatterns + positive_only_subpatterns\n return \"|\".join(subpatterns)", "title": "" }, { "docid": "1a976744ece43cc1dcded82174519370", "score": "0.5470228", "text": "def build_ranges(self):\n return RangesBuilder(self).build_ranges()", "title": "" }, { "docid": "642dc30e248edc631bceb75967d175ed", "score": "0.5466638", "text": "def get_hexes_in_range(self, start_hex: Hex, _range: int, **kwargs) -> Dict[str, Hex]:\n hexes_in_range = {}\n for q in range(-_range, _range + 1):\n for r in range(max(-_range, -q - _range), min(_range, -q + _range) + 1):\n _hex_id = f\"{q + start_hex.q};{r + start_hex.r}\"\n hexes_in_range[_hex_id] = self.get(_hex_id)\n return self.filter(hexes_in_range, **kwargs)", "title": "" }, { "docid": "84eb141d88f011445cf57e37a3372477", "score": 
"0.54585296", "text": "def from_range_with_bounds(self, value):\n values = value[1:-1].split(',')\n try:\n lower, upper = map(\n lambda a: int(a.strip()), values\n )\n except ValueError as e:\n raise NumberRangeException(e.message)\n\n self.lower_inc = value[0] == '('\n if self.lower_inc:\n lower += 1\n\n self.upper_inc = value[-1] == ')'\n if self.upper_inc:\n upper -= 1\n\n self.lower = lower\n self.upper = upper", "title": "" }, { "docid": "bfa076f5dbf4addc6498726bcff68a33", "score": "0.54345", "text": "def get_range( value ):\n return range( value )", "title": "" }, { "docid": "bfa076f5dbf4addc6498726bcff68a33", "score": "0.54345", "text": "def get_range( value ):\n return range( value )", "title": "" }, { "docid": "328bc2b3c08ec271c3cc11994cc53058", "score": "0.54339266", "text": "def list_to_range(ilist, shelf=None):\n # pylint: disable= W0141\n rlist = list()\n olist = list()\n for d in ilist:\n sd = d.split('.')\n if len(sd) == 2:\n shelf = sd[0]\n slot = sd[1]\n elif len(sd) == 1:\n slot = sd[0]\n else:\n raise LibraryError(\"for %s could identify slot in %s\" % (d, sd))\n rlist.append(int(slot))\n rlist = [map(itemgetter(1), g) for _, g in groupby(enumerate(rlist), lambda (i, x): i - x)]\n for i in rlist:\n if len(i) > 1:\n olist.append(\"{0}-{1}\".format(i[0], i[0] + (len(i) - 1)))\n else:\n olist.append(\"{0}\".format(i[0]))\n if shelf:\n for i in range(len(olist)):\n olist[i] = \"{0}.{1}\".format(shelf, olist[i])\n return olist", "title": "" }, { "docid": "9ad08a5f5925c23e02c2538517a8c2d7", "score": "0.54339164", "text": "def id_as_range(self):\n values = self.Id.split(\"-\")\n if len(values) == 1:\n return range(int(self.Id), int(self.Id)+1)\n return range(int(values[0]), int(values[1])+1)", "title": "" } ]
a6f2094b23fd016f411dd70f2df2b7ef
Using three different increments, find how much tilt, roll, and twist of the birdbath yields maximum water-holding ability.
[ { "docid": "6001527e7637b1559b11b1ce1fe85531", "score": "0.60584825", "text": "def helper(func):\n curr_best_water = -1\n curr_best_roll = -1\n curr_best_twist = -1\n curr_best_tilt = -1\n mini = -45\n maxi = 45\n delta = [12, 6, 1]\n for d in delta: # use three different delta values to adjust the range and find max water fraction\n roll = mini # set the three variables to the minimum of the range\n tilt = mini\n twist = mini\n curr_best_water, curr_best_roll, curr_best_tilt, curr_best_twist = findbest(roll, tilt, twist, mini, maxi, d, func)\n curr_bests = [curr_best_roll, curr_best_tilt, curr_best_twist]\n mini = min(curr_bests) - d # mini is minimum of the current best values - current delta\n maxi = max(curr_bests) + d # maxi is maximum of the current best values + current delta\n\n return curr_best_water, curr_best_roll, curr_best_tilt, curr_best_twist", "title": "" } ]
[ { "docid": "aeef00b8dc0602f922a09d4c7ef36bf5", "score": "0.65780234", "text": "def findbest(roll, tilt, twist, min, max, delta, func):\n best_water = -1\n best_roll = -1\n best_tilt = -1\n best_twist = -1\n while roll < max: # until roll, tilt, twist values hit max value in a range\n tilt = min # evaluate how much water it holds and increment by delta\n while tilt < max:\n twist = min\n while twist < max:\n water = func(roll, tilt, twist)\n if(water > best_water): # compare the current water fraction with the best water fraction\n best_water = water\n best_roll = roll\n best_tilt = tilt\n best_twist = twist\n twist+= delta # increment the twist, tilt, roll by delta\n tilt += delta\n roll += delta\n return best_water, best_roll, best_tilt, best_twist", "title": "" }, { "docid": "42dbac2ee6801ea7137b3dd47c46bd21", "score": "0.62114006", "text": "def brent(self, patch):\n power = lam = 1\n tortoise = 0\n hare = 1\n while np.array_equal(self.values(patch, tortoise),\n self.values(patch, hare)) is False:\n if power == lam:\n tortoise = hare\n power = power * 2\n lam = 0\n hare = hare + 1\n lam = lam + 1\n if hare >= self.height or tortoise >= self.height:\n break\n\n tortoise = 0\n hare = lam\n\n mu = 0\n while np.array_equal(self.values(patch, tortoise),\n self.values(patch, hare)) is False:\n tortoise = tortoise + 1\n hare = hare + 1\n mu = mu + 1\n if hare >= self.height or tortoise >= self.height:\n break\n\n return lam, mu", "title": "" }, { "docid": "5314205e88799929de0f97de4ffbec67", "score": "0.5863721", "text": "def rgkmax_and_mt_effect():\n\n l_max_pairs = [{'zr': 6, 'o': 5}]\n energy_index = 'i1'\n energy_cutoff = 100\n muffin_tins = {'equal': {'zr': 1.6, 'o': 1.6},\n 'correct_ratio': {'zr': 2.0, 'o': 1.6}\n }\n\n # PWs in basis = 483, LOs = 807\n rgk8_mt_equal = process_gw_calculation('/users/sol/abuccheri/gw_benchmarks/A1_set8/zr_lmax6_o_lmax5_rgkmax8/gw_q222_omeg32_nempty2000/max_energy_i1')\n # PWs in basis = 680, LOs = 807\n rgk9_mt_equal = process_gw_calculation('/users/sol/abuccheri/gw_benchmarks/A1_set8/zr_lmax6_o_lmax5_rgkmax9/gw_q222_omeg32_nempty2000/max_energy_i1')\n\n # PWs in basis = 483, LOs in basis = 878\n rgk8_mt_ratio = process_gw_calculation('/users/sol/abuccheri/gw_benchmarks/A1_set9/zr_lmax6_o_lmax5_rgkmax8/gw_q222_omeg32_nempty3000/max_energy_i1')\n # PWs in basis = 680, LOs in basis = 875\n # TODO Note the LO I had to remove from the basis in the GW calc\n rgk9_mt_ratio = process_gw_calculation('/users/sol/abuccheri/gw_benchmarks/A1_set9/zr_lmax6_o_lmax5_rgkmax9/gw_q222_omeg32_nempty3000/max_energy_i1')\n\n print_results(rgk8_mt_equal, \"(6,5), 100 Ha cutoff, rgkmax8, MT (1.6, 1.6)\")\n print_results(rgk9_mt_equal, \"(6,5), 100 Ha cutoff, rgkmax9, MT (1.6, 1.6)\")\n print_results(rgk8_mt_ratio, \"(6,5), 100 Ha cutoff, rgkmax8, MT (2.0, 1.6)\")\n print_results(rgk9_mt_ratio, \"(6,5), 100 Ha cutoff, rgkmax9, MT (2.0, 1.6)\")", "title": "" }, { "docid": "33a1de83485e9a4ce5311bf69f28a011", "score": "0.58241403", "text": "def strategy(hand, num_die_sides):\r\n _rolls = gen_all_holds(hand)\r\n _max = 0\r\n _set_hold = {}\r\n #_expect_value = sum([score(hand+_roll) for _roll in _rolls]) / float(len(_rolls))\r\n for _item in _rolls:\r\n _value = expected_value(_item,num_die_sides, len(hand) - len(_item))\r\n if _max < _value:\r\n _max = _value\r\n _set_hold = _item\r\n return (_max, _set_hold)", "title": "" }, { "docid": "e39887c8f8a288f97302c9b32f3f18ce", "score": "0.5820457", "text": "def max_trapped_water(heights):\n max_water = 0\n i, j = 0, len(heights) - 1\n 
indices = (i, j)\n while i < j:\n width = j - i\n curr_water_content = width * min(heights[i], heights[j])\n if curr_water_content > max_water:\n max_water = curr_water_content\n indices = (i, j)\n if heights[i] < heights[j]: # gives sense because the width is always constant, 1 unit\n i += 1\n else:\n j -= 1\n\n return max_water, indices", "title": "" }, { "docid": "48ee5a00283eb213c9b05bed444d588f", "score": "0.5815728", "text": "def MAX_ROLLS() -> int:\n return 3", "title": "" }, { "docid": "c967a2269b9c9e1394f9db594cf5325b", "score": "0.57614326", "text": "def get_max_drawdown_underwater(underwater):\n valley = np.argmax(underwater) # end of the period\n peak = underwater[:valley][underwater[:valley] == 0].index[-1]\n try:\n recovery = underwater[valley:][underwater[valley:] == 0].index[0]\n except IndexError:\n recovery = np.nan\n return peak, valley, recovery", "title": "" }, { "docid": "a64ec8f1bd9248d41e6e82c97116746a", "score": "0.57421345", "text": "def main():\n\t# 10\n print get_max_straight([10])\n\n\t# 15\n print get_max_straight([10, 15])\n\n\t# 16\n print get_max_straight([1, 2, 3, 4, 5, 1, 2, 3, 4, 5])\n\n # 3200\n print get_max_straight([1000, 10, 10, 1000, 1500, 1200, 10])\n\n # 2710\n print get_max_straight([1000, 10, 10, 1000, 1500, 10, 1200])", "title": "" }, { "docid": "28810e5c9abed33cfa92cf808d38d21f", "score": "0.57256645", "text": "def get_max_value(x, y, z):\n if x >= y and x >= z:\n return x\n if y >= x and y >= z:\n return y\n return z", "title": "" }, { "docid": "1623381282dd9c3bf144eb8f6599d988", "score": "0.5585795", "text": "def robust_max_pitch(g, bias=20):\n C = 50\n pitch = max_pitch(g, bias)\n for i in range(1,len(pitch)):\n if abs(2 * pitch[i] - pitch[i-1]) < C and pitch[i] < 170:\n pitch[i] = 2 * pitch[i]\n for i in range(len(pitch)-2, 0, -1):\n if abs(2 * pitch[i] - pitch[i+1]) < C and pitch[i] < 170:\n pitch[i] = 2 * pitch[i]\n return pitch", "title": "" }, { "docid": "2dec3810fcfdab574b6469031130c48c", "score": "0.5567997", "text": "def get_bonus_rolls_needed(self):\n if self.name == 10:\n return 0\n if self.pin_sets[0].get_status() == 'Strike':\n return 2\n if self.pin_sets[0].get_status() == 'Spare':\n return 1\n return 0", "title": "" }, { "docid": "aa7c6c717500bddf5f5cf3a4af327f96", "score": "0.5554858", "text": "def get_max_reattempts(self):\n return 1", "title": "" }, { "docid": "c94345e61e07441d9852e0a3f2382225", "score": "0.5539963", "text": "def _determine_threshold(self, roll_prob, ranges, std=False):\n\n possible_thresholds = []\n for ranger in ranges:\n possible_thresholds.append(np.amax(roll_prob[ranger]))\n possible_thresholds = np.array(possible_thresholds)\n # TODO: multiplier is arbitrary\n t = 1.0 * np.amin(possible_thresholds[np.nonzero(possible_thresholds)])\n if std:\n std = np.std(possible_thresholds[np.nonzero(possible_thresholds)])\n return t, std\n return t", "title": "" }, { "docid": "beec48749b8d53a2d388f4f8770ca7df", "score": "0.5536579", "text": "def FOE_STAB_BACK_MAXIMUM_DAMAGE():\n return 4", "title": "" }, { "docid": "42109e1894b8b1b57409b4d4ed4425ba", "score": "0.54913336", "text": "def v1(takeoff_weight: float, velocity_2: int) -> int:\n if takeoff_weight >= 65.0:\n return velocity_2 - 10\n elif 65.0 > takeoff_weight >= 60.0:\n return velocity_2 - 11\n elif 60.0 > takeoff_weight >= 55.0:\n return velocity_2 - 12\n elif 55.0 > takeoff_weight >= 50.0:\n return velocity_2 - 13\n elif 50.0 > takeoff_weight >= 45.0:\n return velocity_2 - 15\n else:\n return velocity_2 - 17", "title": "" }, { "docid": 
"c90f8566f831af438e1e204050069640", "score": "0.54735804", "text": "def max_of_three(num1, num2, num3):\n\n if num1 >= num2 and num1 >= num3:\n return num1\n\n elif num2 >= num1 and num2 >= num3:\n return num2\n\n elif num3 >= num1 and num3 >= num2:\n return num3", "title": "" }, { "docid": "01b736babdbaaabd0e0bfafb4d64f9fa", "score": "0.5444124", "text": "def max_of_three(num1, num2, num3):\n\n if num1 >= num2 and num1 >= num3:\n\n return num1\n\n elif num2 >= num1 and num2 >= num3:\n\n return num2\n\n elif num3 >= num1 and num3 >= num2:\n\n return num3", "title": "" }, { "docid": "27879c9738d289b379f5fda96383ce5e", "score": "0.5411914", "text": "def two_of_three(a, b, c):\n \"*** YOUR CODE HERE ***\"\n\n x = max(a,b)\n y = max(b,c)\n \n return x*x + y*y", "title": "" }, { "docid": "2def69713ba729a7c97c0db5046bd8fb", "score": "0.5407619", "text": "def max_of_three(num1, num2, num3):\n\n if num1 >= num2 and num1 >= num3:\n return num1\n\n elif num2 >= num1 and num2 >= num3:\n return num2\n\n elif num3 >= num1 and num3 >= num2:\n return num3\n\n else: \n return \"Something went wrong\"", "title": "" }, { "docid": "1705325b3fd608f3502ad231230ef71c", "score": "0.5390615", "text": "def calc_DH_supply(t_0, t_1):\n tmax = max(t_0, t_1)\n return tmax", "title": "" }, { "docid": "435b187994c6678a219e42a85080c76f", "score": "0.5390303", "text": "def greedy_max_pitch(g, bias=20):\n pitch = []\n for l in g:\n p = 0\n for i in range(len(l)-1):\n if l[i] > l[i+1]:\n p = 1 / (0.0001 * (i+bias))\n break\n pitch.append(p)\n return pitch", "title": "" }, { "docid": "90821345a75bcbddcbe39bda83b85d09", "score": "0.53719676", "text": "def max_temp(self):\n return 39", "title": "" }, { "docid": "ed69720bbe5ce9e93bc2c652c1f6972a", "score": "0.5370995", "text": "def _get_roll(self):\r\n return self.equatorial[2]", "title": "" }, { "docid": "161ee76dfe9ddbbea51fed2b22b92622", "score": "0.53673744", "text": "def bir(n0, N0, n1, N1, Ntrial=1e6, CL=95):\n\n u, ll, ul = biratio(n0, N0, n1, N1, Ntrial=Ntrial, CL=CL)\n\n return u, ul - u, u - ll", "title": "" }, { "docid": "d3f464a24c6a682bad6718ad3456c4c9", "score": "0.5339209", "text": "def highest_multiple_of_3(numbers):\n pass", "title": "" }, { "docid": "33316ae91cc3c5fb68c0db1085a48ec6", "score": "0.5331952", "text": "def get_water_level(self):\n with self.simlock:\n return 1.0 * self.current_volume / self.tank_capacity", "title": "" }, { "docid": "2ed98db5fb26905370412777269cc0ce", "score": "0.53179014", "text": "def compute_max_gain(self):\n q = self.dimension // self.nb_target_actions\n k = self.dimension % self.nb_target_actions\n # self.dimension = q*self.nb_target_actions + k (Euclidean division)\n duration_round = self.nb_target_actions/2*q # expected optimal duration to reach target x or y coordinate\n if k < self.nb_target_actions/2:\n duration_round = duration_round - self.nb_target_actions/2 + k\n return (self.reward_distribution_target.mean + 2*duration_round*\n self.reward_distribution_states.mean)/(2*duration_round + 1)", "title": "" }, { "docid": "42b8c9092c1a3818c48582c53cc8a95f", "score": "0.5311642", "text": "def maximum3(a, b, c):\n # your code here", "title": "" }, { "docid": "c1cb67496dd78df18288c211e15bd27a", "score": "0.53023744", "text": "def calculate_base_farmer_reward(height: uint32) -> uint64:\n\n if height == 21:\n return uint64(int((1 / 8) * 2100000 * _mojo_per_thyme))\n elif height % 21000000 == 0:\n return uint64(int((1 / 8) * 210000 * _mojo_per_thyme))\n elif height % 2100000 == 0:\n return uint64(int((1 / 8) * 21000 * 
_mojo_per_thyme))\n elif height % 210000 == 0:\n return uint64(int((1 / 8) * 210 * _mojo_per_thyme))\n elif height % 21000 == 0:\n return uint64(int((7 / 8) * 21 * _mojo_per_thyme))\n else:\n return uint64(int((1 / 8) * 0.5 ** (height / (_halflife * _blocks_per_year)) * _coins_per_block * _mojo_per_thyme))", "title": "" }, { "docid": "c5971923f286fbe934e498996a6ba9bb", "score": "0.5292305", "text": "def find_plateau_central(time_series, threshold, uncertainty=1e-6):\n\n # Step 1\n top1 = time_series.max()\n max_index = np.argmax(time_series)\n tau = top1 - threshold\n left_border, right_border = 0, len(time_series) - 1\n\n # Step 2\n left_border, max_index, tau_l = find_left_plateau(time_series, left_border, max_index, tau)\n\n # Step 3\n max_index, right_border, tau_r = find_right_plateau(time_series, max_index, right_border, tau)\n\n # Step 4\n count = 0\n while abs(tau_l - tau_r) > abs(tau_l * uncertainty):\n if tau_l > tau_r: # Step 5\n max_index, right_border, tau_r = find_right_plateau(time_series, max_index, right_border, tau_l)\n # print('tau_r: {:.2f}'.format(tau_r))\n else: # Step 6\n left_border, max_index, tau_l = find_left_plateau(time_series, left_border, max_index, tau_r)\n # print('tau_l: {:.2f}'.format(tau_l))\n count += 1\n tau = tau_l\n return left_border, right_border, max_index, tau", "title": "" }, { "docid": "8fd30e671323f0b27293cee04d756555", "score": "0.52897704", "text": "def findMaxRange(self):\n v = self.vconfig\n speeds = v['Power Curve']['Speeds']\n powers = v['Power Curve']['PowersSL']\n SPEEDmaxr = 0.\n POWERmaxr = 999999.\n imin = 9999999.\n for i in range(1, len(speeds)):\n if powers[i]/speeds[i] < imin:\n imin = powers[i]/speeds[i]\n SPEEDmaxr = speeds[i]\n POWERmaxr = powers[i]\n if debug: print speeds\n if debug: print powers\n fuelweight = v['Weights']['MaxAvailableFuelWeight'] # self.GW - v['Weights']['EmptyWeightFraction']*self.GW - v['Weights']['UsefulLoad'] # REMOVEME # change back to normal\n hourstoempty = fuelweight / (self.SFC(POWERmaxr) * POWERmaxr)\n v['Performance']['SFCatMaxRange'] = self.SFC(POWERmaxr)\n v['Performance']['MaxRange'] = hourstoempty * SPEEDmaxr\n v['Performance']['MaxRangeSpeed'] = SPEEDmaxr\n v['Performance']['PowerAtMaxRangeSpeed'] = POWERmaxr", "title": "" }, { "docid": "76f9c3be687f8489a65ff1b857cc2645", "score": "0.5274803", "text": "def calculatewealth(self):\n i = 0\n final = 0\n while i < len(self.containedLand):\n temp = self.containedLand[i].getproduction()\n temp = (temp - self.containedLand[i].getupkeep())\n final += temp\n i += 1\n final = final - self.ruler.combatants.calculateupkeep()\n self.stores.wealth += final\n self.log.tracktext(str(self.ruler.name) + \" has \" + str(self.stores.wealth) + \" grain at his disposal\")", "title": "" }, { "docid": "b76da94b37be3b2d918bdc597d9ab427", "score": "0.5271212", "text": "def get_attacker_advantage(self):\n return max(np.abs(self.tpr - self.fpr))", "title": "" }, { "docid": "4b9132ffe8dc0d30121a084789a55c54", "score": "0.52703595", "text": "def max_trial():\n return 3", "title": "" }, { "docid": "edbbd68e2d30fa2ff68b83bc3a83c72e", "score": "0.5269012", "text": "def FIGHTER_SKILL():\n return {'dirty boxing': 1, 'low sweep': 1, 'bat swing': 1, 'kendo slash': 2, 'tornado kick': 2,\n 'dragon sword': 2, 'sword dance': 3, 'divine crash': 3, 'critical hammer shot': 3}", "title": "" }, { "docid": "768e1aef41dd7b90e5d7cd278abc5652", "score": "0.5261597", "text": "def safetyfactor_curtain_hangingweight_open(weight, stopheight = 0):\n weight = float(weight)\n stopheight = 
float(stopheight)\n if stopheight >=192: return weight * 1.2\n if stopheight < 50: return weight + 5\n #if 50 < stopheight < 100: return weight + 10\n return weight * 1.1", "title": "" }, { "docid": "6c9ac08c315a636a809e85596ab94384", "score": "0.52555877", "text": "def behaviour_advance():\n global ds_val, TURN_COUNT, NAVI_COUNT\n\n ret_l = MOTOR_SPEED\n ret_r = MOTOR_SPEED\n \n \"\"\"Depending on turn_count value, different weights are given to the robot distance sensors.\n This was implemented to avoid the robot being stuck in the corners of the arena. This way,\n the robot can only get stuck for the seconds equivalent to the integer given to THRESH_TURN_COUNT (300).\n This means the robot will change weights every 300 ticks or 30 seconds.\n First set of weights favour the turning towards the right and second set to the left.\"\"\"\n \n TURN_COUNT = TURN_COUNT + 1\n NAVI_COUNT = NAVI_COUNT + 1\n \n if NAVI_COUNT == NAVI_RESET:\n NAVI_COUNT = 0\n \n if TURN_COUNT == TURN_RESET:\n TURN_COUNT = 0\n \n if TURN_COUNT < THRESH_TURN_COUNT: \n weights = (2,1,1,0.5,-2)\n else:\n weights = (2,-0.5,-1,-1,-2)\n \n count = 0\n \n for i in range(DIST_SENS_FRONT_COUNT):\n count += ds_val[i] * weights[i]\n\n if count>0:\n # Object to left, spin right\n ret_l = MOTOR_SPEED\n ret_r = -MOTOR_SPEED\n elif count<0:\n # Object to right, spin left\n ret_l = -MOTOR_SPEED\n ret_r = MOTOR_SPEED\n else: \n if NAVI_COUNT < THRESH_NAVI_COUNT:\n ret_l = MOTOR_SPEED*0.87\n ret_r = MOTOR_SPEED\n else:\n ret_l = MOTOR_SPEED\n ret_r = MOTOR_SPEED*0.87\n # If count==0 use default values above\n \n return ret_l, ret_r", "title": "" }, { "docid": "016941feb60716b6ca1e6f655cf21e35", "score": "0.5254495", "text": "def findMaxEndurance(self):\n v = self.vconfig\n speeds = v['Power Curve']['Speeds']\n powers = v['Power Curve']['PowersSL']\n SPEEDmaxe = 0.\n POWERmaxe = 999999.\n pmin = 9999999.\n for i in range(1, len(speeds)):\n if powers[i] < pmin:\n pmin = powers[i]\n SPEEDmaxe = speeds[i]\n POWERmaxe = powers[i]\n if debug: print speeds\n if debug: print powers\n fuelweight = 10000. 
#self.GW - v['Weights']['EmptyWeightFraction']*self.GW - v['Weights']['UsefulLoad'] # REMOVEME # change back to normal\n hourstoempty = fuelweight / (self.SFC(POWERmaxe) * POWERmaxe)\n v['Performance']['SFCatMaxEndurance'] = self.SFC(POWERmaxe)\n v['Performance']['MaxEndurance'] = hourstoempty\n v['Performance']['MaxEnduranceSpeed'] = SPEEDmaxe\n v['Performance']['PowerAtMaxEnduranceSpeed'] = POWERmaxe", "title": "" }, { "docid": "b40f05758d2a467b5891d1a383c21d3b", "score": "0.52472746", "text": "def main(limit):\n\tnumberLychrels = 0\n\n\tfor i in range(1, limit):\n\t\tif isLychrel(i):\n\t\t\tnumberLychrels += 1\n\treturn numberLychrels", "title": "" }, { "docid": "29b05f3c7f3781477e92d211d6de81b7", "score": "0.5246422", "text": "def main(n=3):\n bound = pow(10, n) - 1\n\n maximum = None\n for a in range(bound, 0, -1):\n if maximum and a * bound < maximum:\n # everything from here on is smaller than what we've found\n return maximum\n\n for b in range(bound, 0, -1):\n if maximum and a * b < maximum:\n # we can't get any value from here that's > maximum\n break\n\n product = a * b\n if str(product) == str(product)[::-1]:\n maximum = product", "title": "" }, { "docid": "db0f2ef62df26068eb238fc55a0d45a3", "score": "0.5244168", "text": "def two_of_three(x, y, z):\n return x**2 + y**2 + z**2 - max(x,y,z)**2", "title": "" }, { "docid": "8603f5f43ebc81f182588df6a2fa5a8e", "score": "0.52428895", "text": "def totient_maximum(limit=10):\n max_ratio, max_n = 0.0, 0\n\n # start_and_inc = 60 if limit > 100 else 2\n for num in range(2, limit + 1, 2):\n relative_ratio = float(num) / phi(num)\n if relative_ratio > max_ratio:\n max_ratio, max_n = relative_ratio, num\n return max_n, max_ratio", "title": "" }, { "docid": "f8b166962295555dbde2a00265df1fd6", "score": "0.52378553", "text": "def _compute_reward(self, action: int) -> Tuple[int, int]:\n r = self.rewards[self.idx, action]\n max_r = self.max_rewards[self.idx]\n return r, max_r", "title": "" }, { "docid": "090b4510fb6d20e3a83b49de1b52197b", "score": "0.5233725", "text": "def curtain_getendlockweight(slats, endlockweight, windlocks = False):\n if slats == 0: return 0\n ## I dont particularly like this function, but this is how it was originally written in the source\n if windlocks:\n endlockweight = (((slats / windlocks) * .656) / slats ) + .17\n return slats * endlockweight * 2", "title": "" }, { "docid": "52860dea714d4486c72ba5acb943e124", "score": "0.52314967", "text": "def advisability(self):\n final_result = self._temperature_factor() + self._rain_factor()\n if final_result < -5:\n final_result = -5\n elif final_result > 5:\n final_result = 5\n return final_result", "title": "" }, { "docid": "d6a97366ec7f65c36e7753414cd15516", "score": "0.5231098", "text": "def max_effective_range(self) -> float:\n return max(int(w.range) for m in self.models for w in m.weapons if w.range != 'melee')", "title": "" }, { "docid": "fc395c62b2be9afdc0fcce16c9958165", "score": "0.5221466", "text": "def get_binned_maxima(vel_read, qtty_read, fb, cb):\n #=-=-=-=-=-=-=-=-=-=-=-=- get binned mass -=-=-=-=-=-=-=-=-=-=-=-=\n vu = vel_read.unit\n\n #Make finer bins.\n fb_step = fb.to(vu).value\n vel_fb = np.arange(vel_read.value[0], vel_read.value[-1], fb_step) * vu\n qtty_fb = np.zeros(len(vel_fb)) * qtty_read.unit\n \n for i,vb in enumerate(vel_fb):\n for j,vr in enumerate(vel_read):\n if vr >= vb:\n qr = qtty_read[j-1]\n #print ' gotcha', vb, vr, qr\n break \n qtty_fb[i] = qr\n\n #Get qtty in regions.\n condition_i = ((vel_fb[0:-1] > 7850. 
* u.km / u.s)\n & (vel_fb[0:-1] <= 13680. * u.km / u.s))\n condition_m = ((vel_fb[0:-1] >= 13681. * u.km / u.s)\n & (vel_fb[0:-1] <= 16064. * u.km / u.s)) \n condition_o = ((vel_fb[0:-1] >= 16065. * u.km / u.s)\n & (vel_fb[0:-1] <= 19168. * u.km / u.s))\n condition_u = (vel_fb[0:-1] >= 19169. * u.km / u.s)\n\n qtty_i = qtty_fb[condition_i]\n qtty_m = qtty_fb[condition_m]\n qtty_o = qtty_fb[condition_o]\n qtty_u = qtty_fb[condition_u]\n \n #print zip(vel_fb[condition_o],qtty_fb[condition_o])\n #print zip(vel_fb[condition_u],qtty_fb[condition_u])\n \n return max(qtty_i), max(qtty_m), max(qtty_o), max(qtty_u)", "title": "" }, { "docid": "3d4b76360f3a3488cf282caa44a0cebb", "score": "0.52193695", "text": "def player_ahead(self):\n if self.p1_scoring_bucket.num_beads > self.p2_scoring_bucket.num_beads:\n return 1\n elif self.p2_scoring_bucket.num_beads > self.p1_scoring_bucket.num_beads:\n return 2\n return 0", "title": "" }, { "docid": "e83b6d0f25f34d6dc70731b453c170e3", "score": "0.52179265", "text": "def BOWMAN_SKILL():\n return {'double shot': 1, 'bomb arrow': 1, 'sling shot': 1, 'fire arrow': 2, 'lightning arrow': 2,\n 'crossbow shot': 2, 'Dragon breath': 3, 'bullseye shot': 3, 'terra ray': 3}", "title": "" }, { "docid": "03f15cefe107626c0860dcd1edfb080a", "score": "0.5215372", "text": "def wears_jacket(temp, raining):\n return temp < 60 or raining", "title": "" }, { "docid": "0232bad65f88af684b37e64d54be96b9", "score": "0.5209329", "text": "def jump_stats(previous_jumps, chute_altitude):\n n_previous_jumps = len(previous_jumps)\n n_better = sum([1 for pj in previous_jumps if chute_altitude < pj])\n return n_previous_jumps, n_better", "title": "" }, { "docid": "e13695cc211c1130e55935c05256f1b5", "score": "0.52084476", "text": "def roulette_swing(self,wheel):\n which = np.random.random()\n for n in range(len(wheel)):\n if which > wheel[n][1] and which < wheel[n][2]:\n return int(wheel[n][0])", "title": "" }, { "docid": "c9db2bcfd069ee0b2866ddc17a52bd76", "score": "0.52015615", "text": "def calculate_handicap(data_universe):\n differentials = []\n \n for rnd in data_universe:\n if not rnd[\"is_nine\"]:\n differentials.append(calculate_differential(rnd))\n \n lowest_eight_differentials = sorted(differentials, reverse=True)[:7]\n \n return round(mean(lowest_eight_differentials),0)", "title": "" }, { "docid": "4924ceccae6384dc8262c0efd069988f", "score": "0.51963043", "text": "def score_for_three_of_a_kind(roll: tuple) -> int:\r\n for n in dice_frequency(roll).values():\r\n if n >= 3:\r\n return sum(roll)\r\n return 0", "title": "" }, { "docid": "8fd13e12a046f3b25a77afbcf33e50b5", "score": "0.5192435", "text": "def FOE_MAX_HP_LV3():\n return 13", "title": "" }, { "docid": "3cb0c32c9ffc1dfd5a0875d6f3777eb1", "score": "0.518988", "text": "def get_max_level(self):\n # Turn off the logger in get_amplitude temporarily\n \n # Initial value\n n = 2\n # Look at the amplitudes or channels to find the max_level\n while self.get_amplitudes(n) or ((n,False) in list(self['decay_channels'].keys())):\n n += 1\n\n # n is the failed value, return n-1.\n return (n-1)", "title": "" }, { "docid": "57cc1146bfd4681e33497c137a9b3ad9", "score": "0.51892275", "text": "def bet(self):\n if self.count<=0:\n return 10\n elif self.count == 1:\n return 50\n elif self.count == 2:\n return 100\n elif self.count == 3:\n return 200\n elif self.count == 4:\n return 500\n else:\n return 1000", "title": "" }, { "docid": "0306d72269a8852d0901d6c80dfad06b", "score": "0.5188337", "text": "def max_dis(icebergs=all_icebergs):\r\n n = 
len(icebergs)\r\n m = -1\r\n for i in range(n):\r\n for j in range(i + 1, n):\r\n m = max(m, icebergs[i].get_turns_till_arrival(icebergs[j]))\r\n return m", "title": "" }, { "docid": "49447db1e3916ce724c1638c462ee41d", "score": "0.5188176", "text": "def get_reward(self):\r\n skill = np.argmax(self.state, axis=1)\r\n reward = 700 - 4 * np.sum( (skill-self.ground_truth_skill)**2 )\r\n # reward = reward / 200\r\n\r\n return reward", "title": "" }, { "docid": "aba59fcc84b419db507d0323df03a459", "score": "0.5187436", "text": "def maxPouthc(self):\r\n # open circuit voltage\r\r\n voc, mue, J, P, T = self.Vochc()\r\r\n maxP = max(P)\r\r\n # carrier temperature at maximum power point\r\r\n Tmpp = T[np.argmax(P)]\r\r\n # print \"maxP: {0:g}\".format(maxP)\r\r\n return maxP, Tmpp", "title": "" }, { "docid": "2738bf9a47e4577d7d1e6becb02b8359", "score": "0.51871514", "text": "def calc_DC_return(t_0, t_1):\n # if t_0 == 0:\n # t_0 = -1E6\n # if t_1 > 0:\n # tmax = max(t_0, t_1)\n # else:\n # tmax = t_0\n if t_0 == 0:\n t_0 = -1E6\n if t_1 == 0:\n t_1 = -1E6\n tmax = max(t_0, t_1)\n return tmax", "title": "" }, { "docid": "2fbb5f28ae1844a2f43d4c0625126f1d", "score": "0.518547", "text": "def q1(self):\n # we have 20 total rolls, so our most likely probabilities\n # are thed number of times a value appears / total number\n # of times we roll \n return [0.4, 0.2, 0.1, 0.3]", "title": "" }, { "docid": "50e6fb7cc1f830879b345db250e2dfed", "score": "0.5182325", "text": "def best_hand_val(self):\n if self.hand_val(hard=True) > 21:\n return 0\n if self.hand_val() <= 21:\n return self.hand_val()\n return self.hand_val(hard=True)", "title": "" }, { "docid": "8ecdcd41a08fa7aae93809663cc89801", "score": "0.518056", "text": "def test_previous_max_calculations(self):\n for week, w in zip(range(1, len(logic.weeks)), (95, 85, 90)):\n self.assertEqual(self.max_weight,\n logic.get_max_from_previous(\n w, week, increment=0),\n \"week = {}, weight = {}\".format(week, w))", "title": "" }, { "docid": "a0a757b0f0a3396f9291f07a6f980ee3", "score": "0.51740825", "text": "def main_logic(*args, **kwargs) -> int:\n number: int = args[0]\n number += 1\n maximal = 0\n number_with_maximal_sum = 0\n for i in range(1, number):\n if divisor(i) > maximal:\n maximal = divisor(i)\n number_with_maximal_sum = i\n return number_with_maximal_sum", "title": "" }, { "docid": "5cdae701533b2cee5cf5e41d67b39008", "score": "0.51714325", "text": "def alive_bonus(self, roll, pitch):\n #return +1 if z > 0.26 else -1 # 0.25 is central sphere rad, die if it scrapes the ground\n alive = roll < np.pi/2 and roll > -np.pi/2 and pitch > -np.pi/2 and pitch < np.pi/2\n debugmode = 0\n if debugmode:\n print(\"roll, pitch\")\n print(roll, pitch)\n print(\"alive\")\n print(alive)\n return +1 if alive else -1", "title": "" }, { "docid": "2cd3a14bce88e2db27fe5f35d87cf6e0", "score": "0.51714015", "text": "def score_for_threes(roll: tuple) -> int:\r\n return roll.count(3) * 3", "title": "" }, { "docid": "c3753f0ab57430d6aa072a0c6f3888b8", "score": "0.5170927", "text": "def ent_tank_a(self):\n # first guess planview area\n a_new = 1 * u.m**2\n a_ratio = 2 # set to >1+tolerance to start while loop\n tolerance = 0.01\n a_floc_pv = (\n self.floc.vol /\n (self.floc.downstream_H + (self.floc.HL / 2))\n )\n while a_ratio > (1 + tolerance):\n a_et_pv = a_new\n a_etf_pv = a_et_pv + a_floc_pv\n w_tot = a_etf_pv / self.floc.max_L\n w_chan = w_tot / self.floc.channel_n\n\n a_new = self.floc.max_L * w_chan\n a_ratio = a_new / a_et_pv\n return a_new", "title": "" }, { "docid": 
"117c60be3fd30e06b3c937425b5da08e", "score": "0.5169856", "text": "def rain(walls):\n if(not len(walls)):\n return 0\n n = len(walls)\n ans = 0\n for i in range(1, n - 1):\n left = walls[i]\n for j in range(i):\n left = max(left, walls[j])\n right = walls[i]\n for j in range(i + 1, n):\n right = max(right, walls[j])\n ans = ans + (min(left, right) - walls[i])\n return ans", "title": "" }, { "docid": "9c4ecd41046ee592d483601acd8d8359", "score": "0.51617366", "text": "def strategy(hand, num_die_sides):\n all_holds = gen_all_holds(hand)\n expected = []\n for hold in all_holds:\n num_free_dice = len(hand) - len(hold)\n expected.append(expected_value(hold, num_die_sides, num_free_dice))\n expected_score = float(max(expected))\n to_hold = list(all_holds)[expected.index(expected_score)]\n return (expected_score, to_hold)", "title": "" }, { "docid": "4ec8ee48a3f957f5345048d23b8b7ca3", "score": "0.51552826", "text": "def _beam_area(self):\n return np.pi * self.bmaj * self.bmin / (4.0 * np.log(2.0))", "title": "" }, { "docid": "2f90fb376ab66fcbee4d37b6ecde7cfb", "score": "0.515284", "text": "def get_decisions(nfl_pbp, max_ytg=21):\n\n ### Going for it on 4th down\n\n # expected value of 1st down given yardline\n plays = nfl_pbp[nfl_pbp['down'] == 1]\n firstDown_pts = np.zeros(99)\n for yds in range(1,100):\n firstDown_pts[yds-1] = np.mean(plays[plays.yardline_100 == yds].next_score_relative_to_posteam)\n \n # nonparametric smooth\n pts_nonparam = nonparam_smooth(firstDown_pts.copy(), window=21)\n firstDown_pts = dict(zip(range(1,100), pts_nonparam))\n\n\n ## Expected points from touchdown\n\n # 1 or 2 pt conversion\n # P(1-pt conversion) * P(successful 1-pt conversion) + 2 * P(2-pt conversion) * P(successful 2-pt conversion)\n one_pt_plays = np.sum(nfl_pbp.extra_point_attempt == 1)\n two_pt_plays = np.sum(nfl_pbp.two_point_attempt == 1)\n\n one_pt_prob = one_pt_plays / (one_pt_plays + two_pt_plays)\n one_pt_success = np.mean(nfl_pbp[nfl_pbp.extra_point_attempt == 1].extra_point_result == 'good')\n two_pt_success = np.mean(nfl_pbp[nfl_pbp.two_point_attempt == 1].two_point_conv_result == 'success')\n\n extra_pts = one_pt_prob * one_pt_success + 2 * (1 - one_pt_prob) * two_pt_success\n\n # expected points from touchdown\n kickoff_dist = 75 # average starting position after kickoff\n td_pts = 6 + extra_pts - firstDown_pts[kickoff_dist]\n\n def get_firstdown(play_type):\n \"\"\"\n Return dict of expected values of going for it, given yardline and yards to go\n \"\"\"\n plays = nfl_pbp[nfl_pbp.play_type == play_type]\n\n # probability of 1st down given yards to go\n firstDown_prob = np.zeros(max_ytg-1)\n for ytg in range(1, max_ytg):\n firstDown_prob[ytg-1] = np.mean(plays.yards_gained[plays.ydstogo == ytg] >= ytg)\n # polynomial smooth\n firstDown_poly = poly_smooth(np.arange(1,max_ytg), firstDown_prob.copy(), deg=3)[0]\n firstDown_prob = dict(zip(range(1,max_ytg), firstDown_poly))\n\n # calculate average yards gained on failed play_type, given yardline and yards to go\n firstDown_yds = {}\n for yardline in range(1,100):\n avg_yds = np.zeros(max_ytg-1)\n for ytg in range(1,max_ytg):\n avg_yds[ytg-1] = np.mean(plays[((plays.yardline_100 == yardline) & (plays.ydstogo == ytg) & (plays.yards_gained < ytg))].yards_gained)\n if avg_yds[ytg-1] != avg_yds[ytg-1]: # avg_yds is NaN\n avg_yds[ytg-1] = 0 \n # polynomial smooth\n avg_yds_poly = poly_smooth(np.arange(1,max_ytg), avg_yds.copy(), deg=3)[0]\n firstDown_yds[yardline] = dict(zip(range(1,max_ytg), avg_yds_poly))\n \n # cost of turnover given yardline and 
yards to go\n firstDown_cost = {}\n for yardline in range(1,100):\n firstDown_cost[yardline] = {}\n for ytg in range(1,max_ytg):\n opp_yardline = min(99, max(1, 100 - yardline + int(firstDown_yds[yardline][ytg]))) # must be int between 1 and 99\n firstDown_cost[yardline][ytg] = -1 * firstDown_pts[opp_yardline]\n\n if play_type == 'pass':\n # probability of interception at each yardline, and average yards gained\n intercept_probs = np.zeros(99) # probability of interception (not pick 6)\n intercept_td_probs = np.zeros(99) # probability of pick 6\n intercept_returns = np.zeros(99) # yards gained in return\n for i in range(99):\n ydline = plays[(plays.yardline_100 == i+1)]\n intercept_td_probs[i] = np.mean((ydline.interception == 1) & (ydline.touchdown == 1))\n intercept_probs[i] = np.mean((ydline.interception == 1) & (ydline.touchdown == 0))\n if sum(ydline.interception == 1) == 0:\n intercept_returns[i] = 0\n else:\n intercept_returns[i] = np.mean(ydline[(ydline.interception == 1)].air_yards - ydline[(ydline.interception == 1)].return_yards)\n # smoothing\n intercept_prob_nonparam = nonparam_smooth(intercept_probs.copy(), window=27)\n intercept_td_prob_nonparam = nonparam_smooth(intercept_td_probs.copy(), window=27)\n intercept_returns_nonparam = nonparam_smooth(intercept_returns.copy(), window=31)\n intercept_prob = dict(zip(range(1,100),intercept_prob_nonparam))\n intercept_td_prob = dict(zip(range(1,100),intercept_td_prob_nonparam))\n intercept_returns = dict(zip(range(1,100),intercept_returns_nonparam))\n\n # average cost of interception at each yardline\n interception = {}\n for yardline in range(1,100):\n if play_type == 'pass':\n opp_yardline = max(99, min(1, 100 - yardline + int(intercept_returns[yardline])))\n interception[yardline] = -1 * (intercept_td_prob[yardline] * td_pts + intercept_prob[yardline] * firstDown_pts[opp_yardline])\n else:\n interception[yardline] = 0\n\n # expected value of going for it, given yardline and yards to go\n goforit = {}\n for yardline in range(1, 100):\n yardline_value = {}\n for ytg in range(1, max_ytg):\n if yardline >= ytg and yardline-ytg < 90:\n if yardline == ytg: # score touchdown\n yardline_value[ytg] = firstDown_prob[ytg]*td_pts + (1-firstDown_prob[ytg])*firstDown_cost[yardline][ytg] + interception[yardline]\n else:\n yardline_value[ytg] = firstDown_prob[ytg]*firstDown_pts[yardline-ytg] + (1-firstDown_prob[ytg])*firstDown_cost[yardline][ytg] + interception[yardline]\n goforit[yardline] = yardline_value\n return goforit\n\n run_play = get_firstdown('run')\n pass_play = get_firstdown('pass')\n\n\n ### Field goal\n\n # probability of field goal given yardline\n plays = nfl_pbp[nfl_pbp['field_goal_attempt']==1]\n fg_prob = np.zeros(99)\n for i in range(50): # prob of field goal at any field position >= 50 is set to 0\n rows = plays.yardline_100==i+1\n fg_prob[i] = np.mean(plays[rows].posteam_score_post > plays[rows].posteam_score)\n if fg_prob[i] != fg_prob[i]: # if nan\n fg_prob[i] = 0\n\n # nonparametric smooth\n fg_nonparam = nonparam_smooth(fg_prob.copy(), window=21)\n fg_nonparam[fg_nonparam < 0] = 0\n fg_nonparam[fg_nonparam > 1] = 1\n\n fg_prob = dict(zip(range(1,100),fg_nonparam))\n\n # expected value of field goal given field position\n fg = {}\n fg_value = 3 - firstDown_pts[kickoff_dist] # value of field goal\n for yds in range(1, 100):\n if yds < 92: # max field position for field goal is 92\n fg[yds] = fg_prob[yds]*fg_value - (1-fg_prob[yds])*firstDown_pts[min(80, 100-(yds+8))]\n else:\n fg[yds] = -10 # small value that will never 
be chosen\n\n\n ### Punt\n plays = nfl_pbp[nfl_pbp.play_type == \"punt\"]\n\n # average punt distance\n punt_dist = np.zeros(99)\n for yds in range(1, 100):\n punt_dist[yds-1] = np.mean(plays[plays.yardline_100==yds].kick_distance) - np.mean(plays[plays.yardline_100==yds].return_yards)\n \n # nonparametric smooth\n punt_dist_nonparam = nonparam_smooth(punt_dist.copy(), window=21)\n punt_dist = dict(zip(range(1,100), punt_dist_nonparam))\n\n # expected value of punt given field position\n punt = {}\n for yds in range(1,100):\n if punt_dist[yds] != punt_dist[yds]: # if NaN, then touchback\n punt[yds] = -1 * firstDown_pts[80]\n else:\n punt[yds] = -1 * firstDown_pts[min(round(100 - yds + punt_dist[yds]), 80)]\n\n\n ### Decision and expected points matrices\n decision = np.zeros((max_ytg, 100))\n points = np.zeros((max_ytg, 100))\n points.fill(300)\n for yardline in range(1,100):\n for ytg in range(1, min(yardline+1, max_ytg)):\n if ytg > yardline or yardline-ytg >= 90:\n decision[ytg][yardline] = 0\n points[ytg][yardline] = 300\n else:\n decision[ytg][yardline] = np.array([fg[yardline], punt[yardline], run_play[yardline][ytg], pass_play[yardline][ytg]]).argmax()+1\n points[ytg][yardline] = max([fg[yardline], punt[yardline], run_play[yardline][ytg], pass_play[yardline][ytg]])\n return decision, points", "title": "" }, { "docid": "d0b91b564dfeb36ce33c17edd83c25a4", "score": "0.5139822", "text": "def get_max_rarity(loot_table):\n max_rarity = 0\n for key in loot_table.keys():\n rarity = loot_table[key]['rarity']\n max_rarity = rarity if rarity > max_rarity else max_rarity\n return max_rarity", "title": "" }, { "docid": "f55ffe984cbaa2d921409592cf5c0b3e", "score": "0.51357317", "text": "def _reward(self):\n reward = 0\n self._graspSuccess = 0\n for uid in self._objectUids:\n pos, _ = p.getBasePositionAndOrientation(uid)\n # If any block is above height, provide reward.\n if pos[2] > 0.2:\n self._graspSuccess += 1\n reward = 1\n break\n return reward", "title": "" }, { "docid": "07368d67f13ada1942377cbd7bc9976e", "score": "0.5129557", "text": "def specific_reward_function(self, bandit_price, bid_price, won, interval_size, max_reward_on_win, max_reward_on_lose, lower_bound_price):\n ###TODO: Fix this method, not working well at the moment April 22nd!\n upper_bound_price=interval_size\n\n diff = bid_price - bandit_price\n y=0\n\n if (diff <= interval_size): \n if won:\n y=max(0,round(-max_reward_on_win*diff/lower_bound_price + max_reward_on_win))\n else: #lose\n y=-max_reward_on_lose * diff * (diff - upper_bound_price)\n y=max(0,round(y))\n else:\n if won and diff>0:\n y= -1\n if not won and diff<0:\n y= -1\n\n return int(y)", "title": "" }, { "docid": "7057caaaa6af45c6b802873b39485a71", "score": "0.512249", "text": "def max_diffs(state):\n return best_action(state, pig_actions, Q_pig, win_diff)", "title": "" }, { "docid": "5e776edf521b748200a4ea26a496bbba", "score": "0.5118869", "text": "def determineshifts(shift):\n\n maxsh=0\n minsh=0\n for sh in shift:\n if sh > maxsh: maxsh=sh\n if sh < minsh: minsh=sh\n return maxsh, minsh", "title": "" }, { "docid": "08b5aac668e2cca003f4068aab473122", "score": "0.5111576", "text": "def compute_expected_value(outcomes, length):\n winnings = 0.0\n my_set = gen_all_sequences(outcomes, length)\n for roll in my_set:\n result = max_repeats(roll)\n if result == 3:\n winnings += 200\n elif result == 2:\n winnings += 10\n return winnings/len(my_set)", "title": "" }, { "docid": "98dd3fea770e5a9503219f0c50a9a2c1", "score": "0.51065844", "text": "def 
FOE_STAB_BACK_MINIMUM_DAMAGE():\n return 1", "title": "" }, { "docid": "1a53e56411e4d9434b6e1ee64a6167cb", "score": "0.5103236", "text": "def maxVal(toConsider, avail):\n if toConsider == [] or avail == 0:\n result = (0, ())\n elif toConsider[0].getCost() > avail:\n # Explore right branch only\n result = maxVal(toConsider[1:], avail)\n else:\n nextItem = toConsider[0]\n # Explore left branch\n withVal, withToTake = maxVal(toConsider[1:], avail - nextItem.getCost())\n withVal += nextItem.getValue()\n # Explore right branch\n withoutVal, withoutToTake = maxVal(toConsider[1:], avail)\n # Explore better branch\n if withVal > withoutVal:\n result = (withVal, withToTake + (nextItem,))\n else:\n result = (withoutVal, withoutToTake)\n return result", "title": "" }, { "docid": "b771347616f3ace3b1dc24d20e4c693f", "score": "0.5102057", "text": "def trap(self, height: List[int]) -> int:\n\n peak_list = self.findPeaks(height)\n\n if len(peak_list) <= 1: # If there is only 1 peak, then no water will be hold\n return 0\n\n else: # If there is two or more than two peak,\n # we have to check if the later peaks are between the first two peak\n # or a peak outside of the two peak range\n\n start, end = min(peak_list[0], peak_list[1]), max(peak_list[0], peak_list[1])\n peak_list = peak_list[2:]\n volume = self.vol(height[start:end + 1])\n\n while peak_list: # consume all the peaks by poping\n next_peak_idx = peak_list.pop(0)\n if next_peak_idx < start:\n volume += self.vol(height[next_peak_idx:start + 1])\n start = next_peak_idx\n elif next_peak_idx > end:\n volume += self.vol(height[end:next_peak_idx + 1])\n end = next_peak_idx\n else: # this is when the peak is between the current two peaks, so no calculation is needed\n pass\n\n return volume", "title": "" }, { "docid": "3079d8e5c1d86584f0981a3f80baa0fa", "score": "0.50929785", "text": "def character_damage_points(character: dict) -> tuple[int, int]:\n if character['class'] == PLAYER_CHOICES_CLASS()[0]: # Sorcerer\n min_dp_increment, max_dp_increment, min_initial, max_initial = SORCERER_DP_FACTORS_MIN_MAX_INCREMENT_INITIAL()\n elif character['class'] == PLAYER_CHOICES_CLASS()[1]: # Thief\n min_dp_increment, max_dp_increment, min_initial, max_initial = THIEF_DP_FACTORS_MIN_MAX_INCREMENT_INITIAL()\n elif character['class'] == PLAYER_CHOICES_CLASS()[2]: # Bowman\n min_dp_increment, max_dp_increment, min_initial, max_initial = BOWMAN_DP_FACTORS_MIN_MAX_INCREMENT_INITIAL()\n else: # Fighter\n min_dp_increment, max_dp_increment, min_initial, max_initial = FIGHTER_DP_FACTORS_MIN_MAX_INCREMENT_INITIAL()\n return min_dp_increment * character['level'] + min_initial, max_dp_increment * character['level'] + max_initial", "title": "" }, { "docid": "980d18968e62100391dc8779d807af6c", "score": "0.50914705", "text": "def pick_action(self):\n h, i, j = self.speed, self.lane, 0\n q_values = self.get_q_values(h, i, j)\n a = max(q_values.items(), key=operator.itemgetter(1))[0]\n return a", "title": "" }, { "docid": "caa3b334c56c1a524c2436ac2e0c3120", "score": "0.508702", "text": "def max_thruster(mem):\n return max(\n (run_amplifiers(mem, settings), settings)\n for settings in itertools.permutations(range(5))\n )", "title": "" }, { "docid": "7edda0fc5341a4d0fea4a802174dd03a", "score": "0.50861627", "text": "def trap(self, height: List[int]) -> int:\n peak_idx = self.find_max_idx(height, 0, len(height)) # first locate the max peak idx\n self.trap_helper(height, 0, peak_idx, peak_idx) # find everything before max_idx\n self.trap_helper(height, peak_idx + 1, len(height), 
peak_idx) # find everything after max_idx\n ans = self.VOLUME\n self.VOLUME = 0 # reset the class attributes for next run\n return ans", "title": "" }, { "docid": "d8dc32c6b655c69f51ac0c8ed00b36c7", "score": "0.5084326", "text": "def calculate_heuristic(room):\n value = 2\n if room.locally_allocated:\n value -= 2\n if room.pc:\n value += 2\n if room.whiteboard:\n value += 1\n if room.blackboard:\n value += 1\n if room.projector:\n value += 1\n if room.printer:\n value += 1\n return value", "title": "" }, { "docid": "44aeecc73a9eda06665e87083a3fb16b", "score": "0.5076175", "text": "def jail_pay_or_roll():\n r = np.random.rand()\n return 0 if r > 0.5 else 1", "title": "" }, { "docid": "b6621020ebf9094c2a457360311abf50", "score": "0.50754786", "text": "def max_efficieny(self)->float:\n for i in range(self.no_of_edges):\n p=Polygon(i+1,self.circumradius)\n self.ratios[i] = p.area()/p.perimeter()\n key = max(self.ratios,key = self.ratios.get)\n return self.ratios[key]", "title": "" }, { "docid": "63d40685bc04554e4b9cc076e122621f", "score": "0.50752646", "text": "def num_rushes(slope_height, rush_height_gain, back_sliding):\n slope_height-=rush_height_gain\n if slope_height <= 0:\n return 1\n slope_height+=back_sliding\n rush_height_gain*=0.95\n back_sliding*=0.95\n return 1 + num_rushes(slope_height, rush_height_gain, back_sliding)", "title": "" }, { "docid": "641717b4b70f4c3ee3827a5434c87f32", "score": "0.5074209", "text": "def Gaunt_low(L1,m1,L2,m2,L3,m3):\n import math\n def fa(n):\n if n<=10: fac = float(math.factorial(n))\n else: fac = math.sqrt(2*math.pi*n)*(n/math.e)**n\n return fac\n def fa_ratio(n1, n2): #calculates n1!/n2!\n p = 1.0\n for j in range (min(n1, n2)+1, max(n1, n2)+1):\n p = p*j\n if n1 >= n2: return p\n else: return 1/p\n params = [(L1,m1),(L2,m2),(L3,m3)]\n params = sorted(params,key=lambda list: list[1])\n l,u = params[-1]\n prms = params[:-1]\n prms = sorted(prms,key=lambda list: list[0])\n n,w = prms[0]\n m,v = prms[1]\n if not v+w == u: return 0\n else:\n s = (l+m+n)/2.0\n if not s-round(s) == 0 : return 0\n elif m+n <l : return 0\n elif m-n >l : return 0\n else:\n s = int(s)\n p = int(max(0, n-m-u))\n q = int(min(n+m-u,l-u,n-w))\n #print l,m,n,s\n #print fa(l-u),fa(n-w), fa(m-v),fa(s-l)\n a1 = (-1)**(s-m-w)*math.sqrt((2*l+1)*(2*m+1)*(2*n+1)*fa(l-u)*fa(n-w)/fa(m-v)/math.pi)/fa(s-l)/float(2*s+1)/4.0\n a2 = math.sqrt(fa(m+v)*fa(n+w)/fa(l+u))*fa(s)/fa(s-m)/fa(s-n)\n sum = 0\n for t in range (p,q+1):\n b1 = (-1)**t*fa(m+n-u-t)/fa(t)/fa(l-u-t)/fa(n-w-t) \n b2 = fa_ratio(l+u+t,2*s)*fa_ratio(2*s-2*n,m-n+u+t)\n sum+= b1*b2\n #print a1, a2, sum\n return a1*a2*sum", "title": "" }, { "docid": "867cfe29a055e90f1e49026430b2d224", "score": "0.50728136", "text": "def get_cooling_duty(heat_utilities):\n cooling_duty = sum([i.duty for i in heat_utilities if i.duty < 0 and i.flow > 0]) / 1e6 # GJ/hr\n return abs(cooling_duty)", "title": "" }, { "docid": "08fcd4c0a92e87f9d0e8e7bc344827b7", "score": "0.5070465", "text": "def max_value(self, gameState, depth):\n depth += 1\n value = -99999\n bestAction = \"\"\n for action in gameState.getLegalActions(0):\n newValue = self.value(gameState.generateSuccessor(0, action), (1)%gameState.getNumAgents(), depth)\n if newValue > value:\n value = newValue\n bestAction = action\n # print \"MAX at depth :\", depth, \"value :\", value\n #input(\"okay ?\")\n if bestAction ==\"\":\n value = self.evaluationFunction(gameState)\n return value, bestAction", "title": "" }, { "docid": "17dd3742f273a3577c03c875684f4b8c", "score": "0.5067159", "text": "def 
best_value(self) -> int:\n if self.value > 21 and self.has_aces():\n soft_value = self.value\n for _ in range(self.has_aces()):\n soft_value -= 10\n if soft_value <= 21:\n return soft_value\n return soft_value\n return self.value", "title": "" }, { "docid": "d7659da9b8ecbb853a302a4b0c5b1f31", "score": "0.5065543", "text": "def take_turn(self, last=None) -> Tuple[int, int]:\n crazy = (uniform(0, 1) < self.__craziness)\n ones = self.hand[1]\n d = np.argmax(list(self.hand.values())[2:])\n if len(self.opponent_hands) == 1 and self.opponent_hands[0] == 1:\n if self.size == 1:\n return one_on_one(last, self.hand[d])\n\n if not last:\n if len(self.opponent_hands) == 1 and sum(self.hand.values()) == 1:\n return (d, self.hand[d])\n if crazy:\n d = choice(range(1, 7))\n count = choice(range(1, (self.size // 4) + 1))\n return (d, count)\n if ones >= self.hand[d]:\n return (1, ones)\n return (d, ones + self.hand[d])\n\n k = last[1] - self.hand[last[0]]\n play = (self.__probs[last[0], k] < self.__aggressiveness)\n\n if (play and not crazy) or (not play and crazy):\n return (0, 0)\n play = self.__play(last)\n\n if self.__probs[last] - self.__probs[play[0], play[1] - ones] > 0.15:\n return (0, 0)\n\n if should_call(play, self.hand, self.total, self.wild):\n return (0, 0)\n\n return play", "title": "" }, { "docid": "dacd47223de5244831548c90a115d691", "score": "0.50653404", "text": "def strategy(hand, num_die_sides):\r\n set_of_holds = gen_all_holds(hand)\r\n\r\n #need to get die values from hold_set\r\n #call genb all holds\r\n\r\n count = 0\r\n hold_index = dict()\r\n hold_scores = dict()\r\n\r\n for held in set_of_holds: #held is a tuple\r\n hold_index[count] = held\r\n free_dice = len(hand) - len(held)\r\n expected_score = expected_value(held, num_die_sides, free_dice)\r\n hold_scores[count] = expected_score\r\n count += 1\r\n score_list = hold_scores.values()\r\n max_score = max(score_list)\r\n for index in hold_scores.keys():\r\n if hold_scores.get(index) == max_score:\r\n index_dice_to_hold = index\r\n\r\n return (max_score, tuple(hold_index.get(index_dice_to_hold)))", "title": "" }, { "docid": "d54a814e551bb35409a230b54dd1c120", "score": "0.50652343", "text": "def _threshold_youden(self):\n youden_index = np.argmax(np.abs(self.tpr_list - self.fpr_list))\n youden_threshold = self.roc_thresholds[youden_index]\n\n return youden_index, youden_threshold", "title": "" }, { "docid": "9b83a9c2f0a8049898d53785125e489d", "score": "0.50649476", "text": "def p39():\n def _get_b_for_perim_p(p, a):\n # Calc b based on solving equation a + b + sqrt(a ** 2 + b ** 2) = p\n # Also return whether number is integral\n num, den = p * (p - 2 * a), 2 * (p - a)\n is_int = not num % den\n return is_int, num / den\n \n max_sols = 0\n max_p = 0\n \n for p in xrange(1, 1001):\n num_sols = 0\n for a in xrange(1, p):\n is_int, b = _get_b_for_perim_p(p, a)\n if is_int:\n num_sols += 1\n \n if num_sols > max_sols:\n max_sols = num_sols\n max_p = p\n \n return max_p", "title": "" }, { "docid": "c927062f5907596cb9ac588e00c15526", "score": "0.5064848", "text": "def compute_level(tags):\n _level = biking_permitted(tags)\n #print('level', _level, 'tags', tags)\n if _level >= 100:\n _level = cycleway(_level, tags)\n _level = cyclestreet(_level, tags)\n _level = lane(_level, tags)\n _level = track(_level, tags)\n _level = separated_path(_level, tags)\n\n return _level", "title": "" }, { "docid": "806d8fb08b39e88dc754d5d0c9741f52", "score": "0.5064072", "text": "def place_water(comparisons):", "title": "" }, { "docid": 
"2b30e87a57fa619a3601468f01e8fda5", "score": "0.5063403", "text": "def _get_reward(self):\n logsize = 10000\n scale = Util.rss(self.limits[1, :])\n dpos = Util.rss(self.objective.position - self.platform.position)\n idx = np.int32((1-np.int32(dpos)/scale) * logsize)\n logrew_pos = np.logspace(-2, 2, logsize)\n rew = logrew_pos[idx]\n if self.platform.objective_in_fov:\n rew += 0.1\n if dpos < self.objective.radius:\n return 1000 # *(1-self.t/self.max_time)\n elif self._is_over() and Util.rss(self.platform.position - self.objective.position) > self.objective.radius:\n return -1000\n else:\n a = self.platform.position + self.platform.velocity\n b = self.platform.position\n c = self.objective.position\n return rew # util.getAngle(a, b, c)/10\n # if self.action_curr == 0:\n # return rew\n # else:\n # return rew/10", "title": "" } ]
28ac727e4fdb7ae12cfd62748788dfbe
Test to verify that a configuration error is thrown when supplying the heading_line_length value with a string that is not an integer.
[ { "docid": "4b5961f878003329402a5978b49890bb", "score": "0.76512426", "text": "def test_md013_bad_configuration_heading_line_length():\n\n # Arrange\n scanner = MarkdownScanner()\n source_path = os.path.join(\n \"test\", \"resources\", \"rules\", \"md013\", \"good_small_line.md\"\n )\n supplied_arguments = [\n \"--set\",\n \"plugins.md013.heading_line_length=not-integer\",\n \"--strict-config\",\n \"scan\",\n source_path,\n ]\n\n expected_return_code = 1\n expected_output = \"\"\n expected_error = (\n \"BadPluginError encountered while configuring plugins:\\n\"\n + \"The value for property 'plugins.md013.heading_line_length' must be of type 'int'.\"\n )\n\n # Act\n execute_results = scanner.invoke_main(arguments=supplied_arguments)\n\n # Assert\n execute_results.assert_results(\n expected_output, expected_error, expected_return_code\n )", "title": "" } ]
[ { "docid": "62254daa2a5181a2d6752378cd4df595", "score": "0.730549", "text": "def test_md013_bad_configuration_line_length():\n\n # Arrange\n scanner = MarkdownScanner()\n source_path = os.path.join(\n \"test\", \"resources\", \"rules\", \"md013\", \"good_small_line.md\"\n )\n supplied_arguments = [\n \"--set\",\n \"plugins.md013.line_length=not-integer\",\n \"--strict-config\",\n \"scan\",\n source_path,\n ]\n\n expected_return_code = 1\n expected_output = \"\"\n expected_error = (\n \"BadPluginError encountered while configuring plugins:\\n\"\n + \"The value for property 'plugins.md013.line_length' must be of type 'int'.\"\n )\n\n # Act\n execute_results = scanner.invoke_main(arguments=supplied_arguments)\n\n # Assert\n execute_results.assert_results(\n expected_output, expected_error, expected_return_code\n )", "title": "" }, { "docid": "54a7ca90c88761f102a30d4649daed7f", "score": "0.7140274", "text": "def test_md013_bad_configuration_line_length_zero():\n\n # Arrange\n scanner = MarkdownScanner()\n source_path = os.path.join(\n \"test\", \"resources\", \"rules\", \"md013\", \"good_small_line.md\"\n )\n supplied_arguments = [\n \"--set\",\n \"plugins.md013.line_length=$#0\",\n \"--strict-config\",\n \"scan\",\n source_path,\n ]\n\n expected_return_code = 1\n expected_output = \"\"\n expected_error = (\n \"BadPluginError encountered while configuring plugins:\\n\"\n + \"The value for property 'plugins.md013.line_length' is not valid: Allowable values are any integer greater than 0.\"\n )\n\n # Act\n execute_results = scanner.invoke_main(arguments=supplied_arguments)\n\n # Assert\n execute_results.assert_results(\n expected_output, expected_error, expected_return_code\n )", "title": "" }, { "docid": "b7b3dbd7702f410f467d587f6a5f39a9", "score": "0.7119405", "text": "def test_set_length_error(self):\n header = KNXIPHeader()\n with pytest.raises(TypeError):\n header.set_length(2)", "title": "" }, { "docid": "e032a28807f8ad20a4dbd9a31bdcac45", "score": "0.6988386", "text": "def test_md013_bad_configuration_code_block_line_length():\n\n # Arrange\n scanner = MarkdownScanner()\n source_path = os.path.join(\n \"test\", \"resources\", \"rules\", \"md013\", \"good_small_line.md\"\n )\n supplied_arguments = [\n \"--set\",\n \"plugins.md013.code_block_line_length=not-integer\",\n \"--strict-config\",\n \"scan\",\n source_path,\n ]\n\n expected_return_code = 1\n expected_output = \"\"\n expected_error = (\n \"BadPluginError encountered while configuring plugins:\\n\"\n + \"The value for property 'plugins.md013.code_block_line_length' must be of type 'int'.\"\n )\n\n # Act\n execute_results = scanner.invoke_main(arguments=supplied_arguments)\n\n # Assert\n execute_results.assert_results(\n expected_output, expected_error, expected_return_code\n )", "title": "" }, { "docid": "bbeb1db126edc2a110930408b606c5bb", "score": "0.6823097", "text": "def test_line_too_long():\n contents = 'a very long line'\n pytest.raises(ValueError, split_into_chunks, contents, max_size=1)", "title": "" }, { "docid": "34d91ebc00a769d88085f2120b90bf61", "score": "0.66932666", "text": "def test_invalid_unit_spec(self):\n\n self.assertRaises(RuntimeError, parse_abs_size_spec, \"747InVaLiDUnIt\")", "title": "" }, { "docid": "d2dadce2f539891f231ba7b821002953", "score": "0.6683647", "text": "def testInvalidInteger(self):\n test_config_lines = ['# PR_START',\n '# TIMEOUT = 10s',\n '# PR_END',\n ]\n self.assertRaises(ValueError,\n self.one_parser.ParseList,\n test_config_lines,\n False)\n\n test_config_lines = ['# PR_START',\n '# 
EXPECTED_RETURN = \"-1s\"',\n '# PR_END'\n ]\n self.assertRaises(ValueError,\n self.one_parser.ParseList,\n test_config_lines,\n False)", "title": "" }, { "docid": "316b17cf6cd62bfdfc5929d976166357", "score": "0.6644626", "text": "def test_broken_unit_spec(self):\n\n self.assertRaises(RuntimeError, parse_abs_size_spec, \"_+!HuHi+-=\")", "title": "" }, { "docid": "c30c3a5dfe08adb594cf9df170e26827", "score": "0.6636879", "text": "def test_get_elemental_line_parameters_3(eline):\n with pytest.raises(RuntimeError, match=f\"Elemental line {eline} has incorrect format\"):\n _get_elemental_line_parameters(elemental_line=eline, incident_energy=12)", "title": "" }, { "docid": "eed77296a239afb1cbc2aade7a29b6fc", "score": "0.6578464", "text": "def test_md013_bad_setext_heading_with_config():\n\n # Arrange\n scanner = MarkdownScanner()\n source_path = os.path.join(\n \"test\", \"resources\", \"rules\", \"md013\", \"bad_setext_heading.md\"\n )\n supplied_arguments = [\n \"--set\",\n \"plugins.md013.heading_line_length=$#100\",\n \"--strict-config\",\n \"scan\",\n source_path,\n ]\n\n expected_return_code = 0\n expected_output = \"\"\n expected_error = \"\"\n\n # Act\n execute_results = scanner.invoke_main(arguments=supplied_arguments)\n\n # Assert\n execute_results.assert_results(\n expected_output, expected_error, expected_return_code\n )", "title": "" }, { "docid": "2cffd8b2f74414cb24e552f503fae861", "score": "0.65673816", "text": "def test_md013_good_small_line_with_config():\n\n # Arrange\n scanner = MarkdownScanner()\n source_path = os.path.join(\n \"test\", \"resources\", \"rules\", \"md013\", \"good_small_line.md\"\n )\n supplied_arguments = [\n \"--set\",\n \"plugins.md013.line_length=$#25\",\n \"--strict-config\",\n \"scan\",\n source_path,\n ]\n\n expected_return_code = 1\n expected_output = (\n f\"{source_path}:1:1: \"\n + \"MD013: Line length \"\n + \"[Expected: 25, Actual: 38] (line-length)\"\n )\n expected_error = \"\"\n\n # Act\n execute_results = scanner.invoke_main(arguments=supplied_arguments)\n\n # Assert\n execute_results.assert_results(\n expected_output, expected_error, expected_return_code\n )", "title": "" }, { "docid": "dd43e8303dc501e77445dab1272c259b", "score": "0.6550586", "text": "def test_md013_bad_atx_heading_with_config():\n\n # Arrange\n scanner = MarkdownScanner()\n source_path = os.path.join(\n \"test\", \"resources\", \"rules\", \"md013\", \"bad_atx_heading.md\"\n )\n supplied_arguments = [\n \"--set\",\n \"plugins.md013.heading_line_length=$#100\",\n \"--strict-config\",\n \"scan\",\n source_path,\n ]\n\n expected_return_code = 0\n expected_output = \"\"\n expected_error = \"\"\n\n # Act\n execute_results = scanner.invoke_main(arguments=supplied_arguments)\n\n # Assert\n execute_results.assert_results(\n expected_output, expected_error, expected_return_code\n )", "title": "" }, { "docid": "b0bdd29778209826704f6ab6a6a943ac", "score": "0.65015954", "text": "def test_too_long_nhs_numbers_raise_exception(self):\n with self.assertRaisesRegex(ValueError, \"Expecting ten digits\"):\n nhs_number_is_valid(\"12345678901\")", "title": "" }, { "docid": "6ad7e8f468eafc813936cf0c9a854ab1", "score": "0.6481844", "text": "def test_md013_good_medium_line_with_config():\n\n # Arrange\n scanner = MarkdownScanner()\n source_path = os.path.join(\n \"test\", \"resources\", \"rules\", \"md013\", \"good_medium_line.md\"\n )\n supplied_arguments = [\n \"--set\",\n \"plugins.md013.line_length=$#50\",\n \"--strict-config\",\n \"scan\",\n source_path,\n ]\n\n expected_return_code = 1\n 
expected_output = (\n f\"{source_path}:1:1: \"\n + \"MD013: Line length \"\n + \"[Expected: 50, Actual: 80] (line-length)\"\n )\n expected_error = \"\"\n\n # Act\n execute_results = scanner.invoke_main(arguments=supplied_arguments)\n\n # Assert\n execute_results.assert_results(\n expected_output, expected_error, expected_return_code\n )", "title": "" }, { "docid": "fbb1e83067f45c785e43e6ca3a582893", "score": "0.6437536", "text": "def test_invalid_ratio_bounds_length_input():\n\n ratio = 'solar_capacity/wind_capacity'\n with pytest.raises(InputError) as excinfo:\n Hybridization(\n SOLAR_FPATH, WIND_FPATH, ratio=ratio, ratio_bounds=(1, 2, 3)\n )\n\n msg = (\"Length of input for ratio_bounds is 3 \"\n \"- but is required to be of length 2.\")\n assert msg in str(excinfo.value)", "title": "" }, { "docid": "3d98b1d4ec2f7a225ead912c70ee5d52", "score": "0.63685626", "text": "def test_too_short_nhs_numbers_raise_exception(self):\n with self.assertRaisesRegex(ValueError, \"Expecting ten digits\"):\n nhs_number_is_valid(\"12345678\")", "title": "" }, { "docid": "7338fc823fd5bbc1ec799198c355b02e", "score": "0.6355437", "text": "def test_error(self):\n raise ValueError", "title": "" }, { "docid": "d4b15b13ab7b3670a853caaced4ea11e", "score": "0.62700534", "text": "def test_validate_cfg(self):\n\n # Two values out of range.\n CFG = {'somersault': 0.0,\n 'tilt': 0.0,\n 'twist': 3.0 * np.pi,\n 'PTsagittalFlexion': 0.0,\n 'PTbending': 0.0,\n 'TCspinalTorsion': 0.0,\n 'TCsagittalSpinalFlexion': 0.0,\n 'CA1extension': 0.0,\n 'CA1adduction': 0.0,\n 'CA1rotation': 0.0,\n 'CB1extension': 0.0,\n 'CB1abduction': np.pi / 4.0,\n 'CB1rotation': 0.0,\n 'A1A2extension': 0.0,\n 'B1B2extension': 0.0,\n 'PJ1extension': 0.0,\n 'PJ1adduction': 0.0,\n 'PK1extension': -10.0 * np.pi,\n 'PK1abduction': 0.0,\n 'J1J2flexion': 0.0,\n 'K1K2flexion': 0.0,\n }\n desStr = (\"Joint angle twist = 3.0 pi-rad is out of range. \"\n \"Must be between -1.0 and 1.0 pi-rad.\\n\"\n \"Joint angle PK1extension = -10.0 pi-rad is out of range. 
\"\n \"Must be between -1.0 and 0.5 pi-rad.\\n\")\n\n old_stdout = sys.stdout\n sys.stdout = mystdout = StringIO()\n hum.Human(self.male1meas, CFG)\n sys.stdout = old_stdout\n\n self.assertEquals(mystdout.getvalue(), desStr)", "title": "" }, { "docid": "e19c72d205b630daad6caf7301baf33d", "score": "0.626611", "text": "def testWrongFormat(self):\n test_config_lines = ['# PR_START',\n '# ROOT_ACCESS = False',\n '# TIMEOUT = 600',\n '# EXPECTED_RETURN = 0',\n '# CONCURRENT = True',\n '# NFS = True']\n self.assertRaises(ValueError,\n self.one_parser.ParseList,\n test_config_lines,\n False)", "title": "" }, { "docid": "8fbd2ec492be5fb51669f68786486e3d", "score": "0.6262177", "text": "def test_md013_good_long_line_with_config():\n\n # Arrange\n scanner = MarkdownScanner()\n source_path = os.path.join(\n \"test\", \"resources\", \"rules\", \"md013\", \"good_long_line.md\"\n )\n supplied_arguments = [\n \"--set\",\n \"plugins.md013.line_length=$#110\",\n \"--strict-config\",\n \"scan\",\n source_path,\n ]\n\n expected_return_code = 0\n expected_output = \"\"\n expected_error = \"\"\n\n # Act\n execute_results = scanner.invoke_main(arguments=supplied_arguments)\n\n # Assert\n execute_results.assert_results(\n expected_output, expected_error, expected_return_code\n )", "title": "" }, { "docid": "d30080bfd212812c29b158269aee0099", "score": "0.6238165", "text": "def test_md013_bad_setext_heading():\n\n # Arrange\n scanner = MarkdownScanner()\n source_path = os.path.join(\n \"test\", \"resources\", \"rules\", \"md013\", \"bad_setext_heading.md\"\n )\n supplied_arguments = [\n \"scan\",\n source_path,\n ]\n\n expected_return_code = 1\n expected_output = f\"{source_path}:1:1: MD013: Line length [Expected: 80, Actual: 86] (line-length)\"\n expected_error = \"\"\n\n # Act\n execute_results = scanner.invoke_main(arguments=supplied_arguments)\n\n # Assert\n execute_results.assert_results(\n expected_output, expected_error, expected_return_code\n )", "title": "" }, { "docid": "d4a3d8cab466bdc8d685f5c086f23d34", "score": "0.6181366", "text": "def test_invalid_split_key_threshold(self):\n kwargs = {\"split_key_threshold\": \"invalid\"}\n self.assertRaisesRegex(\n TypeError,\n \"The split key threshold must be an integer.\",\n secrets.SplitKey,\n **kwargs\n )\n\n args = (\n secrets.SplitKey(),\n \"split_key_threshold\",\n \"invalid\"\n )\n self.assertRaisesRegex(\n TypeError,\n \"The split key threshold must be an integer.\",\n setattr,\n *args\n )", "title": "" }, { "docid": "32ff85fca3e251ec507755222b994fa9", "score": "0.61784315", "text": "def test_too_long_nhs_numbers_raise_exception(self):\n with self.assertRaisesRegex(ValueError, \"Expecting nine digits\"):\n calculate_check_digit(\"1234567890\")", "title": "" }, { "docid": "d31e2c9a8a284dbe78598f9ae71cd0b5", "score": "0.61321527", "text": "def test_md013_bad_configuration_headings_active():\n\n # Arrange\n scanner = MarkdownScanner()\n source_path = os.path.join(\n \"test\", \"resources\", \"rules\", \"md013\", \"good_small_line.md\"\n )\n supplied_arguments = [\n \"--set\",\n \"plugins.md013.headings=not-integer\",\n \"--strict-config\",\n \"scan\",\n source_path,\n ]\n\n expected_return_code = 1\n expected_output = \"\"\n expected_error = (\n \"BadPluginError encountered while configuring plugins:\\n\"\n + \"The value for property 'plugins.md013.headings' must be of type 'bool'.\"\n )\n\n # Act\n execute_results = scanner.invoke_main(arguments=supplied_arguments)\n\n # Assert\n execute_results.assert_results(\n expected_output, expected_error, 
expected_return_code\n )", "title": "" }, { "docid": "a778770e923fcbba568dad0e13a90f71", "score": "0.6118983", "text": "def test_incorrect_len_input(self):\n text = 'rgb(1, 2, 3)'\n self.assertRaises(ValueError, utils.rgba_grayscale, text)", "title": "" }, { "docid": "8122c699e3ce926e8a72e50ff56403ec", "score": "0.61184126", "text": "def test_height_valueerror(self):\n with self.assertRaisesRegex(ValueError, \"height must be > 0\"):\n r = Rectangle(1, -1)\n with self.assertRaisesRegex(ValueError, \"height must be > 0\"):\n r = Rectangle(1, 0)", "title": "" }, { "docid": "d8077e9828289317b540e25b206571c9", "score": "0.6110063", "text": "def test_md013_bad_thematic_break_with_config():\n\n # Arrange\n scanner = MarkdownScanner()\n source_path = os.path.join(\n \"test\", \"resources\", \"rules\", \"md013\", \"bad_thematic_break.md\"\n )\n supplied_arguments = [\n \"--set\",\n \"plugins.md013.line_length=$#100\",\n \"--strict-config\",\n \"scan\",\n source_path,\n ]\n\n expected_return_code = 0\n expected_output = \"\"\n expected_error = \"\"\n\n # Act\n execute_results = scanner.invoke_main(arguments=supplied_arguments)\n\n # Assert\n execute_results.assert_results(\n expected_output, expected_error, expected_return_code\n )", "title": "" }, { "docid": "92dcddacff55c516afa8ba4cc869a950", "score": "0.61066985", "text": "def test_badheightvaluewithstring(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Rectangle(1, \"foo\", 1, 2, 3)\n self.assertEqual(str(e.exception), 'height must be an integer')", "title": "" }, { "docid": "747fb10709fd61913058f9f74b4c82a7", "score": "0.6094965", "text": "def test_bad_hex_string_length(self):\n with self.assertRaises(ValueError):\n colour1 = colourettu.Colour(\"#dddd\")", "title": "" }, { "docid": "bd4b9609c5bb1c6472299be60d16ac48", "score": "0.6089908", "text": "def test_md013_bad_atx_heading():\n\n # Arrange\n scanner = MarkdownScanner()\n source_path = os.path.join(\n \"test\", \"resources\", \"rules\", \"md013\", \"bad_atx_heading.md\"\n )\n supplied_arguments = [\n \"scan\",\n source_path,\n ]\n\n expected_return_code = 1\n expected_output = f\"{source_path}:1:1: MD013: Line length [Expected: 80, Actual: 88] (line-length)\"\n expected_error = \"\"\n\n # Act\n execute_results = scanner.invoke_main(arguments=supplied_arguments)\n\n # Assert\n execute_results.assert_results(\n expected_output, expected_error, expected_return_code\n )", "title": "" }, { "docid": "f634d8c3251424cff3992f72600e37ee", "score": "0.6075521", "text": "def testUnequalLengths(self):\n error = 'Invalid read: sequence length \\(4\\) != quality length \\(3\\)'\n with self.assertRaisesRegexp(ValueError, error):\n Read('id', 'ACGT', '!!!')", "title": "" }, { "docid": "5a7dd490565098cd4d317bbfbb21debe", "score": "0.60435706", "text": "def test_circle_validation_line_width_pts(self):\n for bad_line_width_pts in (1.2, Decimal(3), -1, 'hello'):\n self.assertRaises(\n ValueError,\n Circle,\n centre_x_pts=306,\n centre_y_pts=396,\n radius_pts=144,\n line_width_pts=bad_line_width_pts)", "title": "" }, { "docid": "0330f944bcfb7445523e0437f8f3e978", "score": "0.603935", "text": "def test_too_short_nhs_numbers_raise_exception(self):\n with self.assertRaisesRegex(ValueError, \"Expecting nine digits\"):\n calculate_check_digit(\"12345678\")", "title": "" }, { "docid": "9dab2a6969d5357d027f482fe793e161", "score": "0.60316014", "text": "def test_md013_bad_medium_line_with_long_last_word_with_config_strict():\n\n # Arrange\n scanner = MarkdownScanner()\n source_path = 
os.path.join(\n \"test\",\n \"resources\",\n \"rules\",\n \"md013\",\n \"bad_medium_line_with_very_long_last_word.md\",\n )\n supplied_arguments = [\n \"--set\",\n \"plugins.md013.strict=$!True\",\n \"--strict-config\",\n \"scan\",\n source_path,\n ]\n\n expected_return_code = 1\n expected_output = (\n f\"{source_path}:1:1: \"\n + \"MD013: Line length \"\n + \"[Expected: 80, Actual: 102] (line-length)\"\n )\n expected_error = \"\"\n\n # Act\n execute_results = scanner.invoke_main(arguments=supplied_arguments)\n\n # Assert\n execute_results.assert_results(\n expected_output, expected_error, expected_return_code\n )", "title": "" }, { "docid": "83f1ba8e0be4d62dfd1d49ccdd2d2c6a", "score": "0.59969366", "text": "def test_section_type_too_long(self):\n \tself.assertRaises(ValidationError, SectionType.create, \"SectionTypeNameTooManyCharacters1\")", "title": "" }, { "docid": "476d9a4dca75a211f3120dac211f1907", "score": "0.59479785", "text": "def test_check_int_input():\n with pytest.raises(ValueError):\n check_int_input(\"9\")", "title": "" }, { "docid": "153d3f6aaee84b14b2e2b5fcab44bc26", "score": "0.5947949", "text": "def test_md013_bad_indented_code_block_with_config():\n\n # Arrange\n scanner = MarkdownScanner()\n source_path = os.path.join(\n \"test\", \"resources\", \"rules\", \"md013\", \"bad_indented_code_block.md\"\n )\n supplied_arguments = [\n \"--set\",\n \"plugins.md013.code_block_line_length=$#100\",\n \"--strict-config\",\n \"scan\",\n source_path,\n ]\n\n expected_return_code = 1\n expected_output = f\"{source_path}:5:1: MD013: Line length [Expected: 100, Actual: 154] (line-length)\"\n expected_error = \"\"\n\n # Act\n execute_results = scanner.invoke_main(arguments=supplied_arguments)\n\n # Assert\n execute_results.assert_results(\n expected_output, expected_error, expected_return_code\n )", "title": "" }, { "docid": "83ddb933a487fc71eb5dda281d6d6922", "score": "0.5945557", "text": "def test_max_length_exceeded_error_is_value_error(self):\n\n with self.assertRaises(ValueError) as cm:\n parse_uk_postcode('N16 8QSSS', True, True)\n self.assertEquals(cm.exception.__class__, MaxLengthExceededError)", "title": "" }, { "docid": "8b2c7c0f29445a9005ce7f7227f3419e", "score": "0.5938718", "text": "def test_get_elemental_line_parameters_2(eline):\n with pytest.raises(RuntimeError, match=f\"Elemental line {eline} is not supported\"):\n _get_elemental_line_parameters(elemental_line=eline, incident_energy=12)", "title": "" }, { "docid": "9db6f1b8c7f219e6c67b65ae11e175f1", "score": "0.5932433", "text": "def test_badwidthvaluewithstring(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Rectangle(\"foo\", 1, 2, 3)\n self.assertEqual(str(e.exception), 'width must be an integer')", "title": "" }, { "docid": "a5701ea0045b3ceeb81fa741ecc9d373", "score": "0.59308225", "text": "def test_base_parse_invalid_header():\n fh = BootSectorHeader()\n with pytest.raises(ValueError):\n fh.parse_header(b'foo')", "title": "" }, { "docid": "f24542e4ffba1cd71d0114a5862f6518", "score": "0.59081703", "text": "def test_width_valueerror(self):\n with self.assertRaisesRegex(ValueError, \"width must be > 0\"):\n r = Rectangle(-1, 1)\n with self.assertRaisesRegex(ValueError, \"width must be > 0\"):\n r = Rectangle(0, 1)", "title": "" }, { "docid": "0a23cea12ed1d8a1d885da8631e7f088", "score": "0.58776915", "text": "def test_fat32_parse_invalid_header():\n fh = FAT32BootSectorHeader()\n with pytest.raises(ValueError):\n fh.parse_header(b'foo')", "title": "" }, { "docid": 
"b9f651b4d5f2efc2e5c0f9202e7ef444", "score": "0.58701867", "text": "def testTooLarge(self): \n print self.assertRaises(roman.OutOfRangeError, roman.toRoman, 4000)", "title": "" }, { "docid": "c8539117ec5026a92ab922edc2306cf0", "score": "0.58672893", "text": "def test_storyerror_get_line(patch, storyerror, error):\n error.line = '1'\n storyerror.story = 'x = 0\\ny = 1'\n assert storyerror.get_line() == 'x = 0'", "title": "" }, { "docid": "f821241497e75bff83ec48912c14dad7", "score": "0.58469886", "text": "def test__rules__std_L003_make_indent_invalid_param():\n with pytest.raises(ValueError):\n Rule_L003._make_indent(indent_unit=\"aaa\")", "title": "" }, { "docid": "42e7cc8091c248364da57b54fb7689d1", "score": "0.5842191", "text": "def test_invalid_setting(self):\n st = self.st.new_context()\n st.set_config({'safe_break_in_pulses': int(3600e9),\n 'dummy_version': 'test_invalid_setting',\n })\n with self.assertRaises(ValueError):\n st.make(self.run_id, 'raw_records')", "title": "" }, { "docid": "2df0004f39b34206e8d5aa72bcd41bd1", "score": "0.5839671", "text": "def test_invalid_prime_field_size(self):\n kwargs = {\"prime_field_size\": \"invalid\"}\n self.assertRaisesRegex(\n TypeError,\n \"The prime field size must be an integer.\",\n secrets.SplitKey,\n **kwargs\n )\n\n args = (\n secrets.SplitKey(),\n \"prime_field_size\",\n \"invalid\"\n )\n self.assertRaisesRegex(\n TypeError,\n \"The prime field size must be an integer.\",\n setattr,\n *args\n )", "title": "" }, { "docid": "39d3d458dde9e35ae73a56f900300438", "score": "0.5835295", "text": "def test_fat12_parse_invalid_header():\n fh = FAT12BootSectorHeader()\n with pytest.raises(ValueError):\n fh.parse_header(b'foo')", "title": "" }, { "docid": "e2ac65389cbb38c2f3a004b20d0fd51f", "score": "0.58347917", "text": "def test_fail_termination():\n random_string = \"test string\\n test string\"\n with pytest.raises(qcng.exceptions.UnknownError):\n GaussianHarness.check_convergence(logfile=random_string)", "title": "" }, { "docid": "dc67b6d1e4ca46d0a7b3bd5b1fc2d10b", "score": "0.5823609", "text": "def test_width_string(self):\n with self.assertRaises(TypeError):\n a = Square(\"1\")", "title": "" }, { "docid": "cf6efedc4871917ff0826227afd60eeb", "score": "0.58211195", "text": "def test_md013_bad_html_block_with_config():\n\n # Arrange\n scanner = MarkdownScanner()\n source_path = os.path.join(\n \"test\", \"resources\", \"rules\", \"md013\", \"bad_html_block.md\"\n )\n supplied_arguments = [\n \"--set\",\n \"plugins.md013.line_length=$#100\",\n \"--strict-config\",\n \"scan\",\n source_path,\n ]\n\n expected_return_code = 0\n expected_output = \"\"\n expected_error = \"\"\n\n # Act\n execute_results = scanner.invoke_main(arguments=supplied_arguments)\n\n # Assert\n execute_results.assert_results(\n expected_output, expected_error, expected_return_code\n )", "title": "" }, { "docid": "21ccea666b5c4f5151e98869e911a252", "score": "0.58031356", "text": "def check_config(config):\n for key, value in config.items():\n if key == \"REPORT_SIZE\":\n try:\n if int(config[key]) <= 0:\n return \"REPORT_SIZE should be > 0\"\n except ValueError:\n return \"REPORT_SIZE should be integer\"\n else:\n if not os.path.exists(config[key]):\n return \"%s: %s - path doesn't exist-\" % (key, config[key])\n return None", "title": "" }, { "docid": "3a758b4046a5035c86bd2729dedc4b1e", "score": "0.57976794", "text": "def test_md013_good_medium_line_with_long_last_word_with_config_strict():\n\n # Arrange\n scanner = MarkdownScanner()\n source_path = os.path.join(\n \"test\",\n 
\"resources\",\n \"rules\",\n \"md013\",\n \"good_medium_line_with_very_long_last_word.md\",\n )\n supplied_arguments = [\n \"--set\",\n \"plugins.md013.strict=$!True\",\n \"--strict-config\",\n \"scan\",\n source_path,\n ]\n\n expected_return_code = 1\n expected_output = (\n f\"{source_path}:1:1: \"\n + \"MD013: Line length \"\n + \"[Expected: 80, Actual: 102] (line-length)\"\n )\n expected_error = \"\"\n\n # Act\n execute_results = scanner.invoke_main(arguments=supplied_arguments)\n\n # Assert\n execute_results.assert_results(\n expected_output, expected_error, expected_return_code\n )", "title": "" }, { "docid": "059250ee42e84fad118f6977681732f4", "score": "0.5785742", "text": "def testInvalidMinLength(self):\n read = DNARead(\"id\", \"\")\n error = \"^minLength must be at least 1$\"\n assertRaisesRegex(\n self, ValueError, error, sequenceCategoryLengths, read, {}, minLength=0\n )", "title": "" }, { "docid": "b85b3103b5534ff4c2d8b622db44bfd4", "score": "0.57651466", "text": "def test_nlp_file_reader_consumer_num_str():\n with pytest.raises(Exception, match=\"Parameter num_consumer is not int.\"):\n FileReader(\"dummy.mindrecord\", \"4\")", "title": "" }, { "docid": "bc10502f69f803dea2988f51b0ba4e82", "score": "0.5761825", "text": "def test_can_fail(self):\n value = '123r456'\n validator = Digits()\n error = validator.validate(value)\n self.assertTrue(error)\n self.assertTrue(type(error.message) is str)", "title": "" }, { "docid": "948d585d5c184bed831e9ae88946a0da", "score": "0.57613677", "text": "def test_hints_from_strings_invalid():\n with pytest.raises(ValueError):\n hints_from_strings(\"abcde\")", "title": "" }, { "docid": "6ed759114fe17bb2bc08c884f741e307", "score": "0.57557565", "text": "def test_out_of_bounds(pipe_type):\n with pytest.raises(ValueError):\n pipe_type.parameters[\"Number_of_Radial_Soil_Nodes\"] = 21", "title": "" }, { "docid": "2d570e8083a61492692bbc463f17aec0", "score": "0.57542264", "text": "def test_MarriageBefore14(self):\r\n self.assertRaises(MarriageBefore14, lambda: self.x4.analyze())", "title": "" }, { "docid": "6c9ee7f16ddefe6999dbd59afaaa7041", "score": "0.5739225", "text": "def test_md013_good_medium_line_with_long_last_word_with_config_stern():\n\n # Arrange\n scanner = MarkdownScanner()\n source_path = os.path.join(\n \"test\",\n \"resources\",\n \"rules\",\n \"md013\",\n \"good_medium_line_with_very_long_last_word.md\",\n )\n supplied_arguments = [\n \"--set\",\n \"plugins.md013.stern=$!True\",\n \"--strict-config\",\n \"scan\",\n source_path,\n ]\n\n expected_return_code = 1\n expected_output = (\n f\"{source_path}:1:1: \"\n + \"MD013: Line length \"\n + \"[Expected: 80, Actual: 102] (line-length)\"\n )\n expected_error = \"\"\n\n # Act\n execute_results = scanner.invoke_main(arguments=supplied_arguments)\n\n # Assert\n execute_results.assert_results(\n expected_output, expected_error, expected_return_code\n )", "title": "" }, { "docid": "d55023c33eb5a8eda4c3ba9ce7a84f20", "score": "0.57109624", "text": "def test_line_assert_exception_pattern():\n file_content = os.linesep.join(\n [\"file_roots:\", \" base:\", \" - /srv/salt\", \" - /srv/sugar\"]\n )\n cfg_content = \"- /srv/custom\"\n before_line = \"/sr.*\"\n with patch(\"os.path.realpath\", MagicMock(wraps=lambda x: x)), patch(\n \"os.path.isfile\", MagicMock(return_value=True)\n ), patch(\"os.stat\", MagicMock()), patch(\n \"salt.utils.files.fopen\", mock_open(read_data=file_content)\n ), patch(\n \"salt.utils.atomicfile.atomic_open\", mock_open()\n ):\n with pytest.raises(CommandExecutionError) 
as cm:\n filemod.line(\n \"foo\",\n content=cfg_content,\n before=before_line,\n mode=\"insert\",\n )\n assert (\n str(cm.value)\n == 'Found more than expected occurrences in \"before\" expression'\n )", "title": "" }, { "docid": "eb2b9c1296f24b84ab14d728efbed1a3", "score": "0.5710879", "text": "def test_providing_something_not_a_number_raises_exception(self):\n with self.assertRaisesRegex(ValueError, \"nhs_number must comprise only digits\"):\n nhs_number_is_valid(\"A\")", "title": "" }, { "docid": "0065358acf2801b68cd14e4fcc098fdf", "score": "0.57108384", "text": "def test_cv_file_writer_shard_num_str():\n with pytest.raises(Exception, match=\"Parameter shard_num's type is not int.\"):\n FileWriter(\"/tmp/123454321\", \"20\")", "title": "" }, { "docid": "264d944cf1882d5354e1322f1356fc26", "score": "0.57080317", "text": "def testErrorWrapping(self):\n temp_file_system = {'/tmp/source/test1.sh': ['# PR_START\\n',\n '# TIMEOUT = \"1o\"\\n'\n '# PR_END\\n',\n ]\n }\n self._PopulateFileSystem(temp_file_system)\n self.assertRaises(ValueError,\n self.one_parser.ParseFiles,\n ['/tmp/source/test1.sh'],\n False)", "title": "" }, { "docid": "047aab2aaa7fed755b0e9d5e0f4bc5b5", "score": "0.570726", "text": "def test_md013_good_long_line():\n\n # Arrange\n scanner = MarkdownScanner()\n source_path = os.path.join(\n \"test\", \"resources\", \"rules\", \"md013\", \"good_long_line.md\"\n )\n supplied_arguments = [\n \"scan\",\n source_path,\n ]\n\n expected_return_code = 1\n expected_output = (\n f\"{source_path}:1:1: \"\n + \"MD013: Line length \"\n + \"[Expected: 80, Actual: 100] (line-length)\"\n )\n expected_error = \"\"\n\n # Act\n execute_results = scanner.invoke_main(arguments=supplied_arguments)\n\n # Assert\n execute_results.assert_results(\n expected_output, expected_error, expected_return_code\n )", "title": "" }, { "docid": "e6db8e675fadd730e1cc68d62339af76", "score": "0.57054704", "text": "def test_header(self):\n from rippy.RIP import header\n expected = 'foo\\n%(level)s\\n\\n'\n self.assertEqual(header('foo', 1), expected % {'level': '###'})\n self.assertEqual(header('foo', 2), expected % {'level': '***'})\n self.assertEqual(header('foo', 3), expected % {'level': '==='})\n self.assertEqual(header('foo', 4), expected % {'level': '---'})\n self.assertEqual(header('foo', 5), expected % {'level': '^^^'})\n self.assertEqual(header('foo', 6), expected % {'level': '\"\"\"'})\n # no text returns ''\n self.assertEqual(header('', 1), '')\n self.assertRaises(KeyError, header, 'foo', 7)", "title": "" }, { "docid": "573dabaafa2d8567fe3108d51e5337dc", "score": "0.57046646", "text": "def test_height_noint(self):\n with self.assertRaisesRegex(TypeError, \"height must be an integer\"):\n r = Rectangle(1, \"string\")\n with self.assertRaisesRegex(TypeError, \"height must be an integer\"):\n r = Rectangle(1, True)\n with self.assertRaisesRegex(TypeError, \"height must be an integer\"):\n r = Rectangle(1, [9])\n with self.assertRaisesRegex(TypeError, \"height must be an integer\"):\n r = Rectangle(1, (2, 1))\n with self.assertRaisesRegex(TypeError, \"height must be an integer\"):\n r = Rectangle(1, 2.0)", "title": "" }, { "docid": "ef80e922deeee767bbdd3f1b4bcd5428", "score": "0.5681129", "text": "def test_check_heights_myrorss_non_shear_invalid(self):\n\n with self.assertRaises(ValueError):\n radar_utils.check_heights(\n data_source=radar_utils.MYRORSS_SOURCE_ID,\n heights_m_asl=numpy.array(\n [radar_utils.DEFAULT_HEIGHT_MYRORSS_M_ASL + 1]),\n field_name=radar_utils.REFL_M10CELSIUS_NAME)", "title": "" }, { 
"docid": "5c0c4ac5e044274ce084d45d83ce64d7", "score": "0.5679538", "text": "def test_md013_bad_fenced_code_block_with_config():\n\n # Arrange\n scanner = MarkdownScanner()\n source_path = os.path.join(\n \"test\", \"resources\", \"rules\", \"md013\", \"bad_fenced_code_block.md\"\n )\n supplied_arguments = [\n \"--set\",\n \"plugins.md013.code_block_line_length=$#100\",\n \"--strict-config\",\n \"scan\",\n source_path,\n ]\n\n expected_return_code = 1\n expected_output = f\"{source_path}:6:1: MD013: Line length [Expected: 100, Actual: 146] (line-length)\"\n expected_error = \"\"\n\n # Act\n execute_results = scanner.invoke_main(arguments=supplied_arguments)\n\n # Assert\n execute_results.assert_results(\n expected_output, expected_error, expected_return_code\n )", "title": "" }, { "docid": "0924fe3718d0ddbd14a0148bb265b49b", "score": "0.5679374", "text": "def testInvalidLightSource(self):\n config = ReadLineListConfig()\n config.lightSources = ['Invalid']\n config.lightSourceMap = {'Valid1': 'a', 'Valid2': 'b'}\n with self.assertRaises(RuntimeError):\n config.validate()", "title": "" }, { "docid": "7cf6dce745acefdeda9b049d72a28c32", "score": "0.5667589", "text": "def testRSformaterror(self):\n self.assertRaises(ValueError, self.toolkit.readstring, \"noel\", \"jkjk\")", "title": "" }, { "docid": "25ed61789cd057b9d37bba7e3e80bdfa", "score": "0.5665751", "text": "def test_does_not_crash():\n size = get_terminal_size()\n assert size.columns >= 0\n assert size.lines >= 0", "title": "" }, { "docid": "8cb57f5cf0239c8da7609bce52cfecb3", "score": "0.5661223", "text": "def test_invalid_nhs_number_detection(self):\n with self.assertRaisesRegex(ValueError, \"Number is invalid\"):\n calculate_check_digit(\"123456789\")", "title": "" }, { "docid": "df6a26b81e5f3d57e5b6fa0985d0a195", "score": "0.5658919", "text": "def test_value_error_check_map_input(self):\n with pytest.raises(ValueError):\n island_map_string = \"OOOO\\nOAO\\nOOOO\"\n self.rossumoya.check_map_input(island_map_string)\n with pytest.raises(ValueError):\n island_map_string = \"OOOO\\nOKJO\\nOOOO\"\n self.rossumoya.check_map_input(island_map_string)\n with pytest.raises(ValueError):\n island_map_string = \"OOOJ\\nOJSO\\nOOOO\"\n self.rossumoya.check_map_input(island_map_string)\n with pytest.raises(ValueError):\n island_map_string = \"OOOO\\nOJSO\\nOOSO\"\n self.rossumoya.check_map_input(island_map_string)\n with pytest.raises(ValueError):\n island_map_string = \"OOOJ\\nMJSO\\nOOOO\"\n self.rossumoya.check_map_input(island_map_string)\n with pytest.raises(ValueError):\n island_map_string = \"OOOO\\nOJSS\\nOOOO\"\n self.rossumoya.check_map_input(island_map_string)", "title": "" }, { "docid": "842ef395c2acc69211a110a212fa9126", "score": "0.5658603", "text": "def test_check_heights_myrorss_shear_invalid(self):\n\n with self.assertRaises(ValueError):\n radar_utils.check_heights(\n data_source=radar_utils.MYRORSS_SOURCE_ID,\n heights_m_asl=numpy.array(\n [radar_utils.SHEAR_HEIGHT_M_ASL + 1]),\n field_name=radar_utils.MID_LEVEL_SHEAR_NAME)", "title": "" }, { "docid": "1d2cd1c0f9748256b69b8cfa8a0eba23", "score": "0.56574196", "text": "def test_clusterconfig_from_configparser_errors():\n confpars = configparser.ConfigParser()\n confpars[CFG_CLUSTER] = {CFG_CLUSTER_NUM_CPUS: '-25',\n CFG_CLUSTER_NUM_NODES: 'abc',\n CFG_CLUSTER_BID_PERCENTAGE: '101'}\n confpars[CFG_BLAST] = {CFG_BLAST_MEM_LIMIT: '-5',\n CFG_BLAST_MEM_REQUEST: '-5',\n CFG_BLAST_DB_SRC: 'some-db-source'}\n\n with pytest.raises(ValueError) as err:\n cfg = 
ClusterConfig.create_from_cfg(confpars)\n\n # test that each invalid parameter value is reported\n errors = str(err.value).split('\\n')\n for key in confpars[CFG_CLUSTER].keys():\n assert [message for message in errors if key in message and 'invalid value' in message and confpars[CFG_CLUSTER][key] in message]\n for key in confpars[CFG_BLAST].keys():\n assert [message for message in errors if key in message and 'invalid value' in message and confpars[CFG_BLAST][key] in message]", "title": "" }, { "docid": "4bd8803c7186c80480a279199f1fbda6", "score": "0.5651014", "text": "def test_transform_incorrect_len_hex(self):\n color = '#ff00'\n self.assertRaises(ValueError, utils.transform_value, color)", "title": "" }, { "docid": "0321cfc068f4a90ab1ad86d6aa62a357", "score": "0.56437755", "text": "def report_error(self, line_number, offset, text, check):", "title": "" }, { "docid": "0837e43a55181dc822f8e858bda0144a", "score": "0.5638987", "text": "def test_invalid_ratio_format(ratio):\n\n with pytest.raises(InputError) as excinfo:\n Hybridization(\n SOLAR_FPATH, WIND_FPATH, ratio=ratio, ratio_bounds=(1, 1)\n )\n\n long_msg = (\"Please make sure the ratio input is a string in the form \"\n \"'numerator_column_name/denominator_column_name'\")\n assert \"Ratio input \" in str(excinfo.value)\n assert long_msg in str(excinfo.value)", "title": "" }, { "docid": "186328c92ae9cf2b9a3ec32f768967c6", "score": "0.5638573", "text": "def test_agentchannelkind_value_error():\n # Value error exceptions\n with pytest.raises(ValueError):\n AgentChannelKind('FOO')", "title": "" }, { "docid": "263f18c0e157d62f99f36bf10001331a", "score": "0.5637928", "text": "def test_heightsettervalidation(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Rectangle(25, 20, 30, 35, 100)\n r1.height = \"foo\"\n self.assertEqual(str(e.exception), 'height must be an integer')", "title": "" }, { "docid": "9bde5330dd4a9cc1af5ca0ae4c6d7246", "score": "0.5636542", "text": "def test_height_0(self):\n with self.assertRaises(ValueError):\n r1 = Rectangle(2, 0)", "title": "" }, { "docid": "d82fd56f68426c21bfe4c3374b6fe1bb", "score": "0.5634841", "text": "def testRaisesErrorMismatchParameters(self):\n with self.assertRaises(ValueError):\n self.test.calculate_goodness_of_fit()", "title": "" }, { "docid": "8195fc1f1edd0ade95280b5f2ba334fd", "score": "0.56302273", "text": "def maximum_line_length(physical_line, max_line_length, multiline,\n line_number, noqa):\n line = physical_line.rstrip()\n length = len(line)\n if length > max_line_length and not noqa:\n # Special case: ignore long shebang lines.\n if line_number == 1 and line.startswith('#!'):\n return\n # Special case for long URLs in multi-line docstrings or\n # comments, but still report the error when the 72 first chars\n # are whitespaces.\n chunks = line.split()\n if ((len(chunks) == 1 and multiline) or\n (len(chunks) == 2 and chunks[0] == '#')) and \\\n len(line) - len(chunks[-1]) < max_line_length - 7:\n return\n if hasattr(line, 'decode'): # Python 2\n # The line could contain multi-byte characters\n try:\n length = len(line.decode('utf-8'))\n except UnicodeError:\n pass\n if length > max_line_length:\n return (max_line_length, \"E501 line too long \"\n \"(%d > %d characters)\" % (length, max_line_length))", "title": "" }, { "docid": "8fce83c603a2977ce81aa37b6823fbe8", "score": "0.56224006", "text": "def test_receiveResponseHeadersTooLong(self):\n transport = StringTransportWithDisconnection()\n protocol = HTTP11ClientProtocol()\n transport.protocol = 
protocol\n protocol.makeConnection(transport)\n\n longLine = b'a' * LineReceiver.MAX_LENGTH\n d = protocol.request(Request(b'GET', b'/', _boringHeaders, None))\n\n protocol.dataReceived(\n b\"HTTP/1.1 200 OK\\r\\n\"\n b\"X-Foo: \" + longLine + b\"\\r\\n\"\n b\"X-Ignored: ignored\\r\\n\"\n b\"\\r\\n\"\n )\n\n # For now, there is no signal that something went wrong, just a\n # connection which is closed in what looks like a clean way.\n # L{LineReceiver.lineLengthExceeded} just calls loseConnection\n # without giving any reason.\n return assertResponseFailed(self, d, [ConnectionDone])", "title": "" }, { "docid": "741ee4458cc22be10550a9b97fd68a1b", "score": "0.56208986", "text": "def test_other_error_raised(self):\n self.validate_image.return_value = defer.fail(ValueError(':('))\n d = validate_launch_server_config(self.log, 'dfw', 'catalog', 'token',\n self.launch_config)\n f = self.failureResultOf(d, InvalidLaunchConfiguration)\n self.assertEqual(\n f.value.message,\n ('Following problems with launch configuration:\\n'\n 'Invalid imageRef \"imagegood\" in launchConfiguration'))", "title": "" }, { "docid": "61e6c475e493fcc0a10e7501022ccab2", "score": "0.5615528", "text": "def test_too_little_indent(self):\n with self.assertRaises(ValueError):\n Baseline(\"\"\"\n not indented enough\n \"\"\")\n\n self.check_updated_files()", "title": "" }, { "docid": "dc0b899077039e3f3536cc244a4169ac", "score": "0.5613792", "text": "def test_no_track_error():\n try:\n fail = survey.get_scale_height_data()\n except SyntaxError:\n assert True\n else:\n assert False", "title": "" }, { "docid": "8cb856b33d37e446872d9e87913fa476", "score": "0.5611", "text": "def test_bad_size_value(self):\n with self.assertRaises(ValueError) as cm:\n self.sq = Square(-10)", "title": "" }, { "docid": "a261f2a2089506b4c849fbf730bd6599", "score": "0.56094575", "text": "def test_md013_bad_paragraph_with_long_line_in_middle():\n\n # Arrange\n scanner = MarkdownScanner()\n source_path = os.path.join(\n \"test\",\n \"resources\",\n \"rules\",\n \"md013\",\n \"bad_paragraph_with_long_line_in_middle.md\",\n )\n supplied_arguments = [\n \"scan\",\n source_path,\n ]\n\n expected_return_code = 1\n expected_output = (\n f\"{source_path}:3:1: \"\n + \"MD013: Line length \"\n + \"[Expected: 80, Actual: 91] (line-length)\"\n )\n expected_error = \"\"\n\n # Act\n execute_results = scanner.invoke_main(arguments=supplied_arguments)\n\n # Assert\n execute_results.assert_results(\n expected_output, expected_error, expected_return_code\n )", "title": "" }, { "docid": "be795e4a0ecf2bb82edc8238b3352af3", "score": "0.5605881", "text": "def test_get_error_definition() -> None:\n incident_auth_1000 = {\n \"errorCode\": \"1000\",\n \"microservice\": \"niomon-auth\"\n }\n error_definition_auth_1000 = {\n \"meaning\": \"Authentication error: request malformed. Possible missing header and parameters.\",\n \"severity\": LOW_SEVERITY,\n \"type\": AUTHENTICATION_TYPE\n }\n incident_decoder_1300 = {\n \"errorCode\": \"1300\",\n \"microservice\": \"niomon-decoder\"\n }\n error_definition_decoder_1300 = {\n \"meaning\": \"Invalid verification: signature verification failed. 
No public key or integrity is \"\n \"compromised.\",\n \"severity\": HIGH_SEVERITY,\n \"type\": AUTHENTICITY_TYPE\n }\n incident_enricher_0000 = {\n \"errorCode\": \"0000\",\n \"microservice\": \"niomon-enricher\"\n }\n error_definition_enricher_0000 = {\n \"meaning\": \"Tenant error: the owner of the device does not exist or cannot be acquired (3rd Party).\",\n \"severity\": HIGH_SEVERITY,\n \"type\": AUTHENTICITY_TYPE\n }\n incident_filter_0000 = {\n \"errorCode\": \"0000\",\n \"microservice\": \"filter-service\"\n }\n error_definition_filter_0000 = {\n \"meaning\": \"Integrity violation: duplicate hash detected. Possible injection, reply attack, \"\n \"or hash collision. \",\n \"severity\": HIGH_SEVERITY,\n \"type\": SEQUENCE_TYPE\n }\n error_definition_unknown = {}\n incident_unknown1 = {}\n incident_unknown2 = {\n \"errorCode\": \"1000\",\n \"microservice\": \"niomon\"\n }\n incident_unknown3 = {\n \"errorCode\": \"5000\",\n \"microservice\": \"niomon-auth\"\n }\n\n assert get_error_definition(incident_auth_1000) == error_definition_auth_1000\n assert get_error_definition(incident_decoder_1300) == error_definition_decoder_1300\n assert get_error_definition(incident_enricher_0000) == error_definition_enricher_0000\n assert get_error_definition(incident_filter_0000) == error_definition_filter_0000\n assert get_error_definition(incident_unknown1) == error_definition_unknown\n assert get_error_definition(incident_unknown2) == error_definition_unknown\n assert get_error_definition(incident_unknown3) == error_definition_unknown", "title": "" }, { "docid": "990b5f8908392d192869245e4b38123f", "score": "0.5605075", "text": "def test_convert_str_to_integer():\n with pytest.raises(ValueError):\n convert_str_to_integer(input_num=\"Input a string\")", "title": "" }, { "docid": "8a1f4d81c047d414a87d16671de93aaa", "score": "0.55981886", "text": "def test_badheightvaluefuncs(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Rectangle(1, print(), 1, 2, 3)\n self.assertEqual(str(e.exception), 'height must be an integer')", "title": "" }, { "docid": "44f09d2018752e65612cfd189104e1ac", "score": "0.5593231", "text": "def test_md013_bad_configuration_strict_mode():\n\n # Arrange\n scanner = MarkdownScanner()\n source_path = os.path.join(\n \"test\", \"resources\", \"rules\", \"md013\", \"good_small_line.md\"\n )\n supplied_arguments = [\n \"--set\",\n \"plugins.md013.strict=not-integer\",\n \"--strict-config\",\n \"scan\",\n source_path,\n ]\n\n expected_return_code = 1\n expected_output = \"\"\n expected_error = (\n \"BadPluginError encountered while configuring plugins:\\n\"\n + \"The value for property 'plugins.md013.strict' must be of type 'bool'.\"\n )\n\n # Act\n execute_results = scanner.invoke_main(arguments=supplied_arguments)\n\n # Assert\n execute_results.assert_results(\n expected_output, expected_error, expected_return_code\n )", "title": "" }, { "docid": "bf886a0eafb85133851c5b974f8c2042", "score": "0.5588463", "text": "def test_badheightvaluefloats(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Rectangle(1, float(1), 1, 2, 3)\n self.assertEqual(str(e.exception), 'height must be an integer')", "title": "" }, { "docid": "2cfc32e0a7b2b7da1e8054fc2801bfab", "score": "0.5582643", "text": "def test_abnormal_indent_count(self):\n # The testline has indentation count of 3, 2 is expected\n testline = ' asd'\n # Check that the appropriate exception is raised\n self.assertRaises(IndentationException,\n indentation_converter, 
testline, ' ', 2, 4)", "title": "" }, { "docid": "93c6a23dc893e3b2fbab565f7ea02017", "score": "0.5576621", "text": "def test_check_hp_throws_exception(self):\n num_check({'print_log': False, 'circuit': 1, 'loss': 1, 'task': 'optimization', 'init_circuit_params': 1})\n # No task key\n with self.assertRaises(ValueError):\n num_check({'circuit': 1, 'loss': 1, 'init_circuit_params': [{}]})\n with self.assertRaises(ValueError):\n tf_check({'circuit': 1, 'loss': 1})\n # No loss key\n with self.assertRaises(ValueError):\n num_check({'circuit': 1, 'task': 'optimization', 'init_circuit_params': [{}]})\n with self.assertRaises(ValueError):\n tf_check({'circuit': 1, 'task': 'optimization'})\n # No circuit key\n with self.assertRaises(ValueError):\n num_check({'loss': 1, 'task': 'optimization', 'init_circuit_params': [{}]})\n with self.assertRaises(ValueError):\n tf_check({'loss': 1, 'task': 'optimization'})\n # No init param key\n with self.assertRaises(ValueError):\n num_check({'circuit': 1, 'loss': 1, 'task': 'optimization'})\n # Wrong task\n with self.assertRaises(ValueError):\n num_check({'circuit': 1, 'loss': 1, 'task': self.string, 'init_circuit_params': 1.1})\n with self.assertRaises(ValueError):\n tf_check({'circuit': 1, 'loss': 1, 'task': self.string, 'optimizer': 'sthfunny'})\n # Wrong init params\n with self.assertRaises(ValueError):\n num_check({'circuit': 1, 'loss': 1, 'task': 'optimization', 'init_circuit_params': 1.1})\n # Unrecognized key\n with self.assertRaises(ValueError):\n num_check({'circuit': 1, 'loss': 1, 'task': 'optimization', 'init_circuit_params': [{}], 'funny_key': 1.})\n with self.assertRaises(ValueError):\n tf_check({'circuit': 1, 'loss': 1, 'task': 'optimization', 'funny_key': 1.})\n # Unrecognized optimizer\n with self.assertRaises(ValueError):\n num_check({'circuit': 1, 'loss': 1, 'task': 'optimization', 'init_circuit_params': [{}], 'optimizer': 'sthfunny'})\n with self.assertRaises(ValueError):\n tf_check({'circuit': 1, 'loss': 1, 'task': 'optimization', 'optimizer': 'sthfunny'})", "title": "" }, { "docid": "e4cadc132abe59412a4dc31af92048b3", "score": "0.55694926", "text": "def test_numSectionsInvalid(self):\n original_raw_input = __builtins__.raw_input\n __builtins__.raw_input = lambda _: 'wrong'\n printed = StringIO.StringIO()\n with self.assertRaises(SystemExit):\n sys.stdout = printed\n tmp = FinalCalculator.FinalCalculator()\n tmp.promptNumSections()\n sys.stdout = sys.__stdout\n onlyOnePrint = printed.getvalue().split('\\n')[0]\n self.assertEquals('Sorry your answer is not valid. Please enter a ' +\n 'number.', onlyOnePrint)\n __builtins__.raw_input = original_raw_input", "title": "" }, { "docid": "a2fcc3568686b93f2f2259f82beb2b71", "score": "0.5560136", "text": "def test_width_0(self):\n with self.assertRaises(ValueError):\n r1 = Rectangle(0, 3)", "title": "" } ]
ce2fce563527b8e094866bde874c343f
Check Data Objects for Points.
[ { "docid": "e2ebabb16e6e7c6eb007db609116141c", "score": "0.0", "text": "def checkData(file_list):\n for f in file_list:\n f_sp = f[\"syntenic_points\"]\n sp1 = f_sp.keys()[0]\n sp2 = f_sp[sp1].keys()[0]\n data = f_sp[sp1][sp2]\n if len(data) < 1:\n return False\n return True", "title": "" } ]
[ { "docid": "08399fe9669baae6573686b86cd56247", "score": "0.6803532", "text": "def check(self, points):\n pts=[tuple(p) for p in points]\n if sorted(list(set(pts))) != sorted(pts):\n raise Exception(\"No point must be upon another.\")", "title": "" }, { "docid": "cb5b7fc1018125fad1948d0f73f8e9bd", "score": "0.679041", "text": "def test_points(self, obj_test_geo):\n groups = obj_test_geo.pointGroups()\n\n geo = hou.Geometry()\n\n pt1 = geo.createPoint()\n pt2 = geo.createPoint()\n pt3 = geo.createPoint()\n\n houdini_toolbox.inline.api.copy_group_membership(\n obj_test_geo.iterPoints()[2], groups, pt1\n )\n houdini_toolbox.inline.api.copy_group_membership(\n obj_test_geo.iterPoints()[8], groups, pt2\n )\n houdini_toolbox.inline.api.copy_group_membership(\n obj_test_geo.iterPoints()[10], groups, pt3\n )\n\n # Ensure all the groups got copied right.\n assert len(geo.pointGroups()) == len(groups)\n\n group1 = geo.findPointGroup(\"point_group1\")\n group2 = geo.findPointGroup(\"point_group2\")\n\n assert pt1 in group1.points()\n assert pt1 not in group2.points()\n\n assert pt2 in group1.points()\n assert pt2 in group2.points()\n\n assert pt3 in group2.points()\n assert pt3 not in group1.points()", "title": "" }, { "docid": "e7c444a09b34748d8bd06022be83010c", "score": "0.67573476", "text": "def _check_points(points):\n if len(points) < 3:\n raise ValueError('\"points\" must be a list of 3 or more points')", "title": "" }, { "docid": "16963d44496c22b3f8da4a8c85533491", "score": "0.66913044", "text": "def check_points_in_manifold(self):\n all_points_ok, outside_point, reason = self.model.check_all_points()\n if not all_points_ok:\n raise AssertionError(f\"Point outside manifold. Reason: {reason}\\n{outside_point}\")", "title": "" }, { "docid": "f053825f825b4b893dbd5cb6f6ac9f7e", "score": "0.6668135", "text": "def test_points(self, obj_test_geo):\n groups = obj_test_geo.pointGroups()\n\n geo = hou.Geometry()\n\n pt1 = geo.createPoint()\n pt2 = geo.createPoint()\n pt3 = geo.createPoint()\n\n houdini_toolbox.inline.api.batch_copy_group_membership(\n obj_test_geo.globPoints(\"2 8 10\"), groups, geo.points()\n )\n\n # Ensure all the groups got copied right.\n assert len(geo.pointGroups()) == len(groups)\n\n group1 = geo.findPointGroup(\"point_group1\")\n group2 = geo.findPointGroup(\"point_group2\")\n\n assert pt1 in group1.points()\n assert pt1 not in group2.points()\n\n assert pt2 in group1.points()\n assert pt2 in group2.points()\n\n assert pt3 in group2.points()\n assert pt3 not in group1.points()", "title": "" }, { "docid": "43383452e1fb48c9df86fef5a0ebf636", "score": "0.6624522", "text": "def test_points(self, obj_test_geo):\n groups = obj_test_geo.pointGroups()\n\n geo = hou.Geometry()\n\n pt1 = geo.createPoint()\n pt2 = geo.createPoint()\n pt3 = geo.createPoint()\n\n houdini_toolbox.inline.api.batch_copy_group_membership_by_indices(\n obj_test_geo, hou.Point, [2, 8, 10], groups, geo, hou.Point, [0, 2, 1]\n )\n\n # Ensure all the groups got copied right.\n assert len(geo.pointGroups()) == len(groups)\n\n group1 = geo.findPointGroup(\"point_group1\")\n group2 = geo.findPointGroup(\"point_group2\")\n\n assert pt1 in group1.points()\n assert pt1 not in group2.points()\n\n assert pt3 in group1.points()\n assert pt3 in group2.points()\n\n assert pt2 in group2.points()\n assert pt2 not in group1.points()", "title": "" }, { "docid": "7ad6266c107583106867e166dfa77fe6", "score": "0.6607684", "text": "def test_points(self, obj_test_geo):\n attribs = obj_test_geo.pointAttribs()\n\n geo = hou.Geometry()\n\n 
pt1 = geo.createPoint()\n pt2 = geo.createPoint()\n\n houdini_toolbox.inline.api.copy_attribute_values(\n obj_test_geo.iterPoints()[2], attribs, pt1\n )\n houdini_toolbox.inline.api.copy_attribute_values(\n obj_test_geo.iterPoints()[6], attribs, pt2\n )\n\n # Ensure all the attributes got copied right.\n assert len(geo.pointAttribs()) == len(attribs)\n\n # Ensure P got copied right.\n assert pt1.position().isAlmostEqual(hou.Vector3(1.66667, 0, -5))\n assert pt2.position().isAlmostEqual(hou.Vector3(1.66667, 0, -1.66667))", "title": "" }, { "docid": "5eeb879079228bcb34dc7d5c362f2b1d", "score": "0.65386766", "text": "def _check(self):\n if self.keywords is not None:\n assert isinstance(self.keywords, kws.Keywords)\n\n # Ensure the point charges are given as a list of PointCharge objects\n if self.point_charges is not None:\n assert type(self.point_charges) is list\n assert all(type(pc) is PointCharge for pc in self.point_charges)\n\n if self.added_internals is not None:\n assert type(self.added_internals) is list\n assert all(len(idxs) == 2 for idxs in self.added_internals)", "title": "" }, { "docid": "fcb19a6b1f8dc3abb910756673f12a6a", "score": "0.65188396", "text": "def test_isodose_points(self):\n points = [(106, 20), (108, 20), (110, 20)]\n self.assertEqual(self.dp.GetIsodosePoints()[0:3], points)", "title": "" }, { "docid": "c967e063c3e3a7f7d7422e20ff006b70", "score": "0.6511492", "text": "def is_point(self):\n return True", "title": "" }, { "docid": "9e83a26390373e99124aee7b00e37c87", "score": "0.6496503", "text": "def is_point_data(columns):\n\n result = False\n\n # Check for point data which will contain this in the data not the header\n if columns is not None and ('latitude' in columns or 'easting' in columns):\n result = True\n\n return result", "title": "" }, { "docid": "2f21675aced404efec709260a8413b06", "score": "0.6454537", "text": "def validate(self):\n valid_data_points = len(self.get_data()[COM_NAME]) - self.get_gen_s_count()", "title": "" }, { "docid": "db8a5cb550c9677c673663e54d2c9ce5", "score": "0.6438437", "text": "def validate_points(self, image_dimension):\n # Convert rows, cols, depth to x, y, z\n xyz_dimension = np.copy(image_dimension)\n xyz_dimension[0] = image_dimension[1]\n xyz_dimension[1] = image_dimension[0]\n\n tl_valid = point_within_dimensions(self.tl, xyz_dimension)\n tr_valid = point_within_dimensions(self.tr, xyz_dimension)\n br_valid = point_within_dimensions(self.br, xyz_dimension)\n bl_valid = point_within_dimensions(self.bl, xyz_dimension)\n\n return tl_valid and tr_valid and br_valid and bl_valid", "title": "" }, { "docid": "7cceddd3ea13362e61437a836389fdf5", "score": "0.6384226", "text": "def check_coordinates(self):\n\n overlap = self.data_coords.intersection(self.required_coords)\n assert len(overlap) == len(self.required_coords)", "title": "" }, { "docid": "1aea0be6635f1bb11b15c241b3fd2bab", "score": "0.6357407", "text": "def test_at_points(self):\n self.evaluate_inputs([\n # (start, end, point, dist)\n ((38, -76, 100), (38, -77, 100), (38, -76, 100), 0),\n ((38, -76, 100), (38, -77, 100), (38, -77, 100), 0),\n ((38, -76, 100), (38, -77, 100), (38, -77, 0), 100),\n ((38, -76, 100), (38, -77, 100), (38, -77, 200), 100),\n ]) # yapf: disable", "title": "" }, { "docid": "bc32482cbdf762d1974fa6fbcf4c72d8", "score": "0.6341421", "text": "def checkPoint(self, program: ghidra.program.model.listing.Program) -> None:\n ...", "title": "" }, { "docid": "92c749d1a318519a7056ae6290593fd5", "score": "0.6337926", "text": "def check(self):\n if (self.data is not 
None):\n if (type(self.data) is not np.ndarray):\n if (type(self.data) is np.float64):\n self.data = np.asarray(self.data)\n else:\n raise TypeError(\"Wrong data array. DataObject.data should be numpy.ndarray.\")\n if (self.data.shape != self.shape):\n raise ValueError(\"DataObject.data shape is not the same as DataObject.shape.\")\n else:\n if (self.shape is None):\n raise ValueError(\"DataObject.shape is none. Even if no data is present shape should be set otherwise coordinates cannot be determined.\") \n if (self.error is not None):\n if (type(self.error) is list):\n if (len(self.error) != 2):\n raise ValueError(\"Wrong number of elements in error. If DataObject.error is a list it should have two elements.\" )\n error = self.error\n else:\n error = [self.error]\n for err in error:\n if (type(err) is not np.ndarray):\n if type(err is np.float64):\n self.error = np.asarray(self.error)\n else:\n raise TypeError(\"Wrong error array in DataObject. It should be numpy.ndarray.\")\n if (err.shape != self.shape):\n raise ValueError(\"Shape of error array un DataObject is different from data.\")\n if (self.coordinates is not None):\n for i,c in enumerate(self.coordinates):\n for j in range(i):\n if (id(self.coordinates[i]) == id(self.coordinates[j])):\n raise ValueError(\"ID of coordinates '{:s}' and '{:s}' are identical.\".\\\n format(self.coordinates[i].unit.name, self.coordinates[j].unit.name))\n if (id(self.coordinates[i].mode) == id(self.coordinates[j].mode)):\n raise ValueError(\"ID of coordinate.mode for '{:s}' and '{:s}' are identical.\".\\\n format(self.coordinates[i].unit.name, self.coordinates[j].unit.name))\n if (type(c.dimension_list) is not list):\n raise TypeError(\"Wrong type for dimension list for coordinate '{:s}'.\".format(c.unit.name))\n if (len(c.dimension_list) > len(self.shape)):\n raise TypeError(\"Too long dimension list for coordinate '{:s}'.\".format(c.unit.name)) \n for d in c.dimension_list:\n if (d is None):\n raise ValueError(\"Null in dimension list in coordinate '{:s}'.\".format(c.unit.name)) \n if (d >= len(self.shape)):\n raise TypeError(\"Wrong dimension number in coordinate '{:s}'.\".format(c.unit.name)) \n if (type(c.unit) is not flap.Unit):\n raise TypeError(\"Wrong coordinate unit in coordinate #{:d}. Should be flap.Unit().\".format(i))\n if (type(c.mode) is not flap.coordinate.CoordinateMode):\n raise TypeError(\"Wrong coordinate mode type in '{:s}'. 
Should be flap.CoordinateMode().\".format(c.unit.name))\n if (c.mode.equidistant):\n if (c.start is None):\n raise TypeError(\"No start for equdistant coordinate '{:s}'.\".format(c.unit.name))\n if (type(c.start ) is str):\n raise TypeError(\"Invalid start value type for equdistant coordinate '{:s}'.\".format(c.unit.name))\n if (len(c.step) != len(c.dimension_list)):\n raise ValueError(\"Number of step values is different from length of dimension_list in '{:s}'.\".format(c.unit.name))\n for cstep in c.step:\n if (cstep is None):\n raise TypeError(\"One step is None for equdistant coordinate '{:s}'.\".format(c.unit.name))\n if (type(cstep ) is str):\n raise TypeError(\"Invalid step type for equdistant coordinate '{:s}'.\".format(c.unit.name))\n try:\n cstep + c.start\n except:\n raise TypeError(\"Equidistant coordinate '{:s}' start and step should have same type.\".format(c.unit.name))\n if (c.value_ranges is not None):\n if (c.mode.range_symmetric):\n try:\n if (c.value_ranges * 0 != 0):\n raise TypeError(\"Invalid type for value_ranges in coordinate '{:s}'.\".format(c.unit.name))\n except:\n raise TypeError(\"Invalid type for value_ranges in coordinate '{:s}'.\".format(c.unit.name)) \n if (c.mode.equidistant):\n try:\n c.value_ranges + c.start\n except:\n raise TypeError(\"Incompatible value_range and start in coordinate {:s}.\".format(c.unit.name))\n else:\n try:\n c.value_ranges[0] + c.values[0]\n except:\n raise TypeError(\"Incompatible value_range and start in coordinate {:s}.\".format(c.unit.name))\n \n else:\n if (type(c.value_ranges) is not list):\n raise TypeError(\"Invalid type for value_ranges in asymmetric coordinate '{:s}'.\".format(c.unit.name))\n if (len(c.value_ranges) != 2):\n raise TypeError(\"Invalid list length for value_ranges in asymmetric coordinate '{:s}'.\".format(c.unit.name))\n for c_value_ranges in c.value_ranges:\n try:\n if (c_value_ranges * 0 != 0):\n raise TypeError(\"Invalid type for value_ranges in coordinate '{:s}'.\".format(c.unit.name))\n except:\n raise TypeError(\"Invalid type for value_ranges in coordinate '{:s}'.\".format(c.unit.name)) \n else:\n if (c.values is None):\n raise ValueError(\"No values in non-equidistant coordinate '{:s}'.\".format(c.unit.name))\n if (not c.non_interpol(self.shape)):\n raise ValueError(\"Coordinate value and data shape is inconsistent in coordinate '{:s}'.\".format(c.unit.name))", "title": "" }, { "docid": "46a2875230494277cda3a06f1155ae07", "score": "0.632204", "text": "def _check_points(self, points):\n top_left, top_right, bottom_left, bottom_right = points\n\n # top_left\n if not (top_left[0] < top_right[0] and top_left[1] < bottom_left[1]):\n return False\n\n # top_right\n if not (top_right[0] > top_left[0] and top_right[1] < bottom_right[1]):\n return False\n # bottom_left\n if not (bottom_left[0] < bottom_right[0] and bottom_left[1] > top_left[1]):\n return False\n\n # bottom_right\n if not (bottom_right[0] > bottom_left[0] and bottom_right[1] > top_right[1]):\n return False\n\n return True", "title": "" }, { "docid": "11fe7e157494ae4edbf923602e85f1c5", "score": "0.6318292", "text": "def GetPoints(self) -> None:\n ...", "title": "" }, { "docid": "924a8a6819169e8daa2e5946907b0b95", "score": "0.6299523", "text": "def testPointList(self):\n c = CQ(makeUnitCube())\n\n s = c.faces(\">Z\").workplane().pushPoints([(-0.3, 0.3), (0.3, 0.3), (0, 0)])\n self.assertEqual(3, s.size())\n # TODO: is the ability to iterate over points with circle really worth it?\n # maybe we should just require using all() and a loop for this. 
the semantics and\n # possible combinations got too hard ( ie, .circle().circle() ) was really odd\n body = s.circle(0.05).cutThruAll()\n self.saveModel(body)\n self.assertEqual(9, body.faces().size())\n\n # Test the case when using eachpoint with only a blank workplane\n def callback_fn(loc):\n self.assertEqual(\n Vector(0, 0, 0), Vector(loc.wrapped.Transformation().TranslationPart())\n )\n\n r = Workplane(\"XY\")\n r.objects = []\n r.eachpoint(callback_fn)", "title": "" }, { "docid": "5893ed8f9f575a7e769eeecdb9864a51", "score": "0.62992555", "text": "def checkis(cls, x):\n assert isinstance(x, cls), \"This is not point!!! It's {}\".format(x.__class__.__name__)", "title": "" }, { "docid": "7a0cf332571337baab10a45bb5ff9001", "score": "0.62874985", "text": "def __init__(self, data_x, data_y, point_type):\n if len(data_x) > 0:\n temp_data_x, temp_data_y = zip(*sorted(zip(data_x,data_y)))\n self.data_x = np.array(temp_data_x) \n self.data_y = np.array(temp_data_y) \n self.type = point_type \n else:\n raise EmptyPointsError", "title": "" }, { "docid": "59d25f056cd42eb12c2b8bc186d378d4", "score": "0.62223846", "text": "def test_not_enough_points(self, obj_test_geo_copy):\n with pytest.raises(hou.OperationFailed):\n houdini_toolbox.inline.api.sort_geometry_by_values(\n obj_test_geo_copy, hou.geometryType.Points, [1]\n )", "title": "" }, { "docid": "f94923e697f4a1e86e2eb7859324c9ef", "score": "0.61194813", "text": "def contains_point(self, point):\n raise NotImplementedError", "title": "" }, { "docid": "8cf5b77d93089553dae4b0625be3b834", "score": "0.6103514", "text": "def points(self):", "title": "" }, { "docid": "ebc66ee8e58fcf4a4542048780144502", "score": "0.6089749", "text": "def test_point_validity( generator, x, y, expected ):\r\n if point_is_valid( generator, x, y ) == expected:\r\n print_(\"Point validity tested as expected.\")\r\n else:\r\n raise TestFailure(\"*** Point validity test gave wrong result.\")", "title": "" }, { "docid": "6f3083133a226a24cdb445312fe4d4ba", "score": "0.6039151", "text": "def has_enough_points(self):\n REQ_POINTS = 30\n return self.points >= REQ_POINTS", "title": "" }, { "docid": "1fe7242fd5e1e0212adf45def9530a05", "score": "0.5983064", "text": "def solvedPoints(self):\n\n assert False, \"Not Yet Implemented\"", "title": "" }, { "docid": "03d675e86d547a37890f825c8fa72871", "score": "0.59804195", "text": "def _check_obs_data(self) -> None:\n # TODO: Check if data are available.", "title": "" }, { "docid": "1c7c5239498747d2bc29f379164bbe05", "score": "0.59777683", "text": "def create_points(data):\n for row in data:\n\n if row[\"x\"] and row[\"y\"]:\n try:\n row[\"geometry\"] = point.Point(float(row[\"x\"]), float(row[\"y\"]))\n except:\n row[\"geometry\"] = None\n else:\n row[\"geometry\"] = None\n\n return data", "title": "" }, { "docid": "13bcfada27eab51f0e253faf3762dd28", "score": "0.5958596", "text": "def contain_point(self, point):\n test_val = [point.x - self._point[0].x,\n point.y - self._point[0].y,\n point.z - self._point[0].z]\n if np.absolute(np.dot(test_val, self.normal)) < 1e-4:\n return True\n else:\n return False", "title": "" }, { "docid": "52774b83d678e93abf4fe17a6beb870e", "score": "0.5956586", "text": "def points_inside(self, xypoints, dlogT=0.1, dlogg=0.3):\n p = self.get_boundaries(dlogT=dlogT, dlogg=dlogg)\n return p.contains_points(xypoints)", "title": "" }, { "docid": "710059da29ba7463ca6c84159f2f0fc3", "score": "0.5951401", "text": "def contains_point(self, point: tuple) -> bool:\n if super().contains_point(point):\n return True\n\n for 
disk in self.disks:\n if disk.contains_point(point):\n return True\n\n return False", "title": "" }, { "docid": "d728324f849bd9a356367f5d1031bb4b", "score": "0.5921348", "text": "def _kpoint_check(input_set, inputs, calcs_reversed, data, kpts_tolerance):\n valid_num_kpts = input_set.kpoints.num_kpts or np.prod(input_set.kpoints.kpts[0])\n\n if calcs_reversed:\n input_dict = calcs_reversed[0].get(\"input\", {})\n\n if not input_dict:\n input_dict = inputs\n\n else:\n input_dict = inputs\n\n num_kpts = input_dict.get(\"kpoints\", {}).get(\"nkpoints\", 0) or np.prod(\n input_dict.get(\"kpoints\", {}).get(\"kpoints\", [1, 1, 1])\n )\n\n data[\"kpts_ratio\"] = num_kpts / valid_num_kpts\n return data[\"kpts_ratio\"] < kpts_tolerance", "title": "" }, { "docid": "c6646265bd3470bd86585b9000dbaab6", "score": "0.5917232", "text": "def validate_data(self):\r\n for data_point in self.data:\r\n if len(data_point) != len(self.data[0]):\r\n raise UnsuitableAttributesException", "title": "" }, { "docid": "6ecc288044ffdc3419b28eb3b9764eeb", "score": "0.5897125", "text": "def point_is_valid( generator, x, y ):\r\n\r\n # These are the tests specified in X9.62.\r\n\r\n n = generator.order()\r\n curve = generator.curve()\r\n if x < 0 or n <= x or y < 0 or n <= y:\r\n return False\r\n if not curve.contains_point( x, y ):\r\n return False\r\n if not n*ellipticcurve.Point( curve, x, y ) == \\\r\n ellipticcurve.INFINITY:\r\n return False\r\n return True", "title": "" }, { "docid": "fa2c7699be7e1ee1770cbe7ba38204da", "score": "0.58754766", "text": "def check_point_correct(point):\n assert isinstance(point, Point), '{} is not a Point'.format(point)\n\n if point.x < 0 or point.y < 0:\n return False\n\n # TODO: Grab constrains from the Board\n if point.x > 7 or point.y > 9:\n return False\n\n return True", "title": "" }, { "docid": "4643247c78797b71b77fac46cdd5fe86", "score": "0.5869564", "text": "def test_point_inside_tesseroid(self, points):\n points = np.atleast_2d(points).T\n tesseroid = np.atleast_2d([-10, 10, -10, 10, 100, 200])\n with pytest.raises(ValueError):\n check_points_outside_tesseroids(points, tesseroid)", "title": "" }, { "docid": "e90452c4776a214924ffc61277789fb0", "score": "0.5868298", "text": "def test_properties(self):\n tests = [\n (Point(0, 0), 0, 0),\n (Point(1, 2), 1, 2),\n ]\n for i, (point, want_x, want_y) in enumerate(tests):\n have_x, have_y = point.x, point.y\n self.assertEqual(have_x, want_x, f'Test {i}: Have = {have_x}, Want = {want_x}.')\n self.assertEqual(have_y, want_y, f'Test {i}: Have = {have_y}, Want = {want_y}.')", "title": "" }, { "docid": "ba1d23bf10846e2419bb778745633fb1", "score": "0.5848609", "text": "def test_has_ungrouped(self):\n geo = hou.Geometry()\n geo.createPoint()\n\n assert houdini_toolbox.inline.api.geometry_has_ungrouped_points(geo)", "title": "" }, { "docid": "85ba831a4a38bf2afc8039ffd782a419", "score": "0.58452713", "text": "def test_points(self):\n m = g.get_mesh('points_ascii.ply')\n assert isinstance(m, g.trimesh.PointCloud)\n assert m.vertices.shape == (5, 3)\n\n m = g.get_mesh('points_bin.ply')\n assert m.vertices.shape == (5, 3)\n assert isinstance(m, g.trimesh.PointCloud)", "title": "" }, { "docid": "4284a701f0fb17d6399a48ce3f1f0b09", "score": "0.5841885", "text": "def check_point(self, x, y):\n\t\treturn self.x1 <= x <= self.x2 and self.y1 <= y <= self.y2", "title": "" }, { "docid": "fd22fd85ff5d78b898f066f7aa19b3c3", "score": "0.5832263", "text": "def in_domain(self, points):\n pass", "title": "" }, { "docid": "7300d9854e4edf58090692b841e13e46", 
"score": "0.5827248", "text": "def test_coordinates(self):\n\t\tfor i in range(len(self.vertices)):\n\t\t\tself.assertEqual(self.vertices[i].x, i)\n\t\t\tself.assertEqual(self.vertices[i].y, i)\n\t\t\tself.assertEqual(self.vertices[i].z, i)", "title": "" }, { "docid": "db2f9137387fbd62b1fcfa51a5a77f33", "score": "0.58250374", "text": "def test_specifying_points(self):\n result, _ = np.array([[1, 4],\n [2, 8],\n [3, 12]]).T\n expect = np.array([1, 2, 3])\n np.testing.assert_array_almost_equal(expect, result)", "title": "" }, { "docid": "9a805d96603c4f13589787f71c0014ed", "score": "0.5823931", "text": "def check_points(option, maps=None, out_put=None, target_id=None, extra=None):\n spl_opt = option.split(\",\")\n points = ActPharmaPoint.objects.filter(target_id=target_id, in_diff_15=True, num_diff_15__gte=spl_opt[0], num_diff_15__lte=spl_opt[1], act_change__gte=spl_opt[2], act_change__lte=spl_opt[3])\n return len(points)", "title": "" }, { "docid": "abd8204543146348f140ff3aa3bf2cee", "score": "0.5819299", "text": "def contains_point(self, point: tuple[int, int]) -> bool:\n x, y = point\n width, height = self\n return width > x >= 0 and height > y >= 0", "title": "" }, { "docid": "97571b34457a94dd4c608fe8ce3293a0", "score": "0.58160806", "text": "def check_position(self,point):\n # inside the contour\n in_contour = False\n for wall in gc[\"walls\"]:\n in_contour = in_contour or self.is_inside(wall.vertices,point)\n # particle must be located outside the destination vertices\n in_dests = True\n for dest in gc[\"destinations\"]:\n in_dests = in_dests and not(self.is_inside(dest.vertices,point))\n in_obst = True\n for obst in gc[\"obstacles\"]:\n in_obst = in_obst and not(self.is_inside_circle(obst.center,obst.r,point))\n return in_contour and in_obst #and in_dests", "title": "" }, { "docid": "1ed34c207e346ec372c23bb9616e0135", "score": "0.581167", "text": "def test_coords_nonexistent(self):\n obj = ongc.Dso('IC1064')\n assert obj.coords is None", "title": "" }, { "docid": "e26410c35b4d8877baee16a1db8d24b1", "score": "0.5811052", "text": "def test_raster_values_at_points(self):\n # These point values were manually extracted (to within the reported\n # decimal places) in GIS\n raster_file = './test_data/Res_500_initial_stage.tif'\n point_values = [[445496, 9387281, 0.96923],\n [352454, 9389770, -0.02730],\n [344754, 9401707, numpy.nan]]\n point_values = numpy.array(point_values)\n\n extracted_values = bathtub.raster_values_at_points(\n point_values[:, 0:2], raster_file)\n\n assert abs(extracted_values[0] - point_values[0, 2]) < 1.0e-04\n assert abs(extracted_values[1] - point_values[1, 2]) < 1.0e-04\n # To check for nan, note that nan != nan\n assert extracted_values[2] != extracted_values[2]\n\n return", "title": "" }, { "docid": "3e1fdd7d209bc5bfb881453003a5c460", "score": "0.580888", "text": "def validatePoint(value, errorMessage):\n if not isinstance(value, Point):\n raise ShapeException(errorMessage)\n Validator.validateDouble(value.x, \"Invalid x-location\")\n Validator.validateDouble(value.y, \"Invalid y-location\")", "title": "" }, { "docid": "aa1c4fcff0d37b9850a0bce4582f39b3", "score": "0.58024406", "text": "def check_mount_point(self, location):\n if len(self.settings['Points'][location]) > 0:\n return True\n else:\n return False", "title": "" }, { "docid": "90e490ecbd15ee70317d044b4e461467", "score": "0.5802363", "text": "def contains(self, point):\n raise NotImplementedError", "title": "" }, { "docid": "ee0342469d0655ab8e9d24d2cc2f5b60", "score": "0.57996285", "text": "def 
test_multiple_points_and_tesseroids(self):\n tesseroids = np.atleast_2d(\n [\n [-10, 10, -10, 10, 100, 200],\n [20, 30, 20, 30, 400, 500],\n [-50, -40, -30, -20, 100, 500],\n ]\n )\n points = np.array(\n [\n [0, 0, 150],\n [80, 82, 4000],\n [10, 10, 450],\n ]\n ).T\n with pytest.raises(ValueError):\n check_points_outside_tesseroids(points, tesseroids)", "title": "" }, { "docid": "779c9d8cde00929a6fa490bafc08fb5e", "score": "0.57873183", "text": "def points(self, obj):\n return obj.profile.points()", "title": "" }, { "docid": "26a232afe52b91b4b8d7d138e9a8e556", "score": "0.5779003", "text": "def check_datapoint_inside(\n data_point, df_rules, numerical_cols, categorical_cols, check_opposite=True\n):\n df_plot = df_rules.copy()\n\n if len(df_rules) == 0:\n df_plot[\"check\"] = 0\n return df_plot\n # Default value\n df_plot[\"check\"] = 1\n\n # Check for categorical\n if len(categorical_cols) > 0:\n for col in categorical_cols:\n value = data_point[col]\n df_plot[\"check\"] = df_plot[\"check\"] * (\n df_plot.apply(lambda x: 1 if (x[col] == value) else 0, axis=1)\n )\n # Check for numerical\n if len(numerical_cols) > 0:\n for col in numerical_cols:\n value = data_point[col]\n if check_opposite:\n df_plot[\"check\"] = df_plot[\"check\"] * (\n df_plot.apply(\n lambda x: 1\n if ((x[col + \"_max\"] >= value) & (value >= x[col + \"_min\"]))\n else 0,\n axis=1,\n )\n )\n else:\n df_plot[\"check\"] = df_plot[\"check\"] * (\n df_plot.apply(\n lambda x: 1\n if ((x[col + \"_max\"] > value) & (value > x[col + \"_min\"]))\n else 0,\n axis=1,\n )\n )\n return df_plot", "title": "" }, { "docid": "edd5b9da55c48032c36107b7166f6c3e", "score": "0.57760215", "text": "def testDatasetToSample(self):\n \n meanSample = self.model.DrawMean()\n meanSampleNew = self.model.DatasetToSample(meanSample)\n num_pts = meanSample.GetNumberOfPoints()\n pt_ids = xrange(0, num_pts, num_pts / 10) \n \n for ptId in pt_ids:\n mp = meanSample.GetPoints().GetPoint(ptId)\n mpn = meanSampleNew.GetPoints().GetPoint(ptId)\n self.assertTrue((mp[0] == mpn[0]) and (mp[1] == mpn[1]) and (mp[2] == mpn[2]))", "title": "" }, { "docid": "7ab76e5ed2d5d115f466285d82c25e55", "score": "0.5772203", "text": "def FindPointsInArea(self, *float, **kwargs):\n ...", "title": "" }, { "docid": "b957a56d77ea64ea82568149d965a813", "score": "0.5771947", "text": "def _gather_points(self):\n raise NotImplementedError", "title": "" }, { "docid": "995489569c43a58c4cc8b0bbc5ec3e24", "score": "0.57654387", "text": "def __eq__(self, point):\n return self.x == point.x and self.y == point.y", "title": "" }, { "docid": "218f2977044dad9244b81cdfd9c872f4", "score": "0.57629913", "text": "def test_agents(self):\n _ = pair_xlate.PairXlates(PAIRS)\n result = _.datapoints()\n\n # We should get two DataPoint objects\n self.assertEqual(len(result), 3)\n for item in result:\n self.assertIsInstance(item, pair_xlate.PairXlate)", "title": "" }, { "docid": "d26f031e2e223808ed9a603acac30174", "score": "0.576094", "text": "def has_attributes(dataset):\n pd = dataset.point_data\n if pd is not None and pd.number_of_arrays > 0:\n return True\n cd = dataset.cell_data\n if cd is not None and cd.number_of_arrays > 0:\n return True\n return False", "title": "" }, { "docid": "53c944d2de45e21e169ddb2e98c87a93", "score": "0.5755706", "text": "def test_is_point_on_line(self):\n lp1 = (0, 0)\n lp2 = (10, 10)\n point = (5, 5)\n self.assertTrue(self.figure._is_point_on_line(lp1, lp2, point))", "title": "" }, { "docid": "a149676bcd958cf5c89c9bc0d7dafde4", "score": "0.57377684", "text": "def 
test_beyond_points(self):\n self.evaluate_inputs([\n (\n (38.145148, -76.427645, 100), # start\n (38.145144, -76.426400, 100), # end\n (38.145165, -76.427923, 100), # point\n 80, # dist\n ),\n (\n (38.145148, -76.427645, 100), # start\n (38.145144, -76.426400, 100), # end\n (38.145591, -76.426127, 200), # point\n 207, # dist\n ),\n ]) # yapf: disable", "title": "" }, { "docid": "3344872ff93fa7c264af3cb12d824d7d", "score": "0.57319003", "text": "def contains_point(\n self, points: np.ndarray, *, coords: str = \"cartesian\", wrap: bool = True\n ) -> np.ndarray:\n cell_coords = self.transform(points, source=coords, target=\"cell\")\n return np.all((0 <= cell_coords) & (cell_coords < self.shape), axis=-1) # type: ignore", "title": "" }, { "docid": "f380046b5863d5725f6ba0d6881c36f0", "score": "0.57303244", "text": "def test_point_group(self, obj_test_geo_copy):\n values = obj_test_geo_copy.globPoints(\n \" \".join([str(val) for val in range(1, 100, 2)])\n )\n\n group = obj_test_geo_copy.pointGroups()[0]\n houdini_toolbox.inline.api.toggle_group_entries(group)\n\n assert group.points() == values", "title": "" }, { "docid": "9b0288444d0ed3becf28caf0e6cfe96a", "score": "0.57149315", "text": "def test_copy_points(self, obj_test_geo):\n attribs = obj_test_geo.pointAttribs()\n\n geo = hou.Geometry()\n\n pt1 = geo.createPoint()\n pt2 = geo.createPoint()\n\n houdini_toolbox.inline.api.batch_copy_attributes_by_indices(\n obj_test_geo, hou.Point, [2, 6], attribs, geo, hou.Point, [0, 1]\n )\n\n # Ensure all the attributes got copied right.\n assert len(geo.pointAttribs()) == len(attribs)\n\n # Ensure P got copied right.\n assert pt1.position().isAlmostEqual(hou.Vector3(1.66667, 0, -5))\n assert pt2.position().isAlmostEqual(hou.Vector3(1.66667, 0, -1.66667))", "title": "" }, { "docid": "aa5dd34c82096544be7e457801704943", "score": "0.571072", "text": "def in_domain(self, points):\n if np.all(np.isreal(points)):\n try:\n are_integer = np.all(np.mod(points, 1) == 0)\n except TypeError:\n are_integer = False\n are_greater = np.all(np.greater_equal(points, self._min))\n are_smaller = np.all(np.less_equal(points, self._max))\n return are_integer and are_greater and are_smaller\n else:\n return False", "title": "" }, { "docid": "6e599de11e53567c523ee59cc0e8cb2a", "score": "0.5705428", "text": "def __contains__(self, point):\n\n # ? 
readability: len(self) => self.dimension()\n assert (\n len(point) == self.dimension()\n ) ##Test if the point has the same dimension\n return la.norm(self.center - point) <= self.radius", "title": "" }, { "docid": "a1d7cb107bcbccdd68830d3c2116bc3e", "score": "0.5703225", "text": "def points(self):\n return 1", "title": "" }, { "docid": "78c8e3bd88266542e4416f7ed7a4c9aa", "score": "0.56861097", "text": "def test_calc_data(self):\n calc_data = self.datapoint.calc_data\n self.assertIsInstance(calc_data, float)", "title": "" }, { "docid": "37b24e704f7b3dbb387cdeb51c0c3716", "score": "0.56835234", "text": "def check_fields(phtFile):\n requiredPointData = {\n 'dwal': 'distance to wall',\n 'u': 'vector of velocity components',\n 'p': 'pressure',\n 'T': 'temperature'\n }\n if not all(key in phtFile.PointData.keys() for key in requiredPointData.keys()):\n print 'error; required fields are...'\n l = len(max(requiredPointData.keys(), key=len))\n for key, val in requiredPointData.iteritems():\n print ' ' + key.rjust(l), '=', val\n err('ParaView file does not contain all of the required requisite point data fields (see above)')", "title": "" }, { "docid": "cbb173be65dfbb69697ea7a23cf9f370", "score": "0.5678564", "text": "def test_copy_points(self, obj_test_geo):\n attribs = obj_test_geo.pointAttribs()\n\n geo = hou.Geometry()\n\n pt1 = geo.createPoint()\n pt2 = geo.createPoint()\n\n houdini_toolbox.inline.api.batch_copy_attrib_values(\n [obj_test_geo.iterPoints()[2], obj_test_geo.iterPoints()[6]],\n attribs,\n [pt1, pt2],\n )\n\n # Ensure all the attributes got copied right.\n assert len(geo.pointAttribs()) == len(attribs)\n\n # Ensure P got copied right.\n assert pt1.position().isAlmostEqual(hou.Vector3(1.66667, 0, -5))\n assert pt2.position().isAlmostEqual(hou.Vector3(1.66667, 0, -1.66667))", "title": "" }, { "docid": "3165c376f6b7be3dc49c44e1f4c3aac6", "score": "0.56752574", "text": "def contains(self, point: QPointF) -> bool:\n\n if self.shape_type in ['rectangle', 'polygon']:\n return self.vertices.vertices.containsPoint(point, Qt.OddEvenFill)\n\n elif self.shape_type in ['circle']:\n # elliptic formula is (x²/a² + y²/b² = 1) so if the point fulfills the equation respectively\n # is smaller than 1, the points is inside\n rect = self.boundingRect()\n centerpoint = rect.center()\n a = rect.width()/2\n b = rect.height()/2\n value = (point.x()-centerpoint.x()) ** 2 / a ** 2 + (point.y() - centerpoint.y()) ** 2 / b ** 2\n if value <= 1:\n return True\n else:\n return False", "title": "" }, { "docid": "858a9a04b4a5e0a29fc83e3dcbeb641b", "score": "0.56730825", "text": "def test_point():\n \n # validating instance creation without parameters\n point = func.Point()\n assert point.get_x() == 0\n assert point.get_y() == 0\n\n # validating instance creation with parameters\n point = func.Point(5, 6)\n assert point.get_x() == 5\n assert point.get_y() == 6\n\n # validate setting x/y\n point.set_coordinate(2, 4)\n assert point.get_x() == 2\n assert point.get_y() == 4", "title": "" }, { "docid": "b455868d97a65a175d9468020b926146", "score": "0.5669182", "text": "def _validate_point(self, point: Point) -> None:\n if isinstance(point, (tuple, list)):\n point = Point(*point)\n if (point.x < 0 and not self.negative_x) or (point.y < 0 and not self.negative_y):\n raise IndexError(\"Illegal negative index value supplied!\")\n if not -self.height_x <= point.x <= self.height_x or not -self.height_y <= point.y <= self.height_y:\n raise IndexError(\"Index value out of bounds!\")\n return point", "title": "" }, { "docid": 
"81d45a8065aa8dcc37c04032c5939e51", "score": "0.5666721", "text": "def _return_points(self):\n return", "title": "" }, { "docid": "3a2b7c092ac33cf4371335b238562bf2", "score": "0.56604594", "text": "def isPoint(self):\n return (int(self.IMIN == self.IMAX) + int(self.JMIN == self.JMAX) + int(self.KMIN == self.KMAX)) > 2", "title": "" }, { "docid": "4135328683a83990ddb12e5fddfe809a", "score": "0.5654549", "text": "def check_data(self):\n return hasattr(self, \"data\")", "title": "" }, { "docid": "2e8662bad783a2280085ab23ea348c49", "score": "0.5646633", "text": "def test_positive_points(self):\n result = self.plugin_positive.prepare_for_integration()\n self.assertArrayAlmostEqual(\n result[0].coord(\"height\").points, np.array([10.0, 20.0])\n )\n self.assertArrayAlmostEqual(\n result[1].coord(\"height\").points, np.array([5.0, 10.0])\n )", "title": "" }, { "docid": "cb9a4baf98bec8a756d1183df8d0806a", "score": "0.5644597", "text": "def isscalar(cls, dataset, dim, per_geom=False):\n if not dataset.data:\n return True\n geom_type = cls.geom_type(dataset)\n ds = cls._inner_dataset_template(dataset)\n combined = []\n for d in dataset.data:\n ds.data = d\n values = ds.interface.values(ds, dim, expanded=False)\n unique = list(util.unique_iterator(values))\n if len(unique) > 1:\n return False\n elif per_geom and geom_type != 'Point':\n continue\n unique = unique[0]\n if unique not in combined:\n if combined:\n return False\n combined.append(unique)\n return True", "title": "" }, { "docid": "5eee135283cb7efab2764b2a42ef3a22", "score": "0.56273884", "text": "def test_no_ungrouped(self, obj_test_geo):\n assert not houdini_toolbox.inline.api.geometry_has_ungrouped_points(\n obj_test_geo\n )", "title": "" }, { "docid": "662e9cddca342ae17d4221fa7c84dbfa", "score": "0.5626503", "text": "def process_points(self, x, y):\r\n enough_points = len(y) > 0 and np.max(y) - np.min(y) > self.h * .625\r\n if enough_points or len(self.coefficients) == 0:\r\n self.fit(x, y)", "title": "" }, { "docid": "4ff6198ccfe14ebf8d7bc7dda6131d2b", "score": "0.5620379", "text": "def points_possible(self):\r\n return self._points_possible", "title": "" }, { "docid": "68930c0e1036cbc3621ff9a3856c4eaa", "score": "0.56198055", "text": "def is_valid(self):\n if self.x is not None or self.y is not None or self.z is not None:\n return True\n return False", "title": "" }, { "docid": "b4369f3c794d003c197c07778d8a4626", "score": "0.56192493", "text": "def test_all_same_points(self):\n with pytest.raises(exceptions.PlaneConstructionError):\n plane = polygons.Plane([[0] * 3] * 20)", "title": "" }, { "docid": "906c341fc149a983809821df4e455712", "score": "0.5616468", "text": "def __len__(self):\n return len(self.Points)", "title": "" }, { "docid": "18313540911aa712914bc15e3f8bc244", "score": "0.5607436", "text": "def test_ref_data(self):\n ref_data = self.datapoint.ref_data\n self.assertIsInstance(ref_data, float)", "title": "" }, { "docid": "084c254d5a4b5f479a8ab80dbc2bd111", "score": "0.5605589", "text": "def verifyData(self):\n\t\tpass", "title": "" }, { "docid": "ca407d6a99f5440ac134cdba19f8614e", "score": "0.5604149", "text": "def _createPoints(self):\n \n return [dataPoint(key, self) for key in self.questions.keys()]", "title": "" }, { "docid": "5f85fb23b67f543e2aa5ba0f3fc099b2", "score": "0.5603929", "text": "def __contains__(self, point):\n for component, dim in zip(point, self.dimensions):\n if component not in dim:\n return False\n return True", "title": "" }, { "docid": "cbee4dda1c6522a0e79aca732be4bd9b", "score": "0.5599553", "text": 
"def check(self):\n map_ds=self.map_ds()\n g=unstructured_grid.UnstructuredGrid.read_ugrid(map_ds)\n self.check_results(map_ds,self.his_ds(),g)", "title": "" }, { "docid": "f9b2f3034dd98bed35b8a66d19474c09", "score": "0.5596533", "text": "def test_sub_check_test_data(self):\n\t\t# Make sure we don't have any empty or trivial length data sets.\n\t\t# This test exists purely to ensure that the generated and filtered\n\t\t# data in setUp is actually present and we don't have any empty\n\t\t# data sets after we have pruned them. This condition should not\n\t\t# arise unless the test has been edited carelessly.\n\n\t\tself.assertTrue(len(self.datax) >= self.simdincr)\n\t\tself.assertTrue(len(self.datay) >= self.simdincr)", "title": "" }, { "docid": "f9b2f3034dd98bed35b8a66d19474c09", "score": "0.5596533", "text": "def test_sub_check_test_data(self):\n\t\t# Make sure we don't have any empty or trivial length data sets.\n\t\t# This test exists purely to ensure that the generated and filtered\n\t\t# data in setUp is actually present and we don't have any empty\n\t\t# data sets after we have pruned them. This condition should not\n\t\t# arise unless the test has been edited carelessly.\n\n\t\tself.assertTrue(len(self.datax) >= self.simdincr)\n\t\tself.assertTrue(len(self.datay) >= self.simdincr)", "title": "" }, { "docid": "f9b2f3034dd98bed35b8a66d19474c09", "score": "0.5596533", "text": "def test_sub_check_test_data(self):\n\t\t# Make sure we don't have any empty or trivial length data sets.\n\t\t# This test exists purely to ensure that the generated and filtered\n\t\t# data in setUp is actually present and we don't have any empty\n\t\t# data sets after we have pruned them. This condition should not\n\t\t# arise unless the test has been edited carelessly.\n\n\t\tself.assertTrue(len(self.datax) >= self.simdincr)\n\t\tself.assertTrue(len(self.datay) >= self.simdincr)", "title": "" }, { "docid": "f9b2f3034dd98bed35b8a66d19474c09", "score": "0.5596533", "text": "def test_sub_check_test_data(self):\n\t\t# Make sure we don't have any empty or trivial length data sets.\n\t\t# This test exists purely to ensure that the generated and filtered\n\t\t# data in setUp is actually present and we don't have any empty\n\t\t# data sets after we have pruned them. This condition should not\n\t\t# arise unless the test has been edited carelessly.\n\n\t\tself.assertTrue(len(self.datax) >= self.simdincr)\n\t\tself.assertTrue(len(self.datay) >= self.simdincr)", "title": "" }, { "docid": "f9b2f3034dd98bed35b8a66d19474c09", "score": "0.5596533", "text": "def test_sub_check_test_data(self):\n\t\t# Make sure we don't have any empty or trivial length data sets.\n\t\t# This test exists purely to ensure that the generated and filtered\n\t\t# data in setUp is actually present and we don't have any empty\n\t\t# data sets after we have pruned them. This condition should not\n\t\t# arise unless the test has been edited carelessly.\n\n\t\tself.assertTrue(len(self.datax) >= self.simdincr)\n\t\tself.assertTrue(len(self.datay) >= self.simdincr)", "title": "" }, { "docid": "f9b2f3034dd98bed35b8a66d19474c09", "score": "0.5596533", "text": "def test_sub_check_test_data(self):\n\t\t# Make sure we don't have any empty or trivial length data sets.\n\t\t# This test exists purely to ensure that the generated and filtered\n\t\t# data in setUp is actually present and we don't have any empty\n\t\t# data sets after we have pruned them. 
This condition should not\n\t\t# arise unless the test has been edited carelessly.\n\n\t\tself.assertTrue(len(self.datax) >= self.simdincr)\n\t\tself.assertTrue(len(self.datay) >= self.simdincr)", "title": "" }, { "docid": "f9b2f3034dd98bed35b8a66d19474c09", "score": "0.5596533", "text": "def test_sub_check_test_data(self):\n\t\t# Make sure we don't have any empty or trivial length data sets.\n\t\t# This test exists purely to ensure that the generated and filtered\n\t\t# data in setUp is actually present and we don't have any empty\n\t\t# data sets after we have pruned them. This condition should not\n\t\t# arise unless the test has been edited carelessly.\n\n\t\tself.assertTrue(len(self.datax) >= self.simdincr)\n\t\tself.assertTrue(len(self.datay) >= self.simdincr)", "title": "" }, { "docid": "f9b2f3034dd98bed35b8a66d19474c09", "score": "0.5596533", "text": "def test_sub_check_test_data(self):\n\t\t# Make sure we don't have any empty or trivial length data sets.\n\t\t# This test exists purely to ensure that the generated and filtered\n\t\t# data in setUp is actually present and we don't have any empty\n\t\t# data sets after we have pruned them. This condition should not\n\t\t# arise unless the test has been edited carelessly.\n\n\t\tself.assertTrue(len(self.datax) >= self.simdincr)\n\t\tself.assertTrue(len(self.datay) >= self.simdincr)", "title": "" }, { "docid": "f9b2f3034dd98bed35b8a66d19474c09", "score": "0.5596533", "text": "def test_sub_check_test_data(self):\n\t\t# Make sure we don't have any empty or trivial length data sets.\n\t\t# This test exists purely to ensure that the generated and filtered\n\t\t# data in setUp is actually present and we don't have any empty\n\t\t# data sets after we have pruned them. This condition should not\n\t\t# arise unless the test has been edited carelessly.\n\n\t\tself.assertTrue(len(self.datax) >= self.simdincr)\n\t\tself.assertTrue(len(self.datay) >= self.simdincr)", "title": "" } ]
0bf1acc1c0ed809c58b52cf75dd8fd46
Create a socket for an existing file descriptor. Duplicate the file descriptor and return a `Socket` for it. The blocking mode can be set via `blocking`. If `close_fd` is True (the default), `fd` will be closed.
[ { "docid": "25560e410d51cd9abd4df3482b2e9811", "score": "0.7590436", "text": "def new_from_fd(cls, fd: int, *, blocking=True, close_fd=True):\n sock = socket.fromfd(fd, socket.AF_UNIX, socket.SOCK_SEQPACKET)\n sock.setblocking(blocking)\n if close_fd:\n os.close(fd)\n return cls(sock, None)", "title": "" } ]
[ { "docid": "846525ea98faa4e7fbc363b4d62f546b", "score": "0.7232971", "text": "def fromfd(fd, family, type, proto=0):\n nfd = dup(fd)\n return socket(family, type, proto, nfd)", "title": "" }, { "docid": "54022c648a189d4cfefa93374116dc19", "score": "0.65049374", "text": "def dup(self):\n fd = dup(self.fileno())\n sock = self.__class__(self.family, self.type, self.proto, fileno=fd)\n sock.settimeout(self.gettimeout())\n return sock", "title": "" }, { "docid": "925f39585ba8cd440b9530864a393e38", "score": "0.6206847", "text": "def _open_socket(self, host, port, sock=None, timeout=None):\n if sock:\n LOGGER.debug('Reusing existing socket')\n return sock\n\n LOGGER.debug('Opening socket to {}:{}'.format(host, port))\n return socket.create_connection((host, port), timeout)", "title": "" }, { "docid": "d9ee4d782d3ff2c892e359d7b71a8dce", "score": "0.6172377", "text": "def _fromListeningDescriptor(cls, reactor, fd, factory):\n port = socket.fromfd(fd, cls.addressFamily, cls.socketType)\n self = cls(port.getsockname(), factory, reactor=reactor)\n self._preexistingSocket = port\n return self", "title": "" }, { "docid": "5cfccabe1211c196df05e86e440e5c52", "score": "0.6134316", "text": "def newSocket():\n s = socket.socket(s_ipv4, s_tcp)\n s.settimeout(timeout)\n return s", "title": "" }, { "docid": "ad6ad73cc2739cebe78e76dc1eeb342b", "score": "0.61254644", "text": "def dup(self):\n return socket(_sock=self._sock)", "title": "" }, { "docid": "c6f268b74843b57decf34ea2f0720aba", "score": "0.6103883", "text": "def create_socket():\n if has_ipv6:\n sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)\n # Explicitly configure socket to work for both IPv4 and IPv6\n sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 0)\n else:\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n return sock", "title": "" }, { "docid": "0ff44e3b535d0d62e39a94b416b30714", "score": "0.59522593", "text": "def create_socket(port):\n def socket_closer(open_sock):\n \"\"\"Close down an opened socket properly.\"\"\"\n open_sock.shutdown(socket.SHUT_RDWR)\n open_sock.close()\n\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.connect((\"localhost\", port))\n return (sock, lambda: socket_closer(sock))", "title": "" }, { "docid": "098b4dbb36ca01b2492fc11c052c270c", "score": "0.5868638", "text": "def dup(self):\n return _socketobject(_sock=self._sock)", "title": "" }, { "docid": "956ac8f40649ba8ff2ab3117f20836a1", "score": "0.57937735", "text": "def create_socket():\n\n sock = socket.socket(\n socket.AF_INET,\n socket.SOCK_STREAM\n )\n\n sock.connect((SERVER_ADDR, SERVER_PORT))\n return sock", "title": "" }, { "docid": "6cbead8376e6b8f2b6da1f6236a853eb", "score": "0.57845247", "text": "def new_server(cls, bind_to: PathLike):\n\n sock = None\n unlink = None\n path = os.path.split(bind_to)\n\n try:\n # We bind the socket and then open a directory-fd on the target\n # socket. This allows us to properly unlink the socket when the\n # server is closed. Note that sockets are never automatically\n # cleaned up on linux, nor can you bind to existing sockets.\n # We use a dirfd to guarantee this works even when you change\n # your mount points in-between.\n # Yeah, this is racy when mount-points change between the socket\n # creation and open. But then your entire socket creation is racy\n # as well. 
We do not guarantee atomicity, so you better make sure\n # you do not rely on it.\n sock = socket.socket(socket.AF_UNIX, socket.SOCK_SEQPACKET)\n sock.bind(os.fspath(bind_to))\n unlink = os.open(os.path.join(\".\", path[0]), os.O_CLOEXEC | os.O_PATH)\n sock.setblocking(False)\n except BaseException:\n if unlink is not None:\n os.close(unlink)\n if sock is not None:\n sock.close()\n raise\n\n return cls(sock, (unlink, path[1]))", "title": "" }, { "docid": "7e2e6962937e0d0b407a1e1c7b744f73", "score": "0.5775214", "text": "def create_socket( ip, port, listen=False ):\n \n assert type(ip) == str\n assert type(port) == int\n\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n try:\n if listen:\n sock.bind( (ip, port) )\n sock.listen(2) # 2 is enough for our needs\n else:\n sock.connect( (ip, port) )\n\n return sock\n except:\n return None", "title": "" }, { "docid": "ad71ea7f2938ba515eb14bee202c39c9", "score": "0.5768536", "text": "def make_socket():\n print(\"make_socket()\")\n UDP_IP = \"127.0.0.1\"\n UDP_PORT = 2001\n\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n sock.bind((UDP_IP, UDP_PORT))\n\n return sock", "title": "" }, { "docid": "31a021d8d33d8e5c11df79733f2eef3a", "score": "0.5646887", "text": "def dupfile(f, mode=None, buffering=0, raising=False):\r\n try: \r\n fd = f.fileno() \r\n except AttributeError: \r\n if raising: \r\n raise \r\n return f\r\n newfd = os.dup(fd) \r\n mode = mode and mode or f.mode\r\n return os.fdopen(newfd, mode, buffering)", "title": "" }, { "docid": "561d0cdefc0971d4c3a096d5ba10ce49", "score": "0.5614931", "text": "def makeSocket(self, timeout=1):\n s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n if hasattr(s, 'settimeout'):\n s.settimeout(timeout)\n s.connect(self.host)\n return s", "title": "" }, { "docid": "8288688e85d5fc55f054b0f1c2dbb033", "score": "0.5590571", "text": "def create_socket():\r\n server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n server_socket.bind((socket.gethostname(), int(sys.argv[2])))\r\n server_socket.settimeout(__TIMEOUT__)\r\n server_socket.listen(__STACK_SIZE__)\r\n return server_socket", "title": "" }, { "docid": "71ea75094f5eb34f23dfc8de31a200fa", "score": "0.55671656", "text": "def _open_socket(self):\n udp_socket = socket.socket(family=socket.AF_INET,\n type=socket.SOCK_DGRAM,\n proto=socket.IPPROTO_UDP)\n udp_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True)\n try: # Raspbian doesn't recognize SO_REUSEPORT\n udp_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, True)\n except AttributeError:\n logging.warning('Unable to set socket REUSEPORT; may be unsupported')\n\n # Set the time-to-live for messages, in case of multicast\n udp_socket.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL,\n struct.pack('b', self.ttl))\n udp_socket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, True)\n\n try:\n udp_socket.connect((self.destination, self.port))\n return udp_socket\n except OSError as e:\n logging.error('Unable to connect to %s:%d - %s', self.destination, self.port, e)\n return None", "title": "" }, { "docid": "79177846ba9c542fc7e7971fae8ebc30", "score": "0.55167514", "text": "def make_unix_socket(style, nonblock, bind_path, connect_path):\r\n\r\n try:\r\n sock = socket.socket(socket.AF_UNIX, style)\r\n except socket.error, e:\r\n return get_exception_errno(e), None\r\n\r\n try:\r\n if nonblock:\r\n set_nonblocking(sock)\r\n if bind_path is not None:\r\n # Delete bind_path but ignore 
ENOENT.\r\n try:\r\n os.unlink(bind_path)\r\n except OSError, e:\r\n if e.errno != errno.ENOENT:\r\n return e.errno, None\r\n\r\n ovs.fatal_signal.add_file_to_unlink(bind_path)\r\n sock.bind(bind_path)\r\n\r\n try:\r\n if sys.hexversion >= 0x02060000:\r\n os.fchmod(sock.fileno(), 0700)\r\n else:\r\n os.chmod(\"/dev/fd/%d\" % sock.fileno(), 0700)\r\n except OSError, e:\r\n pass\r\n if connect_path is not None:\r\n try:\r\n sock.connect(connect_path)\r\n except socket.error, e:\r\n if get_exception_errno(e) != errno.EINPROGRESS:\r\n raise\r\n return 0, sock\r\n except socket.error, e:\r\n sock.close()\r\n if bind_path is not None:\r\n ovs.fatal_signal.unlink_file_now(bind_path)\r\n return get_exception_errno(e), None", "title": "" }, { "docid": "9b35805d8763422aedede5c81b7424e1", "score": "0.54639167", "text": "def _create_client_socket(self):\n if self.sock:\n self.throw_exception(message='socket already bound')\n\n try:\n new_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n new_sock.connect((self._server_host, self._server_port))\n\n new_sock.setblocking(0)\n new_sock.settimeout(0.0)\n new_sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, \n struct.pack('L', 1))\n except socket.error, socket_exception:\n new_sock.close()\n self.throw_exception(exception=socket_exception)\n\n self.sock = new_sock", "title": "" }, { "docid": "3e070e07d964532790a253c603fb8ee3", "score": "0.54518676", "text": "def create_connection(address, timeout=_GLOBAL_DEFAULT_TIMEOUT, source_address=None):\n host, port = address\n err = None\n for res in getaddrinfo(host, port, 0, SOCK_STREAM):\n af, socktype, proto, canonname, sa = res\n sock = None\n try:\n sock = socket(af, socktype, proto)\n if timeout is not _GLOBAL_DEFAULT_TIMEOUT:\n sock.settimeout(timeout)\n if source_address:\n sock.bind(source_address)\n sock.connect(sa)\n return sock\n\n except error as e:\n err = e\n if sock is not None:\n sock.close()\n\n if err is not None:\n raise err", "title": "" }, { "docid": "c71386357569ddcfc861b5f6aa0968ad", "score": "0.52971864", "text": "def create_socket(self):\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n return s", "title": "" }, { "docid": "bf3fd7e6595e0060c3e697bdbe656879", "score": "0.5260101", "text": "def new_client(cls, connect_to: Optional[PathLike] = None):\n\n sock = None\n\n try:\n sock = socket.socket(socket.AF_UNIX, socket.SOCK_SEQPACKET)\n\n # Trigger an auto-bind. If you do not do this, you might end up with\n # an unbound unix socket, which cannot receive messages.\n # Alternatively, you can also set `SO_PASSCRED`, but this has\n # side-effects.\n sock.bind(\"\")\n\n # Connect the socket. 
This has no effect other than specifying the\n # default destination for send operations.\n if connect_to is not None:\n sock.connect(os.fspath(connect_to))\n except BaseException:\n if sock is not None:\n sock.close()\n raise\n\n return cls(sock, None)", "title": "" }, { "docid": "90cac9e47b1f954c5c4641e401b92d91", "score": "0.5257399", "text": "def create_socket(self):\r\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n return ssl.wrap_socket(sock, self.keyfile, self.certfile)", "title": "" }, { "docid": "628dd55ff45337d2dc02a61fd32d908e", "score": "0.52565926", "text": "def create_tcp_socket(module):\n type_ = module.SOCK_STREAM\n if hasattr(module, 'SOCK_CLOEXEC'): # pragma: nocover\n # if available, set cloexec flag during socket creation\n type_ |= module.SOCK_CLOEXEC\n sock = module.socket(module.AF_INET, type_)\n _set_default_tcpsock_options(module, sock)\n return sock", "title": "" }, { "docid": "611519bb10172fa4e8864a636791d552", "score": "0.5242645", "text": "def create_connection(address, timeout=_GLOBAL_DEFAULT_TIMEOUT):\n\n msg = \"getaddrinfo returns an empty list\"\n host, port = address\n for res in getaddrinfo(host, port, 0, SOCK_STREAM):\n af, socktype, proto, _canonname, sa = res\n sock = None\n try:\n sock = socket(af, socktype, proto)\n if timeout is not _GLOBAL_DEFAULT_TIMEOUT:\n sock.settimeout(timeout)\n sock.connect(sa)\n return sock\n except error, msg:\n if sock is not None:\n sock.close()\n raise error, msg", "title": "" }, { "docid": "46973f9e10fed5ae43ef7508975403a8", "score": "0.5237661", "text": "def init_server_socket() -> socket.socket:\n socket_address = get_socket_address()\n try:\n os.unlink(socket_address)\n except (OSError, EnvironmentError):\n pass\n sock = socket.socket(family=socket.AF_UNIX, type=socket.SOCK_DGRAM)\n sock.bind(socket_address)\n sock.settimeout(1)\n return sock", "title": "" }, { "docid": "2ae45841106f7ffaa3250c06d7554ab7", "score": "0.51974523", "text": "def _fromConnectedSocket(cls, fileDescriptor, factory, reactor):\n skt = socket.fromfd(fileDescriptor, socket.AF_UNIX, socket.SOCK_STREAM)\n protocolAddr = address.UNIXAddress(skt.getsockname())\n\n proto = factory.buildProtocol(protocolAddr)\n if proto is None:\n skt.close()\n return\n\n # FIXME: is this a suitable sessionno?\n sessionno = 0\n self = cls(skt, proto, skt.getpeername(), None, sessionno, reactor)\n self.repstr = \"<%s #%s on %s>\" % (\n self.protocol.__class__.__name__, self.sessionno, skt.getsockname())\n self.logstr = \"%s,%s,%s\" % (\n self.protocol.__class__.__name__, self.sessionno, skt.getsockname())\n proto.makeConnection(self)\n return self", "title": "" }, { "docid": "2e1ea1ee507ce108fe3f65b1582dc0fe", "score": "0.5169577", "text": "def createSocket(self):\n self._sock = bluetooth.BluetoothSocket(bluetooth.RFCOMM)\n #self.sock = bluetooth.BluetoothSocket(bluetooth.L2CAP)\n # Cannot bind here, otherwise clients will not be able to connect.\n # self.localhost = socket.gethostbyname(socket.gethostname())\n # self.sock.bind((self.localhost, self.srcPort))\n return self._sock", "title": "" }, { "docid": "076e0ab5ca0202a45c7b159ae9a33df4", "score": "0.51455367", "text": "def create_listen_socket(host, port):\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n sock.bind((host, port))\n sock.listen(100)\n return sock", "title": "" }, { "docid": "52407d89f59696aa764527141cc31635", "score": "0.5127823", "text": "def create_listening_socket(self) -> socket.socket:\n self.sock_directory 
= tempfile.mkdtemp()\n sockname = os.path.join(self.sock_directory, SOCKET_NAME)\n sock = socket.socket(socket.AF_UNIX)\n sock.bind(sockname)\n sock.listen(1)\n return sock", "title": "" }, { "docid": "932cbbdac274b39fe97948a975884a13", "score": "0.510833", "text": "def createSocket(self):\n self.sockfd = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0, None)", "title": "" }, { "docid": "508e543c0a9dd97686d6b8e190e7b62a", "score": "0.5093743", "text": "def createSocket(self):\n now = time.time()\n # Either retryTime is None, in which case this\n # is the first time back after a disconnect, or\n # we've waited long enough.\n if self.retryTime is None:\n attempt = 1\n else:\n attempt = (now >= self.retryTime)\n\n if attempt:\n try:\n self.makeSocket()\n self.retryTime = None # next time, no delay before trying\n except socket.error:\n # Creation failed, so set the retry time and return.\n if self.retryTime is None:\n self.retryPeriod = self.retryStart\n else:\n self.retryPeriod = self.retryPeriod * self.retryFactor\n if self.retryPeriod > self.retryMax:\n self.retryPeriod = self.retryMax\n self.retryTime = now + self.retryPeriod", "title": "" }, { "docid": "33f04d649b47f5227ae13c93b62e381d", "score": "0.5067543", "text": "def restore(fdNum = 1):\n os.dup2(fd, fdNum)", "title": "" }, { "docid": "3d4c494ee6028c4f585012c8c11da08b", "score": "0.504611", "text": "def patched_new_conn(self):\n hostname = global_dns_cache.resolve(self.host)\n try:\n conn = socket.create_connection((hostname, self.port), self.timeout, self.source_address)\n except AttributeError: # Python 2.6\n conn = socket.create_connection((hostname, self.port), self.timeout)\n # conn.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, self.tcp_nodelay)\n return conn", "title": "" }, { "docid": "5b32d922ec44a7f8de8c242977625bf3", "score": "0.5030806", "text": "def create_connection(address):\n host, port = address\n msg = \"getaddrinfo returns an empty list\"\n sock = None\n for res in socket.getaddrinfo(host, port, 0,\n socket.SOCK_STREAM):\n af, socktype, proto, unused_canonname, sa = res\n try:\n sock = socket.socket(af, socktype, proto)\n logger.info(\"connect: (%s, %s)\", host, port)\n sock.connect(sa)\n except socket.error, msg:\n logger.info('connect fail: %s %s', host, port)\n if sock:\n sock.close()\n sock = None\n continue\n break\n if not sock:\n raise socket.error(msg)\n return sock", "title": "" }, { "docid": "7c3a39caf93e649eafb3bbf83b2f64f7", "score": "0.50014186", "text": "def accept(self):\n while True:\n try:\n fd, addr = self._accept()\n break\n except BlockingIOError:\n if self.timeout == 0.0:\n raise\n self._wait(self._read_event)\n sock = socket(self.family, self.type, self.proto, fileno=fd)\n # Python Issue #7995: if no default timeout is set and the listening\n # socket had a (non-zero) timeout, force the new socket in blocking\n # mode to override platform-specific socket flags inheritance.\n # XXX do we need to do this?\n if getdefaulttimeout() is None and self.gettimeout():\n sock.setblocking(True)\n return sock, addr", "title": "" }, { "docid": "5c2dba4befed77ec4f3038b7bb99ec3c", "score": "0.49870542", "text": "def unix_domain_socket_constructor(linger_time=3):\n new_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n new_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n new_socket.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER, struct.pack('ii', 1, linger_time))\n return new_socket", "title": "" }, { "docid": "4be4eb0bcdbdfb58d9d48a6ce3f6ce34", "score": "0.4986293", "text": 
"def create_socket(family, type):", "title": "" }, { "docid": "7f0336024971c6df1a1bd29f9d4d0db1", "score": "0.4979271", "text": "def _configure_socket(node):\n\t\ttry:\n\t\t\tsock = socket(AF_INET, SOCK_DGRAM)\n\t\t\tsock.bind((node.ip_address,node.port))\n\t\texcept (OSError,IOError) as error:\n\t\t\tAppLogger.error(\"Creation socket error for Node '{id}': {error}'\".format(id=node.id, error=error))\n\t\t\texit(1)\n\t\treturn sock", "title": "" }, { "docid": "6cd464af1a76eccb1205f7dfc6dbfc94", "score": "0.49667406", "text": "def _make_bad_fd():\n file = open(TESTFN, \"wb\")\n try:\n return file.fileno()\n finally:\n file.close()\n os.unlink(TESTFN)", "title": "" }, { "docid": "acf94e7fd63f9f58ebb196f9c2b312e1", "score": "0.49507055", "text": "def _build_sock(fabric):\n\t\tsocket = Config.Sock(fabric[\"id\"], fabric[\"ipaddr\"], fabric[\"port\"], fabric[\"transport\"], fabric[\"proto\"]) # Builds the required attributes of the sock instance\n\t\tfor field in (\"name\", \"profile\", \"network_buffer\", \"proto_type\"): # Builds optional attributes of the sock instance\n\t\t\ttry:\n\t\t\t\tif field == \"name\":\n\t\t\t\t\tsocket.name = fabric[field]\n\t\t\t\telif field == \"profile\":\n\t\t\t\t\tsocket.profile = fabric[field]\n\t\t\t\telif field == \"network_buffer\":\n\t\t\t\t\tsocket.network_buffer = fabric[field]\n\t\t\t\telif field == \"proto_type\":\n\t\t\t\t\tsocket.proto_type = fabric[field]\n\t\t\texcept KeyError:\n\t\t\t\tpass\n\t\treturn socket", "title": "" }, { "docid": "300e403fa9748b57e75ab39b799b775e", "score": "0.49195737", "text": "def connect(addr, family=socket.AF_INET, bind=None):\n sock = socket.socket(family, socket.SOCK_STREAM)\n if bind is not None:\n sock.bind(bind)\n sock.connect(addr)\n return sock", "title": "" }, { "docid": "e1f20ca9eeb4fffd92bb70c8c071a716", "score": "0.4888422", "text": "def create_socket(self):\n # noinspection PyAttributeOutsideInit\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.socket.settimeout(5)\n self.socket.bind(self.localAddr)\n\n if self.threaded:\n self.heartbeatThread = HeartbeatThread(self)\n self.heartbeatThread.start()\n else:\n self.socket.setblocking(0)", "title": "" }, { "docid": "c0bb859915d7ee340a228357cbdee180", "score": "0.48768732", "text": "def tcp_socket_constructor(linger_time=10, tcp_no_delay=1):\n new_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n new_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n new_socket.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER, struct.pack('ii', 1, linger_time))\n # Because everything is sent in a single send call, and we're sending \"messages\", not really \"streams\",\n # we turn off Nagle's algorithm to make performance a little better. 
This might we worth thinking about more though.\n new_socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, tcp_no_delay)\n return new_socket", "title": "" }, { "docid": "cd94b60a251888f8d54a88b570aadb08", "score": "0.48758632", "text": "def initWithFileDescriptor_(self, fd):\n self = objc.super(_SignalFDObserver, self).init()\n self._file_handle = Foundation.NSFileHandle.alloc().\\\n initWithFileDescriptor_closeOnDealloc_(fd, True)\n return self", "title": "" }, { "docid": "72806d0fb6da24e5961d4f64d831d518", "score": "0.48735562", "text": "def __call__(self, *args, **kwargs) -> \"Socket.Instance\":\n\n # Make sure we have active PDP contexts\n self._activateContexts()\n\n # Get an instance of the socket\n instance: Socket.Instance = Socket.Instance(self)\n\n # Create it with the proper arguments\n instance.create(*args, **kwargs)\n\n # Check that we were able to create a socket and return None if we\n # weren't able to create the socket\n if instance.connId is None:\n return None\n\n # Return it from the factory\n return instance", "title": "" }, { "docid": "858f3592b8d80f671618d1a1ee83cb02", "score": "0.4873034", "text": "def make_ssocket(protocol, ciphers=ssl._DEFAULT_CIPHERS):\n return ssl.wrap_socket( socket.socket(),\n ssl_version=protocol,\n ciphers=ciphers)", "title": "" }, { "docid": "9ecee41e12b78a79a8c1a5348297d883", "score": "0.4862245", "text": "def open_socket(self):\n try:\n self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR,1)\n self.server.bind((self.host,self.port))\n self.server.listen(5)\n self.server.setblocking(0)\n except socket.error, (value,message):\n if self.server:\n self.server.close()\n print \"Could not open socket: \" + message\n sys.exit(1)", "title": "" }, { "docid": "65faf23e0e60f447d9f0de654ad7bc14", "score": "0.48494422", "text": "def create_socket_connection(url: str, use_https: bool):\n try:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n except:\n print(\"Error forming socket connection.\")\n sys.exit()\n\n # If we want to use https set port to 443 or use default 80\n if use_https:\n port = 443\n else:\n port = 80\n\n # connect socket to our url and given port\n try:\n s.connect((url, port))\n except socket.error:\n raise Exception\n\n\n if use_https:\n try:\n s = ssl.wrap_socket(s)\n except ssl.SSLError:\n raise Exception\n return s", "title": "" }, { "docid": "462b18c02b2f4d8cf3f6bd42e84a75aa", "score": "0.48345518", "text": "def __init__(self, *, loop, fd):\n super().__init__(CFSocketCallback(self._cf_socket_callback), None, loop)\n\n # Retain a reference to the Handle\n self._loop._sockets[fd] = self\n self._reader = None\n self._writer = None\n\n self._fd = fd\n self._cf_socket = libcf.CFSocketCreateWithNative(\n kCFAllocatorDefault, self._fd,\n kCFSocketReadCallBack | kCFSocketWriteCallBack\n | kCFSocketConnectCallBack,\n self._callback,\n None\n )\n libcf.CFSocketSetSocketFlags(\n self._cf_socket,\n kCFSocketAutomaticallyReenableReadCallBack\n | kCFSocketAutomaticallyReenableWriteCallBack\n\n # # This extra flag is to ensure that CF doesn't (destructively,\n # # because destructively is the only way to do it) retrieve\n # # SO_ERROR\n # 1 << 6\n )\n self._src = libcf.CFSocketCreateRunLoopSource(kCFAllocatorDefault, self._cf_socket, 0)\n libcf.CFRunLoopAddSource(self._loop._cfrunloop, self._src, kCFRunLoopCommonModes)\n libcf.CFSocketDisableCallBacks(\n self._cf_socket,\n kCFSocketReadCallBack | kCFSocketWriteCallBack\n | kCFSocketConnectCallBack\n )", 
"title": "" }, { "docid": "ffa8cb91d85724fbe9f745bd1b1e1747", "score": "0.48270175", "text": "def RegisterFileDescriptor(self, file_descriptor):\r\n self.pool.AddFileDescriptor(file_descriptor)", "title": "" }, { "docid": "2f64f299705f10ca65c5ecd1cd2ac7d0", "score": "0.48088935", "text": "def setNonBlocking(fd):\n flags = fcntl.fcntl(fd, fcntl.F_GETFL)\n flags = flags | os.O_NONBLOCK\n fcntl.fcntl(fd, fcntl.F_SETFL, flags)", "title": "" }, { "docid": "1df10f4eba0f0fd17dfe36f1a27aaf05", "score": "0.47919315", "text": "def _open_nonblocking(self):\n fifo = open(self.path, \"r\")\n fd = fifo.fileno()\n flag = fcntl.fcntl(fd, fcntl.F_GETFL)\n fcntl.fcntl(fd, fcntl.F_SETFL, flag | os.O_NONBLOCK)\n return fifo", "title": "" }, { "docid": "c426cef3f745a173fc15559e485513d6", "score": "0.47858843", "text": "def server_socket(host, port):\n\n try:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n except socket.error as e:\n print( f'(server) Error creating socket: {e}' )\n sys.exit(-1)\n # We use socket.SO_REUSEADDR to avoid this error:\n # socket.error: [Errno 98] Address already in use\n # that can happen if we reinit this script.\n # This is because the previous execution has left the socket in a\n # TIME_WAIT state, and cannot be immediately reused.\n s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n # the tcp socket\n try:\n s.bind((host, port))\n except:\n print( f'(server.py [{service}]) Error binding {host}:{port}' )\n s.close()\n sys.exit(-1)\n\n # returns the socket object\n return s", "title": "" }, { "docid": "25ed768dee02d8eb2abfe671f9c3ef34", "score": "0.47855797", "text": "def _bind_socket(self, bindaddr):\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n sock.setblocking(0)\n try:\n sock.bind(bindaddr)\n except Exception:\n self._logger.exception(\"Unable to bind to %s\" % str(bindaddr))\n raise\n sock.listen(self.BACKLOG)\n return sock", "title": "" }, { "docid": "41d54ebdd0e962641d0e843ce28267f4", "score": "0.47783253", "text": "def SaveAndDup(self, fd1, fd2):\n #log('---- SaveAndDup %s %s\\n', fd1, fd2)\n fcntl.fcntl(fd2, fcntl.F_DUPFD, self.next_fd)\n os.close(fd2)\n fcntl.fcntl(self.next_fd, fcntl.F_SETFD, fcntl.FD_CLOEXEC)\n\n #log('==== dup %s %s\\n' % (fd1, fd2))\n try:\n os.dup2(fd1, fd2)\n except OSError as e:\n #print(\"%s: %s %s\" % (e, fd1, fd2))\n print(e, file=sys.stderr)\n # Restore and return error\n os.dup2(self.next_fd, fd2)\n os.close(self.next_fd)\n # Undo it\n return False\n\n # Oh this is wrong?\n #os.close(fd1)\n\n self.cur_frame.saved.append((self.next_fd, fd2))\n self.next_fd += 1\n return True", "title": "" }, { "docid": "08a2ff6c4648bdf98d7a253f47d875d9", "score": "0.47764266", "text": "def open(name):\r\n if not PassiveStream.is_valid_name(name):\r\n return errno.EAFNOSUPPORT, None\r\n\r\n bind_path = name[6:]\r\n error, sock = ovs.socket_util.make_unix_socket(socket.SOCK_STREAM,\r\n True, bind_path, None)\r\n if error:\r\n return error, None\r\n\r\n try:\r\n sock.listen(10)\r\n except socket.error, e:\r\n vlog.err(\"%s: listen: %s\" % (name, os.strerror(e.error)))\r\n sock.close()\r\n return e.error, None\r\n\r\n return 0, PassiveStream(sock, name, bind_path)", "title": "" }, { "docid": "f6f8563854a11b34889e36d5bddead7f", "score": "0.47710246", "text": "def create_pipe():\n r, w = os.pipe()\n if HAS_FNCTL:\n fcntl.fcntl(r, fcntl.F_SETFL, os.O_NONBLOCK)\n fcntl.fcntl(w, fcntl.F_SETFL, os.O_NONBLOCK)\n _set_fd_cloexec(r)\n _set_fd_cloexec(w)\n return r, w", "title": "" 
}, { "docid": "7324e86c7baf7de8eb9c187c4c436a08", "score": "0.4748279", "text": "def connect(cls, addr, timeout = TIMEOUT_CURRENT):\n socket = cls.from_address(addr)\n socket._connect(addr, timeout)\n return socket", "title": "" }, { "docid": "7b8863df23ed16ddd0bd1c6206bd26d5", "score": "0.47438186", "text": "def get_for_fd(self, fd):\r\n with self._lock:\r\n return self._descriptor_for_fd[fd]", "title": "" }, { "docid": "384d79cc086554b5239b940b8ff69f12", "score": "0.47342634", "text": "def startListening(self):\n tcp._reservedFD.reserve()\n log.msg(\"%s starting on %r\" % (\n self._getLogPrefix(self.factory),\n _coerceToFilesystemEncoding('', self.port)))\n if self.wantPID:\n self.lockFile = lockfile.FilesystemLock(self.port + b\".lock\")\n if not self.lockFile.lock():\n raise error.CannotListenError(None, self.port,\n \"Cannot acquire lock\")\n else:\n if not self.lockFile.clean:\n try:\n # This is a best-attempt at cleaning up\n # left-over unix sockets on the filesystem.\n # If it fails, there's not much else we can\n # do. The bind() below will fail with an\n # exception that actually propagates.\n if stat.S_ISSOCK(os.stat(self.port).st_mode):\n os.remove(self.port)\n except:\n pass\n\n self.factory.doStart()\n\n try:\n if self._preexistingSocket is not None:\n skt = self._preexistingSocket\n self._preexistingSocket = None\n else:\n skt = self.createInternetSocket()\n skt.bind(self.port)\n except socket.error as le:\n raise error.CannotListenError(None, self.port, le)\n else:\n if _inFilesystemNamespace(self.port):\n # Make the socket readable and writable to the world.\n os.chmod(self.port, self.mode)\n skt.listen(self.backlog)\n self.connected = True\n self.socket = skt\n self.fileno = self.socket.fileno\n self.numberAccepts = 100\n self.startReading()", "title": "" }, { "docid": "b9a511f58b86741a0230bb4580faa581", "score": "0.47275463", "text": "def get_new_connection(self, server):\n sock = socket.socket()\n sock.connect(server)\n Pool.set_keepalive(sock)\n if server in self._pool:\n self._pool[server].append(sock)\n else:\n self._pool[server] = [sock]\n return sock", "title": "" }, { "docid": "12f9d85aaeab8781c3a8a3c6e20755a5", "score": "0.47225252", "text": "def socket(self) -> socket.socket:\n if not self._socket:\n self._socket = socket.socket()\n self._socket.connect(self.address)\n return self._socket", "title": "" }, { "docid": "64dd0817c81eb2dd9e9134d4d5e912a6", "score": "0.47134364", "text": "def setupServerSocket(socktype = UTP):\n sock = socket.socket(socket.AF_INET, socktype)\n #Not Complete Yet\n #if socktype == TCP:\n # sock.listen()\n return sock", "title": "" }, { "docid": "007fe55e96c95e1cb032164c46fd43e1", "score": "0.4709498", "text": "def createSocket(self, myPort):\n\t\tif self.__socket != None:\n\t\t\traise ValueError(\"This Connection already has an active socket!\")\n\t\tif not isinstance(myPort, int):\n\t\t\traise ValueError(\"Mal-formed local port number\")\n\n\t\t# Get local host info\n\t\tself.logMessage(\"New socket on '\" + self.__host_name + \"' \" + self.__local_IP + \" (\" + str(myPort) + \")\", 0)\n\n\t\tself.__socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\t\tself.__socket.setblocking(False)\n\t\tself.__local_port = myPort\n\t\tself.__socket.bind((self.__local_IP, self.__local_port))\n\n\t\t# Register this thread as active and start it's thread of control\n\t\tself.__active = True", "title": "" }, { "docid": "94a89e5be7e6b115bc257d957b9fab09", "score": "0.46589512", "text": "def createSocket(self, myPort):\n\t\tif self.__socket != 
None:\n\t\t\traise ValueError(\"This Connection already has an active socket!\")\n\t\tif not isinstance(myPort, int):\n\t\t\traise ValueError(\"Mal-formed local port number\")\n\n\t\t# Get local host info\n\t\tlocalName = socket.gethostname()\n\t\tlocalIP = socket.gethostbyname(localName)\n\t\tself.logMessage(\"New socket on '\" + localName + \"' \" + localIP + \" (\" + str(myPort) + \")\", 0)\n\n\t\tself.__socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\t\tself.__socket.setblocking(False)\n\t\tself.__socket_IP = localIP\n\t\tself.__socket_port = myPort\n\t\tself.__socket.bind((self.__socket_IP, self.__socket_port))\n\n\t\t# Register this thread as active and start it's thread of control\n\t\tself.__active = True\n\t\tself.start()", "title": "" }, { "docid": "4fb54585ce948c49eb7fe9490fa6bb65", "score": "0.46463767", "text": "def allocate_port():\n sock = socket.socket()\n try:\n sock.bind((\"localhost\", 0))\n return get_port(sock)\n finally:\n sock.close()", "title": "" }, { "docid": "21ff720acc437823435ebe770109b518", "score": "0.4642486", "text": "def get_socket(port: int) -> socket.socket:\n\n SERVER_NAME = 'localhost'\n SERVER_PORT = port\n\n # generic socket and ssl configuration\n socket.setdefaulttimeout(15)\n\n # Configure an ssl client side context which will not check the server's certificate.\n ctx = ssl.create_default_context()\n ctx.check_hostname = False\n ctx.verify_mode = ssl.CERT_NONE\n ctx.set_alpn_protocols(['h2'])\n\n # open a socket to the server and initiate TLS/SSL\n tls_socket = socket.create_connection((SERVER_NAME, SERVER_PORT))\n tls_socket = ctx.wrap_socket(tls_socket, server_hostname=SERVER_NAME)\n return tls_socket", "title": "" }, { "docid": "1cf18fff06e10b8b908fd93763be5611", "score": "0.46400177", "text": "async def create_connection(cls, host: str, port: int) -> \"TCPSocket\":\n r, w = await asyncio.open_connection(host=host, port=port)\n return TCPSocket(r, w)", "title": "" }, { "docid": "5a34fc41865a09768c37d92ddbd46efa", "score": "0.46299386", "text": "def patch_socket(dns=True, aggressive=True):\n from gevent import socket\n # Note: although it seems like it's not strictly necessary to monkey patch 'create_connection',\n # it's better to do it. If 'create_connection' was not monkey patched, but the rest of socket module\n # was, create_connection would still use \"green\" getaddrinfo and \"green\" socket.\n # However, because gevent.socket.socket.connect is a Python function, the exception raised by it causes\n # _socket object to be referenced by the frame, thus causing the next invocation of bind(source_address) to fail.\n if dns:\n items = socket.__implements__ # pylint:disable=no-member\n else:\n items = set(socket.__implements__) - set(socket.__dns__) # pylint:disable=no-member\n _patch_module('socket', items=items)\n if aggressive:\n if 'ssl' not in socket.__implements__: # pylint:disable=no-member\n remove_item(socket, 'ssl')", "title": "" }, { "docid": "8360fcc1bca25071b1f47230a8232aca", "score": "0.4624638", "text": "def accept(self) -> Optional[\"Socket\"]:\n\n if not self._socket:\n raise RuntimeError(\"Tried to accept without socket.\")\n\n # Since, in the kernel, for AF_UNIX, new connection requests,\n # i.e. 
clients connecting, are directly put on the receive\n # queue of the listener socket, accept here *should* always\n # return a socket and not block, even if the client meanwhile\n # disconnected; we don't rely on that kernel behavior though\n try:\n conn, _ = self._socket.accept()\n except (socket.timeout, BlockingIOError):\n return None\n return Socket(conn, None)", "title": "" }, { "docid": "0a265a3f0de56107b320b271a96e3361", "score": "0.4620532", "text": "def create_socket(port):\n\n def socket_closer(open_sock):\n \"\"\"Close down an opened socket properly.\"\"\"\n open_sock.shutdown(socket.SHUT_RDWR)\n open_sock.close()\n\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.connect((\"localhost\", port))\n\n # Wait for the ack from the listener side.\n # This is needed to prevent a race condition\n # in the main dosep.py processing loop: we\n # can't allow a worker queue thread to die\n # that has outstanding messages to a listener\n # socket before the listener socket asyncore\n # listener socket gets spun up; otherwise,\n # we lose the test result info.\n read_bytes = sock.recv(1)\n if read_bytes is None or (len(read_bytes) < 1) or (read_bytes != b'*'):\n raise Exception(\"listening socket did not respond with ack byte: response={}\".format(read_bytes))\n\n return sock, lambda: socket_closer(sock)", "title": "" }, { "docid": "480776700cbbba3bed905dc31139e8f1", "score": "0.4616287", "text": "def trampoline(fd, read=None, write=None, timeout=None, timeout_exc=TimeoutError):\n t = None\n hub = get_hub()\n current = greenlet.getcurrent()\n assert hub.greenlet is not current, 'do not call blocking functions from the mainloop'\n fileno = getattr(fd, 'fileno', lambda: fd)()\n def _do_close(_d, error=None):\n if error is None:\n current.throw(socket.error(32, 'Broken pipe'))\n else:\n current.throw(getattr(error, 'value', error)) # XXX convert to socket.error\n def cb(d):\n current.switch()\n # with TwistedHub, descriptor is actually an object (socket_rwdescriptor) which stores\n # this callback. If this callback stores a reference to the socket instance (fd)\n # then descriptor has a reference to that instance. This makes socket not collected\n # after greenlet exit. Since nobody actually uses the results of this switch, I removed\n # fd from here. 
If it will be needed than an indirect reference which is discarded right\n # after the switch above should be used.\n if timeout is not None:\n t = hub.schedule_call(timeout, current.throw, timeout_exc)\n try:\n descriptor = hub.add_descriptor(fileno, read and cb, write and cb, _do_close)\n try:\n return hub.switch()\n finally:\n hub.remove_descriptor(descriptor)\n finally:\n if t is not None:\n t.cancel()", "title": "" }, { "docid": "718648a6b7f578e88874c4b99aded265", "score": "0.46120548", "text": "def _close_standard_fds():\r\n null_fd = ovs.socket_util.get_null_fd()\r\n if null_fd >= 0:\r\n os.dup2(null_fd, 0)\r\n os.dup2(null_fd, 1)\r\n os.dup2(null_fd, 2)", "title": "" }, { "docid": "04888f17ac2b9e7455715bab110fbc9f", "score": "0.46026808", "text": "def socket_server(self):\n try:\n os.unlink(self.sock_file)\n except OSError:\n pass\n\n sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n sock.bind(self.sock_file)\n sock.listen(1)\n\n return sock", "title": "" }, { "docid": "41fd450106f51d46305787b14a750dee", "score": "0.45990482", "text": "def unwrap(self):\n if self.native_object != _ffi.NULL:\n _lib.wolfSSL_set_fd(self.native_object, -1)\n\n sock = socket(family=self.family,\n sock_type=self.type,\n proto=self.proto,\n fileno=self.fileno())\n sock.settimeout(self.gettimeout())\n self.detach()\n\n return sock", "title": "" }, { "docid": "174168a1f905689a55a6d0e8851258bb", "score": "0.45878077", "text": "def rdt_socket():\r\n\t######## Your implementation #######\r\n\t### Taken from rdt1.py ###\r\n\ttry:\r\n\t\tsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\n\texcept socket.error as err_msg:\r\n\t\tprint(\"Socket creation error: \", err_msg)\r\n\t\treturn None\r\n\treturn sock", "title": "" }, { "docid": "76181cf0bd7ae9c13410d657b274d98b", "score": "0.45839363", "text": "def new_pair(cls, *, blocking=True):\n a, b = socket.socketpair(socket.AF_UNIX, socket.SOCK_SEQPACKET)\n\n a.setblocking(blocking)\n b.setblocking(blocking)\n\n return cls(a, None), cls(b, None)", "title": "" }, { "docid": "19e4f3f3ed2c57fabc81a488f08b628c", "score": "0.45746905", "text": "def start_socket_server(protocol, host, port, retries=2):\n try:\n dataserver = TcpDataServer(protocol=protocol, host=host, port=port)\n\n except IOError as error:\n if retries > 0:\n # try a different port when 'Address already in use'.\n port = port + 1\n log.debug(\"Address in use: trying port %d\", port)\n return start_socket_server(protocol, host, port, retries - 1)\n raise error\n\n await_start(dataserver)\n return dataserver, port", "title": "" }, { "docid": "4185a1dd1228115964dfc247157c857b", "score": "0.45654613", "text": "def open_port():\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n try:\n s.bind((\"\",0))\n return s.getsockname()[1]\n finally:\n s.close()", "title": "" }, { "docid": "652d7d32b8bd2bee887d347c5c380030", "score": "0.456389", "text": "def socket(self, socket_type):\n if self.closed:\n raise ZMQError(ENOTSUP)\n return _Socket(self, socket_type)", "title": "" }, { "docid": "53bad22071d3a3cde17a3c559a3c9ac1", "score": "0.45559716", "text": "def _create_listener(port, backlog=256):\n # The creating of the socket is similar\n # to gevent.baseserver._tcp_listener().\n sock, sockaddr = inet.create_external_address(port)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n sock.bind(sockaddr)\n sock.listen(backlog)\n sock.setblocking(0)\n return sock", "title": "" }, { "docid": "0156ea192d566b1ba18ca0625b0bab8d", "score": "0.45532984", "text": "def sock_init(port_num):\n 
server_socket = socket.socket(\n socket.AF_INET, socket.SOCK_STREAM)\n server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n server_socket.bind(('localhost', port_num))\n server_socket.listen(10)\n # cancel the timeout of 3 seconds\n # server_socket.settimeout(3)\n return server_socket", "title": "" }, { "docid": "8fbb242c48ef42aea7e9b22ef9d5f4ff", "score": "0.4538466", "text": "def open_create_only(file_path):\n # use os.open() to avoid a race condition\n return os.fdopen(os.open(fix_path(file_path),\n os.O_CREAT | os.O_EXCL | os.O_RDWR),\n 'a+')", "title": "" }, { "docid": "077df55ad94e5be90918ec78354976bf", "score": "0.45241538", "text": "def bind(self, port, address=\"\"):\n assert not self._socket\n self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)\n flags = fcntl.fcntl(self._socket.fileno(), fcntl.F_GETFD)\n flags |= fcntl.FD_CLOEXEC\n fcntl.fcntl(self._socket.fileno(), fcntl.F_SETFD, flags)\n self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self._socket.setblocking(0)\n self._socket.bind((address, port))\n self._socket.listen(1000)", "title": "" }, { "docid": "6e733bf8a3f59d0cc41c88c4b3b1cbe2", "score": "0.45227456", "text": "def udp_socket():\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.settimeout(CONNECTION_TIMEOUT)\n return sock", "title": "" }, { "docid": "8133720a09af88c890b0e625b5efe8f4", "score": "0.4513869", "text": "def create_fs_connection(fs_version=7):\n return pyuipc.open(fs_version)", "title": "" }, { "docid": "77d651253fd7feb6c284d99a63b3eb8f", "score": "0.44985762", "text": "def create_master_socket(self):\n tcp_master_socket = socket.socket(\n socket.AF_INET, socket.SOCK_STREAM)\n tcp_master_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)\n tcp_master_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n tcp_master_socket.bind(\n (utils.server_ip.value, utils.tcp_server_master_port.value))\n tcp_master_socket.listen(1)\n return tcp_master_socket", "title": "" }, { "docid": "63ecc9fc6a3b7aba4dab2324d45eaa38", "score": "0.44911075", "text": "def tcp_listener(address, backlog=50, reuse_addr=True):\n sock = socket()\n bind_and_listen(sock, address, backlog=backlog, reuse_addr=reuse_addr)\n return sock", "title": "" }, { "docid": "247eaec83b3c9b100b6d3433dfafc4e9", "score": "0.44787174", "text": "def ext_open(fd, mode):\n if mode not in [\"r\", \"w\"]:\n raise ValueError(f\"Unsupported mode: {mode}\")\n if fd == \"-\":\n return sys.stdout if mode == \"w\" else sys.stdin\n else:\n return open(fd, mode)", "title": "" }, { "docid": "051abc108943817fb7b4349a7caa81a8", "score": "0.4474098", "text": "def createserver(host=\"127.0.0.1\", port=10123, \n handler_factory=bjsonrpc.handlers.NullHandler,\n sock=None, http=False):\n if sock is None:\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n sock.bind((host, port))\n sock.listen(3) \n return bjsonrpc.server.Server(sock, handler_factory=handler_factory, http=http)", "title": "" }, { "docid": "2715ca2b1dc49d9b58ab209ee8790aa4", "score": "0.44668397", "text": "def make_connection(self):\n addr = (self.host, self.port)\n s = None\n # if we defined a pool use it\n if self.connections is not None:\n s = self.connections.get(addr)\n \n if not s:\n # pool is empty or we don't use a pool\n if self.uri.scheme == \"https\":\n s = sock.connect(addr, self.timeout, True, \n self.key_file, self.cert_file)\n else:\n s = sock.connect(addr, self.timeout)\n return s", "title": "" 
}, { "docid": "76d5ff87c117b52a78dd8079715eb04c", "score": "0.44382724", "text": "def init_client_socket() -> socket.socket:\n sock = socket.socket(family=socket.AF_UNIX, type=socket.SOCK_DGRAM)\n socket_address = get_socket_address()\n try:\n sock.connect(socket_address)\n except socket.error:\n sys.exit(\n \"Error: Couldn't connect to the flashfocus daemon!\\n\"\n \"=> Please check that the flashfocus daemon is running.\"\n )\n return sock", "title": "" }, { "docid": "6faffe8b06c55be09568e0a1410631b6", "score": "0.4437064", "text": "def set_socket_reuse_port(sock: socket.socket) -> bool:\n try:\n # These two socket options will allow multiple process to bind the the\n # same port. Kernel will evenly load balance among the port listeners.\n # Note: this will only work on Linux.\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n if hasattr(socket, \"SO_REUSEPORT\"):\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)\n # In some Python binary distribution (e.g., conda py3.6), this flag\n # was not present at build time but available in runtime. But\n # Python relies on compiler flag to include this in binary.\n # Therefore, in the absence of socket.SO_REUSEPORT, we try\n # to use `15` which is value in linux kernel.\n # https://github.com/torvalds/linux/blob/master/tools/include/uapi/asm-generic/socket.h#L27\n else:\n sock.setsockopt(socket.SOL_SOCKET, 15, 1)\n return True\n except Exception as e:\n logger.debug(\n f\"Setting SO_REUSEPORT failed because of {e}. SO_REUSEPORT is disabled.\"\n )\n return False", "title": "" }, { "docid": "bebd9c6047188c2d5cbb3fae8403a8be", "score": "0.44218567", "text": "def addSocket(self, socketIndex, maxPmemGiB):\n # add a socket\n s = Socket(self, socketIndex, maxPmemGiB, self._maxRegionsPerSocket)\n self._sockets[socketIndex] = s\n # now, (re-)define socket pairs\n self._defineSocketPairs()\n return s", "title": "" }, { "docid": "11577cc1b7202f06c560e3f39469a927", "score": "0.4409048", "text": "def establish_tcp_connection():\n return socket.create_connection(('localhost', 80))", "title": "" }, { "docid": "cbf68cdcc4699aba55c4e1a00579cf30", "score": "0.44062197", "text": "def create(\n self,\n family: int = socket.AF_INET,\n type: int = socket.SOCK_STREAM,\n proto: int = 0,\n fileno: int = None\n ) -> None:\n\n # Since the sockets already seem preconfigured with default values,\n # don't do anything yet since we don't want to write to NVM too\n # much.\n\n return None", "title": "" }, { "docid": "918edcf95ff7e38e7bfd59fec8888def", "score": "0.44059214", "text": "def __create_socket(self):\n try:\n self.socket = socket.socket()\n return True\n except socket.error as msg:\n if self.logs:\n self.logger.log(f'WARNING: Socket creation error: {msg}.')\n return False", "title": "" }, { "docid": "c03cd9f9dd90bd2657ce0207dc36c8b1", "score": "0.4395144", "text": "def socket_operations(self):\n\n try:\n my_socket = socket.socket()\n except Exception as e:\n print(\"Socket could not be open! Check your network, Something went wrong in it\")\n my_socket.close()\n else:\n my_socket.connect((self.addr, self.host))\n return my_socket", "title": "" }, { "docid": "09a9a55b5e4eea4e17b560dd1a3cf2aa", "score": "0.43786377", "text": "def _add_descriptor(self, descriptor):\r\n self._descriptors.add(descriptor)\r\n self._kevents.append(descriptor.kevent)\r\n self._descriptor_for_path[descriptor.path] = descriptor\r\n self._descriptor_for_fd[descriptor.fd] = descriptor", "title": "" } ]
948eb2200aadd4573b7870328e1c4953
Create gene fixture for .
[ { "docid": "b987e859690a938cf96237cad8db8225", "score": "0.0", "text": "def loc106783576():\n params = {\n \"match_type\": MatchType.NO_MATCH,\n \"label\": \"nonconserved acetylation island sequence 68 enhancer\",\n \"concept_id\": \"ncbigene:106783576\",\n \"symbol\": \"LOC106783576\",\n \"aliases\": [],\n \"xrefs\": [],\n \"previous_symbols\": [],\n \"associated_with\": [],\n \"symbol_status\": None,\n \"location_annotations\": [],\n \"strand\": None,\n \"locations\": [\n {\n \"_id\": \"ga4gh:VCL.RFN35KQMhqzhmo4QP7AxKAzlPtnh7slL\",\n \"chr\": \"10\",\n \"interval\": {\"end\": \"cen\", \"start\": \"pter\", \"type\": \"CytobandInterval\"},\n \"species_id\": \"taxonomy:9606\",\n \"type\": \"ChromosomeLocation\",\n }\n ],\n \"gene_type\": \"biological-region\",\n }\n return Gene(**params)", "title": "" } ]
[ { "docid": "275881221669d5e4e9ea5e76b98e58c3", "score": "0.69135976", "text": "def setUp(self):\n \n self.gene = self.construct_gene()", "title": "" }, { "docid": "91eb98ca9e3d5b66f096f43a6c9dcbd5", "score": "0.6581844", "text": "def create_fixtures(self):", "title": "" }, { "docid": "9eba776503df093319e242bfa860b08b", "score": "0.635354", "text": "def fixture_ginkgo():\n from twistdb.util import seed\n fixture_root = \"test/ginkgo_fixture\"\n seed.seed_data(db.engine, fixture_root)", "title": "" }, { "docid": "6c9d1c3f850d95418f4fde2e9b2d5c49", "score": "0.6330163", "text": "def create_fixtures(self):\n pass", "title": "" }, { "docid": "9cad02f36b13d385071771fe4f117331", "score": "0.6217224", "text": "def test_creation(self, file_gen, tmpdir):\n assert file_gen.files == [str(tmpdir.join(\"test_particles_1.npz\")),\n str(tmpdir.join(\"test_particles_2.npz\"))]\n assert issubclass(file_gen.interaction_model, NeutrinoInteraction)\n file_gen_2 = NumpyFileGenerator(str(tmpdir.join(\"test_particles_1.npz\")))\n assert file_gen_2.files == [str(tmpdir.join(\"test_particles_1.npz\"))]", "title": "" }, { "docid": "75d302fac7f2c51907877667f2fcea0e", "score": "0.62053466", "text": "def fixtures():", "title": "" }, { "docid": "75d302fac7f2c51907877667f2fcea0e", "score": "0.62053466", "text": "def fixtures():", "title": "" }, { "docid": "75d302fac7f2c51907877667f2fcea0e", "score": "0.62053466", "text": "def fixtures():", "title": "" }, { "docid": "75d302fac7f2c51907877667f2fcea0e", "score": "0.62053466", "text": "def fixtures():", "title": "" }, { "docid": "bc256ff1a38a02870d3fdd95379ed8c0", "score": "0.6185336", "text": "def fixture_name(self):\n return \"genomic_delins\"", "title": "" }, { "docid": "5f3cfdbbee32118acb48d81e56877be5", "score": "0.61243165", "text": "def genes_dat(self):\n datfile = self._read_dat('genes.dat')\n if datfile is not None:\n # WRITING de novo\n if len(self.genes) == 0 and datfile.data is not None:\n\n # creating Gene objects; not all attributes are presented for\n # each _DatObject so it's important to use attr_check method\n for uid in datfile.names:\n obj = datfile.data[uid]\n\n # skipping unmapped genes\n if hasattr(obj, 'UNMAPPED_COMPONENT_OF'):\n continue\n\n location = self._location(\n obj.attr_check(\"LEFT_END_POSITION\"),\n obj.attr_check(\"RIGHT_END_POSITION\"),\n obj.attr_check(\"TRANSCRIPTION_DIRECTION\"))\n gene = Gene(uid=uid,\n name=obj.attr_check(\"COMMON_NAME\", uid),\n start=location[0],\n end=location[1],\n strand=location[2],\n product=obj.attr_check(\"PRODUCT\"))\n self.genes.append(gene)\n\n # creating a Term for gene name\n # (Gene) -[:HAS_NAME]-> (Term)\n self.name_to_terms(gene)\n\n # creating Terms for gene name synonyms\n # (Gene) -[:HAS_NAME]-> (Term)\n obj.links_to_synonyms(gene, self)\n\n # creating XRef and DB nodes for gene name dblinks\n # (Gene) -[:EVIDENCE]-> (XRef) -[:LINK_TO]-> (DB)\n obj.links_to_db(gene, self)\n\n # creating edges to CCP\n # (Gene) -[:PART_OF]-> (CCP)\n obj.feature_ccp_location(gene, self)\n\n # creating edges to the Organism node\n # (Gene) -[:PART_OF]-> (Organism)\n obj.links_to_organism(gene, self)\n\n logging.info(\"A list with %d genes have been created!\"\n % len(self.genes))\n\n # UPGRADING of self.genes\n elif len(self.genes) != 0 and datfile.data is not None:\n for gene in self.genes:\n uid = gene.uid\n try:\n obj = datfile.data[uid]\n\n # let's check that genes with the same id have\n # the same name\n obj.name_check(gene)\n\n # let's check that genes with the same id have\n # the same location\n 
obj.location_check(gene)\n\n # creating Terms for gene name synonyms\n # (Gene) -[:HAS_NAME]-> (Term)\n obj.links_to_synonyms(gene, self)\n\n # creating XRef and DB nodes for gene name dblinks\n # (Gene) -[:EVIDENCE]-> (XRef) -[:LINK_TO]-> (DB)\n obj.links_to_db(gene, self)\n\n # creating edges to CCP\n # (Gene) -[:PART_OF]-> (CCP)\n obj.feature_ccp_location(gene, self)\n\n # creating edges to the Organism node\n # (Gene) -[:PART_OF]-> (Organism)\n obj.links_to_organism(gene, self)\n\n except:\n pass\n else:\n raise StandardError(\"Something wrong has happened!\")", "title": "" }, { "docid": "be912e491dda0d27487be55489838212", "score": "0.608081", "text": "def add_fixture(self, env):\n size = (0.1, 0.1, 0.24)\n urdf = 'assets/assembly/main_block_circ.urdf'\n pose = self.get_random_pose(env, size)\n startOrientation = p.getQuaternionFromEuler([np.pi/2*3,0,0])\n fixture_id = env.add_object(urdf, (pose[0], startOrientation), 'rigid')\n return fixture_id, pose", "title": "" }, { "docid": "a147f783dfec37e4df3e8080dd735cc6", "score": "0.6041287", "text": "def test_create_gan_exchnage():\n pass", "title": "" }, { "docid": "b5673c03d9033435868b09718b073468", "score": "0.6032693", "text": "def gen(self, filename):", "title": "" }, { "docid": "bd3a9e5b3e51173a2a3e3e19be8051a1", "score": "0.6024035", "text": "def test_e6_example_create_building(self):\n from teaser.examples import e6_generate_building as e6\n\n prj = e6.example_create_building()", "title": "" }, { "docid": "679b82d83b452093a642d08ff853a6a5", "score": "0.601296", "text": "def test_generate_data(self):\n call_command(\"generatedata\")", "title": "" }, { "docid": "790c2bf422286ef7064c99029b943367", "score": "0.60018986", "text": "def test_add_from_genbank_novel_tax(self):\n infile = os.path.join(os.path.dirname(__file__), 'test_files', 'GCF_000005845.2_ASM584v2_genomic.gbff')\n runner = CliRunner()\n result = runner.invoke(cli.main, self.common_params + ['-T', 1236, '-t', '-G', infile, '-D', 'test', 'NewSpecies:species'])\n self.assertEqual(result.exit_code, 0)\n\n server = BioSeqDatabase.open_database(driver = self.dbdriver, user = self.dbuser,\n passwd = self.dbpassword, host = self.dbhost, db = self.dbname)\n\n rows = server.adaptor.execute_and_fetchall(\"SELECT name FROM taxon_name where name_class = 'scientific name'\")\n dbnames = set([x[0] for x in rows])\n names = set(['cellular organisms',\n 'Bacteria',\n 'Proteobacteria',\n 'Gammaproteobacteria',\n 'NewSpecies'])\n self.assertCountEqual(dbnames, names)\n server.close()", "title": "" }, { "docid": "952ce14db9880db366e534c33c2e4748", "score": "0.5993905", "text": "def construct (fileId, sink) :\n \n print (\"\\t\\tconstructing test database from {0}\".format (fileId))\n \n entries = readTransactions (fileId)\n\n pictureList = os.listdir ('test/pic')\n for test in entries :\n header = \"\\tprocessing place name: {name: <40}\\tlat/long: {latitude}:{longitude}\"\n print (header.format (**test))\n\n for test in entries : \n test ['picture'] = random.choice (pictureList)\n recordTransaction (test, sink)", "title": "" }, { "docid": "5e9be42aacd1ecae2733084305f71cc1", "score": "0.59807307", "text": "def _generate():\n output_dir = os.path.join(FLAGS.tfds_dir, \"testing\", \"test_data\",\n \"fake_examples\", \"dsprites\")\n test_utils.remake_dir(output_dir)\n\n images, classes, values = _create_fake_samples()\n\n with h5py.File(os.path.join(output_dir, OUTPUT_NAME), \"w\") as f:\n img_dataset = f.create_dataset(\"imgs\", images.shape, \"|u1\")\n img_dataset.write_direct(images)\n\n 
classes_dataset = f.create_dataset(\"latents/classes\", classes.shape, \"<i8\")\n classes_dataset.write_direct(np.ascontiguousarray(classes))\n\n values_dataset = f.create_dataset(\"latents/values\", values.shape, \"<f8\")\n values_dataset.write_direct(np.ascontiguousarray(values))", "title": "" }, { "docid": "57548ab613ede35467b65e2ed6099309", "score": "0.59579843", "text": "def gen_test_data(self):\n pass", "title": "" }, { "docid": "4e3b3c17eaebd1e0b4226e3d5a8200c1", "score": "0.59236217", "text": "def fixture_sample():\n return \"sample_1\"", "title": "" }, { "docid": "6faca32560631c81002cd5cee2929ae5", "score": "0.5906595", "text": "def generateGene():\n return random.choice(gene_list)", "title": "" }, { "docid": "89d9947917c2f2531cd70f0648cd2287", "score": "0.5891243", "text": "def createGene(id_bacterium, dna_sequence, start_contig, end_contig, fk_contig, id_db_online, function = None, fasta_head = None):\n geneObjJson = GeneJson(sequence_DNA = dna_sequence, organism = id_bacterium, position_start_contig = start_contig, position_end_contig = end_contig, contig = fk_contig, fasta_head = fasta_head, id_db_online = id_db_online)\n geneObjJson = geneObjJson.setGene()\n return geneObjJson.id", "title": "" }, { "docid": "fe18668641f2f4b741b3128d2af6a4c5", "score": "0.5890209", "text": "def test_import_gene(self):\n\n file_path = os.path.join(os.path.dirname(__file__), 'test_import.json')\n test_gene_file = os.path.abspath(file_path)\n\n # Create genes to update\n update_symbol = []\n update = []\n with open(test_gene_file) as f:\n results = json.load(f)\n for r in results['update']:\n update.append(GeneFactory(gene_symbol=r['gene_symbol']).gene_symbol)\n for r in results['update_symbol']:\n update_symbol.append(GeneFactory(gene_symbol=r[1]).gene_symbol)\n\n with open(test_gene_file) as f:\n url = reverse_lazy('panels:upload_genes')\n self.client.post(url, {'gene_list': f})\n\n for us in update_symbol:\n self.assertFalse(Gene.objects.get(gene_symbol=us).active)\n for u in update:\n gene = Gene.objects.get(gene_symbol=u)\n if gene.ensembl_genes:\n self.assertTrue(gene.active)\n else:\n self.assertFalse(gene.active)", "title": "" }, { "docid": "950e17390709e2294c463af74c42758a", "score": "0.58878344", "text": "def set_up_genome_analysis(records):\n\tif len(records.keys())!=1:\n\t\tprint \"[FATAL] Can't yet deal with more than one chromosome\"\n\t\tsys.exit()\n\n\tchromName = records[records.keys()[0]].id\n\tbaseList = [(x,) for x in range(1,records[records.keys()[0]].seq.__len__()+1)]\n\n\t## we should be smart and make a bunch of objects, each max 100kb, to save memory when running!\n\n\t#\t\t\t\t ttype, name,chrom,baseList,rev,name)\n\treturn [Variation(\"genome\",\"some-name\",chromName,baseList,False,False)]", "title": "" }, { "docid": "602c15fd0365e2fa32e5b1ffed5605db", "score": "0.58852756", "text": "def test_add_from_genbank_novel_tax2(self):\n infile = os.path.join(os.path.dirname(__file__), 'test_files', 'GCF_000005845.2_ASM584v2_genomic.gbff')\n runner = CliRunner()\n result = runner.invoke(cli.main, self.common_params + ['-T', 1236, '-t', '-G', infile, '-D',\n 'test', 'NovelFamily:family', 'NovelGenus:genus', 'NewSpecies:species'])\n self.assertEqual(result.exit_code, 0)\n\n server = BioSeqDatabase.open_database(driver = self.dbdriver, user = self.dbuser,\n passwd = self.dbpassword, host = self.dbhost, db = self.dbname)\n\n rows = server.adaptor.execute_and_fetchall(\"SELECT name FROM taxon_name where name_class = 'scientific name'\")\n dbnames = set([x[0] for x in rows])\n names 
= set(['cellular organisms',\n 'Bacteria',\n 'Proteobacteria',\n 'Gammaproteobacteria',\n 'NovelFamily',\n 'NovelGenus',\n 'NewSpecies'])\n self.assertCountEqual(dbnames, names)\n server.close()", "title": "" }, { "docid": "fbb05bdefbbc1cf05d8036a47020ffca", "score": "0.58279645", "text": "def makeTestG3():\n f3 = make_temp_file(\"\"\"\n#\n#\nall: montana[32-55]\npara: montana[32-37,42-55]\ngpu: montana[38-41]\nlogin: montana[32-33]\noverclock: montana[41-42]\nchassis1: montana[32-33]\nchassis2: montana[34-35]\nchassis3: montana[36-37]\nsingle: idaho\n\"\"\")\n return f3", "title": "" }, { "docid": "d1ed2f66752cbec3c59322b0840c08a5", "score": "0.57979786", "text": "def test_build_transcript_data_gtf(self):\n \n #tests pre-mrna\n genes = build_transcript_data_gtf(pybedtools.BedTool(clipper.test_file(\"data.gtf\")), True).sort()\n true_genes = pybedtools.BedTool(\n [[\"chrI\", \"AS_STRUCTURE\", \"mRNA\", 7741935, 7950951, \"0\", \"+\", \".\", \"gene_id=NR_070240; transcript_id=NR_070240; effective_length=209016\" ],\n [\"chrI\", \"AS_STRUCTURE\", \"mRNA\", 8378298, 8378421, \"0\", \"-\", \".\", \"gene_id=NM_001129046; transcript_id=NM_001129046; effective_length=123\" ],]\n ).sort()\n \n self.assertEqual(str(genes), str(true_genes))\n \n #tests mrna lengths\n genes = build_transcript_data_gtf(pybedtools.BedTool(clipper.test_file(\"data.gtf\")), False).sort()\n true_genes = pybedtools.BedTool(\n [[\"chrI\", \"AS_STRUCTURE\", \"mRNA\", 7741935, 7950951, \"0\", \"+\", \".\", \"gene_id=NR_070240; transcript_id=NR_070240; effective_length=30\" ],\n [\"chrI\", \"AS_STRUCTURE\", \"mRNA\", 8378298, 8378421, \"0\", \"-\", \".\", \"gene_id=NM_001129046; transcript_id=NM_001129046; effective_length=123\" ],]\n ).sort()\n \n self.assertEqual(str(genes), str(true_genes))", "title": "" }, { "docid": "5f5c61d4bce4dd09ff0b545b3cdb26ce", "score": "0.5795208", "text": "def setUp(self):\n for fixture in [\n \"archivesspace_archival_object\",\n \"ursa_major_accession\",\n \"ursa_major_bag\"]:\n with open(join(\"transformer\", \"fixtures\", \"{}.json\".format(fixture))) as df:\n data = json.load(df)\n setattr(self, fixture, data)", "title": "" }, { "docid": "84e041e7528a9a69fb4a470178fbde9c", "score": "0.5793756", "text": "def _create_test_data():\n\n fixture = {}\n fixture[\"code_pattern\"] = \"(#)\\D+([0-9]{2})\\D+([0-9]{5})(\\D+[0-9]{5})?(.+)\" \n \n fixture[\"data_file\"] = \\\n \"\"\"\n # ---------------------------------- WARNING ----------------------------------------\n # The data you have obtained from this automated U.S. Geological Survey database\n # have not received Director\"s approval and as such are provisional and subject to\n # revision. 
The data are released on the condition that neither the USGS nor the\n # United States Government may be held liable for any damages resulting from its use.\n # Additional info: http://nwis.waterdata.usgs.gov/ky/nwis/?provisional\n #\n # File-format description: http://nwis.waterdata.usgs.gov/nwis/?tab_delimited_format_info\n # Automated-retrieval info: http://nwis.waterdata.usgs.gov/nwis/?automated_retrieval_info\n #\n # Contact: [email protected]\n # retrieved: 2014-03-11 08:40:40 EDT (nadww01)\n #\n # Data for the following 1 site(s) are contained in this file\n # USGS 03401385 DAVIS BRANCH AT HIGHWAY 988 NEAR MIDDLESBORO, KY\n # -----------------------------------------------------------------------------------\n #\n # Data provided for site 03401385\n # DD parameter Description\n # 02 00065 Gage height, feet\n # 03 00010 Temperature, water, degrees Celsius\n # 04 00300 Dissolved oxygen, water, unfiltered, milligrams per liter\n # 05 00400 pH, water, unfiltered, field, standard units\n # 06 00095 Specific conductance, water, unfiltered, microsiemens per centimeter at 25 degrees Celsius\n # 07 63680 Turbidity, water, unfiltered, monochrome near infra-red LED light, 780-900 nm, detection angle 90 +-2.5 degrees, formazin nephelometric units (FNU)\n #\n # Data-value qualification codes included in this output: \n # Eqp Equipment malfunction \n # P Provisional data subject to revision. \n # ~ Value is a system interpolated value. \n # \n agency_cd\tsite_no\tdatetime\ttz_cd\t02_00065\t02_00065_cd\t03_00010\t03_00010_cd\t04_00300\t04_00300_cd\t05_00400\t05_00400_cd\t06_00095\t06_00095_cd\t07_63680\t07_63680_cd\n 5s\t15s\t20d\t6s\t14n\t10s\t14n\t10s\t14n\t10s\t14n\t10s\t14n\t10s\t14n\t10s\n USGS\t03401385\t2013-06-06 00:00\tEDT\t1.0\tP\t5.0\tP\t2.0\tP\t-4.0\tP\t2.0\tP\t8.25\tP\n USGS\t03401385\t2013-06-06 00:15\tEDT\t2.0\tP\t10.0\tP\t1.25\tP\t4.0\tP\t1.0\tP\t8.25\tP\n USGS\t03401385\t2013-06-06 00:30\tEDT\t3.0\tP\t15.0\tP\t1.25\tP\t3.5\tP\t0.0\tP\t3.5\tP\n USGS\t03401385\t2013-06-06 00:45\tEDT\t4.0\tP\t20.0\tP\t0.25\tP\t3.5\tP\t-1.0\tP\t2.5\tP\n USGS\t03401385\t2013-06-06 01:00\tEDT\t5.0\tP\t25.0\tP\t0.25\tP\t3.0\tP\t-2.0\tP\t2.5\tP\n \"\"\"\n\n stage_data = np.array([1.0, 2.0, 3.0, 4.0, 5.0])\n temperature_data = np.array([5.0, 10.0, 15.0, 20.0, 25.0]) \n dissolvedoxygen_data = np.array([2.0, 1.25, 1.25, 0.25, 0.25])\n ph_data = np.array([-4.0, 4.0, 3.5, 3.5, 3.0])\n conductance_data = np.array([2.0, 1.0, 0.0, -1.0, -2.0])\n turbidity_data = np.array([8.25, 8.25, 3.5, 2.5, 2.5])\n\n fixture[\"parameters\"] = [{\"description\": \"Gage height, feet\", \"code\": \"02_00065\", \"index\": 4, \"data\": stage_data,\n \"mean\": np.mean(stage_data), \"max\": np.max(stage_data), \"min\": np.min(stage_data)},\n \n {\"description\": \"Temperature, water, degrees Celsius\", \"code\": \"03_00010\", \"index\": 6, \"data\": temperature_data,\n \"mean\": np.mean(temperature_data), \"max\": np.max(temperature_data), \"min\": np.min(temperature_data)}, \n\n {\"description\": \"Dissolved oxygen, water, unfiltered, milligrams per liter\", \"code\": \"04_00300\", \"index\": 8, \"data\": dissolvedoxygen_data,\n \"mean\": np.mean(dissolvedoxygen_data), \"max\": np.max(dissolvedoxygen_data), \"min\": np.min(dissolvedoxygen_data)}, \n\n {\"description\": \"pH, water, unfiltered, field, standard units\", \"code\": \"05_00400\", \"index\": 10, \"data\": ph_data,\n \"mean\": np.mean(ph_data), \"max\": np.max(ph_data), \"min\": np.min(ph_data)}, \n\n {\"description\": \"Specific conductance, water, unfiltered, 
microsiemens per centimeter at 25 degrees Celsius\", \"code\": \"06_00095\", \"index\": 12, \"data\": conductance_data,\n \"mean\": np.mean(conductance_data), \"max\": np.max(conductance_data), \"min\": np.min(conductance_data)}, \n\n {\"description\": \"Turbidity, water, unfiltered, monochrome near infra-red LED light, 780-900 nm, detection angle 90 +-2.5 degrees, formazin nephelometric units (FNU)\", \"code\": \"07_63680\", \"index\": 14, \"data\": turbidity_data,\n \"mean\": np.mean(turbidity_data), \"max\": np.max(turbidity_data), \"min\": np.min(turbidity_data)}\n ]\n\n return fixture", "title": "" }, { "docid": "2f8be9ff47b6535922bca8ae3fc71865", "score": "0.57869095", "text": "def func_Gen(self, args):\r\n\t\tr, theta, mu, M = rvd27.generate_sample(args.phi, args.n, args.N, args.J, args.seedint)\r\n\t\trvd27.save_model(args.outputfile, args.phi, mu, theta, r, args.n, args.J)\r\n\t\tpass", "title": "" }, { "docid": "fe4591efc56a0aa0ffa498d6134daead", "score": "0.5780717", "text": "def build_transcript_data(species, gene_bed, gene_mrna, gene_pre_mrna, pre_mrna):\n\n # error checking\n\n acceptable_species = get_acceptable_species()\n if (species is None and\n gene_bed is None and\n (gene_mrna is None or gene_pre_mrna is None)):\n raise ValueError(\"You must set either \\\"species\\\" or \\\"geneBed\\\"+\\\"geneMRNA\\\"+\\\"genePREMRNA\\\"\")\n\n if species is not None and gene_bed is not None:\n raise ValueError(\"You shouldn't set both geneBed and species, defaults exist for %s\" % (acceptable_species))\n\n # Now actually assign values\n if species is not None:\n try:\n gene_bed = clipper.data_file(species + \".AS.STRUCTURE_genes.BED.gz\")\n gene_mrna = clipper.data_file(species + \".AS.STRUCTURE_mRNA.lengths\")\n gene_pre_mrna = clipper.data_file(species + \".AS.STRUCTURE_premRNA.lengths\")\n\n except ValueError:\n raise ValueError(\n \"Defaults don't exist for your species: %s. 
Please choose from: %s or supply \\\"geneBed\\\"+\\\"geneMRNA\\\"+\\\"genePREMRNA\\\"\" % (\n species, acceptable_species))\n\n # Selects mRNA or preMRNA lengths\n if pre_mrna is True:\n lenfile = gene_pre_mrna\n else:\n lenfile = gene_mrna\n\n if lenfile is None:\n raise IOError(\"\"\"didn't pass correct mRNA length file option \n with given length file\"\"\")\n\n # builds dict to do processing on,\n genes = build_geneinfo(gene_bed)\n lengths = build_lengths(lenfile)\n\n # this is a stopgap until it can be fully factored out, returing a gtf file of\n # genes and effective lengths, eventually this is the file we want to pass in\n gtf_list = []\n\n for gene in genes.keys():\n gtf_list.append(pybedtools.create_interval_from_list([genes[gene][0],\n \"AS_STRUCTURE\",\n \"mRNA\",\n str(genes[gene][2]),\n str(genes[gene][3]),\n \".\",\n str(genes[gene][4]),\n \".\",\n \"gene_id=\" + gene + \"; effective_length=\" + str(\n lengths[gene])]))\n\n return pybedtools.BedTool(gtf_list)", "title": "" }, { "docid": "3cdf4181980a138926fd5499bd621c50", "score": "0.57763636", "text": "def generate_genepop_file(df_selected, vcf_canis, out_dir='./'):\n # chroms = list(df_selected.CHROM.drop_duplicates())\n # df_chrom = df_selected.sort()\n df_selected_sorted = df_selected.sort_index()\n species_dict = {}\n for i, locus in df_selected_sorted.iterrows():\n species_in = locus['SPECIES'].split(';')\n locus_species = []\n for j in range(len(species_in)):\n taxon = species_in[j].split(':')[0]\n if taxon not in species_dict:\n species_dict[taxon] = {'output': out_dir + taxon + '_genepop.txt', 'locus_id': [],\n 'samples': [], 'locus_gt': {}, 'loci_gt': []}\n species_dict[taxon]['locus_id'] += [locus.ID]\n locus_species.append(taxon)\n\n # record = vcf_canis.fetch(locus.CHROM, locus.POS)\n for x in vcf_canis.fetch(locus.CHROM, locus.POS - 1, locus.POS):\n record = x\n # print locus.CHROM, locus.POS\n for sample in record.samples:\n taxon = sample.sample.split('_')[0]\n genotype = sample['GT']\n if taxon in locus_species:\n if sample.sample not in species_dict[taxon]['samples']: # Generate sample list for each species\n species_dict[taxon]['samples'].append(sample.sample)\n # if genotype is None:\n if genotype == './.': # missing genotype\n species_dict[taxon]['locus_gt'].update({sample.sample: '0000'})\n else:\n coded_gt = ''\n genotype = genotype.split('/')\n for allele in genotype:\n coded_gt += str(int(allele) + 1).zfill(2)\n species_dict[taxon]['locus_gt'].update({sample.sample: coded_gt})\n for tag in species_dict:\n if tag in locus_species:\n species_dict[tag]['loci_gt'].append(species_dict[tag]['locus_gt'])\n species_dict[tag]['locus_gt'] = {}\n\n for tag in species_dict:\n output = species_dict[tag]['output']\n if not os.path.exists(output):\n my_dir, file = os.path.split(output)\n if not os.path.exists(my_dir):\n os.makedirs(my_dir)\n os.mknod(output)\n # Generate Genepop input file\n with open(output, 'w') as fout:\n fout.write('Genepop canis selected\\n' +\n '\\n'.join([str(i) for i in species_dict[tag]['locus_id']]) + '\\n' + 'Pop\\n')\n # tag = 'cat'\n for sample in species_dict[tag]['samples']:\n indv = []\n # if tag != sample.split('_')[0]:\n # fout.write('Pop\\n')\n # tag = sample.split('_')[0]\n for i in range(len(species_dict[tag]['loci_gt'])):\n indv += [species_dict[tag]['loci_gt'][i][sample]]\n fout.write(sample + ', ' + ' '.join(indv) + '\\n')\n all_output = [species_dict[tag]['output'] for tag in species_dict]\n return all_output", "title": "" }, { "docid": "13de5cd23b40fdb5369784b153b638ca", 
"score": "0.5749473", "text": "def init_individual():\n genome = create_genome()\n\n # Create i/p and o/p neurons\n nid = 0\n for i in range(INPUTS):\n neuron = create_neuron(layer=Layer.INPUT)\n neuron['id'] = nid\n genome['neurons'][nid] = neuron\n genome['ip_neurons'].append(nid)\n nid += 1\n\n for i in range(OUTPUTS):\n neuron = create_neuron(layer=Layer.OUTPUT)\n neuron['id'] = nid\n genome['neurons'][nid] = neuron\n genome['op_neurons'].append(nid)\n nid += 1\n\n for i in range(BIAS):\n neuron = create_neuron(layer=Layer.BIAS)\n neuron['id'] = nid\n genome['neurons'][nid] = neuron\n genome['bias_neurons'].append(nid)\n nid += 1\n\n genome['last_neuron'] = nid - 1\n # Create a gene for every ip, op pair\n innov_no = 0\n for i in range(INPUTS):\n for j in range(OUTPUTS):\n gene = create_gene(innov_no=innov_no)\n gene['ip'] = genome['ip_neurons'][i]\n gene['op'] = genome['op_neurons'][j]\n gene['wt'] = random.random() * 2 - 1\n genome['genes'][innov_no] = gene\n #genome['genes'][(gene['ip'], gene['op'])] = gene\n innov_no += 1\n\n for i in range(BIAS):\n for j in range(OUTPUTS):\n gene = create_gene(innov_no=innov_no)\n gene['ip'] = genome['bias_neurons'][i]\n gene['op'] = genome['op_neurons'][j]\n gene['wt'] = random.random() * 2 - 1\n genome['genes'][innov_no] = gene\n #genome['genes'][(gene['ip'], gene['op'])] = gene\n innov_no += 1\n\n return genome", "title": "" }, { "docid": "80ab3abc1de2d67076e94da55c59535f", "score": "0.57435954", "text": "def create_gene_per_chrom_file(self, gene_strand_data):\n gene_df = pd.read_table(gene_strand_data)\n strand_dict = dict(zip(gene_df.gene, gene_df.Strand))\n tbx_gene_handle = pysam.TabixFile(self.gene_pheno_loc)\n gene_bed_loc = re.sub(\n \"tmp\", \"tmp_gene\", self.var_bed_loc) % self.current_chrom\n line_count = 0\n with open(gene_bed_loc, 'w') as gene_bed_f:\n search_chrom = self.current_chrom\n if not self.ucsc_ref_genome:\n search_chrom = re.sub('chr', '', search_chrom)\n for line in tbx_gene_handle.fetch(search_chrom, 0, 3e8):\n line_list = line.strip().split(\"\\t\")[0:4]\n line_list.append(str(line_count))\n if line_list[3] in strand_dict:\n line_list.append(strand_dict[line_list[3]])\n else:\n line_list.append(\"NA\")\n out_line = \"\\t\".join(line_list)\n # if not self.ucsc_ref_genome:\n # out_line = \"chr\" + out_line\n gene_bed_f.write(out_line + \"\\n\")\n line_count += 1\n return gene_bed_loc", "title": "" }, { "docid": "2be9fbcc5d581948b83208b3435ed443", "score": "0.57429826", "text": "def fixture_name(self):\n return \"protein_delins\"", "title": "" }, { "docid": "2be9fbcc5d581948b83208b3435ed443", "score": "0.57429826", "text": "def fixture_name(self):\n return \"protein_delins\"", "title": "" }, { "docid": "53b0c9293eebc6a9077c150fe6411be0", "score": "0.5734139", "text": "def makeTestG2():\n f2 = make_temp_file(\"\"\"\n#\n#\npara: montana[32-37,42-55]\ngpu: montana[38-41]\nescape%test: montana[87-90]\nesc%test2: @escape%test\n\"\"\")\n return f2", "title": "" }, { "docid": "c87edb4aeb518fafce09d43d8a840109", "score": "0.57299274", "text": "def gen_sample(self):", "title": "" }, { "docid": "c87edb4aeb518fafce09d43d8a840109", "score": "0.57299274", "text": "def gen_sample(self):", "title": "" }, { "docid": "21d5d31b6df6bd34b601c09d94beb7a3", "score": "0.5717493", "text": "def test_add_from_genbank_tax_override(self):\n infile = os.path.join(os.path.dirname(__file__), 'test_files', 'GCF_000005845.2_ASM584v2_genomic.gbff')\n runner = CliRunner()\n result = runner.invoke(cli.main, self.common_params + ['-T', 656, '-t', '-G', 
infile, '-D', 'test'])\n self.assertEqual(result.exit_code, 0)\n\n server = BioSeqDatabase.open_database(driver = self.dbdriver, user = self.dbuser,\n passwd = self.dbpassword, host = self.dbhost, db = self.dbname)\n\n rows = server.adaptor.execute_and_fetchall(\"SELECT name FROM taxon_name where name_class = 'scientific name'\")\n dbnames = set([x[0] for x in rows])\n names = set(['cellular organisms',\n 'Bacteria',\n 'Proteobacteria',\n 'Gammaproteobacteria',\n 'Aeromonadales',\n 'Aeromonadaceae',\n 'Aeromonas',\n 'Aeromonas allosaccharophila'])\n self.assertCountEqual(dbnames, names)\n server.close()", "title": "" }, { "docid": "3d2a7a9f845cdc87c4340bce5777b894", "score": "0.57010996", "text": "def test_add_from_genbank(self):\n infile = os.path.join(os.path.dirname(__file__), 'test_files', 'GCF_000005845.2_ASM584v2_genomic.gbff')\n runner = CliRunner()\n result = runner.invoke(cli.main, self.common_params + ['-G', infile, '-D', 'test'])\n self.assertEqual(result.exit_code, 0)\n\n server = BioSeqDatabase.open_database(driver = self.dbdriver, user = self.dbuser,\n passwd = self.dbpassword, host = self.dbhost, db = self.dbname)\n\n rows = server.adaptor.execute_and_fetchall(\"SELECT name FROM taxon_name where name_class = 'scientific name'\")\n self.assertEqual(rows, [('Escherichia coli str. K-12 substr. MG1655',)])\n server.close()", "title": "" }, { "docid": "0080e9a13fed59701854cea228d1cc0e", "score": "0.56980836", "text": "def create_combination_gene_file(data_type, gene_type):\n\n # Read the strings\n true_strings = EF.read_gene_file(data_type, True, gene_type)\n false_strings = EF.read_gene_file(data_type, False, gene_type)\n\n # Preprocess the strings\n true_strands, true_names = preprocess_gene_file(true_strings)\n false_strands, false_names = preprocess_gene_file(false_strings)\n\n # Outcomes of the strings\n true_outcome = [True] * len(true_strands)\n false_outcome = [False] * len(false_strands)\n\n # Combine lists\n strands = np.append(true_strands, false_strands, axis=0)\n names = true_names\n names.extend(false_names)\n outcome = true_outcome\n outcome.extend(false_outcome)\n\n # Create an order to shuffle\n range_list = [x for x in range(len(strands))]\n random.shuffle(range_list)\n\n # Shuffle the lists\n rand_strands = np.squeeze(np.array([strands[i] for i in range_list]))\n rand_names = [names[i] for i in range_list]\n rand_outcome = [outcome[i] for i in range_list]\n\n\n\n return rand_strands, rand_names, rand_outcome", "title": "" }, { "docid": "c71b318c948a5dfe62535b76d1eff350", "score": "0.5697081", "text": "def fixture_name(self):\n return \"protein_substitution\"", "title": "" }, { "docid": "7e4ca872d3ef2d777a7d25143a65b1c4", "score": "0.5688193", "text": "def simulate_variants(vcfout, fasta, sample, bed, seed, p_het, model):\n import mitty.simulation.genome.simulatevariants as simvar\n simvar.main(fp_out=vcfout, fasta_fname=fasta, sample_name=sample, bed_fname=bed, seed=seed, p_het=p_het, models=model)", "title": "" }, { "docid": "849a4e4df87f8439b788ebc5f3c14163", "score": "0.5674445", "text": "def fixture_gridfile(region):\n with GMTTempFile(suffix=\".nc\") as tmpfile:\n grdcut(grid=\"@earth_relief_01d_g\", region=region, outgrid=tmpfile.name)\n yield tmpfile.name", "title": "" }, { "docid": "bc7c6fdfb642431c927abc173f543577", "score": "0.56611866", "text": "def createGene(id_bacterium, dna_sequence, id_db_online, function = None, fasta_head = None, start_contig = None, end_contig = None, fk_contig = None):\n geneObjJson = GeneJson(sequence_DNA = dna_sequence, 
organism = id_bacterium, position_start_contig = start_contig, position_end_contig = end_contig, contig = fk_contig, fasta_head = fasta_head, id_db_online = id_db_online)\n geneObjJson = geneObjJson.setGene()\n return geneObjJson['id']", "title": "" }, { "docid": "c9e0e6ac8b2d73fc5f13109b836099fa", "score": "0.5620338", "text": "def fixture_gridfile(grid, region):\n with GMTTempFile(suffix=\".nc\") as tmpfile:\n grdcut(grid=grid, region=region, outgrid=tmpfile.name)\n yield tmpfile.name", "title": "" }, { "docid": "cf674f2b47d29278bac733a9310db63c", "score": "0.5614135", "text": "def test_dna_create(self):\r\n\r\n db_path = os.path.join(self.test_dir, 'test.grail')\r\n db_obj = dna.DNAFile(db_path, create=True)\r\n db_obj.close()\r\n\r\n self.assertTrue(os.path.isfile(db_path))", "title": "" }, { "docid": "3e3772e30d39d3b9e57c067e2af85210", "score": "0.5613102", "text": "def fixture_sample() -> str:\n return \"sample_1\"", "title": "" }, { "docid": "4ce39bb70cfc6675494cd1145a49a53c", "score": "0.5584262", "text": "def generate_fixture(args):\n numpy.random.seed(seed=0) # Deterministic random results\n\n fixture = generate_custom_fixture(args)\n rigid_bodies = fixture[\"rigid_body_problem\"][\"rigid_bodies\"]\n\n hx = 5\n hy = 5\n half_thickness = 0.5\n box_polygons = [\n generate_rectangle(hx, half_thickness, numpy.array([0, 0]), 0),\n generate_rectangle(\n half_thickness, hy,\n numpy.array([hx - half_thickness, hy + half_thickness]), 0),\n generate_rectangle(\n half_thickness, hy,\n numpy.array([-hx + half_thickness, hy + half_thickness]), 0),\n generate_rectangle(\n hx, half_thickness,\n [hx - 2 * half_thickness, 2 * hy + half_thickness] +\n create_2D_rotation_matrix(numpy.pi / 4) @ [hx, -half_thickness],\n numpy.pi / 4),\n generate_rectangle(\n hx, half_thickness,\n [-hx + 2 * half_thickness, 2 * hy + half_thickness] +\n create_2D_rotation_matrix(-numpy.pi / 4) @ [-hx, -half_thickness],\n -numpy.pi / 4)\n ]\n\n box = shapely.ops.cascaded_union(box_polygons)\n box = shapely.geometry.polygon.orient(box, 1)\n box_vertices = numpy.array(box.exterior.coords)[:-1]\n assert is_polygon_ccw(box_vertices)\n box_polygons = numpy.array(\n [polygon.exterior.coords for polygon in box_polygons])[:, :-1]\n for polygon in box_polygons:\n assert is_polygon_ccw(polygon)\n box_edges = generate_ngon_edges(box_vertices.shape[0])\n\n # Add the box\n rigid_bodies.append({\n \"vertices\": box_vertices.tolist(),\n \"polygons\": box_polygons.tolist(),\n \"edges\": box_edges.tolist(),\n \"oriented\": True,\n \"is_dof_fixed\": [True, True, True]\n })\n\n radius = 0.5\n block_hx = block_hy = numpy.sqrt(radius**2 / 2)\n radius += 5e-2 # inflate the radius slightly\n block = generate_box_body(block_hx, block_hy, [0, 0], 0, 1)\n\n centers = numpy.zeros((args.num_blocks, 2))\n\n width = 2 * (hx - 2 * half_thickness - radius)\n height = 6 * hy\n for i in range(args.num_blocks):\n invalid_center = True\n num_tries = 0\n while invalid_center:\n if num_tries > 100:\n height *= 2\n num_tries = 0\n center = (numpy.random.random(2) * [width, height] +\n [-width / 2, 2 * half_thickness + radius])\n invalid_center = (numpy.linalg.norm(centers - center, axis=1) <\n 2 * radius).any()\n num_tries += 1\n\n centers[i] = center\n block[\"position\"] = center.tolist()\n block[\"theta\"] = numpy.random.random() * 45\n rigid_bodies.append(block.copy())\n\n return fixture", "title": "" }, { "docid": "9604d3e3b7aba8a63d7463fe257ca726", "score": "0.5579958", "text": "def test_create_entity(self):\n pass", "title": "" }, { "docid": 
"a905b399bb4db7e05b054bb849a563c9", "score": "0.55793107", "text": "def create(ctx, args):\n\n raise NotImplementedError\n\n log.info(\"Creating '{}' experiment\".format(args.name))\n if args.template_file is not None:\n log.info(\" Using {} as a template\".format(args.template_file))\n\n # TODO: Implement so source files can be analysed (as when run) to\n # extract parameters (numerical assigns).\n\n # TODO: Implement so other experiment configuration files can be used\n # to generate a new experiment file.\n\n # TODO: In both of the above cases; prompt user for correction of\n # parameters.", "title": "" }, { "docid": "b1a2721f3a709c42571fbfe406ba4bb5", "score": "0.55705625", "text": "def make_fake_file(filename, \n nrns,\n trials,\n length,\n state_order,\n palatability_state,\n ceil_p,\n jitter_t,\n jitter_p,\n jitter_p_type,\n min_duration,\n data_type = 'cat'):\n if data_type is 'ber':\n data, t, p, all_p, scaling = fake_ber_firing(\n nrns,\n trials,\n length,\n state_order,\n palatability_state,\n ceil_p,\n jitter_t,\n jitter_p,\n jitter_p_type,\n min_duration)\n elif data_type is 'cat':\n data, t, p, all_p, scaling = fake_cat_firing(\n nrns,\n trials,\n length,\n state_order,\n palatability_state,\n ceil_p,\n jitter_t,\n jitter_p,\n jitter_p_type,\n min_duration) \n \n params = {\n 'nrns' : nrns,\n 'trials' : trials,\n 'length' : length,\n 'state_order' : state_order,\n 'palatability_state' : palatability_state,\n 'ceil_p' : ceil_p,\n 'jitter_t' : jitter_t,\n 'jitter_p' : jitter_p,\n 'jitter_p_type' : jitter_p_type,\n 'min_duration' : min_duration\n #'scaling' : scaling\n }\n \n with open(filename + 'params.csv','w') as f:\n w = csv.writer(f)\n w.writerows(params.items())\n \n hf5 = tables.open_file(filename + '.h5', mode = 'w', title = 'Fake Data')\n \n hf5.create_array('/', 'scaling', scaling, createparents=True)\n# =============================================================================\n# hf5.create_array('/', 'spike_array', data, createparents=True)\n# hf5.create_array('/', 'transition_times', t, createparents=True)\n# hf5.create_array('/', 'probability_values', p, createparents=True)\n# =============================================================================\n for taste in range(len(data)):\n hf5.create_array('/spike_trains/dig_in_%i' % (taste), 'spike_array', data[taste], createparents=True)\n hf5.create_array('/spike_trains/dig_in_%i' % (taste), 'transition_times', t[taste], createparents=True)\n hf5.create_array('/spike_trains/dig_in_%i' % (taste), 'probability_values', p[taste], createparents=True)\n \n \n hf5.flush()\n hf5.close()\n \n return data, t, p, all_p, scaling", "title": "" }, { "docid": "ad50870ce41e940086a021c4b2f1fd5e", "score": "0.5568525", "text": "def simulate_variants(vcf, vcf_out, sample_name, default_allele_freq, seed_for_random_number_generator):\n import mitty.simulation.genome.sampledgenome as sample_genome\n\n sample_genome.assign_random_gt(input_vcf=vcf, output=vcf_out, sample_name=sample_name,\n default_af=default_allele_freq, seed=seed_for_random_number_generator)", "title": "" }, { "docid": "9a46c3bf04dd833b2a8f94b3bd44cb3d", "score": "0.5556274", "text": "def add_gene(self):\n available_genes = self.__available_genes()\n if available_genes:\n new_gene = random.choice(available_genes)\n self.genes[new_gene[0], new_gene[1]] = random.uniform(- self.config.WEIGHT_AMP, self.config.WEIGHT_AMP)", "title": "" }, { "docid": "412195c97227235b2b7d4a3ba27a4420", "score": "0.5553869", "text": "def setUp(self):\n# self.ConFem_ = ConFem.ConFem()\n 
self.ConSimFem_ = ConSimFem.ConSimFem()\n self.ConPlaD_ = ConPlaD.ConPlaD()\n self.ConSimplex_ = ConSimplex.ConSimplex()\n self.NameLog = \"../DataExamples/tmp\"\n \"\"\"\n tensile bars\n \"\"\"", "title": "" }, { "docid": "70f6db8879d1c5281a298fe95a1fdc1d", "score": "0.55507284", "text": "def test_fixture_population(self):\n with auto_populate('required'):\n call_command('loaddata', 'fixture.json', verbosity=0)\n m = models.TestModel.objects.get()\n assert m.title_en == 'foo'\n assert m.title_de == 'foo'\n assert m.text_en == 'bar'\n assert m.text_de is None", "title": "" }, { "docid": "69a958e27ca69d505b8d45ff4e1a73f4", "score": "0.5547283", "text": "def perform_generation():\n\n # import data provider (e.g. dtr, rel, or events)\n data = importlib.import_module(args.data_reader)\n\n # load pretrained T5 tokenizer\n tokenizer = T5Tokenizer.from_pretrained(args.model_name)\n\n # load the saved model\n model = T5ForConditionalGeneration.from_pretrained(args.model_dir)\n\n # add event markers to tokenizer\n tokenizer.add_tokens(new_tokens)\n model.resize_token_embeddings(len(tokenizer)) # todo: need this?\n\n test_dataset = data.Data(\n xml_dir=args.xml_test_dir,\n text_dir=args.text_test_dir,\n out_dir=args.xml_out_dir,\n xml_regex=args.xml_regex,\n tokenizer=tokenizer,\n chunk_size=args.chunk_size,\n max_input_length=args.max_input_length,\n max_output_length=args.max_output_length)\n test_data_loader = DataLoader(\n test_dataset,\n shuffle=False,\n batch_size=args.gener_batch_size)\n\n # generate output from the saved model\n predicted_relations = generate(model, test_data_loader, tokenizer)\n print('writing xml...')\n test_dataset.write_xml(predicted_relations)", "title": "" }, { "docid": "b11d97a70b93cf40ed8650801cd9648c", "score": "0.55455285", "text": "def create_generators(args,data,DeepForest_config):\n # create random transform generator for augmenting training data\n if args.random_transform:\n transform_generator = random_transform_generator(\n min_rotation=-0.1,\n max_rotation=0.1,\n min_translation=(-0.1, -0.1),\n max_translation=(0.1, 0.1),\n min_shear=-0.1,\n max_shear=0.1,\n min_scaling=(0.9, 0.9),\n max_scaling=(1.1, 1.1),\n flip_x_chance=0.5,\n flip_y_chance=0.5,\n )\n else:\n transform_generator = random_transform_generator(flip_x_chance=0.5)\n\n #Split training and test data\n train,test=preprocess.split_training(data,\n DeepForest_config,\n single_tile=DeepForest_config[\"single_tile\"],\n experiment=None)\n \n #Write out for debug\n if args.save_path:\n train.to_csv(os.path.join(args.save_path,'training_dict.csv'), header=False)\n \n #Training Generator\n train_generator = OnTheFlyGenerator(\n data,\n train,\n batch_size=args.batch_size,\n DeepForest_config=DeepForest_config,\n group_method=\"none\",\n shuffle_tile_epoch=False,\n name=\"training\")\n\n #Validation Generator \n\n validation_generator=OnTheFlyGenerator(\n data,\n test,\n batch_size=args.batch_size,\n DeepForest_config=DeepForest_config,\n group_method=\"none\",\n name=\"validation\")\n\n return train_generator, validation_generator", "title": "" }, { "docid": "aa15fc7f6161540df7d611f9aaf8bf22", "score": "0.55399877", "text": "def fixture_data():\n data = np.array(\n [\n [2.00000000e02, 2.30079975e10, 3.92142453e12, 1.70437454e02],\n [2.50000000e02, 2.30079975e10, 2.77102465e12, 1.20437454e02],\n [3.00000000e02, 2.30079975e10, 1.62062477e12, 7.04374542e01],\n [3.50000000e02, 1.76916116e10, 4.53991397e11, 2.56613930e01],\n [4.00000000e02, 2.81602292e09, 2.34764859e10, 8.33675242e00],\n ]\n )\n return 
data", "title": "" }, { "docid": "d4302a01dc586cdfd889a1ce42dcf808", "score": "0.55348754", "text": "def load_genes(train: bool = True) -> Dataset:\n # Create Path objects using the paths where the train and test files should be.\n train_file = Path(__GENES_TRAIN_PATH)\n test_file = Path(__GENES_TEST_PATH)\n\n # If the files from the given paths do not exist, create them by splitting the genes dataset.\n if not train_file.is_file() or not test_file.is_file():\n # Read the dataset an get its values.\n # Use string datatypes, so that we take the information as it is.\n # If floats were to be used, then the labels would be converted to floats too.\n x = read_csv(__GENES_DATA_PATH, engine='python')\n y = read_csv(__GENES_LABELS_PATH, engine='python')\n # Get x and y.\n x, y = x.iloc[:, 1:].values, y.iloc[:, 1].values\n\n from sklearn import preprocessing\n le = preprocessing.LabelEncoder()\n y = le.fit_transform(y)\n\n database_split(x, y, train_file.absolute(), test_file.absolute())\n\n # Create a filename based on the train value.\n filename = train_file.absolute() if train else test_file.absolute()\n # Read the dataset.\n dataset = read_csv(filename, engine='python')\n # Get x and y.\n x, y = dataset.iloc[:, :-1].values, dataset.iloc[:, -1].values\n\n return x, y", "title": "" }, { "docid": "9e85bd8ab47adfc3f963b8cd0168a84a", "score": "0.5519713", "text": "def new_dataset(annex_path):\n ds_path = str(annex_path.join(id_generator()))\n ds = Dataset(ds_path)\n ds.create()\n ds.no_annex(BIDS_NO_ANNEX)\n\n json_path = os.path.join(ds_path, 'dataset_description.json')\n dsdesc = {\n 'BIDSVersion': '1.0.2',\n 'License': 'This is not a real dataset',\n 'Name': 'Test fixture new dataset',\n }\n with open(json_path, 'w') as f:\n json.dump(dsdesc, f, ensure_ascii=False)\n ds.add(json_path)\n\n changes_path = os.path.join(ds_path, 'CHANGES')\n with open(changes_path, 'w') as f:\n f.write(CHANGES)\n ds.add(changes_path)\n return ds", "title": "" }, { "docid": "13b2abef74c41f31c3783c367a3b77b4", "score": "0.5516691", "text": "def test_build_geneinfo(self): \n \n #checks error mode\n self.assertRaises(TypeError, build_geneinfo, None)\n \n self.assertRaises(IOError, build_geneinfo, \"foo\")\n\n #checks working mode\n geneinfo = build_geneinfo(\n clipper.data_file(\"test.AS.STRUCTURE_genes.BED.gz\"))\n\n \n true_values = {\n \"ENSG00000232113\" : [\"chr1\", \"ENSG00000232113\", 173604911, 173606273, \"+\"],\n \"ENSG00000228150\" : [\"chr1\", \"ENSG00000228150\", 10002980, 10010032, \"+\"],\n \"ENSG00000223883\" : [\"chr1\", \"ENSG00000223883\", 69521580, 69650686, \"+\"],\n \"ENSG00000135750\" : [\"chr1\", \"ENSG00000135750\", 233749749, 233808258, \"+\"],\n \"ENSG00000227280\" : [\"chr1\", \"ENSG00000227280\", 145373053, 145375554 ,\"-\"],\n }\n\n\n self.assertDictEqual(geneinfo, true_values)", "title": "" }, { "docid": "d908c7284c3916d10198895587a4c106", "score": "0.5514534", "text": "def genS(spec_files, year_start, year_end, filetype='feather'):\n loglines = []\n\n if isinstance(spec_files, list):\n pass\n else:\n spec_files = [spec_files]\n \n #Save data to disk\n root_name = '_'.join(spec_files)\n file_name = root_name + '_' + str(year_start) + '+'+ str(year_end-year_start) + '.' 
+ filetype\n dir_path = os.path.join(fdata_dir, root_name)\n os.makedirs(dir_path , exist_ok=True)\n file_path = os.path.join(dir_path, file_name)\n \n try:\n try:\n evidence = feather.read_dataframe(file_path)\n evidence.set_index('ProfileID',inplace=True)\n except:\n evidence = pd.read_csv(file_path).set_index('ProfileID')\n \n except:\n #Generate evidence data\n evidence = generateSociosSetMulti(spec_files, year_start, year_end)\n status = 1 \n message = 'Success!'\n if filetype == 'feather':\n feather.write_dataframe(evidence.reset_index(), file_path)\n print('Success! Saved to data/feature_data/' + root_name + '/' + file_name)\n elif filetype == 'csv':\n evidence.to_csv(file_path)\n print('Success! Saved to data/feature_data/' + root_name + '/' + file_name)\n else:\n status = 0\n message = 'Cannot save to specified file type'\n print(message)\n \n l = ['featureExtraction', year_start, year_end, status, message, spec_files, file_name]\n loglines.append(l) \n logs = pd.DataFrame(loglines, columns = ['process','from year','to year','status','message',\n 'features', 'output_file'])\n writeLog(logs,'log_generateData')\n \n evidence.sort_index(inplace=True)\n \n return evidence", "title": "" }, { "docid": "bdd01592fbea0251d27c2db2c91ebf23", "score": "0.5512829", "text": "def simple_fixture():\n print('|fixture is working!')", "title": "" }, { "docid": "488fe17409c5e97cb9e600bcf9acf1d6", "score": "0.5510602", "text": "def test_create():\n rng = Random()", "title": "" }, { "docid": "c3e9948380ba13ff5834a1537e169d1b", "score": "0.5510377", "text": "def _create_test_data():\n\n fixture = {}\n fixture[\"data file\"] = \\\n \"\"\" \n # data_type\tsite_num\tstart_date\tend_date\tparameters\n dv\t03284000\t2014-01-01\t2014-03-10\t00060\t00065 \n iv\t03375000\t2014-02-12\t2014-02-19\t00010\t00045\t00060 \n \"\"\" \n\n fixture[\"request_0\"] = {\"end date\": \"2014-01-15\", \n \"data type\": \"dv\", \n \"start date\": \"2014-01-01\", \n \"parameters\": [\"00060\"], \n \"site number\": \"03284000\"}\n\n fixture[\"request_1\"] = {\"end date\": \"2014-01-15\", \n \"data type\": \"dv\", \n \"start date\": \"2014-01-01\", \n \"parameters\": [\"00060\", \"00065\"], \n \"site number\": \"03284000\"}\n\n fixture[\"request_2\"] = {\"end date\": \"2014-02-19\", \n \"data type\": \"dv\", \n \"start date\": \"2014-02-12\", \n \"parameters\": [\"00060\", \"00065\", \"00045\"], \n \"site number\": \"03284000\"}\n \n fixture[\"request_3\"] = {\"data type\": \"\",\n \"site number\": \"\",\n \"start date\": \"\",\n \"end date\": \"\",\n \"parameters\": \"\",\n \"site number\": \"\"} \n\n return fixture", "title": "" }, { "docid": "75a832fc66bddaf069b6b3d6b8fb67bc", "score": "0.55058485", "text": "def create_tetgen_volume():\n types = [ 'STL/OFF Files (*.stl *.off)', 'All Files (*)' ]\n fn = askFilename(GD.cfg['workdir'],types,exist=True,multi=False)\n if os.path.exists(fn):\n sta,out = utils.runCommand('tetgen -z %s' % fn)\n GD.message(out)", "title": "" }, { "docid": "2655c8d75eb99f932b8a40557e749940", "score": "0.5502411", "text": "def fixture(func):\n return _Fixture(func)", "title": "" }, { "docid": "ad31247f3f2c96cccf13e5f32f29b6e8", "score": "0.54972476", "text": "def test_fixtureception(letters_fixture, numbers_fixture):\n coordinate = letters_fixture + str(numbers_fixture)\n\n print('\\nRunning test_fixtureception with \"{}\"'.format(coordinate))", "title": "" }, { "docid": "31dff173df7cf6ee778dc5f4da950eb7", "score": "0.5496629", "text": "def buildANDgenerate(clobber=True):\n sourcecatdir = 
'/Users/kschmidt/work/MUSE/uvEmissionlineSearch/tdose_sourcecats/'\n mtu.gen_sourcecat(sourcecatdir,LAEinfofile,modelcoord=True)\n\n SETUPinfofile = '/Users/kschmidt/work/MUSE/uvEmissionlineSearch/tdose_setupfiles/MUSEWide_infofile_arche_PSFupdate_LAEs.txt'\n mtu.gen_TDOSEsetupfiles(SETUPinfofile,clobber=clobber)", "title": "" }, { "docid": "273ff19bae451e08598d9f02d8d2765d", "score": "0.54946077", "text": "def fixture_name(self):\n return \"coding_dna_substitution\"", "title": "" }, { "docid": "45d390d33a72b5236c3a7d102adaa154", "score": "0.5494514", "text": "def test_database(self):\n log.debug(\"Test creating a database with the metagenome data\")\n db = MetagenomeDatabase.MetagenomeDatabase()\n fn_database = os.path.join(self.datadir,\"tmp_database.db\")\n db.create(fn_database, overwrite=True)\n db.connect(fn_database)\n # test the phylogenetic markers table\n fn_markers = os.path.join(self.datadir, \"2061766001_marker_cogs.csv\")\n db.create_markers_table(fn_markers)\n sql_command = \"SELECT * FROM {0}\".format(db.MarkersTable)\n cog_markers = db.retrieve_data(sql_command)\n self.assertEqual(len(cog_markers),70)\n\n # test the gene table\n fn_genes = os.path.join(self.datadir, \"gene_info_test_file.xls\")\n db.create_genes_table(fn_genes)\n sql_command = \"SELECT * FROM {0}\".format(db.GenesTable)\n genes = db.retrieve_data(sql_command)\n self.assertEqual(len(genes),171)\n sql_command = \"\"\" SELECT *\n FROM {0}\n WHERE locus_tag=\"sg4i_00000050\" \"\"\".format(db.GenesTable)\n genes = db.retrieve_data(sql_command)\n self.assertEqual(len(genes),1)\n gene_t = GeneParser.GeneRecordTuple._make(genes[0])\n self.assertEqual(gene_t.gene_id, \"2061973757\", \"Gene id test failed\")\n\n # test the table of sequences\n fn_sequences = os.path.join(self.datadir, \"proteins.faa\")\n db.create_protein_sequences_table(fn_sequences)\n sql_command = \"\"\" SELECT * FROM {0}\"\"\".format(db.SequenceTable)\n sequences = db.retrieve_data(sql_command)\n self.assertEqual(len(sequences),5)\n sql_command = \"\"\" SELECT * FROM {0}\n WHERE gene_id=\"2061973757\" \"\"\".format(db.SequenceTable)\n sequences = db.retrieve_data(sql_command)\n self.assertEqual(len(sequences),1)\n seq_t = MetagenomeDatabase.SequenceRecordTuple._make(sequences[0])\n self.assertEqual(gene_t.protein_length,len(seq_t.sequence))\n db.close()\n os.remove(fn_database)", "title": "" }, { "docid": "6d747683cfcd45b1f3ef9e10b3074b9d", "score": "0.54928577", "text": "def make_dataset():\n\tprint(\"Starting prep_dataset...\")\n\tprep_dataset()\n\tprint(\"Splitting datasets...\")\n\tsplit_dataset()\n\tprint(\"Done\")", "title": "" }, { "docid": "69280d94f66d92b0b968d66c91c73d47", "score": "0.5490856", "text": "def generateFastaFile(name,seq):\n \n\toutput = 'mia_genes.fasta'", "title": "" }, { "docid": "79e6b7cd5cf79147e5cb9c61a82b384c", "score": "0.5486194", "text": "def buildGencodeGene(conn):\n pass # FIXME finish", "title": "" }, { "docid": "9241c072860fad277d1200f5d4dc6ccd", "score": "0.5485098", "text": "def create_random_data():\n np.random.seed({{seed}})\n\n n_samples = 100\n\n constant = 0.1\n epsilon = np.random.normal(size=n_samples)\n beta = np.random.uniform(-1, 1)\n\n df = pd.DataFrame({\"x\": np.random.normal(loc=0, scale=2, size=n_samples)})\n\n z = constant + beta * df.x + epsilon\n pr = 1 / (1 + np.exp(-z))\n\n df[\"y\"] = np.random.binomial(1, pr)\n\n df.to_csv(\"{{ produces }}\")", "title": "" }, { "docid": "4173be9e170b8d654fae99a34fcfc500", "score": "0.5478924", "text": "def fixture_region():\n return (-3e3, -1e3, 5e3, 
7e3)", "title": "" }, { "docid": "e4a6f31e24ac5564523b60eba6fd48ea", "score": "0.5478647", "text": "def setUp(self):\n rng = np.random.default_rng(465234543084621232141567865641323)\n\n discrimnation = rng.uniform(-2, 2, (20, 2))\n thetas = rng.standard_normal((2, 1000))\n difficulty = -1 * np.sort(rng.standard_normal((20, 3))*.5, axis=1)\n\n syn_data = create_synthetic_irt_polytomous(difficulty, discrimnation, \n thetas, model='grm_md', seed=rng)\n self.data = syn_data", "title": "" }, { "docid": "0b6bd3931f457390948bc94c61e37f10", "score": "0.547837", "text": "def fixture_warp1():\n from twistdb.util import seed\n fixture_root = \"test/fixture_warp1\"\n seed.seed_data(db.engine, fixture_root)", "title": "" }, { "docid": "343866cc230eb41e3856401f6cab0e56", "score": "0.54747725", "text": "def main(species, data_dir, output_dir, test_size, overwrite, file_ext):\n print(species)\n\n species_names = \"_\".join(sorted(species))\n data_dir = Path(data_dir)\n output_dir = Path(output_dir, species_names)\n\n if output_dir.exists() and not overwrite:\n raise ValueError(f\"{str(output_dir)}. Use the --overwrite flag to overwrite existing data\")\n\n if not output_dir.exists():\n os.makedirs(str(output_dir))\n\n print(str(output_dir))\n\n prepare_dataset(str(data_dir), str(output_dir), species, test_size, file_ext)", "title": "" }, { "docid": "54d34d835bb009c7c43bf6ee1d980d1c", "score": "0.54726267", "text": "def fixture_rna_sample_father_id() -> str:\n return \"rna_father\"", "title": "" }, { "docid": "e9a294063cbd95ec7b1b446f140a5436", "score": "0.5471487", "text": "def make_genes(self):\n\n # The regex to get all of the coding DNA seqeunces.\n cds_info = re.findall('CDS(.+?)/translation', self.file_string, re.DOTALL)\n if len(cds_info) < 1:\n raise exceptions.ParseException('No genes found in the genbank file: ')\n\n # Creates the dict for the creation of an reverse complement.\n trans_table = str.maketrans(\"atcg\", \"tagc\")\n gene_list = list()\n strand = str()\n\n for cur_cds in cds_info:\n\n # The regex to get all of the info from an cds.\n exon_seqs = list()\n\n exon_regions = re.findall('.(\\d+)\\.\\.>?(\\d+)', cur_cds, re.DOTALL)\n gene_id = re.search('/db_xref=\"GI:(\\d*)', cur_cds)\n protein_id = re.search('/protein_id=\"(.*)\"', cur_cds)\n protein_name = re.search('/product=(\".+?\")', cur_cds, re.DOTALL)\n\n if not all([exon_regions, gene_id, protein_id, protein_name]):\n raise exceptions.ParseException()\n\n gene_id = gene_id.group(1)\n protein_id = protein_id.group(1)\n protein_name = protein_name.group(1).split(',')[0]\n protein_name = re.sub(' +', ' ', protein_name)\n protein_name = re.sub('\\n|\"', '', protein_name)\n\n for i, exon in enumerate(exon_regions):\n\n # If a strand is a complement excecute this.\n if re.match('.*complement', cur_cds):\n strand = '-'\n gen_seq = self.chromosome.seq[int(exon[0]):int(exon[1])][::-1] # Reverse the strand.\n gen_seq = gen_seq.translate(trans_table) # Make the stand complement.\n exon_seqs.append(gen_seq)\n\n else:\n strand = '+'\n gen_seq = self.chromosome.seq[int(exon[0])-1:int(exon[1])-1] # Get the strand\n exon_seqs.append(gen_seq)\n\n # Create the seqeunce string for the gene.\n if strand == '-':\n exon_seqs = ''.join(exon_seqs[::-1]) # If the strand is negative it needs to be joined in reverse\n\n elif strand == '+':\n exon_seqs = ''.join(exon_seqs) # Join the strands normally\n\n # Creation of the gene objects.\n gene_list.append(Gene(gene_id,\n strand,\n exon_regions,\n exon_seqs,\n protein_name,\n protein_id,\n 
self.chromosome))\n\n self.genes = gene_list\n\n return self.genes", "title": "" }, { "docid": "21e29a5c0e8744617cdace45f4717303", "score": "0.54706615", "text": "def test_make_meta_recipe_all_params():\n pytest_enable_socket()\n\n recipe = CreateRecipe(\n\n \"\"\"\n test-meta-recipe2-ucsc-v1:\n recipe.sh: |\n\n genome=https://raw.githubusercontent.com/gogetdata/ggd-recipes/master/genomes/Homo_sapiens/hg19/hg19.genome\n wget --quiet -O - http://hgdownload.cse.ucsc.edu/goldenpath/hg19/database/gap.txt.gz \\\\\n | gzip -dc \\\\\n | awk -v OFS=\"\\t\" 'BEGIN {print \"#chrom\\tstart\\tend\\tsize\\ttype\\tstrand\"} {print $2,$3,$4,$7,$8,\"+\"}' \\\\\n | gsort /dev/stdin $genome \\\\\n | bgzip -c > gaps.bed.gz\n\n tabix gaps.bed.gz \n\n \"\"\", from_string=True)\n\n recipe.write_recipes() \n\n ggd_package = \"meta-recipe-test-metarecipe2-ucsc-v1\"\n\n recipe_file = os.path.join(recipe.recipe_dirs[\"test-meta-recipe2-ucsc-v1\"],\"recipe.sh\")\n\n args = Namespace(authors='me', channel='genomics', command='make-meta-recipe', data_version='27-Apr-2009', data_provider=\"UCSC\",\n dependency=['vt','samtools','bedtools'], genome_build='meta-recipe', package_version='1', keyword=['gaps', 'region'], \n name='test-metarecipe2', platform='none', script=recipe_file, species='meta-recipe', summary='some meta-recipe',\n coordinate_base=\"1-based-inclusive\", file_type= [],final_file=[], extra_scripts=[])\n\n assert make_meta_recipe.make_bash((),args) \n\n new_recipe_file = os.path.join(\"./\", ggd_package, \"recipe.sh\") \n assert os.path.exists(new_recipe_file)\n assert os.path.isfile(new_recipe_file)\n new_meta_recipe_file = os.path.join(\"./\", ggd_package, \"metarecipe.sh\") \n assert os.path.exists(new_recipe_file)\n assert os.path.isfile(new_recipe_file)\n new_metayaml_file = os.path.join(\"./\", ggd_package, \"meta.yaml\") \n assert os.path.exists(new_metayaml_file)\n assert os.path.isfile(new_metayaml_file)\n new_postlink_file = os.path.join(\"./\", ggd_package, \"post-link.sh\") \n assert os.path.exists(new_postlink_file)\n assert os.path.isfile(new_postlink_file)\n new_checksums_file = os.path.join(\"./\", ggd_package, \"checksums_file.txt\")\n assert os.path.exists(new_checksums_file)\n assert os.path.isfile(new_checksums_file)\n\n ## Test meta.yaml\n try:\n with open(new_metayaml_file, \"r\") as mf:\n yamldict = yaml.safe_load(mf)\n assert yamldict[\"build\"][\"number\"] == 0\n assert \"noarch\" not in yamldict[\"build\"].keys()\n assert yamldict[\"extra\"][\"authors\"] == \"me\"\n assert yamldict[\"package\"][\"name\"] == ggd_package\n assert yamldict[\"package\"][\"version\"] == \"1\"\n assert yamldict[\"requirements\"][\"build\"] == ['bedtools', 'gsort', 'htslib', 'samtools', 'vt', 'zlib']\n assert yamldict[\"requirements\"][\"run\"] == ['bedtools', 'gsort', 'htslib', 'samtools', 'vt', 'zlib']\n assert yamldict[\"source\"][\"path\"] == \".\"\n assert yamldict[\"about\"][\"identifiers\"][\"genome-build\"] == \"meta-recipe\"\n assert yamldict[\"about\"][\"identifiers\"][\"species\"] == \"meta-recipe\"\n assert yamldict[\"about\"][\"keywords\"] == ['gaps','region']\n assert yamldict[\"about\"][\"summary\"] == \"some meta-recipe\"\n assert yamldict[\"about\"][\"tags\"][\"genomic-coordinate-base\"] == \"1-based-inclusive\"\n assert yamldict[\"about\"][\"tags\"][\"data-version\"] == \"27-Apr-2009\"\n assert yamldict[\"about\"][\"tags\"][\"file-type\"] == [] ## Should be converted to lower case\n assert yamldict[\"about\"][\"tags\"][\"final-files\"] == [] \n assert 
yamldict[\"about\"][\"tags\"][\"final-file-sizes\"] == {} \n assert yamldict[\"about\"][\"tags\"][\"ggd-channel\"] == \"genomics\"\n\n\n\n\n except IOError as e:\n print(e)\n assert False\n\n os.remove(new_recipe_file)\n os.remove(new_meta_recipe_file)\n os.remove(new_metayaml_file)\n os.remove(new_postlink_file)\n os.remove(new_checksums_file)\n os.rmdir(ggd_package)", "title": "" }, { "docid": "c4ae83f033003d7db25c82fecbc4f9bf", "score": "0.54687047", "text": "def local_fixture():\n print(\"\\n(Doing Local Fixture setup stuff!)\")", "title": "" }, { "docid": "ff5404b4a731d81d331a3b04a8a448a3", "score": "0.54568905", "text": "def generate_setup_template(outputfile='./tdose_setup_template.txt',clobber=False,verbose=True):\n if verbose: print(' --- tdose_utilities.generate_setup_template() --- ')\n #------------------------------------------------------------------------------------------------------\n if os.path.isfile(outputfile) & (clobber == False):\n sys.exit(' ---> Outputfile already exists and clobber=False ')\n else:\n if verbose: print((' - Will store setup template in '+outputfile))\n if os.path.isfile(outputfile) & (clobber == True):\n if verbose: print(' - Output already exists but clobber=True so overwriting it ')\n\n setuptemplate = \"\"\"\n#-------------------------------------------------START OF TDOSE SETUP-------------------------------------------------\n#\n# Template for Three Dimensional Optimal Spectral Extracion (TDOSE, http://github.com/kasperschmidt/TDOSE) setup file\n# Template was generated with tdose_utilities.generate_setup_template() on %s\n# Setup file can be run with tdose.perform_extraction() or tdose.perform_extractions_in_parallel()\n#\n# - - - - - - - - - - - - - - - - - - - - - - - - - - DATA INPUT - - - - - - - - - - - - - - - - - - - - - - - - - - -\ndata_cube /path/datacube.fits # Path and name of fits file containing data cube to extract spectra from\ncube_extension DATA_DCBGC # Name or number of fits extension containing data cube\n\nvariance_cube /path/variancecube.fits # Path and name of fits file containing variance cube to use for extraction\nvariance_extension VARCUBE # Name or number of fits extension containing noise cube\n\nref_image /path/referenceimage.fits # Path and name of fits file containing image to use as reference when creating source model\nimg_extension 0 # Name or number of fits extension containing reference image\n\nwht_image /path/refimage_wht.fits # Path and name of fits file containing weight map of reference image (only cut out; useful for galfit modeling)\nwht_extension 0 # Name or number of fits extension containing weight map\n\nsource_catalog /path/tdose_sourcecat.fits # Path and name of source catalog containing sources to extract spectra for\nsourcecat_IDcol id # Column containing source IDs in source_catalog\nsourcecat_xposcol x_image # Column containing x pixel position in source_catalog\nsourcecat_yposcol y_image # Column containing y pixel position in source_catalog\nsourcecat_racol ra # Column containing ra position in source_catalog (used to position cutouts if model_cutouts = True)\nsourcecat_deccol dec # Column containing dec position in source_catalog (used to position cutouts if model_cutouts = True)\nsourcecat_fluxcol fluxscale # Column containing a flux scale used for the modeling if no gauss_guess is provided\nsourcecat_parentIDcol None # Column containing parent source IDs grouping source IDs into objects. 
Set to None to used id column\n # corresponding to assigning each source to a single object\n # if not None the parentid is used to group source models when storing 1D spectra. All models keep sources separate.\n# - - - - - - - - - - - - - - - - - - - - - - - - OUTPUT DIRECTORIES - - - - - - - - - - - - - - - - - - - - - - - - -\n\nmodels_directory /path/tdose_models/ # Directory to store the modeling output from TDOSE in\ncutout_directory /path/tdose_cutouts/ # Directory to store image and cube cutouts in if model_cutouts=True\nspec1D_directory /path/tdose_spectra/ # Output directory to store spectra in.\n\n# - - - - - - - - - - - - - - - - - - - - - - - - - - CUTOUT SETUP - - - - - - - - - - - - - - - - - - - - - - - - - -\nmodel_cutouts True # Perform modeling and spectral extraction on small cutouts of the cube and images to reduce run-time\ncutout_sizes /path/tdose_setup_cutoutsizes.txt # Size of cutouts [ra,dec] in arcsec around each source to model.\n # To use source-specific cutouts provide ascii file containing ID xsize[arcsec] and ysize[arcsec].\n\n# - - - - - - - - - - - - - - - - - - - - - - - - SOURCE MODEL SETUP - - - - - - - - - - - - - - - - - - - - - - - - -\nmodel_image_ext tdose_modelimage # Name extension of fits file containing reference image model. To ignored use None\nmodel_param_reg tdose_modelimage_ds9 # Name extension of DS9 region file for reference image model. To ignored use None\nmodel_image_cube_ext tdose_modelimage_cubeWCS # Name extension of fits file containing model image after conversion to cube WCS. To ignored use None.\n\nsource_model gauss # The source model to use for sources. Choices are:\n # gauss Each source is modeled as a multivariate gaussian using the source_catalog input as starting point\n # galfit The sources in the field-of-view are defined based on GALFIT header parameters; if all components are # Not enabled yet\n # Gaussians an analytical convolution is performed. Otherwise numerical convolution is used. # Not enabled yet\n # modelimg A model image exists, e.g., obtained with Galfit, in modelimg_directory. To disentangle/de-blend individual\n # components, a model cube and parent_ids should be provided (see comments to modelimg_directory). 
If a model\n # image is provded, TDOSE assumes it to represent the 1 object in the field-of-view.\n # If the model image is not found a gaussian model of the FoV (source_model=gauss) is performed instead.\n # aperture A simple aperture extraction on the datacubes is performed, i.e., no modeling of sources.\n\n# - - - - - - - - - - - - - - - - - - - - - - - - GAUSS MODEL SETUP - - - - - - - - - - - - - - - - - - - - - - - - - -\ngauss_guess /path/sextractoroutput.fits # To base initial guess of gaussian parameters on a SExtractor output provide SExtractor output fits file here\n # If gauss_initguess=None the positions and flux scale provided in source_catalog will be used.\ngauss_guess_idcol ID # Column of IDs in gauss_guess SExtractor file\ngauss_guess_racol RA # Column of RAs in gauss_guess SExtractor file\ngauss_guess_deccol DEC # Column of Decs in gauss_guess SExtractor file\ngauss_guess_aimg A_IMAGE # Column of major axis in gauss_guess SExtractor file\ngauss_guess_bimg B_IMAGE # Column of minor axis in gauss_guess SExtractor file\ngauss_guess_angle THETA_IMAGE # Column of angle in gauss_guess SExtractor file\ngauss_guess_fluxscale ACS_F814W_FLUX # Column of flux in gauss_guess SExtractor file to us for scaling\ngauss_guess_fluxfactor 3 # Factor to apply to flux scale in initial Gauss parameter guess\ngauss_guess_Nsigma 1 # Number of sigmas to include in initial Gauss parameter guess\n\nmax_centroid_shift 10 # The maximum shift of the centroid of each source allowed in the gaussian modeling. Given in pixels to\n # set bounds ypix_centroid +/- max_centroid_shift and xpix_centroid +/- max_centroid_shift\n # If none, no bounds are put on the centroid position of the sources.\n# - - - - - - - - - - - - - - - - - - - - - - - - GALFIT MODEL SETUP - - - - - - - - - - - - - - - - - - - - - - - - -\ngalfit_directory /path/models_galfit/ # If source_model = galfit provide path to directory containing galfit models.\n # TDOSE will look for galfit_*ref_image*_output.fits (incl. the cutout string if model_cutouts=True)\n # If no model is found a source_model=gauss run on the object will be performed instead.\ngalfit_model_extension 2 # Fits extension containing galfit model with model parameters of each source in header.\n\n# - - - - - - - - - - - - - - - - - - - - - - - - MODEL IMAGE SETUP - - - - - - - - - - - - - - - - - - - - - - - - -\nmodelimg_directory /path/models_cutouts/ # If source_model = modelimg provide the path to directory containing the individual source models\n # TDOSE will look for model_*ref_image*.fits (incl. the cutout string if model_cutouts=True). If no model is found the object is skipped\n # If a model image named model_*ref_image*_cube.fits is found, TDOSE assumes this file contains a cube with the individual model\n # components isolated in individual layers of the cube. TDOSE will use this model instead of one generated within TDOSE.\n # Parent IDs in the source catalog can be used to define what components belong to the object of interest (i.e., to extract a spectrum for)\n # GALFIT models can be converted to TDOSE-suited model-cubes with tdose_utilities.galfit_convertmodel2cube()\n # A TDOSE-suited model-cube can be build from individual 2D models with tdose_utilities.build_modelcube_from_modelimages()\nmodelimg_extension 0 # Fits extension containing model\n\n# - - - - - - - - - - - - - - - - - - - - - - - - APERTURE MODEL SETUP - - - - - - - - - - - - - - - - - - - - - - - -\naperture_size 1.5 # Radius of apertures (float or list) to use given in arc seconds. 
For longer list of\n # object-specific apertures provide ascii file containing ID and aperturesize[arcsec].\n\n# - - - - - - - - - - - - - - - - - - - - - - - - - PSF MODEL SETUP - - - - - - - - - - - - - - - - - - - - - - - - - -\npsf_type gauss # Select PSF model to build. Choices are:\n # gauss Model the PSF as a symmetric Gaussian with sigma = FWHM/2.35482\n # kernel_gauss An astropy.convolution.Gaussian2DKernel() used for numerical convolution # Not enabled yet\n # kernel_moffat An astropy.convolution.Moffat2DKernel() used for numerical convolution # Not enabled yet\npsf_FWHM_evolve linear # Evolution of the FWHM from blue to red end of data cube. Choices are:\n # linear FWHM wavelength dependence described as FWHM(lambda) = p0[''] + p1[''/A] * (lambda - p2[A])\npsf_FWHMp0 0.940 # p0 parameter to use when determining wavelength dependence of PSF\npsf_FWHMp1 -3.182e-5 # p1 parameter to use when determining wavelength dependence of PSF\npsf_FWHMp2 7050 # p2 parameter to use when determining wavelength dependence of PSF\npsf_savecube True # To save fits file containing the PSF cube set psf_savecube = True\n # This cube is used for the \"source_model = modelimg\" numerical PSF convolution\n\n# - - - - - - - - - - - - - - - - - - - - - - - - - - - NON_DETECTIONS - - - - - - - - - - - - - - - - - - - - - - - -\nnondetections None # List of IDs of sources in source_catalog that are not detected in the reference image or which\n # have low flux levels in which cases the Gaussian modeling is likely to be inaccurate.\n # For long list of objects provide ascii file containing ids.\n # If source_model = gauss then sources will be extracted by replacing models within ignore_radius\n # with a single point source in the reference image model, which will then\n # be convolved with the PSF specified when extracting, as usual.\n # If source_model = modelimg TDOSE assumes that the input model already represents the desired extraction model\n # of the non-detection. I.e., if the object should be extracted as a (PSF\n # convolved) point source, the model image should include a point source.\n # Hence, for source_model = modelimg the keyword nondetections is ignored.\nignore_radius 0.3 # Models within a radius of ignore_radius [arcsec] of the non-detection location will be replaced with a\n # point source for extractions with source_model = gauss before convolving with the PSF and adjusting the flux\n # leves in each model cube layer.\n\n# - - - - - - - - - - - - - - - - - - - - - - - - - CUBE MODEL SETUP - - - - - - - - - - - - - - - - - - - - - - - - -\nmodel_cube_layers all # Layers of data cube to model [both end layers included]. 
If 'all' the full cube will be modeled.\n # To model source-specific layers provide ascii file containing ID layerlow and layerhigh.\n # If layerlow=all and layerhigh=all all layers will be modeled for particular source\nmodel_cube_optimizer matrix # The optimizer to use when matching flux levels in cube layers:\n # matrix Optimize fluxes analytically using matrix algebra to minimize chi squared of\n # the equation set comparing model and data in each layer.\n # nnls Optimize fluxes using Scipy's non-negative least squares solver restricting\n # flux scales to >= 0 (assuming source models are non-negative too).\n # curvefit Optimize fluxes numerically using least square fitting from scipy.optimize.curve_fit().\n # Only enabled for analytic convolution of Gaussian source models.\n # lstsq Optimize fluxes analytically using scipy.linalg.lstsq().\n\nmodel_cube_ext tdose_modelcube # Name extension of fits file containing model data cube.\nresidual_cube_ext tdose_modelcube_residual # Name extension of fits file containing residual between model data cube and data. To ignored use None.\nsource_model_cube_ext tdose_source_modelcube # Name extension of fits file containing source model cube (used to modify data cube).\n\n# - - - - - - - - - - - - - - - - - - - - - - - - SPECTRAL EXTRACTION - - - - - - - - - - - - - - - - - - - - - - - - -\nsources_to_extract [8685,9262,10195,29743] # Sources in source_catalog to extract 1D spectra for.\n # If sourcecat_parentIDcol is not None all associated spectra are included in stored object spectra\n # If set to 'all', 1D spectra for all sources in source_catalog is produced (without grouping according to parents).\n # For long list of objects provide ascii file containing containing ids (here parent grouping will be performed)\nspec1D_name tdose_spectrum # Name extension to use for extracted 1D spectra\n\n# - - - - - - - - - - - - - - - - - - - - - - - - - - - PLOTTING - - - - - - - - - - - - - - - - - - - - - - - - - - -\nplot_generate True # Indicate whether to generate plots or not\nplot_1Dspec_ext fluxplot # Name extension of pdf file containing plot of 1D spectrum\nplot_1Dspec_xrange [4800,9300] # Range of x-axes (wavelength) for plot of 1D spectra\nplot_1Dspec_yrange [-100,1500] # Range of y-axes (flux) for plot of 1D spectra\nplot_1Dspec_shownoise True # Indicate whether to show the noise envelope in plot or not\n\nplot_S2Nspec_ext S2Nplot # Name extension of pdf file containing plot of S/N spectrum\nplot_S2Nspec_xrange [4800,9300] # Range of x-axes (wavelength) for plot of S2N spectra\nplot_S2Nspec_yrange [-1,15] # Range of y-axes (S2N) for plot of S2N spectra\n#--------------------------------------------------END OF TDOSE SETUP--------------------------------------------------\n\n\"\"\" % (tu.get_now_string())\n fout = open(outputfile,'w')\n fout.write(setuptemplate)\n fout.close()", "title": "" }, { "docid": "4908b0b870f2c83fc63a71bc14ce331a", "score": "0.5456467", "text": "def mate(self, gene):", "title": "" }, { "docid": "101cfbc55d160dd9134c36d5b6b9a8eb", "score": "0.5452769", "text": "def test_00_create_svs(self): # pylint: disable=too-many-locals\n os.environ['PATH'] = os.environ['PATH'] + \":/share/software/bin\"\n\n bgname = \"HG002-NA24385-50x.bg\"\n bedfile = get_dataset_path(\"HG002/giab_v0.6/HG002_SVs_Tier1_v0.6.chr20.bed\")\n # bedfile = get_dataset_path(\"mini-chr20.bed\")\n truth_v = get_dataset_path(\"HG002/giab_v0.6/HG002_SVs_Tier1_v0.6.vcf.gz\")\n bg_file = get_dataset_path(\"HG002/\" + bgname)\n ref_seq = \"/reference/hs37d5/\"\n 
truth_baseline = \"python/functest/sv_perf_baseline.txt\"\n out_var = os.path.join(self.data_dir, \"var.vcf\")\n\n refmap_arg = \"\"\n if os.access(REFMAP_DIR, os.W_OK):\n refmap_arg = \" --ref-map \" + REFMAP_DIR + \"/\" + bgname + \".refmap\"\n bg_cmd = \"bgbinary discovery --in {0} --bed {1} --ref {2} --out {3} {4}\".format(bg_file, bedfile, ref_seq, out_var, refmap_arg)\n self.assertEqual(0, subprocess.call(bg_cmd, shell=True))\n\n zip_out = out_var + \".gz\"\n sort_cmd = \"vcf-sort {0} | bgzip > {1} && tabix {1}\".format(out_var, zip_out)\n self.assertEqual(0, subprocess.call(sort_cmd, shell=True))\n\n tru_out = os.path.join(self.data_dir, \"truvari_sv\")\n tru_cmd = \"/usr/bin/env python3.6 external/*pypi__Truvari_*/Truvari-*.data/scripts/truvari -b {0} -c {1} -f {2}/source.fasta -o {3} --passonly --includebed {4}\".format(\n truth_v, zip_out, ref_seq, tru_out, bedfile)\n self.assertEqual(0, subprocess.call(tru_cmd, shell=True))\n\n with open(truth_baseline, 'r') as f:\n base = json.load(f)\n with open(os.path.join(tru_out, \"summary.txt\"), 'r') as f:\n comp = json.load(f)\n p_diff = comp[\"precision\"] - base[\"precision\"]\n r_diff = comp[\"recall\"] - base[\"recall\"]\n\n fn_sample = subprocess.check_output(f\"egrep -v '^#' {tru_out}/fn.vcf | shuf -n 10\", shell=True).decode()\n err = f\"\"\"\nPrecision went from {base['precision']*100:.2f}% to {comp['precision']*100:.2f}%, a {p_diff*100:.2f}% difference\nRecall went from {base['recall']*100:.2f}% to {comp['recall']*100:.2f}%, a {r_diff*100:.2f}% difference\n\nRandom sample of false negatives:\n{fn_sample}\n\"\"\"\n\n self.assertLess(abs(p_diff), .03, err)\n self.assertLess(abs(r_diff), .03, err)\n\n # Still print the stats for informational purposes, even if we succeeded\"\n print(err)", "title": "" }, { "docid": "95d8f667038b612f76a38243e7d007d4", "score": "0.54519254", "text": "def fixture_dna_sample_son_id() -> str:\n return \"dna_son\"", "title": "" }, { "docid": "34490649d1b3c2d9d377c04a43027ecb", "score": "0.54485834", "text": "def makeTestG1():\n f1 = make_temp_file(\"\"\"\n#\noss: montana5,montana4\nmds: montana6\nio: montana[4-6]\n#42: montana3\ncompute: montana[32-163]\nchassis1: montana[32-33]\nchassis2: montana[34-35]\n \nchassis3: montana[36-37]\n \nchassis4: montana[38-39]\nchassis5: montana[40-41]\nchassis6: montana[42-43]\nchassis7: montana[44-45]\nchassis8: montana[46-47]\nchassis9: montana[48-49]\nchassis10: montana[50-51]\nchassis11: montana[52-53]\nchassis12: montana[54-55]\nUppercase: montana[1-2]\ngpuchassis: @chassis[4-5]\ngpu: montana[38-41]\nall: montana[1-6,32-163]\n\"\"\")\n # /!\\ Need to return file object and not f1.name, otherwise the temporary\n # file might be immediately unlinked.\n return f1", "title": "" }, { "docid": "7812d0b6f6822fb7ea42ec0ba219e0c5", "score": "0.5445818", "text": "def setUp(self):\n self.directory = utils.create_temp_directory(self.sample_data['dir'])\n\n self.base_filename = self.sample_data['iso'].replace('.iso', '')\n\n # Generate RPM Info Data\n rpminfo = '\\n'.join(self.sample_data['rpmdata'])\n\n utils.create_file(self.directory, \"%s.iso\" % self.base_filename, checksums=['md5', 'sha256'])\n utils.create_file(self.directory, \"%s.zip\" % self.base_filename, checksums=['md5', 'sha256'])\n utils.create_file(self.directory, \"%s.rpminfo\" % self.base_filename, data=rpminfo)\n utils.create_file(self.directory, \"%s.metadata.md5\" % self.base_filename, data=self.sample_data['metadata_md5'])", "title": "" }, { "docid": "228d071e186f67a3bae540c1520104e2", "score": 
"0.5440285", "text": "def fixture_name(self):\n return \"coding_dna_delins\"", "title": "" }, { "docid": "d7b7b7d38faff4610433a623d3059831", "score": "0.5436846", "text": "def setUpClass(cls):\n cls.binsize = 10000\n\n cls.tx_bbfile = REF_FILES[\"100transcripts_bigbed\"]\n cls.cds_bbfile = REF_FILES[\"100cds_bigbed\"]\n cls.as_cds_bbfile = REF_FILES[\"100cds_antisense_bigbed\"]\n\n cls.tx_hash = BigBedGenomeHash(cls.tx_bbfile)\n cls.cds_hash = BigBedGenomeHash(cls.cds_bbfile)\n cls.as_cds_hash = BigBedGenomeHash(cls.as_cds_bbfile)\n\n cls.transcripts = list(BigBedReader(cls.tx_bbfile, return_type=Transcript))\n cls.coding_regions = list(BigBedReader(cls.cds_bbfile))\n cls.shuffled_indices = list(range(len(cls.transcripts)))\n\n cls.tx_dict = {X.get_name(): X for X in cls.transcripts}\n cls.cds_dict = {X.get_name(): X for X in cls.coding_regions}\n shuffle(cls.shuffled_indices)", "title": "" }, { "docid": "494c521550772fb39180501358726bf9", "score": "0.5436166", "text": "def genomize(self):\n for i in range(3):\n self.genes[i] = [random.randint(0, 2), random.randint(0, 2)]", "title": "" }, { "docid": "0c23a71098ef096dc76b76488c569e64", "score": "0.5436165", "text": "def create():", "title": "" }, { "docid": "0c23a71098ef096dc76b76488c569e64", "score": "0.5436165", "text": "def create():", "title": "" }, { "docid": "0c23a71098ef096dc76b76488c569e64", "score": "0.5436165", "text": "def create():", "title": "" }, { "docid": "6dd3dacaee7bd79a4d4ed69d9f8a3e33", "score": "0.54330176", "text": "def create_machineset_factory_fixture(request):\n\n machineset_name = []\n\n def factory(additional_nodes=3):\n \"\"\"\n Args:\n additional_nodes (int): Number of additional nodes to be added (default=3).\n \"\"\"\n log.info(\"Creating machineset\")\n machineset_name.append(\n machine.create_custom_machineset(instance_type=\"m5.4xlarge\", zone=\"a\")\n )\n machine.wait_for_new_node_to_be_ready(machineset_name[0])\n log.info(\n f\"Adding {additional_nodes} more nodes to machineset {machineset_name[0]}\"\n )\n node.add_new_node_and_label_it(\n machineset_name=machineset_name[0],\n num_nodes=additional_nodes,\n mark_for_ocs_label=False,\n )\n machine.wait_for_new_node_to_be_ready(machineset_name[0])\n\n def finalizer():\n \"\"\"\n Delete machineset\n \"\"\"\n if config.ENV_DATA[\"deployment_type\"].lower() == \"ipi\":\n if machineset_name[0] is not None and machine.check_machineset_exists(\n machine_set=machineset_name[0]\n ):\n log.info(f\"Deleting machineset {machineset_name[0]}\")\n machine.delete_custom_machineset(machineset_name[0])\n\n request.addfinalizer(finalizer)\n return factory", "title": "" }, { "docid": "c4402dc71707cdc5d5908e252ec0aa55", "score": "0.54310596", "text": "def __init__(self, default: list = [], number_of_genes: int = 11, perturb: bool = False):\n print(ELITE_GENES)\n exit(0)\n if len(default) == 0:\n default = ELITE_GENES[np.random.randint(len(ELITE_GENES))]\n self.fitness = None\n self.train_error, self.validate_error = None, None\n self.genes = default\n if perturb:\n self.genes += np.random.normal(loc=0.0, scale=0.0005, size=(number_of_genes))", "title": "" }, { "docid": "99798d3924a9c9c5c9bba322a0a91b2e", "score": "0.5430776", "text": "def setUp(self):\n # define the data\n self.data = \"\"\" 105.2 323.4 star 20\n 102.4 529.0 galaxy 21\n 834.1 343.7 galaxy 23\"\"\"\n\n # define a test file\n # delete in case it just exists\n self.testfile = 'test_file.tmp'\n if os.path.isfile(self.testfile):\n os.unlink(self.testfile)\n\n # open the test file\n tfile = open(self.testfile, 
'w')\n\n # fill data into the test file\n tfile.write(self.data)\n\n #close the test file\n tfile.close()\n\n # create the test instance\n self.tdata = asciifunction.open(self.testfile)", "title": "" } ]
6b417230d88e9e07ef0045cbddfe6ec8
Uses the user's chosen automatic allocation method, or if none, assigns all to STR.
[ { "docid": "1ff12aa63eb4f1aca8f2e6e12021aa91", "score": "0.0", "text": "def allocateAllAttributePoints(self):\n\t\turl = \"https://habitica.com/api/v3/user/allocate-now\"\n\t\treturn(postUrl(url, self.credentials))", "title": "" } ]
[ { "docid": "905e351024885095ad50ec9017ad8686", "score": "0.52877396", "text": "def autostring(min_length=3):\n\n assert min_length >= 2\n addr = memorymanager.BinaryAddr(0)\n while addr < len(memory_binary):\n i = 0\n while (addr + i) < len(memory_binary) and memory_binary[addr + i] is not None and not disassembly.is_classified(addr + i, 1) and utils.isprint(memory_binary[addr + i]):\n i += 1\n if movemanager.b2r(addr + i) in labelmanager.labels:\n break\n if i >= min_length:\n # TODO: I suspect the next two line fragment should be wrapped up if I keep it, probably repeated a bit (probably something like \"with movemanager.b2r(binary_addr) as runtime_addr:...\", although I probably can't reuse the b2r function, but maybe think about it)\n runtime_addr = movemanager.b2r(addr)\n with movemanager.move_id_for_binary_addr[addr]:\n string(runtime_addr, i)\n addr += max(1, i)", "title": "" }, { "docid": "865cc88d073e3e122023d124abbb69eb", "score": "0.5237056", "text": "def gen_build_str_def():\n\treturn \"\"", "title": "" }, { "docid": "c542240b863d4c3dcb21992524ab4970", "score": "0.5111121", "text": "def SysAllocString(self, emu, argv, ctx={}):\n psz, = argv\n alloc_str = self.read_mem_string(psz, 2)\n if alloc_str:\n argv[0] = alloc_str\n alloc_str += '\\x00'\n ws = alloc_str.encode('utf-16le')\n ws_len = len(ws)\n\n # https://docs.microsoft.com/en-us/previous-versions/windows/desktop/automat/bstr\n bstr_len = 4 + ws_len\n bstr = self.mem_alloc(bstr_len)\n bstr_bytes = struct.pack('<I', ws_len - 2) + ws\n\n self.mem_write(bstr, bstr_bytes)\n\n return bstr + 4\n\n return 0", "title": "" }, { "docid": "d5f65164a8498dc8290bd3a18c7f664d", "score": "0.5078475", "text": "def test_memorystr():\n for val in ['123G', '123g', '123M', '123m', '123.5m', '25k', '25K']:\n MemoryStr(val)\n\n for val in [123, '123', '123mm', '123a', 'G']:\n print(val)\n with pytest.raises(ValueError):\n MemoryStr(val)\n\n assert MemoryStr('1024m').asGB() == 1.0\n assert MemoryStr('3G').asGB() == 3.0", "title": "" }, { "docid": "34abd6390b339e5784ff8d8b83a2a41b", "score": "0.5014933", "text": "def _str_make(self):\n return self._name if self._fact is None else f\"{self._fact} × {self._name}\"", "title": "" }, { "docid": "0ff68ae75f9e0cc84b4fb161ca63ecb2", "score": "0.4884268", "text": "def _unicode_output(cursor, name, default_type, size, precision, scale):\n if default_type in (\n cx_Oracle.STRING,\n cx_Oracle.LONG_STRING,\n cx_Oracle.FIXED_CHAR,\n cx_Oracle.CLOB,\n ):\n return cursor.var(str, size, cursor.arraysize)", "title": "" }, { "docid": "588ccafcff50968164dc390ad1800ecc", "score": "0.48803422", "text": "def __init__ (self, string, weight=10):\n self.weight = weight\n str.__init__(self, string)", "title": "" }, { "docid": "561b90fd3872b55a7233d213bd245c4e", "score": "0.48080927", "text": "def _random_string(self, size, chars=None):\n return ''.join(random.choice(chars or _DEFAULT_CHARS) for _ in range(size))", "title": "" }, { "docid": "f5bcabcd4b28a8e39595eb3abbb9f749", "score": "0.4791626", "text": "def make_string(self, offset, size):\n self.ret = idc.create_strlit(offset, offset + size)\n return self.ret", "title": "" }, { "docid": "5be4a245f089f4a84abd16db34f1bd92", "score": "0.47768155", "text": "def override_str_factory(obj):\n\n def new_str_method(self):\n return \": \".join([str(self.description), str(self.value)])\n\n # This used to use type create a new class, along these lines:\n # https://stackoverflow.com/questions/5918003/python-override-str-in-an-exception-instance\n #\n # That no longer seems to work, 
because using the class of the widget from type give a traitlets\n # object not an ipywidgets object, and the value is no longer related to the UI setting.\n #\n # This new way works, but changes the __str__ for every widget type it touches.\n # This whole thing really needs a re-design.\n\n original_class = type(obj)\n original_class.__str__ = new_str_method\n return obj", "title": "" }, { "docid": "0accb8e8e5d590d2f0b1e415f53786a0", "score": "0.47707027", "text": "def safeToString():", "title": "" }, { "docid": "be899b4521d1dee9fad221b6a073f914", "score": "0.46682712", "text": "def get(self,name):\n return self.dp2(self.name+'Alloc',name)", "title": "" }, { "docid": "be899b4521d1dee9fad221b6a073f914", "score": "0.46682712", "text": "def get(self,name):\n return self.dp2(self.name+'Alloc',name)", "title": "" }, { "docid": "d52663fc976efbbdc5d5ae05eed98d5e", "score": "0.46482927", "text": "def __str__(self):\n if self.default_kind:\n kind_str = \"\"\n else:\n kind_str = \", kind={}\".format(self.kind)\n # End if\n return \"{}(len={}{})\".format(self.typestr, self.lenstr, kind_str)", "title": "" }, { "docid": "befbf07bd8cbd6832515950720eee35e", "score": "0.4632501", "text": "def __init__(self) -> None:\n str.__init__(self)", "title": "" }, { "docid": "629669b909cc4b818d7a10fbd937ffde", "score": "0.46316814", "text": "def Str4R(obj):\n # for objects known by PypeR\n if type(obj) in str_func:\n return(str_func[type(obj)](obj))\n # for objects derived from basic data types\n for tp in base_tps:\n if isinstance(obj, tp):\n return(str_func[tp](obj))\n # for any other objects\n return(OtherStr(obj))", "title": "" }, { "docid": "95c1fa59963e3402f23bf9453954b560", "score": "0.46197683", "text": "def simple_str(self):\n pass", "title": "" }, { "docid": "36566a7418fc2aeff14839cc3be724e7", "score": "0.4598859", "text": "def str_(object_):\n return str(object_)", "title": "" }, { "docid": "03c9c6b7693db59b2a2da488b8c39d57", "score": "0.45763764", "text": "def _assign_sizes(self):", "title": "" }, { "docid": "17f6430dd9279725d81569b2c3c71d24", "score": "0.45500386", "text": "def setDefaultAllocation(alloc):\n return Cuebot.getStub('allocation').SetDefault(\n facility_pb2.AllocSetDefaultRequest(allocation=alloc), timeout=Cuebot.Timeout)", "title": "" }, { "docid": "ae3b659295b5b4f7fc01ed44bd5cc50d", "score": "0.454832", "text": "def get_allocation_mode(self):\n return \"geni_many\"", "title": "" }, { "docid": "e35a8139afedb48b460ffa224b0a27d6", "score": "0.45442566", "text": "def allocate(self, val):\n self.at_options.allocate = 1 if val else 0", "title": "" }, { "docid": "32fa589d4cf5745ffb9e38250f89df73", "score": "0.45273912", "text": "def __str__ (self) :\r\n a = []\r\n next_get = self.nextGet_\r\n buffer = self.buff_\r\n length = self.capacity()\r\n for x in xrange(len(self)) :\r\n a.append(str(buffer[next_get]))\r\n a.append(\" \")\r\n next_get = (next_get+1) % length\r\n \r\n return \"\".join(a)", "title": "" }, { "docid": "b79a5703d57fd8b896dd09647f5aa881", "score": "0.45196858", "text": "def stringGen(size, chars=string.ascii_uppercase + string.digits):\n\treturn ''.join(random.choice(chars) for _ in range(size))", "title": "" }, { "docid": "32a22750d89a054f7469b24156cb883d", "score": "0.44957742", "text": "def to_repr(obj: Any, max_string: int | None = None) -> str:\n if isinstance(obj, str):\n obj_repr = obj\n else:\n try:\n obj_repr = repr(obj)\n except Exception as error: # noqa: BLE001\n obj_repr = f\"<repr-error {str(error)!r}>\"\n\n if max_string is not None and len(obj_repr) > 
max_string:\n truncated = len(obj_repr) - max_string\n obj_repr = f\"{obj_repr[:max_string]!r}+{truncated}\"\n\n return obj_repr", "title": "" }, { "docid": "d52f490138c45136ef54267655a77cd5", "score": "0.44830635", "text": "def __init__(self):\n self.i, self.pool = 0, list('ABCDEFGHIJKLMNOPQRSTUVWXYZ')", "title": "" }, { "docid": "3f5fca747b2ee585e174a64b5b73badf", "score": "0.44818646", "text": "def _as_string(self, name):\n org_type = self._get_type(name)\n if org_type == 'string': return None\n valid = ['single', 'delimited set', 'int', 'float', 'date']\n if not org_type in valid:\n msg = 'Cannot convert variable {} of type {} to text!'\n raise TypeError(msg.format(name, org_type))\n self._meta['columns'][name]['type'] = 'string'\n if self._get_type in ['single', 'delimited set']:\n self._meta['columns'][name].pop('values')\n self._data[name] = self._data[name].astype(str)\n return None", "title": "" }, { "docid": "057b5db0bc4f40348f8826648c0ac8eb", "score": "0.44798347", "text": "def __str__(self):\n\n string = \"[\"\n for i in range(1, self.i , 1):\n try:\n string += str(self.heap[i]) + \"\\n\"\n except:\n string += \"Nan \"\n return string + \"]\"", "title": "" }, { "docid": "a536e424bcfe5382a911916e1549b5cd", "score": "0.44774675", "text": "def __str__(self) -> str:\n return 'HEAP ' + str(self.heap)", "title": "" }, { "docid": "a536e424bcfe5382a911916e1549b5cd", "score": "0.44774675", "text": "def __str__(self) -> str:\n return 'HEAP ' + str(self.heap)", "title": "" }, { "docid": "f6e9114a315810c6b9a51b43557bad25", "score": "0.4476866", "text": "def __call__(self, val, *shapes, **kwargs):\r\n ret = super(Alloc, self).__call__(val, *shapes, **kwargs)\r\n try:\r\n # It makes optimization difficult when useless allocs are thrown\r\n # into the graph at every stage of optimization. 
This little logic\r\n # tries to help at least in some cases.\r\n if hasattr(val, 'fgraph') and (val.type == ret.type):\r\n return val\r\n except AttributeError:\r\n pass\r\n return ret", "title": "" }, { "docid": "584ecac1b8d654d5e64f1079c55c6775", "score": "0.44603643", "text": "def __str__(self, nw=30):\r\n names = self._get_param_names()\r\n #if names is None:\r\n # names = self._get_print_names()\r\n #name_indices = self.grep_param_names(\"|\".join(names))\r\n N = len(names)\r\n\r\n if not N:\r\n return \"This object has no free parameters.\"\r\n header = ['Name', 'Value', 'Constraints', 'Ties']\r\n values = self._get_params() # map(str,self._get_params())\r\n #values = self._get_params()[name_indices] # map(str,self._get_params())\r\n # sort out the constraints\r\n constraints = [''] * len(names)\r\n #constraints = [''] * len(self._get_param_names())\r\n for i, t in zip(self.constrained_indices, self.constraints):\r\n for ii in i:\r\n constraints[ii] = t.__str__()\r\n for i in self.fixed_indices:\r\n for ii in i:\r\n constraints[ii] = 'Fixed'\r\n # sort out the ties\r\n ties = [''] * len(names)\r\n for i, tie in enumerate(self.tied_indices):\r\n for j in tie:\r\n ties[j] = '(' + str(i) + ')'\r\n\r\n if values.size == 1:\r\n values = ['%.4f' %float(values)]\r\n else:\r\n values = ['%.4f' % float(v) for v in values]\r\n max_names = max([len(names[i]) for i in range(len(names))] + [len(header[0])])\r\n max_values = max([len(values[i]) for i in range(len(values))] + [len(header[1])])\r\n max_constraint = max([len(constraints[i]) for i in range(len(constraints))] + [len(header[2])])\r\n max_ties = max([len(ties[i]) for i in range(len(ties))] + [len(header[3])])\r\n cols = np.array([max_names, max_values, max_constraint, max_ties]) + 4\r\n # columns = cols.sum()\r\n\r\n header_string = [\"{h:^{col}}\".format(h=header[i], col=cols[i]) for i in range(len(cols))]\r\n header_string = map(lambda x: '|'.join(x), [header_string])\r\n separator = '-' * len(header_string[0])\r\n param_string = [\"{n:^{c0}}|{v:^{c1}}|{c:^{c2}}|{t:^{c3}}\".format(n=names[i], v=values[i], c=constraints[i], t=ties[i], c0=cols[0], c1=cols[1], c2=cols[2], c3=cols[3]) for i in range(len(values))]\r\n\r\n\r\n return ('\\n'.join([header_string[0], separator] + param_string)) + '\\n'", "title": "" }, { "docid": "19095a4e1d3f64c2d7717d9812c0cd7b", "score": "0.44490156", "text": "def String(num=None, text=None, value=None):\n if value:\n return VBString(value)\n elif num is None and text is None:\n return VBString()\n else:\n return VBString(text[:1] * CInt(num))", "title": "" }, { "docid": "64b0793d2981b524a7111ef3b83c1f1a", "score": "0.443438", "text": "def convert_method_4_nb_into_str(method_4_nb):\n if method_4_nb is None:\n msg = 'method_4_nb is None. 
Going to return None.'\n warnings.warn(msg)\n return None\n\n dict_method_4 = {0: 'metal_1', 1: 'metal_2', 2: 'warehouse'}\n\n method_4_str = dict_method_4[method_4_nb]\n\n return method_4_str", "title": "" }, { "docid": "8aea6d52a8e82301fe577937efc65af7", "score": "0.44328383", "text": "def basestr(cls: Any) -> str:\n return baserepr(cls)", "title": "" }, { "docid": "1a73710579a58ddf91e4aaba7edffbe4", "score": "0.44325802", "text": "def lw(max_no, str_obj):\n x = max_no - len(str_obj)\n y = 0\n string = ''\n for y in range(x):\n string = string + ' '\n return string", "title": "" }, { "docid": "8c7f898d5702119fa101508b4085532a", "score": "0.44233865", "text": "def default_length(self) -> int:\r\n ...", "title": "" }, { "docid": "7c9a7e8774b171230feb83732d76dac3", "score": "0.44224402", "text": "def capacity_conversion(\n capacity: str,\n conversion_factor: int = 1024 ** 2) -> str:\n if capacity.isdigit():\n return str(int(capacity) / conversion_factor)\n return str(0)", "title": "" }, { "docid": "3b0d7aec50dc1b2c1e8b92409a0da50c", "score": "0.44196588", "text": "def _build_string(self, text):\n if self.text:\n string = self.text + \": \" + text\n else:\n string = text\n return fix_length(string, self.width)", "title": "" }, { "docid": "bb3e3061bf0f24a3afdcb3d8d994c467", "score": "0.44114584", "text": "def get_capacity():\n\n try:\n raw_capacity = PLIST[\"TotalSize\"]\n raw_capacity = str(raw_capacity)\n\n except KeyError:\n return \"Unknown\", \"Unknown\"\n\n #Round the sizes to make them human-readable.\n unit_list = [None, \"B\", \"KB\", \"MB\", \"GB\", \"TB\", \"PB\", \"EB\"]\n unit = \"B\"\n human_readable_size = int(raw_capacity)\n\n try:\n while len(str(human_readable_size)) > 3:\n #Shift up one unit.\n unit = unit_list[unit_list.index(unit)+1]\n human_readable_size = human_readable_size//1000\n\n except IndexError:\n return \"Unknown\", \"Unknown\"\n\n #Include the unit in the result for both exact and human-readable sizes.\n return raw_capacity, str(human_readable_size)+\" \"+unit", "title": "" }, { "docid": "bab5b9583e25403df01017678fdc5ed6", "score": "0.4409972", "text": "def format_memory(self, memory):\n\n memory = float( memory) \n\n if memory is None or memory == 0:\n return \"N/A\"\n elif ( memory > 1000000000):\n return \"{:.2f}GB\".format(memory / 1000000000)\n elif ( memory > 1000000):\n return \"{:.2f}MB\".format(memory / 1000000) \n elif ( memory > 1000):\n return \"{:.2f}KB\".format(memory / 1000) \n else:\n return \"{:}\".format(int(memory))", "title": "" }, { "docid": "c03bdbcd58ade88129e2f29b15fa7217", "score": "0.44030175", "text": "def __str__(self) -> str:\n return _get_default_str(self)", "title": "" }, { "docid": "59cb9e64de0b2a5d57855423482ed32e", "score": "0.43977994", "text": "def capacity(self) -> typing.Optional[str]:\n value = self._properties.get(\"capacity\")\n return f\"{value}\" if value is not None else None", "title": "" }, { "docid": "40938a27a4c322830d3f0cbeee41f165", "score": "0.43884405", "text": "def lw2(max_no, str_obj):\n x = max_no - len(str_obj)\n y = 0\n string = ''\n for y in range(x):\n string = string + ' '\n return string", "title": "" }, { "docid": "01f1a70c2352e97d3814eb09c1762d1f", "score": "0.43884185", "text": "def _make_array(self, capacity):\n return (capacity * ctypes.py_object)()", "title": "" }, { "docid": "ce706ec755c272039c25c9fd0c636f70", "score": "0.43881083", "text": "def make_string(value):\n if value:\n return str(value)\n return None", "title": "" }, { "docid": "143a15a8cbb204e53851336cbadb8b05", "score": 
"0.43796006", "text": "def _allocate(self, n_resource, beliefs):\n raise NotImplementedError", "title": "" }, { "docid": "91ae16322bd7e0553e85a170dfe519cc", "score": "0.43772808", "text": "def __str__(self):\n return str(self._heap)", "title": "" }, { "docid": "ec429126431efdc3b911fd2d74bfc8b0", "score": "0.4373783", "text": "def __str__(self):\n raise NotImplementedError('Subclasses must define how to layout Character printing')", "title": "" }, { "docid": "cf40165752ff403f5f40239b7dbe3258", "score": "0.43716744", "text": "def test__str__method(self):\n Rectangle.reset_objects()\n s1 = Square(5)\n self.assertEqual(str(s1), \"[Square] (1) 0/0 - 5\")\n s2 = Square(2, 2)\n self.assertEqual(str(s2), \"[Square] (2) 2/0 - 2\")\n s3 = Square(3, 1, 3)\n self.assertEqual(str(s3), \"[Square] (3) 1/3 - 3\")", "title": "" }, { "docid": "3e2fad3c6db2588c0b7dae6950f99647", "score": "0.43626896", "text": "def pudb_stringifier(obj):\r\n try:\r\n return run_with_timeout(\"str(obj)\", 0.5, {'obj':obj})\r\n except TimeOutError:\r\n return (type(obj), \"(str too slow to compute)\")", "title": "" }, { "docid": "182ad096d45dc318316f096ccc9b124d", "score": "0.43604168", "text": "def auto(self) -> 'Size':\n self.maximum = 'auto'\n return self", "title": "" }, { "docid": "18919133d3c504051fb53900a2080743", "score": "0.43575728", "text": "def STRtoMEM(string, address, constraint, assertion, limit=None, lmax=STR_TO_MEM_LMAX\\\n ,addr_str=None, hex_info=False, optimizeLen=False): \n if( not limit is None and address > limit ):\n return (None, None)\n \n chain = None\n chain2 = None\n chain3 = None\n \n # Try with memcpy \n verbose(\"Trying with memcpy()\")\n (addr,chain) = STRtoMEM_memcpy(string, address, constraint, assertion, limit, lmax, addr_str, hex_info)\n res = (addr,chain)\n # Try with strcpy\n verbose(\"Trying with strcpy()\")\n if( optimizeLen or (not chain)):\n (addr2,chain2) = STRtoMEM_strcpy(string, address, constraint, assertion, limit, lmax, addr_str, hex_info)\n if( not res[1] ):\n res = (addr2,chain2)\n elif (chain2 and ( chain2 < res[1])):\n res = (addr2,chain2)\n # Try with a direct write gadget \n verbose(\"Trying with gadgets only\")\n if(optimizeLen or (not chain2)):\n (addr3,chain3) = STRtoMEM_write(string, address, constraint, assertion, limit, lmax, addr_str, hex_info) \n if( not res[1] ):\n res = (addr3, chain3)\n elif (chain3 and (chain3 < res[1])):\n res = (addr3, chain3)\n \n return res", "title": "" }, { "docid": "3f13f8ce08ef3f3e949b1faa5319e87a", "score": "0.4355672", "text": "def __init__(self, default_setup = False):\n self.size = 0\n if default_setup:\n self.setup()\n else:\n # restore saved options\n ida_strlist.get_strlist_options()\n self.refresh()\n\n self._si = ida_strlist.string_info_t()", "title": "" }, { "docid": "9c522776adef0a85ad9c638f7192bef8", "score": "0.43552616", "text": "def allocate(self):\n raise NotImplementedError", "title": "" }, { "docid": "f8bfae7b33ebce17f9ff5e327d6d7751", "score": "0.43539622", "text": "def test_string_default(self):\r\n default = 'BLAKE!'\r\n prop = String(default=default, required=True)\r\n self.assertEqual(prop.to_database(None), prop.to_database(default))", "title": "" }, { "docid": "640ff381d343e080e23993955c9ecb3f", "score": "0.43432242", "text": "def alloc_method(self, alloc_method):\n allowed_values = [\"dynamic\", \"static\"]\n if alloc_method is not None and alloc_method not in allowed_values:\n raise ValueError(\n \"Invalid value for `alloc_method`, must be one of {0}\"\n .format(allowed_values)\n )\n\n self._alloc_method = 
alloc_method", "title": "" }, { "docid": "ed1f0d865da3fc2169f38c43bcdd167e", "score": "0.43302807", "text": "def get_NSString(string):\n return CFSTR(string).autorelease()", "title": "" }, { "docid": "ed1f0d865da3fc2169f38c43bcdd167e", "score": "0.43302807", "text": "def get_NSString(string):\n return CFSTR(string).autorelease()", "title": "" }, { "docid": "8c23f6c52be8e84caed7a9d7c359c914", "score": "0.43257925", "text": "def gen_dps_strings():\n return [f\"{dp} (value: ?)\" for dp in range(1, 256)]", "title": "" }, { "docid": "448f4b2ef220dffc3a38b1bc6b75d2b8", "score": "0.43247217", "text": "def alloc_method(self):\n return self._alloc_method", "title": "" }, { "docid": "9608f4eddd2e4a94b472ced60397d68e", "score": "0.43204615", "text": "def __str__(self):\n return f\"{self.sz}\"", "title": "" }, { "docid": "42b12408c7eb14abff5abf29efa43f53", "score": "0.4315671", "text": "def set_str_precision(cls, dp=3):\n cls.str_decimal_places = dp", "title": "" }, { "docid": "9f9fdcafeb0da35e73631e312dba2dd9", "score": "0.43061346", "text": "def test_creation_str():\n with pytest.raises(ValueError) as __:\n value = \"42\"\n __ = param.Integer(value=value)", "title": "" }, { "docid": "93772f29272bbca9efc8c4b2f4243a46", "score": "0.43050653", "text": "def arbitrary_string(size=4, base_text=None):\n\n if (base_text is None) or (base_text == ''):\n base_text = 'test'\n\n if size <= 0:\n return ''\n\n extra = size % len(base_text)\n body = ''\n\n if extra == 0:\n body = base_text * size\n\n if extra == size:\n body = base_text[:size]\n\n if extra > 0 and extra < size:\n body = (size / len(base_text)) * base_text + base_text[:extra]\n\n return body", "title": "" }, { "docid": "b27d5d6b738c9e4695a782630c2d168f", "score": "0.43005365", "text": "def assign(self, *args):\n return _libsbml.string_assign(self, *args)", "title": "" }, { "docid": "cfb35156823b734db997f0efaf7a680c", "score": "0.4289393", "text": "def _format_string(value, size):\n\n return '{:<{}}'.format(value, size)", "title": "" }, { "docid": "aeabcf0c2f90c736e36216603d7476c0", "score": "0.4288562", "text": "def __init__(self) -> None:\n # Determine if strings should have a sharp symbol prepended or not\n self.prepend_strings = True\n # Only register the task\n self.register_only = False\n # Convert small objects to string if enabled\n self.object_conversion = False", "title": "" }, { "docid": "0af19a052d6bbcfa8034ccf0bd6c6ce9", "score": "0.42842293", "text": "def generate_str(self, num):\n str_list = []\n for _ in range(num):\n str_list.append(self.fake.pystr())\n return str_list", "title": "" }, { "docid": "4fc6256cec6e7fa6b7ab384a2cd2750f", "score": "0.42830998", "text": "def set_strmem_type(self, *args):\n return _ida_hexrays.vdui_t_set_strmem_type(self, *args)", "title": "" }, { "docid": "2d425f7353ae0c93063a7a720f95e894", "score": "0.42700452", "text": "def _get_problem_str(self):\n return ''", "title": "" }, { "docid": "5c8243954339b589af28a30519694091", "score": "0.42696697", "text": "def acquire(capacity):\n if capacity <= StringCache.MAX_BUILDER_SIZE:\n b = StringCache.buf\n if b is not None:\n if capacity <= len(b):\n StringCache.buf = None\n b[0] = 0\n return b\n return _ffi.new(f\"unsigned char[{capacity}]\")", "title": "" }, { "docid": "f1cf3ffd28b4e4adf55f84990e5eeabf", "score": "0.42695186", "text": "def __init__(self, *args, **kwargs):\n super(SL1024AFR55, self).__init__(\n max_seq_len=1024, alloc_free_ratio=.55, *args, **kwargs)", "title": "" }, { "docid": "467097e0c5b4fe00aab6a5c6845cc042", "score": "0.42653155", "text": "def 
allocatememory(self):\n pass", "title": "" }, { "docid": "0b9706a823275f36ba77afb58da206a6", "score": "0.42611238", "text": "def construct(self, current, target):\n nfilled = int(current / target * self.length)\n return self._filledChar*nfilled + self._emptyChar*(self.length-nfilled)", "title": "" }, { "docid": "0e01263ad00907b00b8b75d09709623d", "score": "0.42603102", "text": "def test_limit_str(self):\n limit = self.test_limit_memory()\n l = Limit.objects.get(uuid=limit['uuid'])\n self.assertEqual(str(l), \"{}-{}\".format(limit['app'], limit['uuid'][:7]))", "title": "" }, { "docid": "3f617e0fb96af3bf5a63d55fb8c5291b", "score": "0.42472658", "text": "def _ensure_str_array_size(array, strlen):\n current_length = array.itemsize // np.dtype(\"1U\").itemsize\n if current_length < strlen:\n return array.astype(f\"{2 * strlen}U\")\n else:\n return array", "title": "" }, { "docid": "b5bb3afdbe4b7b0d461504e8a1f9d9b7", "score": "0.42460227", "text": "def randomstr(ctx, nbytes=\"\"):\n # deprecated function\n logger.info(\"DeprecationWarning: randomstr is deprecated. Use random:str instead\")\n random(ctx, \"str\", nbytes)", "title": "" }, { "docid": "3dfd2064ea0617e89e1f00a528105ec1", "score": "0.42445916", "text": "def __str__(self):\n quota = self.id + \" = \" + str(self.usage)\n if self.units != 'integer':\n quota += \"\" + self.units\n return quota", "title": "" }, { "docid": "22e1d02e4ff6e9e50e366d63050d3f9e", "score": "0.42396277", "text": "def asString(obj):\n if type(obj) in _STR_TYPES:\n return obj\n return str(obj)", "title": "" }, { "docid": "990130a083ea9ee18317f56639953ae0", "score": "0.42368135", "text": "def __init__(self, *args, **kwargs):\n super(SL1024AFR98, self).__init__(\n max_seq_len=1024, alloc_free_ratio=.98, *args, **kwargs)", "title": "" }, { "docid": "21de3714916337ee03dbc105bc28d9fb", "score": "0.4235017", "text": "def string(runtime_addr, n=None):\n\n runtime_addr = memorymanager.RuntimeAddr(runtime_addr)\n binary_addr, _ = movemanager.r2b_checked(runtime_addr)\n if n is None:\n assert not disassembly.is_classified(binary_addr)\n n = 0\n while not disassembly.is_classified(binary_addr + n) and utils.isprint(memory_binary[binary_addr + n]):\n n += 1\n if n > 0:\n disassembly.add_classification(binary_addr, String(n))\n return movemanager.b2r(binary_addr + n)", "title": "" }, { "docid": "539585907964f801e28094e2a03d58d1", "score": "0.42302346", "text": "def CreateFiller(filler_def):\n filler_def = filler_def if isinstance(filler_def, str) \\\n else filler_def.SerializePartialToString()\n _C.CreateFiller(filler_def)", "title": "" }, { "docid": "3a4309266d2ba81283cbc5c9419dc0ad", "score": "0.42203322", "text": "def u(obj):\n return obj if isinstance(obj, str) else str(obj)", "title": "" }, { "docid": "20e4704a09ead7fc4b46817c66c13f4b", "score": "0.42178178", "text": "def serialize_str(self, obj):\n if len(obj) < 0x100:\n return 'U' + struct.pack('<B', len(obj)) + obj\n return 'T' + struct.pack('<I', len(obj)) + obj", "title": "" }, { "docid": "6d3bd0272cec6f82f0411fa69cc95a80", "score": "0.42176977", "text": "def str(self, space, quiet=False):\n raise InterpreterError(\"Unimplemented str()\")", "title": "" }, { "docid": "90e633fbc6cbb87d280e4bfdabba1f9d", "score": "0.42148933", "text": "def test_str_method(self):\n s1 = Square(4, 6, 2, 12)\n self.assertEqual(s1.__str__(), '[Square] (12) 6/2 - 4')", "title": "" }, { "docid": "bd0f9367a7d281545078d160a5c94936", "score": "0.42082718", "text": "def str(x) -> String:\n pass", "title": "" }, { "docid": 
"9a8830bc19c4023065ee4be988ecb521", "score": "0.42073962", "text": "def __str__(self):\n raise RuntimeError(\"Needs to be implemented in base class\")", "title": "" }, { "docid": "c2fdbedcd3c4c911f196d66ff11b1c5e", "score": "0.4206756", "text": "def __init__(self):\n self.string = None", "title": "" }, { "docid": "03c325a44bd8469bce0d878260795164", "score": "0.42062312", "text": "def default(str):\n return str + ' [Default: %default]'", "title": "" }, { "docid": "e2c2b38a0e5b03d48f5eacc75c31f141", "score": "0.42039916", "text": "def str_diagnostic(self):\r\n sio = StringIO()\r\n val_str_len_limit = 800\r\n print >> sio, \"BadOptimization Error\", super(BadOptimization,\r\n self).__str__()\r\n print >> sio, \" Variable: id\", id(self.new_r), self.new_r\r\n print >> sio, \" Op\", self.new_r.owner\r\n print >> sio, \" Value Type:\", type(self.new_r_val)\r\n try:\r\n ssio = StringIO()\r\n print >> ssio, \" Old Value shape, dtype, strides:\",\r\n print >> ssio, self.old_r_val.shape,\r\n print >> ssio, self.old_r_val.dtype,\r\n print >> ssio, self.old_r_val.strides\r\n # only if all succeeds to we add anything to sio\r\n print >> sio, ssio.getvalue()\r\n except Exception:\r\n pass\r\n\r\n str_old_r_val = str(self.old_r_val)\r\n if len(str_old_r_val) > val_str_len_limit:\r\n print >> sio, \" Old Value: \", str(self.old_r_val)[\r\n :val_str_len_limit], '...'\r\n else:\r\n print >> sio, \" Old Value: \", str(self.old_r_val)\r\n\r\n try:\r\n ssio = StringIO()\r\n print >> ssio, \" New Value shape, dtype, strides:\",\r\n print >> ssio, self.new_r_val.shape,\r\n print >> ssio, self.new_r_val.dtype,\r\n print >> ssio, self.new_r_val.strides\r\n # only if all succeeds to we add anything to sio\r\n print >> sio, ssio.getvalue()\r\n except Exception:\r\n pass\r\n str_new_r_val = str(self.new_r_val)\r\n if len(str_new_r_val) > val_str_len_limit:\r\n print >> sio, \" New Value: \", str(self.new_r_val)[\r\n :val_str_len_limit], '...'\r\n else:\r\n print >> sio, \" New Value: \", str(self.new_r_val)\r\n\r\n try:\r\n ov = numpy.asarray(self.old_r_val)\r\n nv = numpy.asarray(self.new_r_val)\r\n ssio = StringIO()\r\n print >> ssio, \" Max Abs Diff: \", numpy.max(numpy.absolute(nv -\r\n ov))\r\n print >> ssio, \" Mean Abs Diff: \", numpy.mean(numpy.absolute(nv -\r\n ov))\r\n print >> ssio, \" Median Abs Diff: \", numpy.median(numpy.absolute(\r\n nv - ov))\r\n print >> ssio, \" Std Abs Diff: \", numpy.std(numpy.absolute(\r\n nv - ov))\r\n\r\n # N.B. 
the maximum(..., 1e-8) protects against div by 0 when\r\n # nv == ov == 0\r\n reldiff = (numpy.absolute(nv - ov)\r\n / numpy.maximum(\r\n numpy.absolute(nv) + numpy.absolute(ov),\r\n 1e-8))\r\n print >> ssio, \" Max Rel Diff: \", numpy.max(reldiff)\r\n print >> ssio, \" Mean Rel Diff: \", numpy.mean(reldiff)\r\n print >> ssio, \" Median Rel Diff: \", numpy.median(reldiff)\r\n print >> ssio, \" Std Rel Diff: \", numpy.std(reldiff)\r\n # only if all succeeds to we add anything to sio\r\n print >> sio, ssio.getvalue()\r\n except Exception:\r\n pass\r\n\r\n print >> sio, \" Reason: \", str(self.reason)\r\n print >> sio, \" Old Graph:\"\r\n print >> sio, self.old_graph\r\n print >> sio, \" New Graph:\"\r\n print >> sio, self.new_graph\r\n print >> sio, \"\"\r\n print >> sio, \"Hint: relax the tolerance by setting tensor.cmp_sloppy=1\"\r\n print >> sio, \" or even tensor.cmp_sloppy=2 for less-strict comparison\"\r\n return sio.getvalue()", "title": "" }, { "docid": "4ee320f32c654ccaaa9a32eb840dc774", "score": "0.42009506", "text": "def __str__(self):\n raise NotImplementedError(\"__str__ not implemented for \"+str(type(self)))", "title": "" }, { "docid": "0b83f2a0bbbe894fc5f5ebb3d073a319", "score": "0.42000028", "text": "def toString():", "title": "" }, { "docid": "7aba75167bf6d7a7b4001b04c21081c0", "score": "0.41982216", "text": "def represent_pkgs(cls, dlpkgs, numpkgs, a, b):\n if numpkgs == 0:\n return '{0:^{1}s}'.format('-', a + b + 1)\n elif dlpkgs >= numpkgs:\n return '{0:>{1}}'.format(dlpkgs, a + b + 1)\n else:\n return '{0:>{2}}/{1:<{3}}'.format(dlpkgs, numpkgs, a, b)", "title": "" }, { "docid": "750e458c125d0d42e752171a7370295d", "score": "0.4194001", "text": "def prepare_input(seating_plan):\n height = len(seating_plan)\n width = len(seating_plan[0]) \n concat='.'.join([ '.' 
* width ] + seating_plan )\n return width, list(map( DECODE.get, concat ))", "title": "" }, { "docid": "4521cd937dde62f76c148e62260d8bb1", "score": "0.4192147", "text": "def _from_physical_space(self, a_n, lobatto, use_mp, dps):\n pass", "title": "" }, { "docid": "a729df7a966067b09378cf0f8b07c624", "score": "0.41899502", "text": "def get_default_value(field, current_message_package):\n if field.is_array:\n if not field.array_len:\n return '[]'\n else:\n field_copy = deepcopy(field)\n field_copy.is_array = False;\n field_default = get_default_value(field_copy, current_message_package)\n return 'new Array({}).fill({})'.format(field.array_len, field_default)\n elif field.is_builtin:\n if is_string(field.type):\n return '\\'\\''\n elif is_time(field.type):\n return '{secs: 0, nsecs: 0}'\n elif is_bool(field.type):\n return 'false'\n elif is_float(field.type):\n return '0.0'\n else:\n return '0';\n # else\n (package, msg_type) = field.base_type.split('/')\n if package == current_message_package:\n return 'new {}()'.format(msg_type)\n else:\n return 'new {}.msg.{}()'.format(package, msg_type)", "title": "" }, { "docid": "9ba1ada0c331ce83afbbb6feed31000b", "score": "0.4178813", "text": "def from_dual(self):\n return \"\"", "title": "" }, { "docid": "b11ea8c5139efa8c75bf5cc523bfb7a4", "score": "0.41760975", "text": "def test_to_String(self) -> None:\n assert to_String(1) == \"1\", to_String(1)\n assert to_String([1, 2, 3]) == str([1, 2, 3]), to_String([1, 2, 3])\n assert to_String(\"foo\") == \"foo\", to_String(\"foo\")\n assert to_String(None) == 'None'\n # test low level string converters too\n assert to_str(None) == 'None'\n assert to_bytes(None) == b'None'\n\n s1 = UserString('blah')\n assert to_String(s1) == s1, s1\n assert to_String(s1) == 'blah', s1\n\n class Derived(UserString):\n pass\n\n s2 = Derived('foo')\n assert to_String(s2) == s2, s2\n assert to_String(s2) == 'foo', s2", "title": "" }, { "docid": "a02e9a0c78b837ec85c44bc61d5aabcd", "score": "0.41746575", "text": "def build_from_string(self, obj):\n if self.string_type is unicode and not isinstance(obj, unicode):\n obj = str(obj).decode('utf-8')\n if self.string_type is str and not isinstance(obj, str):\n obj = unicode(obj).encode('utf-8')\n return self.art_type(obj.splitlines())", "title": "" } ]
7ccb997b2a94b9bc16a7af1440063646
Registers the existence of an image with `name`, and that the image used displayable d. `name` A tuple of strings.
[ { "docid": "aced93de0282fe4d81942a6afc252a95", "score": "0.72605515", "text": "def register_image(name, d):\r\n\r\n tag = name[0]\r\n rest = name[1:]\r\n\r\n images[name] = d\r\n image_attributes[tag].append(rest)", "title": "" } ]
[ { "docid": "63112d4bac877160bcf20cebe0265b09", "score": "0.66050875", "text": "def add_image(self, name, camera_instance):\n lbl = ClientLabel(self.canvas, bg=\"white\", bd=self.canvas.label_border_size).define(name, camera_instance)\n self.canvas.images[name] = lbl\n # self.canvas.itemconfig(lbl, tags=(\"all\", name))\n lbl.pack()", "title": "" }, { "docid": "a4056f1916a0be15534e2b295c0f0c59", "score": "0.61607885", "text": "def img(self, name, img_, **kwargs):\n self.vis.images(img_.cpu().numpy(),\n win=name,\n opts=dict(title=name),\n **kwargs\n )", "title": "" }, { "docid": "7ef9ccf4f3479b967cf67a78c9cbb66c", "score": "0.60791755", "text": "def image_exists(name, expression, tag, precise=True):\r\n\r\n\r\n # Add the tag to the set of known tags.\r\n tag = tag or name[0]\r\n image_prefixes[tag] = True\r\n\r\n if expression:\r\n return\r\n\r\n namelist = list(name)\r\n names = \" \".join(namelist)\r\n\r\n # Look for the precise name.\r\n while namelist:\r\n if tuple(namelist) in renpy.display.image.images:\r\n return\r\n\r\n namelist.pop()\r\n\r\n # If we're not precise, then we have to start looking for images\r\n # that we can possibly match.\r\n if not precise and image_exists_imprecise(name):\r\n return\r\n\r\n report(\"The image named '%s' was not declared.\", names)", "title": "" }, { "docid": "18023123b66ec6b289bd866dda05f4f2", "score": "0.6072991", "text": "def addIcon( self, filepath, wxBitmapType, name ):\n try:\n if os.path.exists( filepath ):\n key = self.imagelist.Add( wx.Bitmap( filepath, wxBitmapType ) )\n self.iconentries[name] = key\n except Exception, e:\n print e", "title": "" }, { "docid": "0792ee2e81122c6aadec9508c1a20ed9", "score": "0.60023457", "text": "def update_image_name(self):", "title": "" }, { "docid": "686be54b3138d972b418647acdcd6881", "score": "0.5841859", "text": "def load_image(self, name):\n return load_image(name)", "title": "" }, { "docid": "78b44f59d4f238b7d974eb7bf1f9b813", "score": "0.58242965", "text": "def get_image_by_name(name):\n generator = GLANCE_CLI.images.list(name=name)\n for img in generator:\n if img.name == name:\n return img\n return None", "title": "" }, { "docid": "c233e5ebc3c1977e7f3e0ace1636c695", "score": "0.58172166", "text": "def show_image_add(self, mysurf, imagename, xoffset, yoffset, alpha=None):\n \n image = self.resources[imagename]\n image_rect = image.get_rect()\n if alpha != None:\n image.set_alpha(alpha)\n image_rect.centerx = mysurf.get_rect().centerx + xoffset\n image_rect.centery = mysurf.get_rect().centery + yoffset\n \n mysurf.blit(image,image_rect)\n return mysurf", "title": "" }, { "docid": "17bb685133591e8e0faad9a6c133c529", "score": "0.5740113", "text": "def image_with_name(image, class_name):\n return image_grid([{image, class_name}], 1)", "title": "" }, { "docid": "11e34c143b4de2a21a2a2c10c1c32df9", "score": "0.5695345", "text": "def save_image(self, img, name):\n save_image(img, name)", "title": "" }, { "docid": "28bbcfccb39ae0cede2d094cf5810703", "score": "0.56143486", "text": "def ensure_single_image_name(request, result):\n if result['image'] and 'id' in result['image']:\n image = api.glance.image_get(request, result['image']['id'])\n if image:\n result['image_display_name'] = image.name", "title": "" }, { "docid": "e9b73f6fdb3e58326d5d3394a77b0cc5", "score": "0.5612377", "text": "def image_exists(image_name):\n image_list = get_image_list()\n for image in image_list:\n if \"name\" in image and image_name in image[\"name\"]:\n return True\n\n return False", "title": "" }, { "docid": 
"0921ec521982fa8b9a25d8df00575771", "score": "0.55685055", "text": "def add_image(self, image, name=None):\n if isinstance(image, np.ndarray):\n ip = ImageProcessor(image)\n ip.fit_in(self._fixed_width, self._fixed_height)\n image = ip.get_image('HWC')\n q_pixmap = QPixmap_from_np(image)\n elif isinstance(image, QPixmap):\n q_pixmap = image\n else:\n raise ValueError(f'Unsupported type of image {image.__class__}')\n\n self._pixmaps.append(q_pixmap)\n self.update()", "title": "" }, { "docid": "8d1a94980fe3645a48638a63a85fa301", "score": "0.55016553", "text": "def _add_image(self, fname):\n raise NotImplementedError", "title": "" }, { "docid": "8fcbcbf458b1d02d11b59f259fd8a803", "score": "0.54932165", "text": "def go_grab_a_picture(name):\n img_data = pull_website(name)\n filename = os.path.join(config.NAMES_DIR, '{}.jpg'.format(name))\n create_image(img_data, filename)", "title": "" }, { "docid": "3199480cb09355ca168a159b4c8aa6fa", "score": "0.5491895", "text": "def Gload_image(name):\n \n fullname = os.path.join('data', name)\n try:\n image = pygame.image.load(fullname)\n except:\n image = pygame.Surface((800, 800))\n image.fill(colors['CKEY'])\n image.set_colorkey(colors['CKEY'])\n image = image.convert_alpha()\n # Preserving per-pixel-alphas for semi-transparent to transparent\n # overlay images...\n return image", "title": "" }, { "docid": "53bad62ab76bac6e975f6c2aa330a9b3", "score": "0.5489908", "text": "def add_image_def(self, filename: str, size_in_pixel: tuple[int, int], name=None):\n if \"ACAD_IMAGE_VARS\" not in self.rootdict:\n self.objects.set_raster_variables(frame=0, quality=1, units=\"m\")\n if name is None:\n name = filename\n return self.objects.add_image_def(filename, size_in_pixel, name)", "title": "" }, { "docid": "bbd7b73edf288411b4ba84bb6ee75f85", "score": "0.54760325", "text": "def add_image(self, image_id, path, class_name, **kwargs):\r\n image_info = {'id':image_id, 'path':path, 'class_name':class_name, }\r\n image_info.update(kwargs)\r\n self.image_info.append(image_info)", "title": "" }, { "docid": "1d297ed19ad3de9f04a801c0a4b50c08", "score": "0.5473989", "text": "def images_exist(name):\n log = logging.getLogger(\"cirrus.instance.Instance\")\n\n log.debug(\"Describing images.\")\n response = resources.ec2_client.describe_images(\n Filters=[{\"Name\": \"name\", \"Values\": [name]}], Owners=[\"self\"])\n result = len(response[\"Images\"]) > 0\n\n log.debug(\"Done.\")\n\n return result", "title": "" }, { "docid": "894772998b74734bc25697ef35c1b3dc", "score": "0.54681206", "text": "def create_image(name):\n new_uuid = create_uuid()\n\n i_api = ImageApi(bind=engine)\n i_api.open()\n img = i_api.add_image_file(uuid=new_uuid, name=name)\n i_api.commit()\n img_dict = img.as_dict()\n i_api.close()\n\n return img_dict", "title": "" }, { "docid": "41a79fc8fce12b9d4d6e3563c6618056", "score": "0.5467313", "text": "def profiled(image_name):\r\n if image_name in profiled_images and image_name not in appeared:\r\n appeared.add(image_name)\r\n return True\r\n else:\r\n return False", "title": "" }, { "docid": "a58ae29525ed0a0cecc79f716215e0f3", "score": "0.53910017", "text": "def ensure_image_name(request, result, image_id_str='id'):\n try:\n images, has_more_data, has_prev_data = api.glance.image_list_detailed(\n request,\n )\n except Exception:\n images = []\n return\n images_dict = SortedDict([(t.id, t) for t in images])\n for item in result:\n \"\"\"In case that instance booted from volume\n has no images information \"\"\"\n if item.image:\n if 
item.image.has_key(image_id_str) and \\\n not hasattr(item, 'image_display_name'):\n image = images_dict.get(item.image.get(image_id_str), None)\n # this is to manage the v2.0 glance image properties\n if image is not None:\n item.image['properties'] = ensure_image_prop_dict(getattr(image, 'properties'))\n item.image_display_name = getattr(image, \"name\", None)", "title": "" }, { "docid": "bd705003c2a6abb55201196f83b886e7", "score": "0.53707236", "text": "def register_artifact(self, name: Text, artifact_type, description,\n uri, properties):", "title": "" }, { "docid": "a972a3c62cc50eb17943e6205e2bdf7e", "score": "0.536339", "text": "def get_image(name):\n root = os.path.join(settings.PROJECT_ROOT, 'nodes', 'images')\n if name in os.listdir(root):\n return name\n else:\n return 'raw'", "title": "" }, { "docid": "edb332c7a238606e2a27f533b27306e3", "score": "0.5351752", "text": "def step_see_image_listed(context, image_name):\n wrappers.expect_exact(context, 'hello-world')", "title": "" }, { "docid": "ab3d57613225df2047851ac6c7d61f3c", "score": "0.5343823", "text": "def showImage(image, name=\"Test image\"):\n cv2.imshow(name, image)\n cv2.waitKey()", "title": "" }, { "docid": "8917f18eaaa3ea773d7e9e8b9c2f515c", "score": "0.5321826", "text": "def addTexture(self, name, texture):\n self.textures[name] = texture", "title": "" }, { "docid": "d619a317e099c9849c3acc36d75c7c56", "score": "0.5317397", "text": "def write_image(self, image, name):\n image.save(name)", "title": "" }, { "docid": "c954b89dd99a9befd78d41a9e79546fe", "score": "0.53109133", "text": "def load_png(self, name):\n fullname = os.path.join('data', name)\n dimensions = self.conf['dimensions']\n try:\n image = pygame.image.load(fullname)\n if image.get_alpha is None:\n image = image.convert()\n else:\n image = image.convert_alpha()\n image = pygame.transform.smoothscale(image, (dimensions[0], dimensions[1]))\n except pygame.error, message:\n print 'Cannot load image:', fullname\n raise SystemExit, message\n return image, image.get_rect()", "title": "" }, { "docid": "1b76a7b873b4c7a2f8cf49724e9d1a22", "score": "0.527536", "text": "def _add_image(self, fname):\n self._images.append(fname)", "title": "" }, { "docid": "22a25ae7ea23d3581cff42b3dbe89e7b", "score": "0.52544653", "text": "def displayNeighborImage(self, id, image_name):\n \n # store the neighbor image\n if self.neighbor_item.has_key(id):\n self.neighbor_item[id][6] = image_name\n\n # refresh the drawing\n #self.DoDrawing()\n self.toRefresh = TRUE", "title": "" }, { "docid": "c8629807735f74c4acc649906af7f08b", "score": "0.5252123", "text": "def load_img(name):\n\n fullname = name\n try:\n image = pygame.image.load(fullname)\n if image.get_alpha() is None:\n image = image.convert()\n else:\n image = image.convert_alpha()\n except pygame.error as message:\n print(\"Error: couldn't load image: \", fullname)\n raise SystemExit(message)\n return (image, image.get_rect())", "title": "" }, { "docid": "ff950c4a7067d9a6a5bc365fc38b75ca", "score": "0.52477354", "text": "def update_image(self,val):\n Image.objects.filter(id = self.id).update(name = val)", "title": "" }, { "docid": "c40ca3b94297d75b0de57bf8a9e3eeef", "score": "0.52382904", "text": "def draw(self, var_name, images): \n if var_name not in self.plots:\n self.plots[var_name] = self.viz.images(images, env=self.env)\n else:\n self.viz.images(images, env=self.env, win=self.plots[var_name])", "title": "" }, { "docid": "3f5a51c1f2beac1f44e5af017a9935c0", "score": "0.5231609", "text": "def image(self, nameofpic):\n image = 
Image.open(nameofpic)\n photo = ImageTk.PhotoImage(image)\n hintpic = tk.LabelFrame(self.parent, relief=\"flat\")\n label = tk.Label(hintpic, image=photo)\n label.image = photo\n hintpic.grid(row=0, column=2, sticky='W', padx=5, pady=5, ipadx=1, ipady=1)\n label.grid()", "title": "" }, { "docid": "95b688c4116e7c22df4dcc78dce6b11e", "score": "0.52265394", "text": "def get_image_name(self, name):\n images = glance_image_list(self.glance)\n img = glance_images_by_name(name, images)\n return img[0]", "title": "" }, { "docid": "b06dbd987742b5c7f415821f8cbf87ab", "score": "0.5210992", "text": "def display_image(image_data, image_name_for_title):\n fig = plt.figure(figsize=(15,15))\n plt.imshow(image_data)\n plt.xticks([])\n plt.yticks([])\n plt.title(image_name_for_title)\n plt.draw()\n plt.show()", "title": "" }, { "docid": "9ce98a786341377531c53124582ec751", "score": "0.51993287", "text": "def load_png(name):\n fullname = os.path.join('data', name)\n try:\n image = pygame.image.load(fullname)\n if image.get_alpha is None:\n image = image.convert()\n else:\n image = image.convert_alpha()\n except pygame.error, message:\n print \"Impossible de charger l'image : \", fullname\n raise SystemExit, message\n return image, image.get_rect()", "title": "" }, { "docid": "b10830c5ca1f5d5b9df6cd0740c29156", "score": "0.5182446", "text": "def image(file_name, alignment=Align.CENTER, coordinate=None, clear=True):\n return", "title": "" }, { "docid": "e014dbd9c0e785c5655f25b44e1d2758", "score": "0.51595837", "text": "def load_image(name):\n path = os.path.join(DATA_DIR, name + '.png')\n try:\n raw_image = pygame.image.load(path)\n return pygame.transform.scale(raw_image, (CARD_WIDTH, CARD_HEIGHT))\n except pygame.error:\n logging.error('Cannot load image: {}'.format(path))\n logging.error(\n 'Using text instead of image: {}'.format(name.upper()))\n return card_font.render(\n name.upper(), ANTIALIASING, CARD_FONT_COLOR)", "title": "" }, { "docid": "c6a75d413e895770d2546c87f3e032ce", "score": "0.5154223", "text": "def add_img_to_list(self, fname):\n if fname in self.imglist.values():\n self.set_status_message('{} {}'.format(fname, \" already in list\"))\n print(\"already in list:\", fname)\n return\n alias = ntpath.basename(fname)\n self.imglist[alias] = fname\n print(\"loading file:\", fname)\n self.imglistbox.insert(END, alias)", "title": "" }, { "docid": "8d39dd1c98907f1a5545eb27be826cfd", "score": "0.51504683", "text": "def available(name):\n images = list_images()\n available = False\n if not images:\n return False\n\n for image in images:\n if name in image.get(\"repoTags\", []):\n available = True\n break\n if name in image.get(\"repoDigests\", []):\n available = True\n break\n return available", "title": "" }, { "docid": "d9a220fd65387d0487a89270efe52142", "score": "0.51481205", "text": "def replace_sprite(self, name, img):\n i = self.load_image(img)\n @self.step\n def _replace_sprite():\n self.sprites[name].image = i", "title": "" }, { "docid": "99f9edcf54ec41496a3c74e5863b2a9c", "score": "0.5141354", "text": "def add_image(self, img):\n return", "title": "" }, { "docid": "a4627035180219c23ca84ef458b645cf", "score": "0.514026", "text": "def show_image(name, flag, width, height, n_channel, line_step, data_address, data_symbol):\n\n width = int(width)\n height = int(height)\n n_channel = int(n_channel)\n line_step = int(line_step)\n data_address = int(data_address)\n\n infe = gdb.inferiors()\n memory_data = infe[0].read_memory(data_address, line_step * height)\n\n # Calculate the memory padding to change 
to the next image line.\n # Either due to memory alignment or a ROI.\n if data_symbol in ('b', 'B'):\n elem_size = 1\n elif data_symbol in ('h', 'H'):\n elem_size = 2\n elif data_symbol in ('i', 'f'):\n elem_size = 4\n elif data_symbol == 'd':\n elem_size = 8\n padding = line_step - width * n_channel * elem_size\n\n # Format memory data to load into the image.\n image_data = []\n if n_channel == 1:\n mode = 'L'\n fmt = '%d%s%dx' % (width, data_symbol, padding)\n for line in chunker(memory_data, line_step):\n image_data.extend(struct.unpack(fmt, line))\n elif n_channel == 3:\n mode = 'RGB'\n fmt = '%d%s%dx' % (width * 3, data_symbol, padding)\n for line in chunker(memory_data, line_step):\n image_data.extend(struct.unpack(fmt, line))\n else:\n gdb.write('Only 1 or 3 channels supported\\n', gdb.STDERR)\n return\n\n scale_alpha = 1\n scale_beta = 0\n # Fit the opencv elemente data in the PIL element data\n if data_symbol == 'b':\n image_data = [i+128 for i in image_data]\n elif data_symbol == 'H':\n image_data = [i>>8 for i in image_data]\n elif data_symbol == 'h':\n image_data = [(i+32768)>>8 for i in image_data]\n elif data_symbol == 'i':\n image_data = [(i+2147483648)>>24 for i in image_data]\n elif data_symbol in ('f','d'):\n # A float image is discretized in 256 bins for display.\n max_image_data = max(image_data)\n min_image_data = min(image_data)\n img_range = max_image_data - min_image_data\n if img_range > 0:\n scale_beta = min_image_data\n scale_alpha = img_range / 255.0\n image_data = [int(255 * (i - min_image_data) / img_range) \\\n for i in image_data]\n else:\n image_data = [0 for i in image_data]\n \n dump_data = []\n if n_channel == 3:\n for i in range(0, len(image_data), 3):\n dump_data.append((image_data[i+2], image_data[i+1], image_data[i]))\n if n_channel == 1:\n dump_data = image_data\n\n # Show image.\n if n_channel == 1:\n img = Image.new(mode, (width, height))\n if n_channel == 3:\n img = Image.new(mode, (width, height), color=(0,0,0))\n\n #img = np.reshape(dump_data, (height,width,n_channel)).astype(np.uint8)\n #print(img.shape)\n #cv2.imshow('image', img)\n #cv2.waitKey(0)\n\n img.putdata(dump_data)\n img = pl.asarray(img);\n\n fig = pl.figure()\n fig.canvas.set_window_title(name)\n b = fig.add_subplot(111)\n\n\n if n_channel == 1:\n b.imshow(img, cmap = pl.cm.Greys_r, interpolation='nearest')\n elif n_channel == 3:\n b.imshow(img, interpolation='nearest')\n\n def format_coord(x, y):\n col = int(x+0.5)\n row = int(y+0.5)\n if col>=0 and col<width and row>=0 and row<height:\n if n_channel == 1:\n z = img[row,col] * scale_alpha + scale_beta\n return '(%d, %d), [%1.2f]'%(col, row, z)\n elif n_channel == 3:\n z0 = img[row,col,0] * scale_alpha + scale_beta\n z1 = img[row,col,1] * scale_alpha + scale_beta\n z2 = img[row,col,2] * scale_alpha + scale_beta\n return '(%d, %d), [%1.2f, %1.2f, %1.2f]'%(col, row, z0, z1, z2)\n else:\n return 'x=%d, y=%d'%(col, row)\n\n b.format_coord = format_coord\n if(flag == 'block'):\n pl.show(block=True)\n else:\n pl.show(block=False)", "title": "" }, { "docid": "4ab330b956a9f09d803f363964405e09", "score": "0.51395696", "text": "def show_image(tens, imgname=None, scale=SCALE_01):\r\n r = tens.max() - tens.min()\r\n img = PIL.Image.new(\"L\", (28,28))\r\n scaled = tens\r\n if scale == SCALE_RANGE:\r\n scaled = (tens - tens.min())*255/r\r\n elif scale == SCALE_01:\r\n scaled = tens*255\r\n img.putdata(to_list(scaled))\r\n if imgname is None:\r\n img.show()\r\n else:\r\n img.save(imgname)", "title": "" }, { "docid": 
"f375b06033753dbb5e03b8348cd97104", "score": "0.51380444", "text": "def updateMaskPlayer(self, name, image): \n for item in self.items:\n if item.text.find(name) > -1:\n filePath = GG.genteguada.GenteGuada.getInstance().getDataPath(image)\n size = 46, 31 \n try:\n generateImageSize(filePath, [46, 31], os.path.join(GG.utils.LOCAL_DATA_PATH, name))\n except:\n return \n filePath = os.path.join(GG.utils.LOCAL_DATA_PATH, name)\n item._icon = ocempgui.draw.Image.load_image(filePath).convert_alpha()", "title": "" }, { "docid": "d4679184889af33e41df878d61abb33c", "score": "0.51362616", "text": "def create_image(self, Name: str, BlockDeviceMappings: List = None, Description: str = None, DryRun: bool = None, NoReboot: bool = None) -> 'Image':\n pass", "title": "" }, { "docid": "236f02fb5b3b177e1fa4eefe03f583df", "score": "0.51337725", "text": "def imgCallout(url, name):\n if name is None:\n name = url\n return \"\"\"<div class=\"ic\"><a href=\"%s\"><img class=\"pt\" src=\"%s\" width=\"100px\"><br>%s</a></div>\"\"\" % (\n url, url, name)", "title": "" }, { "docid": "9b9737a0ed8a241281b0485289340d23", "score": "0.5131534", "text": "def image_load() -> Image:\n image = input(\"Name of image to load:\")\n print(\"Loading Image\")\n try:\n loaded_image = load_image(image)\n show(loaded_image)\n print(\"Image has been loaded\")\n return loaded_image\n except:\n print(\"Image could not be loaded\")\n return 'Not Loaded'", "title": "" }, { "docid": "e1da2413539bf122e64d11de39b8601a", "score": "0.5120619", "text": "def is_a_pic(self, name):\n if len(name.split('.')) < 2:\n return False\n ext = name.split('.')[-1]\n return ext.upper() in self.pic_exts", "title": "" }, { "docid": "14b00261303ed624457a648d303139ec", "score": "0.51085", "text": "def add_extension(self, name):\n return ExtensionImage(self, name)", "title": "" }, { "docid": "47cf67f2da2df79c7990bb7aa29a9893", "score": "0.5105681", "text": "def upload_video_image_name(self, name):\n\n if not self.mode == 'normal_video':\n self._set_dmd_to_video_mode()\n\n print \"Uploading by name\"\n if name in os.listdir(self.dmd_image_directory):\n print \"found file {}\".format(name)\n self.video_image = VideoImage(pkl.load(open(os.path.join(self.dmd_image_directory, name), 'rb'))['pattern'])\n else:\n print \"file {} not found\".format(name)\n return 0", "title": "" }, { "docid": "db1657510d11c422669aec6fc90eff39", "score": "0.51022816", "text": "async def register(event,\n name: ('str', 'Please provide a name to register.'),\n):\n check_permission(event)\n \n plugin = get_plugin(name)\n if (plugin is not None):\n abort(f'There is already a plugin added with the given name: `{plugin.name}`.')\n \n try:\n register_plugin(name)\n except ImportError:\n title = f'Registering {name!r} plugin failed.'\n description = 'There is no such plugin.'\n else:\n title = f'Registering {name!r} was successful.'\n description = None\n \n return Embed(title, description)", "title": "" }, { "docid": "6ce6f0adfb9a6ae51df6a4a257f6566a", "score": "0.51017797", "text": "def load_png(name):\n try:\n image = pygame.image.load(name)\n if image.get_alpha is None:\n image = image.convert()\n else:\n image = image.convert_alpha()\n except pygame.error as message:\n print('Cannot load image:' + name)\n raise SystemExit(message)\n return image, image.get_rect()", "title": "" }, { "docid": "67943eb575b90d9eef933f4dcaf7f9dc", "score": "0.50998604", "text": "def display (im, stretch=False, bgr=False, name=\"Eve image\", wait=False):\n global use_graphics, sixel_quant\n\n # Ensure the graphics 
type is set.\n if use_graphics is None:\n select_graphics_type ()\n\n if use_graphics == \"default\":\n display_external (im, stretch=stretch, bgr=bgr, name=name, wait=wait)\n elif use_graphics == \"sixel\":\n display_sixel (im, stretch=stretch, bgr=bgr, name=name)\n elif use_graphics == \"tty\":\n lppic (im, [\" ./WX#\"], width=80)\n elif use_graphics == \"lp\":\n lppic (im)\n else:\n print (\"Internal error in display ('%s')!\" % use_graphics,\n file=sys.stderr)\n sys.exit (99)", "title": "" }, { "docid": "da1a0f46b4b04f67985bc8bdbe6f3c12", "score": "0.508614", "text": "def image(query, q, edit, e, annotation, backgroundColor, defineTemplate, docTag,\n dragCallback, dropCallback, enable, enableBackground, exists, fullPathName,\n height, highlightColor, image, isObscured, manage, noBackground,\n numberOfPopupMenus, parent, popupMenuArray, preventOverride, useTemplate,\n visible, visibleChangeCommand, width):", "title": "" }, { "docid": "3caef4d5e63f0b763319cae440ebcaea", "score": "0.50829476", "text": "def set_name(self, name):\n self.name = name\n \n cmd = \"sudo hciconfig %s name '%s'\" % (self.interface, name)\n exitcode = os.system(cmd)\n if exitcode != 0:\n return False\n return True", "title": "" }, { "docid": "cdb01405f1f1e0e74d5331687429e3bd", "score": "0.5077727", "text": "def test_if_name_exists():\r\n\r\n global pic_id\r\n global name\r\n picture_path = path.join(BASE_FOLDER, name)\r\n\r\n if not os.path.isdir(picture_path):\r\n os.makedirs(picture_path)\r\n\r\n files = [f for f in os.listdir(picture_path) if path.isfile(path.join(picture_path, f))]\r\n\r\n curr_max_id = -1\r\n\r\n for file in files:\r\n if name == file[:len(name)]:\r\n curr_id = int(file.split(\"_\")[-1].split(\".\")[0]) #TODO: BAD DETECTION use regex instead\r\n if curr_id > curr_max_id:\r\n curr_max_id = curr_id\r\n\r\n if curr_max_id > -1:\r\n print(\"[WARNING] Name already exists\")\r\n print(f\"[WARNING] Starting with id: {curr_max_id + 1}\")\r\n pic_id = curr_max_id + 1", "title": "" }, { "docid": "9af5bc3874248d8a17b5308362b60c0f", "score": "0.50734234", "text": "def haveImageWriter(filename): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "6168d94fa6847c22d51eb57e302478f9", "score": "0.5063011", "text": "def add_plot(self, name):\n self.plots += [name]", "title": "" }, { "docid": "9fff2013b792ef7eeaaebf62abb78e26", "score": "0.50583184", "text": "def load_png(name):\r\n fullname = os.path.join('media', name)\r\n try:\r\n image = pygame.image.load(fullname)\r\n if image.get_alpha() is None:\r\n image = image.convert()\r\n else:\r\n image = image.convert_alpha()\r\n except pygame.error as message:\r\n print('Cannot load image:', fullname)\r\n raise SystemExit(message)\r\n return image, image.get_rect()", "title": "" }, { "docid": "068a4ef5c8516656b925be97a34e4d9d", "score": "0.50440526", "text": "def texturePlacementContext(query, q, edit, e, exists, history, image1, image2, image3, labelMapping, name):", "title": "" }, { "docid": "33e5b309dd90dc64101d0f97493473eb", "score": "0.5038853", "text": "def receive_register_image(self, queue_name):\n # face_id | image_id\n msg = self.receive_once(queue_name)\n if msg:\n face_id = msg[0].lower().title()\n # print(\"Face id\", face_id)\n image_id = msg[1]\n # image_id = msg[2]\n return (face_id, image_id)\n return None, None", "title": "" }, { "docid": "d7345c1c75530697377f27641ad38160", "score": "0.50357425", "text": "def get_pictures_by_name(name='Ariel Sharon'):\n lfw_people = load_data()\n selected_images = []\n n_samples, 
h, w = lfw_people.images.shape\n target_label = list(lfw_people.target_names).index(name)\n for image, target in zip(lfw_people.images, lfw_people.target):\n if (target == target_label):\n image_vector = image.reshape((h*w, 1))\n selected_images.append(image_vector)\n return np.hstack(selected_images).T, h, w", "title": "" }, { "docid": "f2f9168f6f9bc00bc839f29aa4cf1539", "score": "0.50289756", "text": "def withName(self, name):", "title": "" }, { "docid": "52abe49bdfb9a2cd1fd258bdcc842bd9", "score": "0.50186515", "text": "def __init__(self, name, image_path, position=(0, 0)):\n self.name = name\n self.sub_name = self.name[0:1]\n self.image = pygame.transform.scale(pygame.image.load(image_path), \\\n (IMAGE_FACTOR, IMAGE_FACTOR))\n self.image.convert_alpha()\n self.is_collected = False\n self.position = (position[0], position[1])", "title": "" }, { "docid": "d6dd2d51e55f9cb881b824c4a35141d1", "score": "0.5016224", "text": "def add_person(self, image: str, name: str) -> bool:\n # begin facial recognition on image\n loaded_image = face_recognition.load_image_file(image)\n # Run facial recognition on loaded image\n encoding = face_recognition.face_encodings(loaded_image)[0]\n # passes the first encoding as a parameter to create the encoding file\n enc = self.create_enc_file(encoding, name)\n # Add the new user to the current knownMatrix and add mapping\n if enc is not None:\n self.knownMatrix.append(np_json_read(os.path.join(self.known, enc)))\n self.columnToName[len(list(self.knownMatrix)) - 1] = self.who.clear()\n return True\n else:\n return False", "title": "" }, { "docid": "2ea02758f781f7506c7dfb8a2624b40c", "score": "0.5009576", "text": "def picture(query, q, edit, e, annotation, backgroundColor, defineTemplate, docTag,\n dragCallback, dropCallback, enable, enableBackground, exists, fullPathName,\n height, highlightColor, image, isObscured, manage, noBackground,\n numberOfPopupMenus, parent, popupMenuArray, preventOverride, tile, useTemplate,\n visible, visibleChangeCommand, width):", "title": "" }, { "docid": "ac5436a095090cd2f1f92ffa2ef7d806", "score": "0.50082433", "text": "def __init__(self, input_name=\"image\", output_names=[\"image\", \"rotation_label\"]):\n super().__init__(input_name=input_name, output_names=output_names)", "title": "" }, { "docid": "c7c174e2bedc5f5ccb975edb56e107ed", "score": "0.5006522", "text": "def augmentation_visualize_and_save(config, images, images_names, path, times: int = 2):\n\n rows = len(images)\n cols = times + 1\n for (index, image), name in zip(enumerate(images), images_names):\n plt.subplot(rows, cols, index * cols + 1)\n plt.axis('off')\n plt.title(name)\n _image = bgr2rgb_using_opencv(image)\n plt.imshow(_image)\n for col in range(1, cols):\n plt.subplot(rows, cols, index * cols + col + 1)\n plt.axis('off')\n plt.title(\"Augmented NO. 
\" + str(col))\n # augment image\n augmented_image = augment_image_using_imgaug(_image, config)\n plt.imshow(augmented_image)\n\n # Save the full figure\n isExists = os.path.exists(path)\n if not isExists:\n os.makedirs(path)\n now_time = datetime.datetime.now().strftime('%Y%m%d_%H%M%S')\n savefig(os.path.join(path, \"%s_Comp.png\" % now_time), dpi=600)\n # Clear the current figure\n plt.clf()\n plt.cla()\n plt.close()", "title": "" }, { "docid": "c4136ac99acc4c95fab7ad96c8e1aacc", "score": "0.5002044", "text": "def save(self,strname=None,ext=\".png\",name=\"img\"):\n if not strname:\n strname = self.win\n elif \"{win}\" in strname:\n strname = strname.format(win=self.win)\n strname+=ext\n r = cv2.imwrite(strname,getattr(self,name))\n if FLAG_DEBUG and r: print(name, \"from Plotim saved as\",strname)\n return r", "title": "" }, { "docid": "49687c4b0aa698c3f3f1e8424609d54e", "score": "0.49987918", "text": "def add_roi(self, name, pngdata, add_path=True):\n #self.svg deletes the images -- we want to save those, so let's load it again\n svg = etree.parse(self.svgfile, parser=parser)\n imglayer = _find_layer(svg, \"data\")\n if add_path:\n _make_layer(_find_layer(svg, \"rois\"), name)\n\n #Hide all the other layers in the image\n for layer in imglayer.findall(\".//{%s}g\"%svgns):\n layer.attrib[\"style\"] = \"display:hidden;\"\n\n layer = _make_layer(imglayer, \"img_%s\"%name)\n layer.append(E.image(\n {\"{http://www.w3.org/1999/xlink}href\":\"data:image/png;base64,%s\"%pngdata},\n id=\"image_%s\"%name, x=\"0\", y=\"0\",\n width=str(self.svgshape[0]),\n height=str(self.svgshape[1]),\n ))\n\n with open(self.svgfile, \"w\") as xml:\n xml.write(etree.tostring(svg, pretty_print=True))", "title": "" }, { "docid": "e1f97f646cb6890e46d58d6adeba0245", "score": "0.49858454", "text": "def saveImage(self, image, name, image_dir=None):\n if self.enabled==False: return None\n if image_dir == None: image_dir = self.image_dir\n try:\n image.figure.savefig(image_dir + name)\n except Exception as e:\n print(\"Error\",e)\n image.savefig(image_dir + name)", "title": "" }, { "docid": "994f4bf0a331e226debe082c56edcdc2", "score": "0.49854133", "text": "def loadImage(imageName):\n try:\n from PIL.Image import open\n except ImportError:\n from Image import open\n im = open(imageName)\n # im = im.convert(\"RGBA\")\n try:\n ix, iy, image = im.size[0], im.size[1], im.tobytes(\"raw\", \"RGB\", 0, -1)\n except SystemError:\n ix, iy, image = im.size[0], im.size[1], im.tobytes(\"raw\", \"RGB\", 0, -1)\n # generate a texture ID\n ID=glGenTextures(1)\n # make it current\n glBindTexture(GL_TEXTURE_2D, ID)\n glPixelStorei(GL_UNPACK_ALIGNMENT,1)\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT)\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT)\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)\n # copy the texture into the current texture ID\n glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, ix, iy, 0, GL_RGB, GL_UNSIGNED_BYTE, image)\n print(ix,iy)\n glGenerateMipmap(GL_TEXTURE_2D)\n # return the ID for use\n return ID", "title": "" }, { "docid": "39f25ad0695cfdb225316161ae8b5613", "score": "0.49847588", "text": "def pixel_storei(self, pname: int, param: int):\n pass", "title": "" }, { "docid": "4744b3faed23c076ef4d7e71a322a4a9", "score": "0.49845544", "text": "def get_pictures_by_name(name='Ariel Sharon'):\n lfw_people = load_data()\n selected_images = []\n n_samples, h, w = lfw_people.images.shape\n target_label = 
list(lfw_people.target_names).index(name)\n for image, target in zip(lfw_people.images, lfw_people.target):\n if target == target_label:\n image_vector = image.reshape(h * w)\n selected_images.append(image_vector)\n return selected_images, h, w", "title": "" }, { "docid": "7038626ee2c8ee1759ee0b31e9317b3e", "score": "0.49820462", "text": "def find_image(self, name):\n images = self.connection.list_images()\n return next((image for image in images if name == image.name), None)", "title": "" }, { "docid": "69cfc95c0eec5aba825dc27c74c3be3f", "score": "0.49816996", "text": "def name(self, name):\n\n self.container['name'] = name", "title": "" }, { "docid": "d27cf68f2463caf3c8a0dca4651fb292", "score": "0.49801925", "text": "def load_image(name, colorkey=None):\n fullname = os.path.join('data', 'images')\n fullname = os.path.join(fullname, name)\n image = None\n try:\n image = pygame.image.load(fullname).convert()\n except pygame.error, message:\n print 'Cannot load image:', fullname\n exit(message)\n if colorkey is not None:\n if colorkey is -1:\n colorkey = image.get_at((0, 0))\n image.set_colorkey(colorkey, pygame.RLEACCEL)\n return image, image.get_rect()", "title": "" }, { "docid": "b453c198b776eb1dce9c9a4cbadd9a5a", "score": "0.49787015", "text": "def show_and_load():\n load_image()\n showImg(file_path)", "title": "" }, { "docid": "f879fbdc48eecc223a383992cd312fcd", "score": "0.49677974", "text": "def get_img(ob, name, width, height, map_type):\n img = check_image_id(bpy.context, ob, map_type)\n if img is None:\n img = bpy.data.images.new(name, width, height)\n img.use_fake_user = True\n img['ID'] = ob['ID']\n img['mask'] = map_type\n return img", "title": "" }, { "docid": "1a4392b873418665e2b535abb298dfe3", "score": "0.49636632", "text": "def bind_sprite(self, objid, name, name1=''):\n\n obj = self.engine.containers[objid]\n\n\n if not objid in self.containers:\n self.add_container(obj)\n\n if name1=='':\n name1 = name\n\n obj.data[self.id][name1] = {}\n obj.data[self.id][name1]['sprite'] = self.sprites_container.data[self.id][name]\n obj.data[self.id][name1]['original_name'] = name", "title": "" }, { "docid": "6209d644f540da169d6a705f98d3df1a", "score": "0.49626902", "text": "def load_artwork (image_names):\n return [simplegui.load_image(asset_url + img + \".png\") for img in image_names]", "title": "" }, { "docid": "9790af0629b91c8eea18d59b0a8f531c", "score": "0.4959522", "text": "def paint_detected_face_on_image(frame, location, name=None):\n # unpack the coordinates from the location tuple\n top, right, bottom, left = location\n\n if name is None:\n name = 'Unknown'\n color = (0, 0, 255) # red for unknown face\n else:\n color = (0, 128, 0) # darkgreen for known face\n\n # Draw a box around the face\n cv2.rectangle(frame, (left, top), (right, bottom), color, 2)\n\n # Draw a label with a name below the face\n cv2.rectangle(frame, (left, bottom -35), (right, bottom), color, cv2.FILLED)\n cv2.putText(frame, name, (left + 6, bottom -6), cv2.FONT_HERSHEY_DUPLEX, 1.0, (255, 255, 255), 1)", "title": "" }, { "docid": "d92a4ad9152b6193a6224c593b9c5ae1", "score": "0.49487048", "text": "def load_image(file_name): \r\n\r\n # Load the surface\r\n surface = load_surface(file_name)\r\n \r\n return Image(surface)", "title": "" }, { "docid": "02a01e3b6a4e93b92cb12f0c6c980ea3", "score": "0.4936718", "text": "def __init__(self, name: str, outType: FileType, icon: Optional[str] = 'settings', outExtension: Optional[str] = ''):\n super(ImagePlugin, self).__init__(name, FileType.IMAGE, outType, icon, 
outExtension)", "title": "" }, { "docid": "3bb33f7639b27b884bc7efca3254397b", "score": "0.49358815", "text": "def __init__(self, filename:None, width=None, height=None, x=None, y=None, name=None):\n if width:\n self.__width = width\n if height:\n self.__height = height\n if name:\n self.__name = name\n if x:\n self.__x = x\n if y:\n self.__y = y\n if filename is not None:\n self.__image = self.loadicons(filename)", "title": "" }, { "docid": "813f6ac3035f17302786b6ec054dfc33", "score": "0.49343523", "text": "def augment(self, name):\n\n # create an instance and gather known material\n artist = Artist(name)\n artist.catalog()\n artist.gather()\n\n # append\n self.append(artist)", "title": "" }, { "docid": "8da7f8e6ebc869b9e1909378ba7936ab", "score": "0.49293184", "text": "def load_image(name, colorkey=None, alpha=False):\n if name in _IMAGE_CACHE:\n image = _IMAGE_CACHE[name].copy()\n image.set_alpha(_IMAGE_CACHE[name].get_alpha())\n else:\n try:\n image = pygame.image.load(name)\n _IMAGE_CACHE[name] = image\n except pygame.error, message:\n print 'Cannot load image:', name\n raise pygame.error, message\n if alpha:\n image = image.convert_alpha()\n else:\n image = image.convert()\n if colorkey is not None:\n if colorkey is -1:\n colorkey = image.get_at((0,0))\n image.set_colorkey(colorkey, RLEACCEL)\n return image, image.get_rect()", "title": "" }, { "docid": "7260e3c5819e025e2987e7ba098d719a", "score": "0.49279523", "text": "def test_name():\n np.random.seed(0)\n data = np.random.random((10, 15, 40))\n layer = Volume(data)\n assert layer.name == 'Volume'\n\n layer = Volume(data, name='random')\n assert layer.name == 'random'\n\n layer.name = 'img'\n assert layer.name == 'img'", "title": "" }, { "docid": "75b35fa0b41be6851b19b5083a61d1ee", "score": "0.49271157", "text": "def display_images(path, name_list, num_of_imgs):\n\n resize_dims = (32, 32)\n for i in range(len(name_list)):\n for j in range(num_of_imgs):\n showimg = cv2.imread(path + name_list[i][j])\n imgtitle = \"Class id\" + str(i) + name_list[i][j]\n cv2.imshow(imgtitle, cv2.resize(showimg, resize_dims))\n cv2.waitKey(0)\n cv2.destroyAllWindows()", "title": "" }, { "docid": "d577937d1bca98adbbe34badbe6f8961", "score": "0.49156547", "text": "def display(self,img):\n pass", "title": "" }, { "docid": "7db5987d56be8c0e92c1cb68726155e8", "score": "0.4905639", "text": "def load_image(self, **kwargs):\n ...", "title": "" }, { "docid": "05476430bcf5596ccc509fc4cddea3cf", "score": "0.49003926", "text": "def inject_screenshot(name):\n WebDriverModule.screenshot_listener.custom_screenshot(WebDriverModule.driver, name)", "title": "" }, { "docid": "7ea75f3e6f469b68349c9e95b8abfe10", "score": "0.48993063", "text": "def load_image(name):\n suffix = re.findall(r'\\.{1}\\w+', name)[0]\n \n if suffix in [\".pkl\", \".pklz\"]:\n return load_obj(name).astype(float)\n \n elif suffix in [\".jpg\", \".png\"]:\n return skimage.io.imread(name, as_grey=True).astype(float)", "title": "" }, { "docid": "48785915a2f30c4f56bb1d93b1ca2ac6", "score": "0.4895657", "text": "def register(name):\n\tdef registration(cls):\n\t\tcls.ul4onname = name\n\t\t_registry[name] = cls\n\t\treturn cls\n\treturn registration", "title": "" }, { "docid": "1ed05da891a7352d4230358fee6efa45", "score": "0.48904538", "text": "def showimage(s):\n global ymargin\n fn = '{id}_2d/{name}_2d.png'.format(id=options['id'], name=s.name)\n if os.path.exists(fn):\n img = mpimg.imread(fn)\n extent = None\n if s.name in extsd:\n extent = extsd[s.name]\n else:\n if not options['rdkit4depict']:\n 
options['dpi'] = 120\n # end if\n\n imy = len(img) + 0.\n imx = len(img[0]) + 0.\n imw = (xhigh-xlow+0.)/(options['fw']+0.)*imx/options['dpi']\n imh = (yhigh-ylow+0.)/(options['fh']+0.)*imy/options['dpi']\n\n if isinstance(s, bimolec):\n if s.x > (xhigh-xlow)/2.:\n extent = (s.x + xmargin/5.,\n s.x + xmargin/5. + imw,\n s.y-imh/2.,\n s.y+imh/2.)\n else:\n extent = (s.x - xmargin/5. - imw,\n s.x - xmargin/5.,\n s.y-imh/2.,\n s.y+imh/2.)\n # end if\n else:\n extent = (s.x - imw/2.,\n s.x + imw/2.,\n s.y-ymargin/5. - imh,\n s.y-ymargin/5.)\n # end if\n # end if\n im = ax.imshow(img, aspect='auto', extent=extent, zorder=-1)\n # add to dictionary with the well it belongs to as key\n imgsd[s] = im\n # end if", "title": "" }, { "docid": "a53710478ba65be06a25d722dab79af8", "score": "0.48851556", "text": "def add_data_asset(\n self,\n name: str,\n config: dict,\n ):\n self._assets[name] = config", "title": "" }, { "docid": "c19b2c0784ed68306d2ff942f063f5e6", "score": "0.48844054", "text": "def add_image(self, key):\n if not key in self.cache:\n self.cache[key] = pygame.image.load(key).convert_alpha()", "title": "" } ]
288646a7a29a1398ae5c2c55e1c5a00e
Override of handle function
[ { "docid": "984cb6beaa8290b18073f668e7e95949", "score": "0.0", "text": "def handle(self):\n server_interface = Authorization(self.server.database, self._set_auth_user)\n self.transport.start_server(server=server_interface)\n channel = self.transport.accept(self.TIMEOUT)\n logging.info(\"new connection from: {addr}\".format(addr=channel.getpeername()))\n SFTPHandler.users[self.transport] = self._get_auth_user()[\"email\"]\n if channel is None:\n logging.info(\"connection faild :(\")\n raise Exception(\"session channel not opened (auth failed?)\")\n\n SFTPHandler.count += 1\n self.transport.join()", "title": "" } ]
[ { "docid": "b207aca386aaf79999970d1b3215a694", "score": "0.82228774", "text": "def _handle(self, *args, **options):\n return super()._handle(*args, **options)", "title": "" }, { "docid": "2d78b26b871ee02791e22a63f67b33c3", "score": "0.81764233", "text": "def handle(self):", "title": "" }, { "docid": "6f0c7a14402ef9ecaf53b426a559b560", "score": "0.8169001", "text": "def handle(self):\n\n pass", "title": "" }, { "docid": "c1381f57c70051bb62da538f808c55b1", "score": "0.8022935", "text": "def handle(self, *args):\n raise NotImplementedError", "title": "" }, { "docid": "7f0c6d0b0728b04c67fe2cf613235493", "score": "0.7757905", "text": "def handle(self, *args, **opts):\n raise NotImplementedError( )", "title": "" }, { "docid": "c10562356c51cb133e60dfa180a2ceb2", "score": "0.7746135", "text": "def handle(self, **kwargs):\n raise NotImplementedError", "title": "" }, { "docid": "52115c9205aea34cbd8fb0169e571a60", "score": "0.7680188", "text": "def handle(self):\n raise NotImplementedError()", "title": "" }, { "docid": "52115c9205aea34cbd8fb0169e571a60", "score": "0.7680188", "text": "def handle(self):\n raise NotImplementedError()", "title": "" }, { "docid": "6223d7cf495db02a959ec43624e2f5ce", "score": "0.7549576", "text": "def handle(self, data):", "title": "" }, { "docid": "a7b2ced29dc8bb4c274980a74b3ba8a0", "score": "0.7512702", "text": "def handle(self, *args, **options):\n pass", "title": "" }, { "docid": "dc69a9fc5d9a5cb19053d532ddab078f", "score": "0.735317", "text": "def handler(self):\n pass", "title": "" }, { "docid": "735330797f327fa8ee8c1e08583174db", "score": "0.7212276", "text": "def handle(self, data):\n raise NotImplementedError()", "title": "" }, { "docid": "5bdb602443cd2ac32aa6ab3eff01be3f", "score": "0.72037274", "text": "def handle(self,data: dict) -> None:\n pass", "title": "" }, { "docid": "0b341b32e38111b5cfa10ae5f9586a14", "score": "0.718053", "text": "def _handle(self, event, context):\n raise NotImplementedError()", "title": "" }, { "docid": "5969df6637b93dbd83ae4f70f93028ca", "score": "0.70623046", "text": "def _handle(self, msg):\n\t\traise NotImplementedError", "title": "" }, { "docid": "c8f307428ae5df162b701c9d10b97264", "score": "0.70165753", "text": "def handle(self, message):", "title": "" }, { "docid": "d3953270b560c1b861e88b9dfdcdd43e", "score": "0.6994736", "text": "def handle(self) -> int:\n raise NotImplementedError()", "title": "" }, { "docid": "d3953270b560c1b861e88b9dfdcdd43e", "score": "0.6994736", "text": "def handle(self) -> int:\n raise NotImplementedError()", "title": "" }, { "docid": "a7fe4909deb7c99caa229be827eea3f2", "score": "0.6900043", "text": "def handle(self, event):\n pass", "title": "" }, { "docid": "c7ca98defccae28542f01edc5c8c8e21", "score": "0.6876701", "text": "def handle(self, request: Any) -> Any:\n pass", "title": "" }, { "docid": "15f5f7789a20ed6f995ca72751cb719d", "score": "0.6799872", "text": "def handle(self, *args, **options):\r\n super(Command, self).handle(*args, **options)", "title": "" }, { "docid": "d34a639d67def338a400d32f53e3020a", "score": "0.6794585", "text": "def handle(self, request):\n raise NotImplementedError()", "title": "" }, { "docid": "83cb718d22a4a0a6786e5c7ed418323d", "score": "0.6716706", "text": "def handle_request(self):\n raise NotImplementedError()", "title": "" }, { "docid": "9ab330a537dd17a4d8dd0fd0af75f52d", "score": "0.6714401", "text": "def handler(self):\n return None", "title": "" }, { "docid": "46f0c3983aa867a28d198efbdd0ec3d7", "score": "0.6709485", "text": "def parse(self, handle):\n raise 
NotImplementedError(\"Please implement in a derived class\")", "title": "" }, { "docid": "545f942a09b3debcabd541d57f3d9725", "score": "0.6697693", "text": "def handle_other(self):\n pass", "title": "" }, { "docid": "de6d8e6dcde8f0f6e7cab8746ecc5367", "score": "0.6619171", "text": "def Handle(self):\n return self.HandleCommand()", "title": "" }, { "docid": "3440207dfa7de6ca7322c7f93e501d01", "score": "0.6617048", "text": "def handle(self, args, opts):\n\n raise NotImplementedError()", "title": "" }, { "docid": "5ef26b63fe405e39421d153746181fe4", "score": "0.6611026", "text": "def Handle(self) -> _n_1_t_8:", "title": "" }, { "docid": "3f445da979cb05bcd4e3a99d6a135175", "score": "0.65953064", "text": "def handle_input(self): \n\t\treturn", "title": "" }, { "docid": "3f445da979cb05bcd4e3a99d6a135175", "score": "0.65953064", "text": "def handle_input(self): \n\t\treturn", "title": "" }, { "docid": "80a5deb720b47b30e7a733a68a24ae01", "score": "0.65751547", "text": "def _handle(self, command: Command):\n raise NotImplementedError()", "title": "" }, { "docid": "fcdb5436d2aa48e1a3024a4fc4d2d781", "score": "0.6525647", "text": "def __init__(self, handle):\n self.handle = handle", "title": "" }, { "docid": "bee73a6ba32e588cd01df5274566dcce", "score": "0.6472151", "text": "def handleObject(self, object):", "title": "" }, { "docid": "6dd2b10bd482ccf84a22d36ff8a3d3f9", "score": "0.64638054", "text": "def special_handler(self, tag, data):\r\n pass", "title": "" }, { "docid": "3a15f9d793edba01e80366909571e64d", "score": "0.6421354", "text": "def SafeHandle(self) -> _n_0_t_0:", "title": "" }, { "docid": "42d78254d22a66c2a5fb4df1c33f5dd5", "score": "0.6350445", "text": "def _process(self,*args,**kwargs):\n\t\tpass", "title": "" }, { "docid": "95c2ae268bbbbc02a1e2bbf0f8f85a59", "score": "0.6268413", "text": "def handle_data(self, data):\n raise NotImplementedError(\"subclass must implement handle_data()\")", "title": "" }, { "docid": "57f40ca6e93bfca2d01c73a7cab23154", "score": "0.62210375", "text": "def runnable(self, handle):\n raise NotImplementedError(\"You must override the 'runnable' method\")", "title": "" }, { "docid": "a30c3b44af441153c1995d46b2711ef6", "score": "0.62181914", "text": "def process(self):\n raise NotImplementedError(\"Subclass Responsiblity\")", "title": "" }, { "docid": "69b626711458854c00b8c04a77bbc895", "score": "0.62129", "text": "def handle_response(self, response):\n raise NotImplementedError", "title": "" }, { "docid": "c8cf2f1ba764faadeac7588877514f45", "score": "0.6212886", "text": "def handleEvent(self, event):\r\n pass", "title": "" }, { "docid": "fdeaf20d800b1bbbfc6b33de60179edd", "score": "0.62024325", "text": "async def handle(self, request: web.Request, *args, **kwargs) -> web.Response:\n\n pass", "title": "" }, { "docid": "3dba66a805d3ff588bcf00a0968a7ac3", "score": "0.61990255", "text": "def callback(self, *args):\n\t\tpass", "title": "" }, { "docid": "065f7eb287836eabf6fd0060d89f9427", "score": "0.61961746", "text": "def handles(cls, log):\n raise NotImplementedError()", "title": "" }, { "docid": "9351ce0d31f282c1ed67968fc31c3d9a", "score": "0.61677927", "text": "def handle_message(self, msg):\n raise NotImplementedError", "title": "" }, { "docid": "13e5d0dd8e8f0064d0327f905204100d", "score": "0.6162957", "text": "def simplified_handler(self, resource, event, handle=None):\n self.session = resource.session\n self.handle = handle\n event_type = event.event_type\n if event_type == constants.EventType.service_request:\n self.event_success = True\n self.srq_success = 
True\n return None\n elif event_type == constants.EventType.io_completion:\n self.event_success = True\n self.io_completed = True\n return None\n else:\n self.event_success = True\n return None", "title": "" }, { "docid": "dc3abc1645fcd5d82f178dc60916dfcd", "score": "0.6152851", "text": "def handle(self, form):\n\n pass", "title": "" }, { "docid": "97e7c5eafec6ce8952affd0d1c8c4fd2", "score": "0.6151098", "text": "def funtion(self):\n pass", "title": "" }, { "docid": "8ae2a41ca8e538d1f074d88d33b943da", "score": "0.6144577", "text": "def handle_event(self, event):\n raise NotImplementedError", "title": "" }, { "docid": "f5463e6100d301cfa172b9aaae4ce99b", "score": "0.6142427", "text": "def handle_job(self, job):\n raise NotImplementedError", "title": "" }, { "docid": "9b563cfa225e84f9374283878b24d4b5", "score": "0.61315036", "text": "def _dispatch(self):", "title": "" }, { "docid": "c59b347872191c34947242e2c644bc40", "score": "0.6120032", "text": "def handle_request(self, request, context):\n raise NotImplementedError(\"The handle_request method must be implemented in a subclass.\")", "title": "" }, { "docid": "ea38ad85676093fb0a19df0ba29d2ae6", "score": "0.61100966", "text": "def handle(self) -> int:\n return self.__handle", "title": "" }, { "docid": "7d7a7b9ae8732e3ca2082ebd9008b04f", "score": "0.6092963", "text": "def handle_messages(self):\r\n raise NotImplementedError # pragma: nocover\r", "title": "" }, { "docid": "30defb9e524bf411499e0a33555a3cc5", "score": "0.6051451", "text": "def handle(self, tok, context):\n ...", "title": "" }, { "docid": "f121933450b3d8b39c9ec540317e7021", "score": "0.6049883", "text": "def _process(self):\n raise NotImplementedError", "title": "" }, { "docid": "0cbbce9654954c60b336470208d2eb72", "score": "0.6033969", "text": "def __bound_handler(*args, **kwargs):\n return self(obj, *args, **kwargs)", "title": "" }, { "docid": "fcb3e7cadc48d5badfaa5fba7058110e", "score": "0.6027719", "text": "def goto_handle(self, handle):\n raise NotImplementedError", "title": "" }, { "docid": "fc6580cf49a404fa3730bfc55706f0aa", "score": "0.60262", "text": "def process(self):\n\t\tpass", "title": "" }, { "docid": "4c54c717de5289e3bb0d447e1fab9e0d", "score": "0.6021108", "text": "def __init__(self, handle=None):\n self._handle = BAD_CONTEXT", "title": "" }, { "docid": "5e770b60c60efceca5b3716accde45cc", "score": "0.6020084", "text": "def __call__(self,e):\n pass", "title": "" }, { "docid": "a97731d2eac0b03d2e763d912b97122f", "score": "0.6017816", "text": "def handle_events(self):\r\n raise NotImplementedError", "title": "" }, { "docid": "56172107ebc96ac02a44bd21f767aeda", "score": "0.601214", "text": "async def handle_chunk(self, chunk: Chunk) -> None:", "title": "" }, { "docid": "7e5aa57e45381ae179a1b4931ac8417f", "score": "0.6011866", "text": "def dispatch(self):\n pass", "title": "" }, { "docid": "7e5aa57e45381ae179a1b4931ac8417f", "score": "0.6011866", "text": "def dispatch(self):\n pass", "title": "" }, { "docid": "2ab39eaef5168bf1ac3ead4c029cce3d", "score": "0.5998305", "text": "def _handle(self, token: Token, lineage_result: LineageResult) -> None:\n raise NotImplementedError", "title": "" }, { "docid": "5dd9bd058f84247c5f2bbc34875babae", "score": "0.59899974", "text": "def _handle(self, obj):\n\n # dispatch to the correct handler\n if \"method\" in obj:\n # request\n self._handle_request(obj)\n elif \"error\" not in obj:\n # response\n self._handle_response(obj)\n else:\n # error\n self._handle_error(obj)", "title": "" }, { "docid": "e1cf76cf471e1a59bbd93c59e74c23f8", 
"score": "0.5985885", "text": "def handleCommand(self, args = None):", "title": "" }, { "docid": "5e0fa4354a95e8b0ccfd36a9ab402978", "score": "0.59782666", "text": "def postHandle(self, arg):\n # self.transport.write('senz')\n print \"handled\"", "title": "" }, { "docid": "11b15883ee5bad2c8292ef79d590fad2", "score": "0.59625554", "text": "def handle_error(self, *args):\n\t\tpass", "title": "" }, { "docid": "cab408ee2d90564e63c7e985bfd603bc", "score": "0.5960691", "text": "def handleEvent(self, event):\n raise NotImplementedError", "title": "" }, { "docid": "f34f57c945602235ddb5ab0e29e7a4f6", "score": "0.59582055", "text": "def dispatch_call(self):", "title": "" }, { "docid": "da73b323cddfeedc806baf82a7cc0eda", "score": "0.59341073", "text": "def process(self, context, event):\n\t\traise NotImplementedError()", "title": "" }, { "docid": "d388939ba349a300a7a20adb599c127d", "score": "0.5907255", "text": "def __call__(self):\n pass", "title": "" }, { "docid": "d388939ba349a300a7a20adb599c127d", "score": "0.5907255", "text": "def __call__(self):\n pass", "title": "" }, { "docid": "47b313c6a11142a2531707d165182d1c", "score": "0.5905499", "text": "def handle_arguments(self, args):", "title": "" }, { "docid": "7965d40444873c2a41d25fc3ff55bfc0", "score": "0.59042007", "text": "def _receive(self):", "title": "" }, { "docid": "ab27aa4f0de17a2e4deecf3fc8d97275", "score": "0.58980596", "text": "def process(self):", "title": "" }, { "docid": "ab27aa4f0de17a2e4deecf3fc8d97275", "score": "0.58980596", "text": "def process(self):", "title": "" }, { "docid": "ab27aa4f0de17a2e4deecf3fc8d97275", "score": "0.58980596", "text": "def process(self):", "title": "" }, { "docid": "ab27aa4f0de17a2e4deecf3fc8d97275", "score": "0.58980596", "text": "def process(self):", "title": "" }, { "docid": "e0b8de45c2b37769d98ba843ecdf2aac", "score": "0.5873502", "text": "def incoming(self):", "title": "" }, { "docid": "105009e2a2daf4a9a951961f8b6e5b71", "score": "0.5871808", "text": "def __call__(self, context):\n\t\traise NotImplementedError", "title": "" }, { "docid": "9edee307a658d36c0ca027ec0efb35f0", "score": "0.5866004", "text": "def process(self):\n raise NotImplementedError()", "title": "" }, { "docid": "82bd304ea361bfaba02210865d24d1fd", "score": "0.5848253", "text": "def work(self):\r\n print \"OVERRIDE ME\"", "title": "" }, { "docid": "6890556e5f2e90cda0953101ae3c6fbf", "score": "0.5839112", "text": "def process(self, event):", "title": "" }, { "docid": "6b7f361dd7fa4effc12638d9cfb7cd8a", "score": "0.58326393", "text": "def handle_message(self, msg, status):", "title": "" }, { "docid": "192b6d2f754fd434cdd9b2237dfbc0ba", "score": "0.5830123", "text": "def parse(self, handle): # -> Record:\n ...", "title": "" }, { "docid": "a507931f3c899decd34a28b96433e4dc", "score": "0.58264583", "text": "def handle(self) -> State:\n\n # Write the main words\n self.write_main_words()\n\n # Call the superclass handle\n return super().handle()", "title": "" }, { "docid": "3e254594a970871273e1b50c564cf11e", "score": "0.5823697", "text": "def handle(*objects):", "title": "" }, { "docid": "d4d49c3ffbbd5beb53db4b0c289f6a5c", "score": "0.5819489", "text": "def handle(self):\n return self._handle", "title": "" }, { "docid": "34228ecb534b829016125b768f48bd93", "score": "0.5817602", "text": "def process(self) -> None:\n pass", "title": "" }, { "docid": "4951e50a050b9a60414c82af375e8b38", "score": "0.5814466", "text": "def handle(self) -> None:\n error = \"Request method not allowed.\"\n self.send({'error': error})", "title": "" }, { "docid": 
"d48aea7523181decfeea5b82e587d0c0", "score": "0.5787636", "text": "def act(self, handler, user_events):\n pass", "title": "" }, { "docid": "6553b727df73e9b38072b72b489c43a6", "score": "0.57872903", "text": "def handle_msg(self, msg):\n raise NotImplementedError(\"_handle_msg() not implemented\")", "title": "" }, { "docid": "02e396669e9768f7339ac441332ed08d", "score": "0.57858694", "text": "def getHandle(self):\r\n\t\treturn self.myHandle", "title": "" }, { "docid": "be4ef647aa7ca7ad6538cd492bffcc8e", "score": "0.57855445", "text": "def handle_processing(self, entity, *args, **kwargs):", "title": "" }, { "docid": "f262895b5816d1e59d566820aacaffc2", "score": "0.5772051", "text": "def consume(self, handler, model):\n raise NotImplementedError", "title": "" }, { "docid": "f1d41818274d7490611b0e1708a2bbd8", "score": "0.57697237", "text": "def handles_task(self, task):\n raise NotImplementedError()", "title": "" }, { "docid": "908298756b2a9b8c70e66a037ebd080e", "score": "0.576424", "text": "def _dispatcher(self, c, e):", "title": "" } ]
8dd9313f810d85d87377c4c83da5271c
Returns list comprising names of Stan parameters. Note that the length of this list will not generally equal ``n_parameters`` since each name may correspond to a vector.
[ { "docid": "72a6fbab24b7172766423478b29438e6", "score": "0.0", "text": "def names(self):\n return self._names", "title": "" } ]
[ { "docid": "0704c257661eb8a6786f81ceb412dd13", "score": "0.85193115", "text": "def get_parameters_names(self) -> List:\n return [variable[\"name\"] for variable in self.parameters]", "title": "" }, { "docid": "afd09fdd4979c219478623e76aec6235", "score": "0.7589632", "text": "def parameter_names(self):\n return self._parameter_names", "title": "" }, { "docid": "c752a6265875c8cbc04f54d8b3ae65b0", "score": "0.75374264", "text": "def input_names(self) -> List[str]:\n return self.parameter_space.parameter_names", "title": "" }, { "docid": "48f2c58470271f6410e709c9888a509a", "score": "0.7512591", "text": "def all_parameter_names(self) -> List[str]:\n return self.all_parameters.keys()", "title": "" }, { "docid": "9d832df725a7ae718b371f9e7fcc3501", "score": "0.74932724", "text": "def parameter_names(self):\n return self.PARAMETERS", "title": "" }, { "docid": "f842c31a9ce5991041f0247f319010a5", "score": "0.7448168", "text": "def getParamNames(self):\n names = []\n names.append('Sigma')\n return names", "title": "" }, { "docid": "391a7b6de76982225e7bc24a18bf4e5c", "score": "0.7393334", "text": "def param_names(self):\n if hasattr(self, '_param_names'):\n return self._param_names\n else:\n try:\n names = ['param.%d' % i for i in range(len(self.start_params))]\n except NotImplementedError:\n names = []\n return names", "title": "" }, { "docid": "e14202b2cabbd4afb625ca12f1ef155e", "score": "0.72744524", "text": "def get_param_names(self):\n return list(self.params._fields)", "title": "" }, { "docid": "8b0ce1b91d656a21508b9c53ff5dd195", "score": "0.7236258", "text": "def parameter_names(self):\n return getattr(self.chain.params, \"keys\", [])", "title": "" }, { "docid": "1d31f83834c905a13053bc0d00fcb06c", "score": "0.7152876", "text": "def get_param_names(fnode):\n p_markers = get_parametrization_markers(fnode)\n param_names = []\n for paramz_mark in p_markers:\n param_names += get_param_argnames_as_list(paramz_mark.args[0])\n return param_names", "title": "" }, { "docid": "890e57888a78198560d8368d6661d39f", "score": "0.7109715", "text": "def list_parameters(self):\n params = self.parameters.values()\n return sorted(params, key=lambda p: (p.index, p.identifier))", "title": "" }, { "docid": "fab0bf2fe0b3a6299ad4171a32f17ccd", "score": "0.7104763", "text": "def get_parameters(self):\n names = self.parameters.parameters.keys()\n names.extend(self.global_parameters.parameters.keys())\n return names", "title": "" }, { "docid": "b4ab19607e6a703a8cbdcda9c48628ba", "score": "0.70914847", "text": "def parameter_names(self):\n return self._queryobj.parameter_names()", "title": "" }, { "docid": "6a8935d0715a040f415e38ddb91d580f", "score": "0.70880836", "text": "def get_parameter_names(self):", "title": "" }, { "docid": "967dc40d9b5fb13779cdc6daa1d8d478", "score": "0.7043725", "text": "def parameter_names(self):\n return self.param_names or self.model.parameter_names", "title": "" }, { "docid": "16807bf1f63428e82b75f72244adfe5e", "score": "0.70081776", "text": "def get_parameters_modifier_names(self):\n return list(self.modifiers['parameters'].keys())", "title": "" }, { "docid": "b69689dac2c3f0c78823a8d9a672f4c5", "score": "0.6965251", "text": "def parameter_names(*, timeout:Optional[float] = None) -> List[cryptoltypes.CryptolNameInfo]:\n return __get_designated_connection().parameter_names(timeout=timeout)", "title": "" }, { "docid": "aa105c1be6c3d0a8c85ce537d2fb9da4", "score": "0.6896179", "text": "def get_param_names(self):\n return self._param_names", "title": "" }, { "docid": 
"6f0d82226bdc68970508a2601451b2b6", "score": "0.68591255", "text": "def parameter_names(self, add_self=False, adjust_for_printing=False, recursive=True, intermediate=False):\n if adjust_for_printing: adjust = adjust_name_for_printing\n else: adjust = lambda x: x\n names = []\n if intermediate or (not recursive):\n names.extend([adjust(x.name) for x in self.parameters])\n if intermediate or recursive: names.extend([\n xi for x in self.parameters for xi in\n x.parameter_names(add_self=True,\n adjust_for_printing=adjust_for_printing,\n recursive=True,\n intermediate=False)])\n if add_self: names = map(lambda x: adjust(self.name) + \".\" + x, names)\n return names", "title": "" }, { "docid": "da2a0ec0ac3219ab96a12062e12e9698", "score": "0.68575084", "text": "def get_params_names(self):\n names = []\n key_0 = list(self.mp_dict.keys())[0]\n names = list(self.mp_dict[key_0].keys()) \n #if we assume that number of information between wells is constant\n return names", "title": "" }, { "docid": "8c9a15b7d4e4a4f1463f83e1db2a56d1", "score": "0.6811872", "text": "def get_all_numeric_parameter_names(self):\n result = []\n import inspect\n for name, index in self._numeric_parameter_indices:\n task = self._workflow.get_task(name)\n func = task[0]\n sig = inspect.signature(func)\n parameter_names = list(sig.parameters.keys())\n result.append([name, parameter_names[index - 1]])\n\n return result", "title": "" }, { "docid": "e462aee44f479af0525cd6e5acd4c04b", "score": "0.6738139", "text": "def par_names(self) -> List[str]:\n # We infer the parameter names from the signature of the model function\n if self._par_names is None:\n self._par_names = [p.name for p in list(inspect.signature(self.model).parameters.values())[1:]]\n\n return self._par_names", "title": "" }, { "docid": "6f661b4c987dc4c2e7f3ecffd50a7ff0", "score": "0.6731993", "text": "def parameters(self):\n return list(self._parameters.keys())", "title": "" }, { "docid": "d2c30e64e9b050c4321ac9d5fe2a5abf", "score": "0.6706403", "text": "def parameter_names(self):\n\n results = []\n paramname = \"\"\n state = self.STATE_START\n\n for c in self.metadata[\"cypher\"]:\n if state == self.STATE_START:\n if c == \"\\\\\":\n state = self.STATE_BACKSLASH\n if c == \"$\":\n state = self.STATE_GOTDOLLAR\n elif state == self.STATE_BACKSLASH:\n state = self.STATE_START\n else: # STATE_GOTDOLLAR\n if c.isalnum():\n paramname += c\n else:\n if paramname != \"\":\n results.append(paramname)\n paramname = \"\"\n state = self.STATE_START\n return results", "title": "" }, { "docid": "3b3b740d18a11f54c7b7440436c58c73", "score": "0.66818666", "text": "def parameter_names_flat(self, include_fixed=False):\n name_list = []\n for p in self.flattened_parameters:\n name = p.hierarchy_name()\n if p.size > 1:\n name_list.extend([\"{}[{!s}]\".format(name, i) for i in p._indices()])\n else:\n name_list.append(name)\n name_list = np.array(name_list)\n\n if not include_fixed and self._has_fixes():\n return name_list[self._fixes_]\n return name_list", "title": "" }, { "docid": "f698a9de3ff4e05f9948acc6c2c49c6b", "score": "0.65399456", "text": "def get_parameters(self):\n \n parameters = []\n for layer in self.layers:\n parameters.extend(layer.params)\n return parameters", "title": "" }, { "docid": "ae078e845ba53b306bc5191e44b87496", "score": "0.6535529", "text": "def parameter_keys(self):\n return [p.key for p in self.parameters]", "title": "" }, { "docid": "82fc1b51a5147b8d9225b3df603aa00e", "score": "0.6532134", "text": "def _parameter_names(nharms, mod_nharms):\n\n names = 
['y_0']\n for n in range(1, nharms + 1):\n names += ['c_%d'%(n), 's_%d'%(n)]\n\n for m in range(1, mod_nharms[0] + 1):\n names += ['a_0%d'%(m), 'b_0%d'%(m)]\n\n for n in range(1, nharms + 1):\n for m in range(1, mod_nharms[n] + 1):\n names += ['ac_%d%d'%(n, m), 'bc_%d%d'%(n, m),\n 'as_%d%d'%(n, m), 'bs_%d%d'%(n, m)]\n\n return names", "title": "" }, { "docid": "868417ebc81b78662bd7f26ac29fa2c5", "score": "0.6516007", "text": "def expected_parameters(self):\n return [self.parameter_name]", "title": "" }, { "docid": "a7a8632cd8d0809d796d47d29cdf4b0a", "score": "0.6487864", "text": "def get_input_names(self) -> List:\n return [variable[\"name\"] for variable in self.inputs]", "title": "" }, { "docid": "5f7131c49ed61d09af658133246382e1", "score": "0.6473038", "text": "def parameters_list(self):\n if not self._hook._parameters:\n return None\n\n params_list = \"\"\n\n for parameter in self._hook._parameters.values():\n params_list += \"[\" + parameter.name\n\n if parameter.annotation is not Parameter.empty:\n params_list += \":\" + parameter.annotation.__name__\n\n if parameter.default is not Parameter.empty:\n params_list += \"=\" + str(parameter.default)\n\n params_list += \"] \"\n\n return params_list.strip()", "title": "" }, { "docid": "a1decf2f3b0dc80d60d34b8879944d10", "score": "0.64699686", "text": "def names(self):\n return [var.name for var in self.vars]", "title": "" }, { "docid": "a1decf2f3b0dc80d60d34b8879944d10", "score": "0.64699686", "text": "def names(self):\n return [var.name for var in self.vars]", "title": "" }, { "docid": "c7c9c90eb557dfef6f72187cf42b8b59", "score": "0.64393854", "text": "def get_params(self):\n return self.params_names", "title": "" }, { "docid": "a89bbbc51f20c2b118c99ea20b99e9dc", "score": "0.6409813", "text": "def param_names(self):\n return self.left.param_names + self.right.param_names", "title": "" }, { "docid": "5262157b30a5b879a93667b3a89990a9", "score": "0.63954777", "text": "def param_names(self):\n names = (self.exog_names + self.ar_names + self.ma_names +\n self.seasonal_ar_names + self.seasonal_ma_names)\n if not self.concentrate_scale:\n names.append('sigma2')\n return names", "title": "" }, { "docid": "e77a2d06224909f244993f7a0c336f71", "score": "0.63953364", "text": "def _get_param_names(cls):\n # noinspection PyPep8\n\n # fetch the constructor or the original constructor before\n # deprecation wrapping if any\n init = getattr(cls.__init__, 'deprecated_original', cls.__init__)\n if init is object.__init__:\n # No explicit constructor to introspect\n return []\n\n # introspect the constructor arguments to find the model parameters\n # to represent\n init_signature = signature(init)\n # Consider the constructor parameters excluding 'self'\n parameters = [p for p in init_signature.parameters.values()\n if p.name != 'self' and p.kind != p.VAR_KEYWORD]\n for p in parameters:\n if p.kind == p.VAR_POSITIONAL:\n raise RuntimeError(\"scikit-learn estimators should always \"\n \"specify their parameters in the signature\"\n \" of their __init__ (no varargs).\"\n \" %s with constructor %s doesn't \"\n \" follow this convention.\"\n % (cls, init_signature))\n # Extract and sort argument names excluding 'self'\n return sorted([p.name for p in parameters])", "title": "" }, { "docid": "d62313dca165cbc84d62f4035b18e9a3", "score": "0.6384889", "text": "def params(self):\n return list(self.parameters.values())", "title": "" }, { "docid": "e5fcf40baa7de37e7832beba0020a727", "score": "0.63786125", "text": "def parameter_spec(self):\n return 
OrderedDict(sorted(SAMModule.parameter_spec().items()))", "title": "" }, { "docid": "86226465bbcf315ca94ebdf53a4ca4c2", "score": "0.63768744", "text": "def getParameters(self):\n return list(self._parameters_.items())", "title": "" }, { "docid": "a36095444f2138d8673629c69a1c0c93", "score": "0.6367286", "text": "def getVariableNames(self):\r\n\r\n return [var.name for var in self.variables]", "title": "" }, { "docid": "a6c545d3efd3f529ed4f2f0ebeb77516", "score": "0.63192147", "text": "def get_param_names(spins=None, full=False):\n\n # Initialise the structure.\n names = []\n\n # Loop over the parameters.\n for param_name, param_index, spin_index, r20_key in loop_parameters(spins=spins):\n # Set the initial text.\n param_text = param_name\n\n # The parameters with additional details.\n if full and param_name in PARAMS_R20:\n param_text += \" (%s)\" % r20_key\n \n # Append the text.\n names.append(param_text)\n\n # Return the structure.\n return names", "title": "" }, { "docid": "13d282a60bf5f268bb6755a295b9321c", "score": "0.63075095", "text": "def get_params(self, pnames=None):\n l = []\n if pnames is None:\n pnames = self.params.keys()\n for pname in pnames:\n p = self.params[pname]\n if isinstance(p, Parameter):\n l.append(p)\n return l", "title": "" }, { "docid": "31b8d82f111594ad113d6f2e646b9702", "score": "0.630534", "text": "def parameters(self) -> 'list[str]':\n pass", "title": "" }, { "docid": "b949afa46053adee63462df7e9352d59", "score": "0.629045", "text": "def parameters(self) -> List[Parameter]:\n pass", "title": "" }, { "docid": "1386b0ef400ca7bf3b4198b5d09caa96", "score": "0.6217062", "text": "def _get_param_names(cls):\n # fetch the constructor or the original constructor before\n # deprecation wrapping if any\n init = getattr(cls.__init__, \"deprecated_original\", cls.__init__)\n if init is object.__init__:\n # No explicit constructor to introspect\n return []\n\n # introspect the constructor arguments to find the model parameters\n # to represent\n init_signature = inspect.signature(init)\n # Consider the constructor parameters excluding 'self'\n parameters = [\n p\n for p in init_signature.parameters.values()\n if p.name != \"self\" and p.kind != p.VAR_KEYWORD\n ]\n for p in parameters:\n if p.kind == p.VAR_POSITIONAL:\n raise RuntimeError(\n \"scikit-learn estimators should always \"\n \"specify their parameters in the signature\"\n \" of their __init__ (no varargs).\"\n \" %s with constructor %s doesn't \"\n \" follow this convention.\" % (cls, init_signature)\n )\n # Extract and sort argument names excluding 'self'\n return sorted([p.name for p in parameters])", "title": "" }, { "docid": "a826e4aedf8240c6e45c4ff3bec05ba1", "score": "0.620959", "text": "def _get_param_names(cls):\n # fetch the constructor or the original constructor before\n # deprecation wrapping if any\n init = getattr(cls.__init__, 'deprecated_original', cls.__init__)\n if init is object.__init__:\n # No explicit constructor to introspect\n return []\n\n # introspect the constructor arguments to find the model parameters\n # to represent\n init_signature = signature(init)\n # Consider the constructor parameters excluding 'self'\n parameters = [p for p in init_signature.parameters.values()\n if p.name != 'self' and p.kind != p.VAR_KEYWORD]\n for p in parameters:\n if p.kind == p.VAR_POSITIONAL:\n raise RuntimeError(\"scikit-learn estimators should always \"\n \"specify their parameters in the signature\"\n \" of their __init__ (no varargs).\"\n \" %s with constructor %s doesn't \"\n \" follow this 
convention.\"\n % (cls, init_signature))\n # Extract and sort argument names excluding 'self'\n return sorted([p.name for p in parameters])", "title": "" }, { "docid": "e3a219a5a6f7d030d18227158d272632", "score": "0.620656", "text": "def getParameters(self):\n\t\treturn []", "title": "" }, { "docid": "cfb74d159d2ac1bd5b442c9cd279602e", "score": "0.6206366", "text": "def getParameters(el, parameter):\r\n return tuple(p.Definition.Name for p in el.Parameters)", "title": "" }, { "docid": "7e3d64eaa7afe5eb5936ef91471cd0a9", "score": "0.61807597", "text": "def _get_param_names(cls):\n # fetch the constructor or the original constructor before\n # deprecation wrapping if any\n init = getattr(cls.__init__, 'deprecated_original', cls.__init__)\n if init is object.__init__:\n # No explicit constructor to introspect\n return []\n\n # introspect the constructor arguments to find the model parameters\n # to represent\n init_signature = inspect.signature(init)\n # Consider the constructor parameters excluding 'self'\n parameters = [p for p in init_signature.parameters.values()\n if p.name != 'self' and p.kind != p.VAR_KEYWORD]\n for p in parameters:\n if p.kind == p.VAR_POSITIONAL:\n raise RuntimeError(\"scikit-learn estimators should always \"\n \"specify their parameters in the signature\"\n \" of their __init__ (no varargs).\"\n \" %s with constructor %s doesn't \"\n \" follow this convention.\"\n % (cls, init_signature))\n # Extract and sort argument names excluding 'self'\n return sorted([p.name for p in parameters])", "title": "" }, { "docid": "0cc654b16a21fb192091273f17c93df3", "score": "0.61677873", "text": "def parameters(self):\n\n with self.graph.as_default():\n parameters = tf.trainable_variables()\n\n parameters_string_parts = [\"Trainable parameters\"]\n width = max(map(len, [p.name for p in parameters]))\n\n for parameter in parameters:\n parameters_string_parts.append(\n \"{:{}} {}\".format(\n parameter.name, width, parameter.get_shape()\n )\n )\n\n parameters_string = \"\\n \".join(parameters_string_parts)\n\n return parameters_string", "title": "" }, { "docid": "8a4c01dd8672a842063a73a1e9841183", "score": "0.6139431", "text": "def parameters(self):\n Parameter = namedtuple('Parameter', 'id datatype length value')\n _result = []\n for param in self._parameters:\n if type(param[3]) == int:\n value_id = param[3]+1\n else:\n value_id = param[3]\n _result.append(Parameter(param[3], param[1], param[4], self._param_values[value_id]))\n return _result", "title": "" }, { "docid": "5a16a5379df02a8ad64ed27cff75f943", "score": "0.6109301", "text": "def name_parameter_tensor(self):\n for name, param in self.named_parameters():\n param.tensor()._attrs[\"name\"] = name.replace(\".\", \"_\")", "title": "" }, { "docid": "30d28e1762a5e4c0cc582f0e3480ae71", "score": "0.6090363", "text": "def param_list(self) -> List[str]:\n raise NotImplementedError()", "title": "" }, { "docid": "15097e8ed690a5aa8bfc22101752909e", "score": "0.6088603", "text": "def parameter_spec(self):\n return OrderedDict(sorted(SAMGraph.parameter_spec(len(self.dependencies)).items()))", "title": "" }, { "docid": "25ef56cacfde7b913a2b34bbe6dde68e", "score": "0.6056104", "text": "def get_hyperparameter_names(self) -> list[str]:\n warnings.warn(\n \"Prefer using `list(space.keys())` over `get_hyperparameter_names`\",\n DeprecationWarning,\n stacklevel=2,\n )\n return list(self._hyperparameters.keys())", "title": "" }, { "docid": "f0be30cc98513c131a0ea7f8e4ac2f63", "score": "0.6030182", "text": "def parameters(self):\n\t\tcount = 
ctypes.c_ulonglong()\n\t\tparams = core.BNGetTypeParameters(self.handle, count)\n\t\tresult = []\n\t\tfor i in xrange(0, count.value):\n\t\t\tresult.append((Type(core.BNNewTypeReference(params[i].type)), params[i].name))\n\t\tcore.BNFreeTypeParameterList(params, count.value)\n\t\treturn result", "title": "" }, { "docid": "3f3ae4c85db39b8342f7951c041b4f5e", "score": "0.602093", "text": "def parameters(self):\n return list(self.network.parameters())", "title": "" }, { "docid": "5b076485e34e533d7c5c6384613cf300", "score": "0.601895", "text": "def kwarg_names(self) -> List[str]:\n return [arg.name for arg in self._kwargs.values()]", "title": "" }, { "docid": "7dd9434e865fee54acc43d7511c233c2", "score": "0.60137564", "text": "def get_params_types_names(proto):\n a = list(map(split_type, get_params(proto)))\n #print proto, a\n #import sys; sys.exit(1)\n return a", "title": "" }, { "docid": "a6dd3d36340f4db0142d132465c143c3", "score": "0.60031444", "text": "def getOptionalParamsNames():\n \treturn jarray.array([], java.lang.String);", "title": "" }, { "docid": "635ef8fc0a06856731b174041a32e299", "score": "0.6002941", "text": "def names(self):\n names = []\n if self.filters_list:\n names = [f.name for f in self.filters_list]\n return names", "title": "" }, { "docid": "a09c04a91ee06766bd7e67263d633800", "score": "0.5986396", "text": "def get_parameters(modules: Iterable[Module]): \n model_parameters = [] \n for module in modules: \n model_parameters += list(module.parameters()) \n return model_parameters", "title": "" }, { "docid": "a8b98ce564f7ccaf8940f9ad886c8cc1", "score": "0.59676987", "text": "def get_anonymous_parameters(self):\n tmp = {}\n for param in self.parameters:\n if type(param.key) == int:\n tmp[param.key] = param.value\n lst = [None]\n for param in range(1, max(tmp.keys()) + 1):\n lst.append(tmp[param])\n return lst", "title": "" }, { "docid": "4dcab21eaafee4c011ffd8723e86b5d3", "score": "0.5942363", "text": "def varied_parameters(parameters, varied, names):\n indices_varied = [names.index(i) for i in varied]\n varied_parameters = [parameters[i] for i in indices_varied]\n return varied_parameters", "title": "" }, { "docid": "945e40a9ab775bf9f214af52342747a8", "score": "0.5939893", "text": "def parameters(self):\n parameters = []\n for parser in self._parsers:\n parameters.extend(parser.get_parameters())\n return parameters", "title": "" }, { "docid": "e38070f7731149e96c557318133b31ed", "score": "0.59391916", "text": "def get_parameters(self):\n self.paramlist = np.array([])\n for L in self.layers[1:-1]:\n self.paramlist = np.r_[self.paramlist, L.W.flat, L.b]\n return self.paramlist", "title": "" }, { "docid": "12472c90c6a628cc466597aabb0e70ae", "score": "0.59295845", "text": "def varnames(self):\n return [x.name for x in self.VAR]", "title": "" }, { "docid": "dabf9d41411a57ae9a811698cc9c620b", "score": "0.5917773", "text": "def get_params(self, module):\r\n return [getattr(module, n) for n in self.reparameterization_names]", "title": "" }, { "docid": "aba7a798cd89c8df324f96dd0bbb96a2", "score": "0.590923", "text": "def names(self):\n\n ls = []\n for i in [self.lhs, self.rhs]:\n if not isinstance(i, DottedVariable):\n ls.append(str(i))\n else:\n ls += i.names\n return ls", "title": "" }, { "docid": "42829db09012288019ef21a001164050", "score": "0.5905367", "text": "def parameters(self) -> to.Tensor:\n return to.stack([s.params for s in self._samples])", "title": "" }, { "docid": "5edc956b5c2386490954f71c607c475b", "score": "0.58869153", "text": "def parameters(self) -> 
List[torch.Tensor]:\n return self.opt_params", "title": "" }, { "docid": "51ebd2159ecee60835574d7800bfc8c6", "score": "0.58813566", "text": "def parameter_spec(self):\n return OrderedDict(sorted(SPINetwork.parameter_spec(len(self.dependencies)).items()))", "title": "" }, { "docid": "2c84d6789e3804bdbac47125995d7cbd", "score": "0.58802867", "text": "def getPerParticleParameterName(self, index):\n return self.parameters[index].name", "title": "" }, { "docid": "2bb693d19eb9f8d1d23fbe22318d8992", "score": "0.58590627", "text": "def parameters(self) -> List[tf.Tensor]:", "title": "" }, { "docid": "edd4fa6a704bba6523dd0f57a53cd2fb", "score": "0.5857014", "text": "def var_names(self):\n return list(self._vars.keys())", "title": "" }, { "docid": "398ff407050175129f1b06d9b9a5b099", "score": "0.5842804", "text": "def _get_param_names(cls):\n try:\n args, varargs, kw, default = inspect.getargspec(cls.__init__)\n assert varargs is None, (\n 'scikit learn estimators should always specify their '\n 'parameters in the signature of their init (no varargs).'\n )\n # Remove 'self'\n # XXX: This is going to fail if the init is a staticmethod, but\n # who would do this?\n args.pop(0)\n except TypeError:\n # No explicit __init__\n args = []\n args.sort()\n return args", "title": "" }, { "docid": "5c455d35bba55c6f2c9790796b611dc5", "score": "0.58360785", "text": "def getParamList(self):\n list = _ordered_keys(self.params)\n # WARNING: Extending the list with the dispersion parameters\n list.extend(self.getDispParamList())\n return list", "title": "" }, { "docid": "31b77a8b796a3569922f715f577bccf6", "score": "0.58337355", "text": "def params(self):\n return self.keys()", "title": "" }, { "docid": "d8a058f77091763ed9f5b7f9ce31b23c", "score": "0.5815739", "text": "def named_parameters(\n self, prefix: str = \"\", recurse: bool = True\n ) -> Iterator[Tuple[str, Parameter]]:\n gen = self._named_members(\n lambda module: module._parameters.items(), prefix=prefix, recurse=recurse\n )\n for elem in gen:\n yield elem", "title": "" }, { "docid": "9c7e872aa3cc97733d4c923e90a5db0f", "score": "0.58156747", "text": "def parameters(self) -> List[tf.Tensor]:\n return self.network.variables", "title": "" }, { "docid": "9c7e872aa3cc97733d4c923e90a5db0f", "score": "0.58156747", "text": "def parameters(self) -> List[tf.Tensor]:\n return self.network.variables", "title": "" }, { "docid": "ddaad2be91a5288080e27af388a90055", "score": "0.58121186", "text": "def names(self) -> Sequence[str]:\n return pulumi.get(self, \"names\")", "title": "" }, { "docid": "ddaad2be91a5288080e27af388a90055", "score": "0.58121186", "text": "def names(self) -> Sequence[str]:\n return pulumi.get(self, \"names\")", "title": "" }, { "docid": "ddaad2be91a5288080e27af388a90055", "score": "0.58121186", "text": "def names(self) -> Sequence[str]:\n return pulumi.get(self, \"names\")", "title": "" }, { "docid": "ddaad2be91a5288080e27af388a90055", "score": "0.58121186", "text": "def names(self) -> Sequence[str]:\n return pulumi.get(self, \"names\")", "title": "" }, { "docid": "ddaad2be91a5288080e27af388a90055", "score": "0.58121186", "text": "def names(self) -> Sequence[str]:\n return pulumi.get(self, \"names\")", "title": "" }, { "docid": "f463d4b104573bd1d44125776d60b63e", "score": "0.58111036", "text": "def get_layer_norm_params_names(model):\n params_names = []\n for name, sub_module in model.named_modules():\n if isinstance(sub_module, (T5LayerNorm, nn.LayerNorm)):\n for param_name, param in sub_module.named_parameters():\n 
params_names.append(name+\".\"+param_name)\n return params_names", "title": "" }, { "docid": "2aca2ca246d6ba2a05918442c9e56df3", "score": "0.57980984", "text": "def parameters(self):\r\n return self.vars", "title": "" }, { "docid": "3a4e983c41942fac7ea7217e8e1c7bcb", "score": "0.5797665", "text": "def extra_parameters(self):\n return [\n param_name\n for param_name, _ in self.input_parameters.items()\n if param_name not in self.parameters\n ]", "title": "" }, { "docid": "2a2515fb23b4c626163530386b5338bb", "score": "0.5780098", "text": "def names(self):\n return [element.name for element in self._elements]", "title": "" }, { "docid": "f9f67b08eacc742a144c7db5e583bb77", "score": "0.57719374", "text": "def vnames(self):\n if self.narg() == 0:\n if not isinstance(self._ovn,float) and self._ovn not in KEYWORDS:\n s = set([self._ovn])\n else:\n s = set()\n else:\n s = set()\n for n in range(self.narg()):\n s = s.union(self.arg(n).vnames())\n return s", "title": "" }, { "docid": "c6277b50ba6af57597b50eee519c89a7", "score": "0.5764844", "text": "def parameters(self) -> Mapping[str, str]:\n return pulumi.get(self, \"parameters\")", "title": "" }, { "docid": "97a9c3ad996384b1558954e754d0d57e", "score": "0.5761834", "text": "def potential_parameters(cls) -> Iterable[str]:\n return \"sigma\", \"epsilon\"", "title": "" }, { "docid": "c799885d6f4445e4ad63b0dd01461360", "score": "0.5760518", "text": "def parameter_names(df):\n\n pdf = df.copy().replace({'p1': {'kmax': r'$k_{max}$',\n 'g1T': r'$g_{1,Tuz}$',\n 'kmax2': r'$k_{max}$',\n 'kmaxS1': r'$k_{max}$',\n 'kmaxS2': r'$k_{max}$',\n 'kmaxLC': r'$k_{max}$',\n 'Lambda': r'$\\lambda$',\n 'Alpha': '$a$',\n 'krlC': r'$k_{max}$',\n 'krlM': r'$k_{max}$'}})\n params = [[pdf.loc[i, 'p1'], pdf.loc[i, 'p2']] for i in range(len(pdf))]\n params = [str(e) for ee in params for e in ee\n if str(e) not in ['nan', 'kmaxCN', 'kmaxWUE']]\n\n # deal with special characters and font case\n params[params.index('PrefT')] = r'$\\varPsi_{ref}$'\n params[params.index('Beta')] = '$b$'\n params[params.index('Kappa')] = r'$\\varpi$'\n params[params.index('Eta')] = r'$\\eta$'\n params[params.index('PcritC')] = r'$\\varPsi_{\\varphi,lim}$'\n params[params.index('PcritM')] = r'$\\varPsi_{\\varphi,lim}$'\n\n return params", "title": "" }, { "docid": "c9cc15f3eb02e0802b96cfbcd1f2c9cf", "score": "0.57586503", "text": "def get_parameters(self):\n parameters=[self.number_of_clients,self.Alpha_matrix,\n self.lamb_vector]\n return parameters", "title": "" }, { "docid": "1cc41a782ca20a40d461947a40aa121d", "score": "0.57507104", "text": "def var_names(self):\n pass", "title": "" }, { "docid": "a356695563d6fcc8d448ac66961340f3", "score": "0.5750465", "text": "def keys(self):\n return self.params.keys()", "title": "" }, { "docid": "aa862246e8c1389bd987c86acae9d55d", "score": "0.57403487", "text": "def get_output_names(self) -> List:\n return [variable[\"name\"] for variable in self.outputs]", "title": "" }, { "docid": "03a8192c988861aee0850f9433d1fd65", "score": "0.57399464", "text": "def _get_argument_names(self, node):\n assert isinstance(\n node, gast.FunctionDef\n ), \"Input node is not function define node\"\n names = list(node.args.args)\n names.append(node.args.vararg)\n names.append(node.args.kwarg)\n names = [i.id for i in names if i is not None]\n return names", "title": "" }, { "docid": "5b4f192d58cf9a1371bf8e40fa301122", "score": "0.5723666", "text": "def getVarNames(self):\n return( self.__io.getVarNames() )", "title": "" } ]
3bcad392966a6fcd6b42d6cc935c47f2
Get `SENTENCE BREAK` property.
[ { "docid": "6a5a4a570ee367fd4599aecc9f3924bc", "score": "0.68026906", "text": "def get_sentence_break_property(value, binary=False):\n\n obj = unidata.ascii_sentence_break if binary else unidata.unicode_sentence_break\n\n if value.startswith('^'):\n negated = value[1:]\n value = '^' + unidata.unicode_alias['sentencebreak'].get(negated, negated)\n else:\n value = unidata.unicode_alias['sentencebreak'].get(value, value)\n\n return obj[value]", "title": "" } ]
[ { "docid": "ca993ec8155e0498f61f8c1284fbd91e", "score": "0.5473725", "text": "def _get_sent(self):\n return self.__sent", "title": "" }, { "docid": "ca993ec8155e0498f61f8c1284fbd91e", "score": "0.5473725", "text": "def _get_sent(self):\n return self.__sent", "title": "" }, { "docid": "ca993ec8155e0498f61f8c1284fbd91e", "score": "0.5473725", "text": "def _get_sent(self):\n return self.__sent", "title": "" }, { "docid": "ca993ec8155e0498f61f8c1284fbd91e", "score": "0.5473725", "text": "def _get_sent(self):\n return self.__sent", "title": "" }, { "docid": "21d145806495b7885a8a96a693da00fd", "score": "0.54096115", "text": "def get_sent(self) -> dict:\n return self.__sent", "title": "" }, { "docid": "bf65f71d46c8720c90b9af090a63fe23", "score": "0.5407665", "text": "def get_word_break_property(value, binary=False):\n\n obj = unidata.ascii_word_break if binary else unidata.unicode_word_break\n\n if value.startswith('^'):\n negated = value[1:]\n value = '^' + unidata.unicode_alias['wordbreak'].get(negated, negated)\n else:\n value = unidata.unicode_alias['wordbreak'].get(value, value)\n\n return obj[value]", "title": "" }, { "docid": "02d3a977962d291be1b1400dc09f811e", "score": "0.5274939", "text": "def get_line_break_property(value, binary=False):\n\n obj = unidata.ascii_line_break if binary else unidata.unicode_line_break\n\n if value.startswith('^'):\n negated = value[1:]\n value = '^' + unidata.unicode_alias['linebreak'].get(negated, negated)\n else:\n value = unidata.unicode_alias['linebreak'].get(value, value)\n\n return obj[value]", "title": "" }, { "docid": "7939870a5a8420028b14a079070fb676", "score": "0.5249099", "text": "def get_weedline_status(self):\n return self._properties['weedline']", "title": "" }, { "docid": "8721ffad763756702a68064b40307365", "score": "0.5198865", "text": "def sentence(self) -> str:\n return self._sentence", "title": "" }, { "docid": "af7594d808dde56b2151af71e53acaa6", "score": "0.51923984", "text": "def isBreak(self):\t\n\t\t#تكون الكلمة فاصلة إذا كانت منفصلة عمّا قبلها.\t\n\t\t# الحالات التي تقطع\n\t\t# - حرف جر متصل\n\t\t# فاصلة أو نقطة\n\t\treturn self.tagBreak;", "title": "" }, { "docid": "ac734376ba78a3fdbd5c8d842ace54d1", "score": "0.5131816", "text": "def streak(self):\n return self._streak", "title": "" }, { "docid": "b404ce90a0db8b70e9608edd34e3773f", "score": "0.5112712", "text": "def sentence(cls):\n return cls._namespace_SIO('SIO_000113')", "title": "" }, { "docid": "4219e6393b69d785d2bcb4c2d1570822", "score": "0.5046945", "text": "def get_sentences(self):\n return self.text", "title": "" }, { "docid": "a0cec55339f05279fd76946648884b19", "score": "0.50263083", "text": "def _get_peer_lblspc(self):\n return self.__peer_lblspc", "title": "" }, { "docid": "5179a71399c7cf77e52afe93b73852b8", "score": "0.500306", "text": "def get_sent_to(self):\n return self.sent_to", "title": "" }, { "docid": "2bd852aa6067f668ce3d243a68946174", "score": "0.49974194", "text": "def ParagraphBreak(self) -> str:", "title": "" }, { "docid": "ef9a6fd5b9fab8999d65cb1f7a21147f", "score": "0.49367473", "text": "def line(self) -> GenericIntegrationConversation:\n return self._integration_convs['line']", "title": "" }, { "docid": "6bb1e70a9ccfac8d3660503b2493247e", "score": "0.48881328", "text": "def verbal_language_entity(cls):\n return cls._namespace_SIO('SIO_000786')", "title": "" }, { "docid": "8ae3d9c4c789ad9ef2d9ec243a0f4145", "score": "0.48682848", "text": "def getSpalten(self):\n return self.spalten", "title": "" }, { "docid": "5634491c7bfbe8ecd4a4094b34dd470c", "score": 
"0.4858458", "text": "def boldness(cls):\n return cls._namespace_SIO('SIO_000819')", "title": "" }, { "docid": "bd807d78509cf758a45f2a2763e95e50", "score": "0.48505658", "text": "def ssltotencbe(self) :\n\t\ttry :\n\t\t\treturn self._ssltotencbe\n\t\texcept Exception as e:\n\t\t\traise e", "title": "" }, { "docid": "a279da93a366ed71b85e1784e2f479b0", "score": "0.48500875", "text": "def get_break(self, break_date) -> Optional[\"SchoolBreak\"]:\n return self._school_breaks_by_date.get(break_date)", "title": "" }, { "docid": "e55bd9875d84912a7b50f982ed836b3e", "score": "0.48371297", "text": "def get_strike(self):\n return self.strike", "title": "" }, { "docid": "ac7a7b52323e64fab59b6bda380b4af8", "score": "0.482564", "text": "def breaks(self):\n return self._breaks", "title": "" }, { "docid": "007b8cb00a7b320bf00cf4d3ac387f46", "score": "0.47524104", "text": "def ssltotswencbe(self) :\n\t\ttry :\n\t\t\treturn self._ssltotswencbe\n\t\texcept Exception as e:\n\t\t\traise e", "title": "" }, { "docid": "2d2dbc113720ae3f052992f524cf89f9", "score": "0.47523043", "text": "def get_headline(self, rec):\n return rec.get('headline')", "title": "" }, { "docid": "d47fc802a9928e983e10d4b819daec65", "score": "0.47507188", "text": "def get_sentences(self):\n return self.sentences", "title": "" }, { "docid": "842551c7929a05183946e9717da4dff0", "score": "0.47306454", "text": "def sentence(self, i):\n assert i < len(self._sentences), \"Sentence %d requested from list of length %d\" % (i, len(self._sentences))\n return self._sentences[i]", "title": "" }, { "docid": "688f7028c8276401e4c0fdde02579b7c", "score": "0.4691481", "text": "def sentence_id(self) -> int:\n return self._sentence_id", "title": "" }, { "docid": "b356374bcd623e6fc1cd1895763fce6b", "score": "0.46677488", "text": "def LineBreak(self) -> str:", "title": "" }, { "docid": "4c351b46a2b052fefe57422666a945e7", "score": "0.46638167", "text": "def __getStr__(self):\n return self.__getDict__()['commentTxt']", "title": "" }, { "docid": "d0b5f1642bed43b4617582e721ba8e1f", "score": "0.4656429", "text": "def sentiment_configuration(self) -> Optional['outputs.MediaInsightsPipelineConfigurationRealTimeAlertConfigurationRuleSentimentConfiguration']:\n return pulumi.get(self, \"sentiment_configuration\")", "title": "" }, { "docid": "a02e5f9cb62916d4084c0bb2b58b6e37", "score": "0.4655316", "text": "def line(self) -> Optional[str]:\n return pulumi.get(self, \"line\")", "title": "" }, { "docid": "a483252e96db938840996934974dee2f", "score": "0.46153057", "text": "def sentence(self):\n return self.lorem_ipsum(quantity=1)", "title": "" }, { "docid": "6ddad2de2785fb45774d2d0d440b5d9e", "score": "0.4592868", "text": "def get_sent_timestamp(self):\n if not self.sent_timestamp:\n return None\n\n clock = Clock()\n return (clock.from_time(self.sent_timestamp)\n .strftime('%Y-%m-%d %H:%M:%S'))", "title": "" }, { "docid": "dcb34ec9504925ee1d8c32dcd8ff7ec8", "score": "0.45831576", "text": "def ssltotdecbe(self) :\n\t\ttry :\n\t\t\treturn self._ssltotdecbe\n\t\texcept Exception as e:\n\t\t\traise e", "title": "" }, { "docid": "1f821d2bef7a295b71597e4204557b97", "score": "0.45826712", "text": "def ssltotswdecbe(self) :\n\t\ttry :\n\t\t\treturn self._ssltotswdecbe\n\t\texcept Exception as e:\n\t\t\traise e", "title": "" }, { "docid": "e61870ea12d6abc8419709f2e67565b4", "score": "0.45535555", "text": "def getBreakPointModel(self):\n return self.breakpointModel", "title": "" }, { "docid": "bb01d52aa65ee9793c157947d1ab1195", "score": "0.4528459", "text": "def text_span(cls):\n 
return cls._namespace_SIO('SIO_001073')", "title": "" }, { "docid": "d914a46607ccbcae33e52f6ee13bf793", "score": "0.450251", "text": "def conversation(cls):\n return cls._namespace_SIO('SIO_000941')", "title": "" }, { "docid": "7e5371c3c190a55bf6a798f795142f6c", "score": "0.45016238", "text": "def paragraph(cls):\n return cls._namespace_SIO('SIO_000110')", "title": "" }, { "docid": "36d26d3fc37189ca0417a8524aaf5744", "score": "0.449061", "text": "def part_end(self):\n return self.leads[-1].lead_head", "title": "" }, { "docid": "e48bc65d425fd8a53caf4268122e3137", "score": "0.4487322", "text": "def getPhrase(self):\r\n return self.__foundPhrase", "title": "" }, { "docid": "bb412286e8e3bbe8e0255b6c82e75033", "score": "0.44868717", "text": "def decode_BREAK(self, break_ascii: str):\r\n break_result = {\r\n \"serial_number\": 0,\r\n \"serial_number_str\": \"\",\r\n \"firmware_str\": \"\",\r\n \"freq_list_str\": \"\",\r\n \"mode\": \"\",\r\n }\r\n\r\n # Break up the result to lines\r\n break_lines = break_ascii.splitlines(keepends=False)\r\n\r\n for break_line in break_lines:\r\n # Get the serial number\r\n if \"SN\" in break_line:\r\n serial_line = break_line.split(':')\r\n if len(serial_line) > 1:\r\n break_result[\"serial_number_str\"] = serial_line[1]\r\n\r\n # Convert the last 5 charaters to a serial number integer\r\n break_result[\"serial_number\"] = int(serial_line[1][-5:])\r\n\r\n # Get the Firwmare\r\n if \"FW\" in break_line:\r\n fw_line = break_line.split(':')\r\n if len(fw_line) > 1:\r\n break_result[\"firmware_str\"] = fw_line[1]\r\n\r\n # Get the remaining information\r\n if len(break_lines) >= 6:\r\n break_result[\"mode\"] = break_lines[2]\r\n break_result[\"freq_list_str\"] = break_lines[3]\r\n\r\n return break_result", "title": "" }, { "docid": "e83213e0ba4d07ac1a77906ee043598e", "score": "0.44862106", "text": "def sentences(self):\n return self._sentences", "title": "" }, { "docid": "d0559eed2c068820e02ae76054051058", "score": "0.4482653", "text": "def get_line(self, key):\n return self.lines.get(key, \"\")", "title": "" }, { "docid": "f25113bbca67602956e6dc74bc4e3580", "score": "0.44811586", "text": "def get_participant(self):\n\n\t\treturn self.get_key_value('participant')", "title": "" }, { "docid": "ddd0a12f9e1370aa2088efcc55495958", "score": "0.44807902", "text": "def getStopTimeString( self ):\n return self.end().AMPMMinutes()", "title": "" }, { "docid": "db2fb4b5086999d816b768055c227114", "score": "0.44795644", "text": "def flowToSpacing(self):\n \n return self._flowToSpacing", "title": "" }, { "docid": "4139c4afa2e94c02d45e257c9e677c8a", "score": "0.44779533", "text": "def bry(self):\n return self.br()[1]", "title": "" }, { "docid": "c75583e5f712d60204d396804989876f", "score": "0.4469151", "text": "def message(self, obj):\n message = obj.smsmessagesent\n if message is None:\n message = obj.smsmessagereceived\n return message.smsmsg", "title": "" }, { "docid": "60be3257f4738c3839dccd37d05e59d2", "score": "0.44682962", "text": "def equivalence_key(self):\n return ContLanguage.equivalence_key(self, prefix=\"BlankLine\")", "title": "" }, { "docid": "1033a55889e864c4a9a968321c5851ab", "score": "0.44602075", "text": "def get_generated_sentence(self):\n sentence = self.text_model.make_sentence(tries=30)\n\n if sentence is not None:\n return sentence[::-1]\n else:\n return sentence", "title": "" }, { "docid": "b69c5888b841a11d5b26bda1ebf50ca0", "score": "0.44571915", "text": "def spl(self) -> \"str\":\n return self._attrs.get(\"spl\")", "title": "" }, { "docid": 
"186ecf90f8019173484a89a6f8ebb28c", "score": "0.444744", "text": "def conversation(self):\n return self._conversation", "title": "" }, { "docid": "0cd03476bd3e85d96e0bc582696f930b", "score": "0.4441777", "text": "def get_text_stroke(self):\r\n return self.attrs.text_stroke", "title": "" }, { "docid": "d76ac415d1558b29f0a50f9b5b54d6be", "score": "0.44355196", "text": "def lex_break(self) -> None:\r\n s = self.match(self.break_exp)\r\n if self.tok and isinstance(self.tok[-1], Break):\r\n self.tok[-1].string += self.pre_whitespace + s\r\n self.i += len(s)\r\n self.line += 1\r\n self.pre_whitespace = ''\r\n elif self.ignore_break():\r\n self.add_pre_whitespace(s)\r\n self.line += 1\r\n else:\r\n self.add_token(Break(s))\r\n self.line += 1\r\n self.lex_indent()", "title": "" }, { "docid": "7adbf39a1f1882512a9d2ae421201edb", "score": "0.44268492", "text": "def get_transcript_coordinate_cds_stop(self):\n return self.chromosome_coordinate_to_transcript(self.thick_stop)", "title": "" }, { "docid": "7029a3b97aedd692fc82f66bc8654768", "score": "0.4422325", "text": "def visit_Break(self, node): #pylint: disable=invalid-name\n if self.label is None:\n self.label = self.id_generator.get_unique_id()\n return Goto(self.label, coord=node.coord)", "title": "" }, { "docid": "b4a286a58c897a49bdf9c3db9b7b9af9", "score": "0.44195637", "text": "def get_in_ph(self):\n return self.confidence_relation_ph, self.confidence_entity_ph, self.entity_bb_ph, self.word_embed_relations_ph, self.word_embed_entities_ph", "title": "" }, { "docid": "064ea3a8c76427bf44a1c7e886bca74e", "score": "0.44195384", "text": "def lf(self):\n return self.__STRING_LF", "title": "" }, { "docid": "27e49267bce5ff002329dbfc360f1c7d", "score": "0.4419428", "text": "def _get_evidence_text(self, event_tag):\n par_id = event_tag.attrib.get('paragraph')\n uttnum = event_tag.attrib.get('uttnum')\n event_text = event_tag.find('text')\n if self.sentences is not None and uttnum is not None:\n sentence = self.sentences[uttnum]\n elif event_text is not None:\n sentence = event_text.text\n else:\n sentence = None\n return sentence", "title": "" }, { "docid": "fe6de8e5ced42c906a0601f20ad34589", "score": "0.44069108", "text": "def get_total_sent(self):\n return self.details['total_sent']", "title": "" }, { "docid": "cd05839947bb7b8445033fee09d87730", "score": "0.43951234", "text": "def _get_stop(self):\n return self.__stop", "title": "" }, { "docid": "180cfbddba701e9d823f64efbdc073ac", "score": "0.43941587", "text": "def _get_word(self):\n global PUNCTUATION\n READING_STATE = 'READING'\n INIT_STATE = 'INIT STATE'\n state = INIT_STATE\n char_buffer = \"\"\n non_word_delimeters = [\"-\", \"'\"]\n sentence_delimeters = [\".\", \";\", \":\"]\n while(True):\n char = self.text_stream.get_character()\n \n if char is not None:\n if state == INIT_STATE:\n # Skip whitespace and non-alphanumeric characters.\n if char.isspace() or not char.isalnum():\n continue\n else:\n state = READING_STATE\n char_buffer += char\n elif state == READING_STATE:\n # Read until we hit whitespace character or \n # non-alpha numeric character with the exeption of\n # some chars which are not considered to mark end of a word. \n \n if (not char in non_word_delimeters and \n (char.isspace() or not char.isalnum())):\n # If character is a sentence terminator/delimeter\n # include it, so that downstream clients know this\n # was the last word. 
\n if char in sentence_delimeters:\n char_buffer += char\n \n return char_buffer\n else:\n char_buffer += char\n \n else: # End of stream\n return char_buffer", "title": "" }, { "docid": "2fe18d686216c69a7e9c442a217a54d0", "score": "0.43880925", "text": "def __getitem__ (self, index):\n if len(self.__sentence)-1 < index:\n raise IndexError ( \"Invalid index \" + str(index) + \" for list of \" + str(len(self.__sentence)) + \" sentences\" )\n else:\n return self.__sentence[index]", "title": "" }, { "docid": "540dcf031c038d8f9804be396264c3f7", "score": "0.43855217", "text": "def get_conversation2(self):\r\n return self.conversation2", "title": "" }, { "docid": "49786a0584f7b35e1283fa040a035a8d", "score": "0.43799558", "text": "def get_whole(self):\n\t\treturn self.text", "title": "" }, { "docid": "2c7a5f3bb419ef509b668f40454ec896", "score": "0.43748432", "text": "def get_word(self):\n return self.word", "title": "" }, { "docid": "a8bc4a8f2b6ec3e234490bffe60efb36", "score": "0.43717855", "text": "def getNewLine():\n return \"<br>\"", "title": "" }, { "docid": "f9cb7736d1d5f5f59d38f3aa58440bd8", "score": "0.4369878", "text": "def get_sent_from(self):\n return self.sent_from", "title": "" }, { "docid": "2850c1f19832681dee9a4eeb312ba0e5", "score": "0.4365979", "text": "def getENDL_CS_ENDF_MT( self ) :\n\n from fudge.legacy.converting import endf_endl\n\n MT = self.ENDF_MT\n C, S = endf_endl.getCSFromMT( MT )\n return( { 'C' : C, 'S' : S, 'MT' : MT } )", "title": "" }, { "docid": "28e386fdf4231374e981023caff5465e", "score": "0.43596554", "text": "def segment(self):\n return self._current_line.split()[1]", "title": "" }, { "docid": "f8f6dd2c6c63e3fbd73bbeff79a9e6d3", "score": "0.4355152", "text": "def comment(self):\n return self.properties.get('comment', None)", "title": "" }, { "docid": "948dc51fb0d4a8e004a1bae4ac720e64", "score": "0.4347383", "text": "def get_weedline_padding(self):\n return self._properties['weedline_padding']", "title": "" }, { "docid": "8f6ca7e53a4eba41946b1b4bf4fb3659", "score": "0.4339661", "text": "def bent(cls):\n return cls._namespace_SIO('SIO_001094')", "title": "" }, { "docid": "e0d74b4fb7b0c1836f88e56f66314885", "score": "0.43366507", "text": "def get_note_text(self):\n if self.note:\n return self.note.note # pylint: disable=no-member\n return None", "title": "" }, { "docid": "f26f9d2bbf35cb9b395bf19d6f9404bf", "score": "0.43271786", "text": "def get_text(self):\n\t\treturn self.text.get('1.0', END+'-1c')", "title": "" }, { "docid": "71cc681a155f30d8fbfc7a683ce57a91", "score": "0.4326446", "text": "def ssltotencsw(self) :\n\t\ttry :\n\t\t\treturn self._ssltotencsw\n\t\texcept Exception as e:\n\t\t\traise e", "title": "" }, { "docid": "c158c285e16492360bc81fe73fd74845", "score": "0.43218237", "text": "def ssltotenc(self) :\n\t\ttry :\n\t\t\treturn self._ssltotenc\n\t\texcept Exception as e:\n\t\t\traise e", "title": "" }, { "docid": "7d05dfcaaef46f8e10c8ab28c773e85e", "score": "0.43179157", "text": "def get_word(self):\n new_word = self._get_word()\n\n if not new_word:\n # Update the {prev,curr}_word varibles \n # to indicate end of stream. 
\n self.prev_word = self.curr_word\n self.curr_word = None\n return None\n \n new_word = self.normalize_word(new_word)\n self._update_prev_word(new_word)\n return new_word", "title": "" }, { "docid": "104dd9ff25d83e567be507091ea13d51", "score": "0.43143", "text": "def getSentences(self):\n doc = self.doc\n if doc is None:\n console(\"No results available from the NLP pipeline\")\n return []\n\n if not self.canSentence:\n console(\"No sentence results available from the NLP pipeline\")\n return []\n\n result = []\n\n whiteRe = re.compile(r\"^[.?!\\s]*$\", re.S)\n spuriousNlBefore = re.compile(r\"\\n+(\\W)\")\n spuriousNlAfter = re.compile(r\"(\\W)\\n+\")\n\n for s in doc.sents:\n text = s.text.strip(\"\\n\")\n if whiteRe.match(text):\n continue\n\n tokenStart = doc[s.start]\n tokenEnd = doc[s.end - 1]\n sentStart = tokenStart.idx\n sentEnd = tokenEnd.idx + len(tokenEnd.text)\n\n text = spuriousNlBefore.sub(r\"\\1\", text)\n text = spuriousNlAfter.sub(r\"\\1\", text)\n result.append((sentStart, sentEnd, text))\n\n return result", "title": "" }, { "docid": "c6b366943e7a180d0f6ddb368b0bf890", "score": "0.4312276", "text": "def _get_tweet_text(data, numb):\n try:\n return data['statuses'][numb]['text']\n except IndexError:\n return None", "title": "" }, { "docid": "ec391a7d50892b5cb8e9ec921758d4a8", "score": "0.4308536", "text": "def get_sail(self):\n return self.__sail", "title": "" }, { "docid": "3ffa07a5bac6bc7344b1545be9727c15", "score": "0.43067375", "text": "def get_sent_to_object(self, o):\n return self.__sent.get(o)", "title": "" }, { "docid": "725c1e0e64e10503ec7a6c3cf9d03adf", "score": "0.4305617", "text": "def LmTxStep(self):\n return self._get_attribute('lmTxStep')", "title": "" }, { "docid": "cc6f3716e7eaa4018a3224d1bf79c695", "score": "0.4305445", "text": "def getStartText(self):\n return self.importedText[:self.startOffset] #", "title": "" }, { "docid": "030c80d553c03db15e22c2d5b2947962", "score": "0.43020508", "text": "def msrc_number(self):\n if self._msrc_number is None:\n details = self._get_details()\n raw_info = details.find(id='ScopedViewHandler_labelSecurityBulliten_Separator')\n self._msrc_number = list(raw_info.next_siblings)[0].strip()\n\n return self._msrc_number", "title": "" }, { "docid": "033a79acc62347d1af16636ed6c82735", "score": "0.43005148", "text": "def get_message(self):\n rv = self.message\n return(rv)", "title": "" }, { "docid": "b1c562c8ed9a700ffddff03cbe57a928", "score": "0.4296138", "text": "def get(self):\n\n return self.buffer[0:self.bufferEnd]", "title": "" }, { "docid": "4a1f985628e24f850663a3ca1cd2ea47", "score": "0.42938897", "text": "def GetProperties(self):\n return [FOLD_COMMENT]", "title": "" }, { "docid": "5e1a9e12dc29fc86eda8b12c24efa28a", "score": "0.42921555", "text": "def flowFromSpacing(self):\n \n return self._flowFromSpacing", "title": "" }, { "docid": "9bf25730a5cde34eb201865e1ebaf7ce", "score": "0.42839044", "text": "def getSpVerbosity(self):\n return self._spVerbosity", "title": "" }, { "docid": "88fd12591d7d837587189919002806c7", "score": "0.42823526", "text": "def get_symbol(self):\n if self.team == \"white\":\n return 'wp '\n elif self.team == \"black\":\n return 'bp '\n else:\n raise ValueError(\"this team:\" +\n self.team +\n \" doesn't exist in the realm of this game\")", "title": "" }, { "docid": "cee18652086f3ab7a77f2092b8fac112", "score": "0.42818782", "text": "def get_linewidth(self):\n return self._linewidth", "title": "" }, { "docid": "8964d89dc907103d0b4782e9c8f1d556", "score": "0.4276719", "text": "def 
break_in_syllables(word):\n if 'ss' in word or 'rr' in word:\n return PortugueseTextualProcessing().PT_DICT.inserted(word).split('-')\n return [s for s in PortugueseTextualProcessing().SILVA_SYLLABLE_SEPARATOR.separate(word) if s != '']", "title": "" }, { "docid": "75c303ab27c0527dcac9a76d5259316b", "score": "0.42751512", "text": "def get_mail_text(self):\n return self.mail_text", "title": "" }, { "docid": "ed30bb698a429e1cdff360eae116101e", "score": "0.42744875", "text": "def GetLineSpacing(self):", "title": "" }, { "docid": "d6041801e8f78fd2fe900ca998076325", "score": "0.42722967", "text": "def blurb(self):\n return self._blurb", "title": "" }, { "docid": "04c52020109db9b0c7a0928dc113c584", "score": "0.42718378", "text": "def getline(self):\n return self.getpos()[0]", "title": "" } ]
bc12ac2bd81b6cd878534a3d4f323653
Provides the user with a list of options to search the csv file and calls the appropriate search functions
[ { "docid": "4d768fac5d7955f410d296ac111496db", "score": "0.587037", "text": "def search_entry():\n clear_screen()\n print(\"Search for an Existing Entry:\")\n print('''\n [A] Search by Date\n [B] Amount of Time\n [C] Exact Search\n [D] Regex Pattern\n [E] Return to Main Menu\n ''')\n\n while True:\n rows = open_file()\n user_input = input(\"How Do You Want To Search? \")\n\n if user_input.upper() == \"A\": # Search by date\n clear_screen()\n print(\"All Existing Dates:\")\n for row in rows:\n print(row[\"Date\"])\n\n while True:\n search_date = input(\"Enter a Date from the List Above: \")\n if re.search(r'(\\d\\d/\\d\\d/\\d\\d\\d\\d)', search_date):\n break\n print(\"Please, Enter a valid date!\")\n search_by_date(search_date, rows)\n new_search()\n\n elif user_input.upper() == \"B\": # search by time\n clear_screen()\n print(\"All Logged Time in Minutes:\")\n for row in rows:\n print(row[\"Time\"])\n\n while True:\n amount_of_time = input(\"Enter Time from the List Above: \")\n try:\n int(amount_of_time)\n except ValueError:\n print(\"Please enter a valid number!\")\n else:\n search_by_time(amount_of_time, rows)\n new_search()\n\n elif user_input.upper() == \"C\": # search by exact search\n clear_screen()\n try:\n user_input = \\\n str(input(\"Enter a String for Exact Search: \"))\n except ValueError:\n print(\"Please, Enter a String of Characters\")\n else:\n exact_search(user_input, rows)\n new_search()\n\n elif user_input.upper() == \"D\": # search by regex pattern\n clear_screen()\n print(\"Enter a Regex Pattern! Example: \\d{3}\\s\\d{3}\")\n regex_input = input(\"Enter Pattern: \")\n regex_search(regex_input, rows)\n new_search()\n\n elif user_input.upper() == \"E\": # return to main menu\n clear_screen()\n start_screen()\n\n else:\n print(\"Please, Enter a Valid Choice!\")\n pass", "title": "" } ]
[ { "docid": "0f8e592987aa93a7d33fabc7963d8d61", "score": "0.64782333", "text": "def search(bot, update, args):\n csv_file = read_csv(file)\n user_string = update.message.text.split()\n result = str()\n if len(user_string) == 2:\n for i in range(len(csv_file)):\n csv_file[i] = csv_file[i].replace(' | ', ',')\n old_string = csv_file[i]\n csv_file[i] = csv_file[i].split(',')[:2]\n elem = ','.join(csv_file[i])\n if user_string[1] in elem:\n result += old_string.replace(',', ' | ')\n else:\n continue\n if result != str():\n update.message.reply_text(\n 'The book(-s) you searched for are: \\n' + result)\n else:\n update.message.reply_text('No books found on this result')\n else: # exception\n update.message.reply_text('Check the correction of your input!')", "title": "" }, { "docid": "78ef49b733b4128effd829c026a08b7d", "score": "0.6278241", "text": "def search(self):\n print(\"\"\" Search Menu\n(1) Search by date.\n(2) Search by task\n(3) Search by duration\n(4) Search by comment\n(exit) type end to program at any time\n(menu) to return to main menu\n\"\"\")\n option = input(\"please select an option 1, 2 or exit: \")\n self.keywords(option)\n if option == \"1\" or option == \"(1)\":\n clear_screen()\n self.search_date()\n\n elif option == \"2\" or option == \"(2)\":\n clear_screen()\n self.search_string('task')\n elif option == \"3\" or option == \"(3)\":\n clear_screen()\n self.search_duration()\n elif option == \"4\" or option == \"(4)\":\n clear_screen()\n self.search_string('comments')\n else:\n clear_screen()\n \"please enter one of the options in brakets\"\n self.keywords(option)", "title": "" }, { "docid": "c816c821a33bc0006176b4a6ca87ccb8", "score": "0.6217566", "text": "def main():\n\tcommand_line_arguments = get_command_line_arguments()\n\n\tif need_help(command_line_arguments): \n\t\tprint_usage_txt()\n\telif invalid_year_format(command_line_arguments):\n\t\tprint_invalid_year_format_error_message()\n\telse: \n\t\tdictionary_of_search_commands = turn_arguments_into_dictionary(command_line_arguments)\n\n\t\tdictionary_of_search_results = search_csv_into_dictionary(dictionary_of_search_commands)\n\n\t\tprint_search_results(dictionary_of_search_commands, dictionary_of_search_results)", "title": "" }, { "docid": "68abca9d2a3bb2e04b6dd0d1327c4d54", "score": "0.61331815", "text": "def main() -> None:\n args: Dict[str, str] = read_args()\n search_file(args[\"FILE\"], args[\"COLUMN\"], args[\"OPERATION\"])", "title": "" }, { "docid": "20a64a8d53764a70b4d7f2abd1e5e1c8", "score": "0.59435296", "text": "def search(self, *args):\n\n with open('all_melk.csv') as csv_file:\n # reading the csv file using DictReader\n csv_reader = csv.DictReader(csv_file)\n\n # converting the file to dictionary\n # by first converting to list\n # and then converting the list to dict\n dict_from_csv = dict(list(csv_reader)[0])\n\n print(dict_from_csv['area'])\n # it's defective", "title": "" }, { "docid": "e0363100064403e37eec83df49e25618", "score": "0.5862175", "text": "def load_search():\n city = ['FI', 'MI', 'PA', 'NA', 'BO', 'TO', 'VE', 'CA']\n prefix = ['055', '02', '091', '081', '051', '011', '041', '070']\n\n # search is a control parameter.\n # For search == 'GO' the search continues\n # For search == 'ALT' the search stops\n search = 'GO'\n\n while search == 'GO':\n name = raw_input('data to search for: ')\n search2(city, prefix, name)\n search = raw_input('continue? 
')", "title": "" }, { "docid": "f0a45f1d8a11b6ea7a621fb881b98a66", "score": "0.5856241", "text": "def main(argv):\n try:\n opts, args = getopt.getopt(argv,\"hf:s:c:\",[\"support_threshold=\",\"confidence_threshold=\"])\n except getopt.GetoptError:\n print ('file.csv -s <support_threshold> -c <confidence_threshold>')\n sys.exit(2)\n\n for opt, arg in opts:\n if opt == '-h':\n print ('file.csv -s <support> -c <confidence>')\n sys.exit()\n elif opt in (\"-f\", \"--file\"):\n infile = arg\n elif opt in (\"-s\", \"--support\"):\n support_threshold = float(arg)\n elif opt in (\"-c\", \"--confidence\"):\n confidence_threshold = float(arg)\n\n data = read_data(infile)\n\n final_results = [\"{} => {} s = {:0.2f}, c = {:0.2f}\".format(\n item_set, item, support_frequency(data[1:], item_set.union({item})),\n confidence(data[1:], item_set, {item})) for item_set, item in apriori(\n data[1:], support_threshold, confidence_threshold)]\n for result in final_results:\n print(result)", "title": "" }, { "docid": "772d062664fbc8104f8491d668016bbb", "score": "0.5852055", "text": "def do_search(self,args):\n self.ctlr.search_cmd(args)", "title": "" }, { "docid": "9f201e401d6e09f08b6a7d5b7d75a7b4", "score": "0.5843696", "text": "def search_data():", "title": "" }, { "docid": "531d71d3f9ba6114859b85b53b7de356", "score": "0.5745585", "text": "def search(self, **kwargs):\n with open(self.filename, 'rb') as f:\n reader = csv.DictReader(f)\n self.schema = reader.next()\n\n # Make kwargs into QueryParameter objects\n query = self.__prepare_query(kwargs)\n\n return filter(lambda row: self.__matches_row(row, query=query), reader)", "title": "" }, { "docid": "a32139c46ead12b6105705984b4e6568", "score": "0.5744557", "text": "def open_file(self):\n self.column_checkboxes = []\n self.selected_column_targets = []\n file_name, filter = QFileDialog.getOpenFileName(\n self, 'Open CSV', os.getenv('HOME'), 'CSV(*.csv)')\n if file_name:\n self.load_file(file_name)", "title": "" }, { "docid": "702980fe874a3a33652ddda3f1152015", "score": "0.5732689", "text": "def search():\n try:\n corp = filter_corpus()\n print('Finished filtering')\n term = search_term.get()\n is_sensitive = sensitive.get()\n write_to_file = to_file.get()\n if term == '':\n messagebox.showerror('Error', 'No search term was entered. 
Please try again.')\n return\n if not is_regex.get():\n term = words_to_regex(term)\n if output_type.get() == 'concordance':\n concordance_lines = corp.get_concordances(term, is_sensitive, context_len.get())\n if write_to_file:\n concordance_to_file(concordance_lines)\n else:\n show_concordances(concordance_lines, term)\n elif output_type.get() == 'total frequency':\n raw = corp.get_total_raw_count(term, is_sensitive)\n norm = corp.get_normalized_count(raw)\n if write_to_file:\n counts_to_file(raw, norm)\n else:\n show_counts(raw, norm)\n else:\n cat = category.get()\n freq_type = frequency_type.get()\n print('Getting counts by category')\n counts_by_category = corp.get_counts_by_category(term, is_sensitive, cat, freq_type)\n if write_to_file:\n categories_to_file(counts_by_category, cat, freq_type)\n else:\n show_by_category(counts_by_category, cat, freq_type)\n except Exception as e:\n messagebox.showerror('Error', e)", "title": "" }, { "docid": "6b7d60c294eb9b73ae68b7e06a13f0dc", "score": "0.56831276", "text": "def main(option, args):\n print 'option:', option\n print 'args:', args\n if option.csv:\n print 'csv:', option.csv", "title": "" }, { "docid": "55718bb0245ef685dd7ae1c11816d88e", "score": "0.5680772", "text": "def test_stock_search(self):\n\n rows = read_csv_file(os.path.dirname(os.path.abspath(__file__)) + \"/sample.csv\")\n stock_search = StockSearch()\n\n for row in rows:\n stock_search.insert(row[0])\n\n self.assertEqual(stock_search.search(\"AICIXE\"), (True, \"AICIXE\"))\n self.assertEqual(stock_search.search(\"AICIE\"), (False, \"AICIXE\"))\n self.assertEqual(stock_search.search(\"AM\"), (False, \"AMBKP\"))", "title": "" }, { "docid": "ab3ce96af31d810f8a37399f1afd4b62", "score": "0.566289", "text": "def _cmd_search(self, options, *args):\n found = []\n for pattern in args:\n entries = self.phonebook.search(pattern, options.ignore_case,\n options.regexp, options.fields)\n # add all entries which aren't already in found. This avoids\n # printing entries twice, which are matched by more than one\n # pattern\n new = filter(lambda entry: entry not in found, entries)\n found += new\n self.print_table(found, options.output, options.sortby,\n options.descending)", "title": "" }, { "docid": "d69be548affe14f8302baa37e20b37f4", "score": "0.5660264", "text": "def handle(self, *params, **options):\n if len(params) != 2:\n raise CommandError('a single .csv file amd type_slug are required as input.')\n\n region_only = ''\n while region_only != 'Y' and region_only != 'N':\n region_only = raw_input('Does the covariate data file have region column and no iso3 column? 
Y|N ')\n\n # read the csv file into a list of dicts\n fname = params[0]\n type_slug = params[1]\n csv_f = csv.DictReader(open(fname))\n cov_data = [d for d in csv_f]\n \n # process the data\n if cov_data and type_slug:\n # make an iso3 list\n iso3_data = [x[1:] for x in csv.reader(open(CSV_PATH + 'country_region.csv'))]\n iso3_list = []\n for r in iso3_data:\n iso3_list += r\n\n # make a region list\n region_list = [x[0] for x in csv.reader(open(CSV_PATH + 'country_region.csv'))]\n\n # make a region_country_dict\n region_country_dict = {}\n for x in csv.reader(open(CSV_PATH + 'country_region.csv')):\n region_country_dict[x[0]] = x[1:]\n\n # check the cov data content\n for ii, d in enumerate(cov_data):\n try:\n d['value'] = float(d[type_slug])\n except KeyError:\n print('Could not find column %s (is it spelled correctly?)' % type_slug)\n return\n except ValueError:\n print('Could not interpret value for %s in line %d' % (type_slug, ii+2))\n return\n\n if d.has_key('year'):\n try:\n d['year'] = int(d['year'])\n except ValueError:\n print('Could not interpret year in line %d' % (ii+2))\n return\n else:\n d['year'] = gbd.fields.ALL_YEARS\n \n d['sex'] = d.get('sex', '')\n if not d['sex'] in ['male', 'female', 'total', '']:\n print('Could not interpret sex in line %d' % (ii+2))\n return\n\n if region_only == 'Y' and not d.has_key('region'):\n print('Could not find column region (is it spelled correctly?)')\n return\n\n if not d.has_key('iso3') and not d.has_key('region'):\n print('Could not find either column iso3 or column region (is it spelled correctly?)')\n return\n\n if d.has_key('iso3') and not d['iso3'] in iso3_list:\n print('Could not interpret iso3 in line %d' % (ii+2))\n return\n\n if d.has_key('region') and not d['region'] in region_list:\n print('Could not interpret region in line %d' % (ii+2))\n return\n\n if d.has_key('iso3') and d.has_key('region') and d['iso3'] not in region_country_dict[d['region']]:\n print('The iso3 and the region are inconsistent in line %d' % (ii+2))\n return\n \n if d.has_key('age'):\n try:\n int(d['age'])\n except ValueError:\n print('Could not interpret age in line %d' % (ii+2))\n return\n\n # get or create the CovariateType object\n cov_type, is_new = CovariateType.objects.get_or_create(slug=type_slug, defaults={'year_start': 0, 'year_end': 0})\n\n # get infomation\n uploader = ''\n while uploader == '':\n uploader = raw_input('Please enter your DisMod username. ')\n cov_type.uploader = uploader\n\n source = ''\n while source == '':\n source = raw_input('Where did you get this covariate data file? ')\n cov_type.source = source\n\n description = ''\n while description == '':\n description = raw_input('Please enter a description, how the data was created, and how the missing values were filled in. ')\n cov_type.description = description\n\n year_start = ''\n while year_start == '':\n year_start = raw_input('Please enter the starting year of the covariate. ')\n try:\n int(year_start)\n except ValueError:\n year_start = ''\n cov_type.year_start = year_start\n\n year_end = ''\n while year_end == '':\n year_end = raw_input('Please enter the ending year of the covariate. 
')\n try:\n int(year_end)\n except ValueError:\n year_end = ''\n cov_type.year_end = year_end\n\n if region_only == 'Y':\n cov_type.region_only = True\n else:\n cov_type.region_only = False\n\n # calculate mean and variance\n vals = [d['value'] for d in cov_data]\n cov_type.mean = pl.mean(vals)\n scale = pl.std(vals)\n cov_type.variance = scale * scale\n\n cov_type.last_modified_time = datetime.now()\n\n # save covariate type \n cov_type.save()\n\n # add cov data to cov objects\n print \"Adding covariate data from %s\" % fname\n save_covariates(cov_type, cov_data)", "title": "" }, { "docid": "d03b31a3d5febd9191f22f7528214ce8", "score": "0.5650011", "text": "def main():\n\n opts = _get_parser() # get options from command line\n\n if opts.i[-4:] != '.csv':\n raise IOError('Input file is not a csv')\n\n if opts.o != 'none':\n if opts.o[-4:] != '.csv':\n raise IOError('Output file is not a csv')\n\n process_csv(opts.i, opts.o, opts.s)", "title": "" }, { "docid": "d88e4a0f32b900db51d3551c2c1522bb", "score": "0.56272006", "text": "def search_and_process_data( data ):\n target= search_place( data )\n if target is not None:\n print( target )\n print()\n print( \" (1) SELECT this listing for Great Circle calculation\" )\n print( \" (2) EDIT Listing\" )\n print( \" (3) DELETE Listing\" )\n print()\n print( \" (0) RETURN to menu\" )\n z= input( \"CHOICE? \" )\n if z == \"1\":\n pass # Need to switch to pathfind with this city in tow.\n elif z == '2':\n remove_data( target, data )\n add_data( data )\n elif z == '3':\n confirm= ''\n while confirm not in ('y', 'n'):\n confirm= input( \" Are you SURE you want to delete this place? (y/n) \" )\n if confirm == 'y':\n remove_data( target, data )", "title": "" }, { "docid": "d69db5f3f94c45aa1ee489b10278fee2", "score": "0.5622514", "text": "def readCsv(self, typ: str, names: List[str]) -> str:\n settings = QSettings()\n if settings.contains('/QSWATPlus/LastInputPath'):\n path = str(settings.value('/QSWATPlus/LastInputPath'))\n else:\n path = ''\n if typ == 'plant' or typ == 'urban' or typ == 'usersoil':\n caption = 'Choose {0} csv file'.format(typ)\n else:\n caption = 'Choose {0} lookup csv file'.format(typ)\n caption = QSWATUtils.trans(caption)\n filtr = FileTypes.filter(FileTypes._CSV)\n csvFile, _ = QFileDialog.getOpenFileName(None, caption, path, filtr)\n if csvFile is not None and os.path.isfile(csvFile):\n settings.setValue('/QSWATPlus/LastInputPath', os.path.dirname(str(csvFile)))\n return self.readCsvFile(csvFile, typ, names)\n else:\n return '';", "title": "" }, { "docid": "7836e79c00121b27fc9f7fac216ffa4a", "score": "0.557361", "text": "def get_batch_searches_csv(app: Flask, batch_searches: dict) -> dict:\n # read csv + prep for search tests\n filename = app.config['FILE_NAME']\n with open(f'csvs/{filename}', 'r') as csvfile:\n reader = csv.DictReader(csvfile)\n # organize searches by time/criteria + split into exact/similar results\n batch_searches = parse_results(batch_searches, reader, False)\n return batch_searches", "title": "" }, { "docid": "24eccf8bd2faa27b6fcbf9310371f3eb", "score": "0.5570384", "text": "def test_csv_file_selection(self):", "title": "" }, { "docid": "cbb4d4d2f54f6c0584d53aaf11b3fe79", "score": "0.5563974", "text": "def search():\n\n data = None\n with open(INDEX_FILE_DIR, 'rb') as f:\n data = pickle.load(f)\n\n while True:\n query = input('入力:')\n\n if query == '':\n break\n\n print('出力')\n results = query_data(search_term=query, data=data)\n for row in results:\n print(','.join(['\"{}\"'.format(value) for value in row]))", 
"title": "" }, { "docid": "dd07bedf6243c0b846998b86d1cdb752", "score": "0.5519876", "text": "def main(csv_file_path):\n\n obp_logger.info(\"Start Main\")\n obp_logger.debug(\"csv_file_path is: %s\" % csv_file_path)\n obp_logger.debug(\"Check that csv_file_path is valid path\")\n check_for_existing_csv(csv_file_path)\n\n obp_logger.debug(\"Start parse_row_of_csv\")\n parse_row_of_csv(csv_file_path)", "title": "" }, { "docid": "e319c858fd77da5bd5bb3ea0660bd35d", "score": "0.55111814", "text": "def search():\n parent = request.args.get(\"number\").replace(\".\", \"\")\n results = []\n\n # If no input, automatically search by section\n if len(parent) == 0:\n with open(\"HTS_Sections.csv\", newline = '', encoding = \"latin-1\") as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n results.append('<button type=\"button\" class=\"list-group-item list-group-item-action\"'\n + 'onClick=\"sectionSearch(' + \"'\" + row[\"hts_number\"] + \"'\" + ')\"' + '>'\n + row[\"hts_number\"] + \" - \" + row[\"Parsed Description\"] + '</button>')\n\n # If not complete code, search by chapter\n elif len(parent) < 2:\n with open(\"HTS_Chapters.csv\", newline = '', encoding = \"latin-1\") as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n if row[\"hts_number\"][:len(parent)] == parent:\n results.append(constructButton(row))\n\n # Otherwise search the master code sheet\n else:\n with open(\"HTS_Data.csv\", newline = '', encoding = \"latin-1\") as csvfile:\n reader = csv.DictReader(csvfile)\n\n if len(parent) < 4:\n for row in reader:\n if row[\"Indent\"] == \"0\" and parent == str(row[\"hts_number\"])[:len(parent)]:\n results.append(constructButton(row))\n else:\n # Detect if code is complete (minus parents)\n if len(parent) % 2 == 0:\n # If so, find all whose direct parent is code\n for row in reader:\n if row[\"Direct Parent\"].replace(\".\", \"\") == parent:\n # Account only for non-blank codes\n if row[\"hts_number\"] != \"\":\n results.append(constructButton(row))\n # If not, return all codes whose direct parent is 1 digit longer using hts_number\n else:\n for row in reader:\n # First check if direct parent starts the same\n tempCode = row[\"hts_number\"].replace(\".\", \"\")\n if parent == tempCode[:len(parent)]:\n # Then check if direct parent is correct length\n if len(tempCode) == len(parent) + 1:\n # Account only for non-blank codes\n if row[\"hts_number\"] != \"\":\n results.append(constructButton(row))\n return jsonify(results)", "title": "" }, { "docid": "af8b48df93467c00da86d60f65fb1964", "score": "0.54899895", "text": "def csv_organisations(csv_path: click.Path):\n pass", "title": "" }, { "docid": "a5602209aabac3144470a89858b27be8", "score": "0.5460069", "text": "def main(q=\"\"):\n\n search = q.decode('utf-8')\n num_delims = search.count(alfred_delim)\n searchsplit = search.split(alfred_delim)\n\n # If the user hasn't typed anything, give some instructions and the\n # option to open the settings menu.\n if search.strip() == \"\":\n result = [\n alp.Item(\n title = \"Search INSPIRE\",\n subtitle = \"Begin typing to search INSPIRE\",\n valid = \"no\",\n autocomplete = \"\"\n ),\n alp.Item(\n title = \"Settings\",\n subtitle = \"Change ainspire's settings\",\n valid = \"no\",\n autocomplete = \"settings\" + alfred_delim\n )\n ]\n # Settings menu.\n elif searchsplit[0] == \"settings\":\n result = settings_menu(searchsplit[1:])\n # If the search has no delimiters the user is still typing the query:\n elif num_delims == 0:\n result = typing_menu(searchsplit[0])\n # Has the 
string one delimiter? Then perform a regular Inspire search.\n elif num_delims == 1:\n result = inspire_search(searchsplit[0].strip())\n # Are there two delimiters? Then it's a context menu.\n elif num_delims == 2:\n result = context_menu(searchsplit[1],searchsplit[0])\n # Three delimiters? Then it's an author search menu.\n elif num_delims == 3:\n result = author_menu(searchsplit[2])\n\n return alp.feedback(result)", "title": "" }, { "docid": "c0d17bb59afa237cbfc8a78d2628b8df", "score": "0.5416616", "text": "def handle(self, *args, **options):\n path = args[0]\n term_filter = ['userid','ipaddress','dateadded',\n 'datelastupdated','sectionid', 'active','complete',\n 'adminEmailSent','stamp','useridlastupdated']\n self.port_table(term_filter, '/home/pykun/4560/dbs/term.txt')", "title": "" }, { "docid": "80aa1c49d73d3b24b59645992ddd30bb", "score": "0.53945", "text": "def read_files():\n if len(sys.argv) >= 4:\n # read output file name\n global out_csv\n out_csv = sys.argv[3]\n # read text file\n valid_titles = pd.read_csv(sys.argv[1], header=None, names=[\"title\"], delimiter = '\\n')\n # read csv file\n ratings = pd.read_csv(sys.argv[2])\n return valid_titles, ratings\n else:\n print(\"There must be three arguments: 2 input file names (txt and csv) and 1 output file name (csv)\")\n return None, None", "title": "" }, { "docid": "122887e2964bdfde4f21b4dffbab157e", "score": "0.5367485", "text": "def evaluate_positions(self):\n \n if self.use_csv:\n if not self.csv_file:\n self.txt.setText(\"No csv file specified\")\n else:\n self.txt.setText('')\n self.run_csv_input()\n else:\n self.run_GUI_input()", "title": "" }, { "docid": "35e0cab6058474e325508e831856a532", "score": "0.5366659", "text": "def search_entries(some_entries):\n search_menu = {1: \"date\",\n 2: \"date range\",\n 3: \"time spent\",\n 4: \"search title or comments\",\n 5: \"regex pattern (advanced)\",\n 6: \"main menu\"}\n print(\"Here are your search options:\")\n for item in search_menu:\n print(\"{}. {}\".format(item, search_menu[item]))\n search_type = verify_int(\"Please enter the number of your chosen search type: \")\n\n choosing = True\n while choosing:\n if search_type == 1:\n choosing = False\n dates = []\n for entry in some_entries:\n if entry.date not in dates:\n dates.append(entry.date)\n print(\"Here are the dates with entries: \")\n for index, date in enumerate(dates, 1):\n print(\"{}. 
{}\".format(index, datetime.datetime.strftime(date, DATE_FORMAT)))\n selection = verify_int(\"Please enter the number of the date you would like to view entries for: \")\n results = search_by_date(some_entries, dates[selection - 1])\n\n page_entries(results, some_entries)\n\n elif search_type == 2:\n choosing = False\n date_1 = verify_date(\"Please enter the beginning of your desired date range: \", DATE_FORMAT)\n date_2 = verify_date(\"Please enter the end of your desired date range: \", DATE_FORMAT)\n results = []\n for entry in some_entries:\n if date_2 >= entry.date >= date_1:\n results.append(entry)\n\n page_entries(results, some_entries)\n\n elif search_type == 3:\n choosing = False\n time_query = verify_int(\"Please enter the time spent you would like to search entries for: \")\n results = []\n for entry in some_entries:\n if entry.time == time_query:\n results.append(entry)\n\n page_entries(results, some_entries)\n\n elif search_type == 4:\n choosing = False\n search_phrase = input(\"Please enter the phrase you would like to search with: \")\n results = []\n for entry in some_entries:\n if search_phrase in entry.work_done or search_phrase in entry.comments:\n results.append(entry)\n\n page_entries(results, some_entries)\n\n elif search_type == 5:\n choosing = False\n proper_regex = False\n results = []\n while not proper_regex:\n regex = input(\"Please enter the regex pattern you would like to search by: \")\n results = []\n try:\n for entry in some_entries:\n if re.search(regex, entry.work_done) or re.search(regex, entry.comments):\n results.append(entry)\n except re.error:\n print(\"Improper regex. Please check your regex and try again.\")\n else:\n proper_regex = True\n\n page_entries(results, some_entries)\n\n elif search_type == 6:\n choosing = False\n\n else:\n search_type = verify_int(\"Please select a proper menu selection (1 - 6): \")", "title": "" }, { "docid": "fb8d8e0ff0a28ed2bbe84415767ce448", "score": "0.5357014", "text": "def search(self):\n e = simpledialog.askstring(title=\"Data Crawled\",\n prompt=\"What do you want to search?:\")\n self.refresh()\n x = self.db.search(e)\n self.update(x)", "title": "" }, { "docid": "87e98505c792fca73d517400780745e1", "score": "0.5352421", "text": "def sectionSearch():\n parent = request.args.get(\"number\")\n results = []\n with open(\"HTS_Chapters.csv\", newline = '', encoding = \"latin-1\") as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n if row[\"Section\"] == parent:\n results.append(constructButton(row))\n return jsonify(results)", "title": "" }, { "docid": "3f3c9d9f00f89c32576e50261490a37a", "score": "0.5338798", "text": "def function(choose):\n if choose == 0:\n '''Create CSV templates'''\n template()\n elif choose == 1:\n '''Create Normalization CSV File'''\n data_normalization()\n elif choose == 2:\n '''Create Combinatorial CSV File'''\n combinatorial_data()\n else:\n print('Invalid option')\n sys.exit()", "title": "" }, { "docid": "015a648e1562d0178b13c1441ddb9d03", "score": "0.5333438", "text": "def search_csv_into_dictionary(dictionary_of_search_commands):\n\tdictionary_of_search_results = defaultdict(list)\n\twith open('books.csv', newline=\"\\n\") as csvbooks:\n\t\t\treader = csv.reader(csvbooks, delimiter=\",\", quotechar=\"\\\"\")\n\t\t\tfor csv_row in reader:\n\t\t\t\tfor search_category in dictionary_of_search_commands:\n\t\t\t\t\tfor search_string in dictionary_of_search_commands[search_category]:\n\t\t\t\t\t\tif search_string_is_in_row(search_category, search_string, 
csv_row):\n\t\t\t\t\t\t\tdictionary_of_search_results[search_string].append(csv_row)\n\treturn dictionary_of_search_results", "title": "" }, { "docid": "33ac297c868abe94f36c05f302adb1de", "score": "0.53113925", "text": "def search(self):\n\n if self.combo_selectManufacture.get() == '' and self.combo_selectModel.get() == '':\n mb.showwarning('Message from system', 'Select at least one model or manufacture')\n\n else:\n # call the controller class method and get the result as list.\n searchList = OfficeStaff().searchCar(self.combo_selectModel.get(), self.combo_selectManufacture.get())\n\n # Create a temporary window to show the result as table\n tempWindow = Tk()\n tempWindow.title(\"Search Result\")\n\n # code for creating table\n # find total number of rows and columns in list\n for i in range(len(searchList)):\n for j in range(len(searchList[0])):\n tempEntry = Entry(tempWindow, width=20, fg='blue',\n font=('Arial', 16, 'bold'))\n tempEntry.grid(row=i, column=j)\n tempEntry.insert(END, searchList[i][j])\n tempWindow.mainloop()", "title": "" }, { "docid": "d56915d71af34b5c23a0e08f014146c6", "score": "0.53112775", "text": "def pbn_search_clicked(self):\r\n # 获取 UI 数据\r\n file_path = self.__ln_file_path.text()\r\n file_name = self.__ln_file_name.text()\r\n\r\n # 检查参数\r\n if file_path == '':\r\n QMessageBox(QMessageBox.Warning, '缺少参数!', '请输入搜索路径!', QMessageBox.Ok, self).exec_()\r\n return\r\n if file_name == '':\r\n QMessageBox(QMessageBox.Warning, '缺少参数!', '请输入匹配条件!', QMessageBox.Ok, self).exec_()\r\n return\r\n\r\n # 判断搜索模式\r\n mode = self.__search_mode['fuzzy']\r\n if self.__rbn_reg.isChecked():\r\n mode = self.__search_mode['reg']\r\n elif self.__rbn_fuzzy.isChecked():\r\n mode = self.__search_mode['fuzzy']\r\n elif self.__rbn_precise.isChecked():\r\n mode = self.__search_mode['precise']\r\n\r\n # 大小写敏感标记\r\n I = True\r\n if self.__rbn_reg_Ino.isChecked():\r\n I = False\r\n\r\n self.__browser_result.clear()\r\n self.__browser_error.clear()\r\n self.__tabView.setTabText(0, '匹配结果(0)')\r\n self.__tabView.setTabText(1, '错误结果(0)')\r\n self.__searching = True\r\n\r\n # 开启子线程,后台深度遍历\r\n self.__thread_killer = False\r\n if self.__rbn_search_file.isChecked():\r\n self.__lab_state.setText('正在搜索......已搜索到 0 个文件')\r\n self.__sub_thread_search = Thread(target=self.search_from_filename, args=(file_path, file_name, mode, I))\r\n self.__sub_thread_search.start()\r\n else:\r\n self.__lab_state.setText('正在搜索......')\r\n self.__sub_thread_search = Thread(target=self.search_from_content, args=(file_path, file_name, mode, I))\r\n self.__sub_thread_search.start()\r\n\r\n # 开启子线程,显示搜索结果\r\n self.__sub_thread_show_result = Thread(target=self.show_search_result)\r\n self.__sub_thread_show_result.start()\r\n\r\n # 开启子线程,显示错误结果\r\n self.__sub_thread_show_error = Thread(target=self.show_error_result)\r\n self.__sub_thread_show_error.start()\r\n\r\n # self.__pbn_search_file.setEnable(False)\r\n # self.__pbn_search_content.setEnable(False)\r", "title": "" }, { "docid": "e4d8a5c4fa815ac6924fee274aebff9d", "score": "0.5302967", "text": "def Search(self, path):\n pass", "title": "" }, { "docid": "828569f7af402570194a8e43926b92d7", "score": "0.53010553", "text": "def search(snippet, filename):\n logging.info(\n \"Searching {} for a snippet matching '{}'\".format(filename, snippet))\n logging.debug(\"Opening file\")\n with open(filename, \"r\") as f:\n reader = csv.reader(f, delimiter=\",\")\n for row in reader:\n if row[1] == snippet:\n return row[0], snippet", "title": "" }, { "docid": "371168d735acc07f1969c5eabb19d846", 
"score": "0.52941626", "text": "def read_csv_input(self):\n logging.info('INICIANDO: leitura do arquivo csv com os resultados e resultados esperados')\n expected_results = {}\n results = {}\n with open(\"result/resultados_esperados.csv\", newline='') as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n if int(row['QueryNumber']) in expected_results:\n expected_results[int(row['QueryNumber'])].append((int(row['DocNumber']), int(row['DocVotes'])))\n else:\n expected_results[int(row['QueryNumber'])] = [(int(row['DocNumber']), int(row['DocVotes']))]\n with open(self.results_file, newline='') as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader: \n doc_ranking = row['RankingDoc'].replace('[', '').replace(']', '').split(\",\")\n ranking = int(doc_ranking [1])\n score = float(doc_ranking[2])\n if int(row['QueryNumber']) in results:\n results[int(row['QueryNumber'])].append((ranking, score))\n else:\n results[int(row['QueryNumber'])]=[(ranking, score)] \n logging.info('FINALIZADO: leitura do arquivo csv com os resultados e resultados esperados')\n return expected_results, results", "title": "" }, { "docid": "eb03fd908a375f3846a6abc96d03082a", "score": "0.5293505", "text": "def main():\n message = input('Please enter your query:\\n')\n where = which_where(message)\n select = select_message(message)\n squeal.print_csv(select)\n \n while message != '': \n message = input('Please enter your query:\\n')\n if query != '':\n where = which_where(message)\n select = select_message(message)\n squeal.print_csv(select)\n else:\n pass", "title": "" }, { "docid": "6d5ce88d12685550956742c4d35a1b25", "score": "0.5281954", "text": "def searchFile(self):\n \"\"\"Update text box with path value\"\"\"\n def test(string, expression):\n test = False\n if string in expression:\n test = True\n return test \n validFormat = \"xls\"\n file = QtGui.QFileDialog.getOpenFileName(None, 'Open file')\n \"\"\"Valid file format\"\"\"\n isValid = test(validFormat, file)\n if not isValid or isValid == \"\" :\n file = \"Please, select valid file !\"\n \"\"\"Update text box with path value\"\"\"\n return self.dlg.pathTpl.setText(file)", "title": "" }, { "docid": "33b02a49f74bd139e6d35f98b6bf4f59", "score": "0.5273229", "text": "def get_filters():\r\n print('Hello! Let\\'s explore some US bikeshare data! \\n')\r\n # Get user input for city (chicago, new york city, washington).\r\n # Collecting only a valid answer for city input using while loop, lower case answers acceptable\r\n city = input('Choose a city from: Chicago, New York City or Washington: ')\r\n city = city.lower();\r\n # Validate input to match file name with while loop\r\n while city not in ['chicago', 'new york city', 'washington']:\r\n print ('You may have a typo or an invalid city, please try again.')\r\n city = input('Choose a city from: Chicago, New York City or Washington: ')\r\n city = city.lower()\r\n\r\n # Get user input for month (all, january, february, ... 
, june)\r\n # Collecting only a valid answer for month input using while loop, lower case answers acceptable\r\n month = input('Choose a month from: January, February, March, April, May, June or choose All: ')\r\n month = month.lower();\r\n # Validate input to match file name with while loop\r\n while month not in ['january', 'february', 'march', 'april', 'may', 'june', 'all']:\r\n print ('You may have a typo or an invalid month, please try again.')\r\n month = input('Choose a month from: January, February, March, April, May, June or choose All: ')\r\n month = month.lower()\r\n\r\n # Get user input for day of week (all, monday, tuesday, ... sunday)\r\n # Collecting only a valid answer for day input using while loop, lower case answers acceptable\r\n day = input('Choose a day of the week: Monday, Tuesday, Wednesday, Thursday, Friday, Saturday, Sunday or choose All: ')\r\n day = day.lower();\r\n # Validate input to match file name with while loop\r\n while day not in ['monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday', 'all']:\r\n print ('You may have a typo or an invalid selection, please try again.')\r\n day = input('Choose a day of the week: Monday, Tuesday, Wednesday, Thursday, Friday, Saturday, Sunday or choose All: ')\r\n day = day.lower()\r\n # Printing the selection made by the user\r\n print(\"\\nGreat, thanks for choosing this combination, City: {}, Month: {} and Day: {}!\".format(city.title(),month.title(),day.title()))\r\n\r\n print('-'*40)\r\n return city, month, day", "title": "" }, { "docid": "a18212da8e11bf73fc86ad9fbc923bd6", "score": "0.5271173", "text": "def read_file():\n # usage statement, with optional flags for header and delimiter \n # (limited to common ones for csv)\n usage = (\"usage: \" + sys.argv[0] + \" csvfile [-h] [-d <p | s | sc | t>)]\"\n \"\\n\\t-h: csv file contains header (default is no header)\"\n \"\\n\\t-d p | s | sc | t: delimiter as p (pipe), s (space), sc (semicolon) or t (tab)\"\n \"\\n\\t(default delimiter is comma)\")\n\n # Checks if file is provided. If not, program is aborted.\n if len(sys.argv) < 2:\n raise(SystemExit)(f\"csv file must be provided in the command line.\"\n f\"\\n{usage}\")\n\n # Checks if file exists and is csv. 
If not, program is aborted.\n try:\n filename = sys.argv[1]\n if filename.endswith('.csv'):\n csv_file = open(filename, 'r', encoding = 'utf-8')\n\n # Process optional flags in command line for header and/or delimiter\n opts = [opt for opt in sys.argv[1:] if opt.startswith(\"-\")]\n args = [arg for arg in sys.argv[1:] if not arg.startswith(\"-\")]\n\n header = ('y' if \"-h\" in opts else 'n')\n\n try:\n # Process argument for flag -d\n if \"-d\" in opts:\n if args[1] == 'p':\n separator = '|'\n elif args[1] == 's':\n separator = ' '\n elif args[1] == 'sc':\n separator = ';'\n elif args[1] == 't':\n separator = '\\t'\n else:\n separator = ','\n except:\n # Program is aborted if flag -d was not followed with valid delimiter\n raise(SystemExit)(f\"-d: No valid delimiter provided\"\n f\"\\n{usage}\")\n else:\n raise(SystemExit)(f\"{filename}: No csv file\"\n f\"\\n{usage}\")\n except FileNotFoundError:\n raise(SystemExit)(f\"{filename}: No such file or directory\"\n f\"\\n{usage}\")\n return csv_file, separator, header", "title": "" }, { "docid": "7f41a2e5fe7846f176d0ecb7c1c8d0bc", "score": "0.5250878", "text": "def onSelectResultsFile(self):\n if not self.resultsFileDialog:\n self.resultsFileDialog = qt.QFileDialog(self.parent)\n self.resultsFileDialog.options = self.resultsFileDialog.DontUseNativeDialog\n self.resultsFileDialog.acceptMode = self.resultsFileDialog.AcceptOpen\n self.resultsFileDialog.defaultSuffix = \"csv\"\n self.resultsFileDialog.setNameFilter(\"Comma Separated Values (*.csv)\")\n self.resultsFileDialog.connect(\"fileSelected(QString)\", self.onResultsFileSelected)\n self.resultsFileDialog.show()", "title": "" }, { "docid": "416d40f96ec7f6dddabc82c7c44291e8", "score": "0.52392995", "text": "def tc_search(self, args, range):\n # Save the current explorer for restoration when the searcher finish\n self.expSave = self.explorers[self.selectedExplorer]\n # Replace the current explorer with a searcher and borrow its buffer\n se = searcher(self.nvim, self.expSave.buffer, self.expSave.cwd)\n se.window = self.expSave.window\n # Perfor the search with the correct parameters\n dir = self.expSave.cwd\n filePattern = args[1]\n if(len(args) > 2):\n inputPattern = args[2]\n else:\n inputPattern = ''\n se.search(dir, filePattern, inputPattern)\n self.explorers[self.selectedExplorer] = se\n self.explorers[self.selectedExplorer].draw()\n self.nvim.command('startinsert')\n self.nvim.command('normal! $')", "title": "" }, { "docid": "47f6d4f8feeb04f339b2cda8ce0270ec", "score": "0.5225744", "text": "def pandas_search(team,phone_number):\n\n file,search,display = load_team_information(team)\n if os.path.exists(file):\n data = []\n dest_head = []\n df = pd.read_csv(r'%s' % file, dtype='str', sep='|', low_memory=True)\n dest_head = get_column(display, df.columns)\n m = 5 #Max 5 rows are matched with the phone_number\n n = 0\n for target_column in search:\n df_mobile = df[target_column].values.tolist()\n i = 0\n for E164_number in df_mobile:\n if str(E164_number).replace(' ', '').upper() == phone_number.upper():\n record = list(df.iloc[i, ])\n data.append(record)\n n += 1\n i += 1\n if n == m:\n return data, dest_head\n else:\n i += 1\n if n == m:\n return data, dest_head\n\n if data:\n return data, dest_head\n else:\n return [], []\n else:\n return 'The CSV path %s is not correct!' 
%file, []", "title": "" }, { "docid": "30b7049ecf1057c16bc1abd860e3decc", "score": "0.522262", "text": "def csvConsumer(self, filename):\n\n # Open the csv file\n f = self.openWriteHandle(filename)\n delim = \",\"\n\n if self.options.control is None:\n\n # Columns for in-silico control\n if self.options.methylFraction:\n cols = [\"refName\", \"tpl\", \"strand\", \"base\", \"score\", \"tMean\", \"tErr\", \"modelPrediction\", \"ipdRatio\", \"coverage\", FRAC, FRAClow, FRACup]\n else:\n if self.options.useLDA:\n # FIXME: For testing LDA model, to look at LDA scores in csv output (run without --methylFraction or --control):\n cols = [\"refName\", \"tpl\", \"strand\", \"base\", \"score\", \"tMean\", \"tErr\", \"modelPrediction\", \"ipdRatio\", \"coverage\", \"Ca5C\"]\n else:\n cols = [\"refName\", \"tpl\", \"strand\", \"base\", \"score\", \"tMean\", \"tErr\", \"modelPrediction\", \"ipdRatio\", \"coverage\"]\n\n else:\n # Columns for case-control\n if self.options.methylFraction:\n cols = [\"refName\", \"tpl\", \"strand\", \"base\", \"score\", \"pvalue\", \"caseMean\", \"controlMean\", \"caseStd\", \"controlStd\", \"ipdRatio\", \"testStatistic\", \"coverage\", \"controlCoverage\", \"caseCoverage\", FRAC, FRAClow, FRACup]\n else:\n cols = [\"refName\", \"tpl\", \"strand\", \"base\", \"score\", \"pvalue\", \"caseMean\", \"controlMean\", \"caseStd\", \"controlStd\", \"ipdRatio\", \"testStatistic\", \"coverage\", \"controlCoverage\", \"caseCoverage\"]\n\n # Special cases for formatting columns of the csv\n handlers = dict()\n threeF = lambda x: \"%.3f\" % x\n\n handlers[\"refName\"] = lambda x: \"\\\"%s\\\"\" % x\n\n handlers[\"tpl\"] = lambda x: str(x.item() + 1)\n handlers[\"score\"] = lambda x: \"%d\" % x\n\n handlers[\"tMean\"] = threeF\n handlers[\"modelPrediction\"] = threeF\n handlers[\"caseMean\"] = threeF\n handlers[\"controlMean\"] = threeF\n handlers[\"ipdRatio\"] = threeF\n handlers[\"pvalue\"] = lambda x: \"%.3e\" % x\n\n handlers[\"controlStd\"] = threeF\n handlers[\"controlStd\"] = threeF\n handlers[\"tErr\"] = threeF\n\n # FIXME: remove this line later:\n handlers[\"Ca5C\"] = threeF\n\n handlers[FRAC] = threeF\n handlers[FRAClow] = threeF\n handlers[FRACup] = threeF\n\n print >>f, delim.join(cols)\n\n def fmt(rowData, colName):\n if not rowData.has_key(colName):\n return \"\"\n\n if handlers.has_key(colName):\n return handlers[colName](rowData[colName])\n else:\n return str(rowData[colName])\n\n try:\n while True:\n # Pull a list of record in from the producer\n itemList = (yield)\n\n for item in itemList:\n values = [fmt(item, col) for col in cols]\n print >>f, delim.join(values)\n\n except GeneratorExit:\n f.close()\n return\n except Exception as e:\n print e", "title": "" }, { "docid": "64d43479791909b770278ecf76dac402", "score": "0.5217452", "text": "def csv_file(self) -> None:\r\n self.create_parameters(self.type_checker(self.file[self.nft]))", "title": "" }, { "docid": "658ac8d51245fb54784a5ed0ada1435b", "score": "0.5216351", "text": "def run(self):\n # show the dialog\n self.dlg.show()\n \"\"\"\"To connect event to gui elements\"\"\"\n cbGC = self.dlg.comboGC\n cbSynthese = self.dlg.comboSynthese\n cbGcId = self.dlg.idGC\n cbSyntheseId = self.dlg.idSynthese\n cbState = self.dlg.cbState\n cbOpp = self.dlg.cbOpp\n # init combo\n self.initCb(cbGC, cbGcId,cbState)\n self.initCb(cbSynthese, cbSyntheseId,cbState)\n # buttons\n self.dlg.buttonFile.clicked.connect(self.searchFile) \n self.dlg.buttonFolder.clicked.connect(self.searchFolder) \n\n '''here we need to load opportunity list 
wehen user select id field to get opp values''' \n for el in [cbGcId, cbSyntheseId, cbState] :\n el.currentIndexChanged.connect(lambda: self.oppFiltering(cbGcId, cbSyntheseId, cbGC, cbSynthese, cbState, cbOpp))\n self.state = [] \n # Run the dialog event loop\n result = self.dlg.exec_()\n # See if OK was pressed\n if result:\n # Do something useful here - delete the line containing pass and\n self.createFile()\n # substitute with your code.\n pass", "title": "" }, { "docid": "755e06b90dfbd3a3aad1e35b7f555325", "score": "0.5212298", "text": "def linear_search(files, terms):", "title": "" }, { "docid": "101ffd77e8750a0ee3025095cfea19b0", "score": "0.52047545", "text": "def handle(self, *args, **options):\n path = args[0]\n student_filter = ['userid','ipaddress','dateadded','datelastupdated',\n 'sectionid', 'useridLastUpdated', \n 'adminEmailSent','stamp',\n 'Comments_length','Comments_textIsHTML']\n self.port_table(student_filter, '/home/pykun/4560/dbs/student.txt')", "title": "" }, { "docid": "2cd861f8d7103818e500dc82533ac376", "score": "0.51907563", "text": "def search_and_facet_compute_resources(cbc, menu, parser):\n print('\\n' + '-' * 21)\n print('Available criteria filters:\\n')\n print(*('appliance_uuid', 'eligibility', 'cluster_name', 'name',\n 'ip_address', 'installation_status', 'uuid', 'os_type',\n 'os_architecture'), sep='\\n')\n print('-' * 21)\n\n query = cbc.select(ComputeResource)\n\n use_filter = input('\\nWould you like to use criteria filters?: Y/n\\n')\n if use_filter.lower() in ['y', 'yes']:\n print('Each criteria accepts one or more space separated values.',\n 'Exmaple: --name ABCD --appliance_uuid 1234 5678')\n args = parser.parse_args(input('Enter criteria filters: ').split())\n if args.name:\n query.set_name(args.name)\n if args.os_type:\n query.set_os_type(args.os_type)\n if args.appliance_uuid:\n query.set_appliance_uuid(args.appliance_uuid)\n if args.cluster_name:\n query.set_cluster_name(args.cluster_name)\n if args.ip_address:\n query.set_ip_address(args.ip_address)\n if args.installation_status:\n query.set_installation_status(args.installation_status)\n if args.uuid:\n query.set_uuid(args.uuid)\n if args.os_architecture:\n query.set_os_architecture(args.os_architecture)\n if args.eligibility:\n query.set_eligibility(args.eligibility)\n\n print(*query)\n new_query = input('\\nMake anothery query? Y/n\\n')\n if new_query.lower() in ['y', 'yes']:\n search_and_facet_compute_resources(cbc, menu, parser)\n else:\n menu['0']['function_call']()", "title": "" }, { "docid": "5e41dd2b77ab201d995f7f7a6135ec1f", "score": "0.51882935", "text": "def main(args):\n\n results = []\n\n # Load the provided personal finance dataset\n\n if len(args) is 1:\n # Default to preformatted.csv if no file is provided as an argument\n project_root = Path(__file__).resolve().parent.parent\n default_location = project_root/\"datasets/preformatted.csv\"\n results = process_file(default_location)\n\n elif len(args) is 2:\n location = Path(args[1])\n if location.is_file():\n results = process_file(location)\n elif location.is_dir():\n print(\"Folder provided. 
Please provide a file.\")\n exit()\n else:\n print(\"Please provide a file as an argument.\")\n exit()\n\n else:\n print(\"Too many arguments provided!\")\n exit()", "title": "" }, { "docid": "59d206adf74dd17ae352ef1f4620817b", "score": "0.5187672", "text": "def main():\n filename = \"data.csv\"\n read(filename)", "title": "" }, { "docid": "a3167ad26bef4b20f61cfd72a35285f6", "score": "0.5180637", "text": "def nameCSV():\r\n while True:\r\n # get title and strip spaces\r\n title = input('What would you like to name your CSV file?: ').strip()\r\n # check if user filled .csv (otherwise, make sure to add it)\r\n title = title.rstrip('.csv') + '.csv'\r\n # double check with file name\r\n while True:\r\n response = input(f\"Are you sure that you want to name your file '{title}'?[Y/N]\")\r\n if response.upper() == 'Y':\r\n # check for unacceptable characters\r\n check = unacceptable_naming(title)\r\n if check[1]:\r\n print(f'Your file name contained illegal characters and has been renamed to the following: '\r\n f'{check[0]}')\r\n # notify before overriding\r\n if os.path.exists(check[0]):\r\n while True:\r\n cont = input(\r\n 'The file that you are about to create will override an existing file. Are you sure that'\r\n ' you would like to continue?[Y/N]:')\r\n if cont.upper() == 'Y':\r\n return check[0]\r\n elif cont.upper() == 'N':\r\n break\r\n else:\r\n print(f\"'{response}' is an invalid input. Please type Y or N.\")\r\n break\r\n else:\r\n return check[0]\r\n elif response.upper() == 'N':\r\n break\r\n else:\r\n print(f\"'{response}' is an invalid input. Please type Y or N.\")\r\n pass", "title": "" }, { "docid": "8a3bd342393b81fa1ff75c476a418986", "score": "0.517995", "text": "def cli(ctx, type, view, text):\n try:\n data = ctx.tp.search(type, text)\n except Exception as ex:\n ctx.logerr(ex)\n sys.exit(1)\n else:\n if view == 'table':\n ctx.log(ctx.tp.print_result_table(data))\n elif view == 'list':\n ctx.log(ctx.tp.print_result_list(type, data))", "title": "" }, { "docid": "d8c341690084cbeda050a32e7c2d790e", "score": "0.51750344", "text": "def handlerSelectLandUseLookup(self):\n file = unicode(QtGui.QFileDialog.getOpenFileName(\n self, 'Select Land Use Lookup Table', QtCore.QDir.homePath(), 'Land Use Lookup Table (*{0})'.format(self.main.appSettings['selectCsvfileExt'])))\n \n if file:\n self.lineEditLandUseLookup.setText(file)\n \n logging.getLogger(type(self).__name__).info('select file: %s', file)", "title": "" }, { "docid": "0f31d5874ca39018df19d19f7a68c894", "score": "0.5160416", "text": "def main():\n file_names = [\"nysdaq.csv\", \"nyse.csv\", \"amex.csv\"]\n for file_name in file_names:\n open_file(file_name)", "title": "" }, { "docid": "4b73293125bb4672717d399ad30af864", "score": "0.5154706", "text": "def search(self):\n # get the searchfiles (now a single walkthrough)\n searchfiles = self.get_search_files()\n if self.settings.verbose:\n searchdirs = sorted(list({sf.path for sf in searchfiles}))\n log('\\nDirectories to be searched ({0}):'.format(len(searchdirs)))\n for d in searchdirs:\n log(d)\n log('\\n\\nFiles to be searched ({0}):'.format(len(searchfiles)))\n for f in searchfiles:\n log(str(f))\n log(\"\")\n\n # TODO: concurrent.futures.ProcessPoolExecutor, e.g.\n # with concurrent.futures.ProcessPoolExecutor() as executor:\n # futures = [executor.submit(<func_name>, <arg>) for _ in range(10)]\n # for f in concurrent.futures.as_completed(futures):\n # print(f.result())\n # - OR -\n # nums = list(range(10))\n # # runs in parallel but returns results in list order\n # results = 
executor.map(<func_name>, nums)\n for sf in searchfiles:\n self.search_file(sf)", "title": "" }, { "docid": "b5261494d530faf65df7d2da7e74361a", "score": "0.51534235", "text": "def handle(self, *args, **options):\n csv_file = options['csv_file']\n\n locations_added = Location.objects.add_from_csv(csv_file)\n self.stdout.write('Process finished, {0} locations added'.format(locations_added))", "title": "" }, { "docid": "fda667108c788e0286f0213092816b5d", "score": "0.51533115", "text": "def read_csv(self):\n try:\n self.csvName + 'hello'\n except TypeError:\n print('Input file name must be a String type')\n logging.warning('Input file entered was not a String type')\n raise TypeError('Input file entered was not a String type')\n return None\n\n try:\n pd.read_csv(self.csvName)\n except FileNotFoundError:\n print('No file with given filename found')\n logging.debug('No file with given filename found')\n raise FileNotFoundError('No file with given filename found')\n return None\n\n headers = ['Time', 'Voltage']\n df = pd.read_csv(self.csvName, names=headers)\n self.csvDf = df", "title": "" }, { "docid": "174b902d4c0c48749bf6d71c360217a8", "score": "0.514914", "text": "def get_list_from_csv(csv_file, search_key, for_values, get_key):\n result = []\n with open(csv_file) as c_fh:\n reader = csv.DictReader(c_fh)\n for row in reader:\n if row[search_key] in for_values:\n result.append(row[get_key])\n return result", "title": "" }, { "docid": "cad76e315ab9dafcbb10e154278753a0", "score": "0.5129369", "text": "def search(self):\n raise NotImplementedError", "title": "" }, { "docid": "1f721358650e921b91407ed4dd468f83", "score": "0.51280946", "text": "def goto():\n i = int(input('which question you want to go: '))\n os.system(\"cls\" if os.name == \"nt\" else \"clear\")\n with open(r'./quiz_wise_questions/'+quiz_number+'.csv', 'r') as file:\n reader = csv.reader(file)\n row = list(reader)\n print(\"Roll No- \", rollno)\n print(\"Name- \", Name)\n print(\"Goto Question: (Press - Ctrl+Alt+g)\")\n print(\"Final Submit: (Press Ctrl+Alt+f)\")\n print(\"Export Database Into CSV: (Press Ctrl+Alt+e)\")\n print(\"Ques No \", row[i][0], sep=':-')\n print(row[i][1], \" ?\")\n print(\"Option 1) \", row[i][2])\n print(\"Option 2) \", row[i][3])\n print(\"Option 3) \", row[i][4])\n print(\"Option 4) \", row[i][5])\n print(\"Credits if correct option: \", row[i][7])\n print(\"Negative Marking: \", row[i][8])\n print(\"Is Compulsory: \", row[i][9])\n print(\"Press esc to continue!!\")\n pass", "title": "" }, { "docid": "8edaed5bfbe9b490ddb606bcb5d56605", "score": "0.51276356", "text": "def readCsv():\n # Read the job offers CSV file and fill a list with all its content\n jobOfferList = [] # Full list of offers\n with open(CSV_ROOT + 'technical-test-jobs.csv') as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=DELIMITER)\n header = True\n for row in csv_reader:\n if header: # Do not read the header\n header = False\n else:\n job = jobOffer.JobOffer(row[0], row[1], row[2], row[3], row[4]) # Create a JobOffer object\n jobOfferList += [job] # Add it to the full list\n \n # Read the job types CSV file and fill a list with all its content\n professionList = []\n with open(CSV_ROOT + 'technical-test-professions.csv') as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=DELIMITER)\n header = True\n for row in csv_reader:\n if header: # Do not read the header\n header = False\n else:\n job = profession.Profession(row[0], row[1], row[2]) # Create a JobType object\n professionList += [job] # Add it to the list\n \n 
return ([jobOfferList, professionList])", "title": "" }, { "docid": "74b7e82c8fdc54a5d8a86c29e1799919", "score": "0.51267004", "text": "def search_string_is_in_row(search_category, search_string, csv_row):\n\tcsv_title_index = 0; csv_year_index = 1; csv_author_index = 2\n\tif search_category == csv_title_index: \n\t\treturn title_is_in_row(search_string, csv_row[csv_title_index])\n\telif search_category == csv_year_index:\n\t\treturn row_year_is_in_bounds(search_string, csv_row[csv_year_index])\t\n\telif search_category == csv_author_index:\n\t\treturn author_is_in_row(search_string, csv_row[csv_author_index])\n\treturn False", "title": "" }, { "docid": "425a8238f68f3ef4ef571060498c52b9", "score": "0.5123759", "text": "def get_filters():\n\tprint('Hello! Let\\'s explore some US bikeshare data!\\n')\n\n\tcity = input('Please choose a city to analyze\\n')\n\tcity = city.lower()\n\twhile city != 'chicago' and city != 'washington' and city != 'new york city':\n\t\tcity = input('Please choose a valid city: either chicago, washington or new york city\\n')\n\t\tcity = city.lower()\n\telse:\n\t\tprint('Okay great! We\\'re going to look at stats for', city.capitalize(), '\\n')\n\n\t# next input month by name and we'll do the indexing in the load_data section\n\n\tmonth = input('Please enter a month from January to June to analyze, or type \"all\" to view all months.\\n')\n\tlist_of_valid_months = ['january', 'february', 'march', 'april', 'may', 'june', 'all']\n\tmonth = month.lower()\n\twhile month not in list_of_valid_months:\n\t\tmonth = input('Please choose a valid month, January though June, or \\'all\\'\\n')\n\t\tmonth = month.lower()\n\t\tif month == 'all':\n\t\t\tprint('Great, you\\'ve selected all months\\n')\n\t\telif month in list_of_valid_months:\n\t\t\tprint('Great, you\\'ve selected', month.capitalize())\n\t\t\tbreak\n\telse:\n\t\tprint('Great, you\\'ve selected', month.capitalize())\n\n\tday = input('Please choose a day of the week to analyze or type \"all\"\\n')\n\tlist_of_valid_days = ['monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday', 'all']\n\tday = day.lower()\n\twhile day not in list_of_valid_days:\n\t\tday = input('Please choose a valid day of the week or type \\'all\\'\\n')\n\t\tday = day.lower()\n\t\tif day == 'all':\n\t\t\tprint('Great you have selected all days of the week')\n\t\telif day in list_of_valid_days:\n\t\t\tprint('Great, you\\'ve selected', day.capitalize())\n\telse:\n\t\tprint('\\n')\n\t\tprint('Great, you\\'ve selected', day.capitalize())\n\n\tprint('-'*40)\n\treturn city, month, day", "title": "" }, { "docid": "8a468da6925ca97fb05a71ecc7c0ce1f", "score": "0.5113391", "text": "def extract_info(self, csv_file, request):\r\n data = pd.read_csv(csv_file)\r\n # Remove \"dontcare\" coded inputs\r\n request = {k:v for k,v in request.items() if v != 'dontcare'}\r\n options = {}\r\n for pref_type in request:\r\n # All the different unique options in a column\r\n options[pref_type] = data[pref_type].dropna().unique()\r\n for pref_type in request:\r\n data = self.__modify_data(data, pref_type, options, request[pref_type])\r\n # Shuffle the data\r\n data = data.sample(frac=1)\r\n # Prettify\r\n capt = lambda m: \" \".join([word.capitalize() for word in m.split(\" \")])\r\n data[\"restaurantname\"] = data[\"restaurantname\"].apply(capt)\r\n data[\"phone\"] = data[\"phone\"].apply(self.__fix_phone_number)\r\n data[\"addr\"] = data[\"addr\"].apply(self.__add_comma)\r\n # Can't capitalize a float\r\n data[\"addr\"] = data[\"addr\"].apply(lambda m: 
capt(m) if isinstance(m, str) else np.nan)\r\n        return data", "title": "" }, { "docid": "2c25e42fde20d90e918af13728b72488", "score": "0.51049495", "text": "def search(self, entry, types):\n        # Clearing the results dictionary\n        self.__results = {}\n        # Iterating through the types in the passed variable 'types'\n        for atype in types:\n            IDList = []\n            for record in self.__entities[atype]:\n                # If the user did not input any text in the search bar then return all records of atype\n                if entry == \"\":\n                    IDList.append(record)\n                else:\n                    # Get the list of search related attributes from 'record'\n                    searchList = self.__entities[atype][record].getSearch()\n                    # Convert this list to a string with each item separated by a comma\n                    key = ' '.join(searchList)\n                    \"\"\"If the lowercase version of the passed variable 'entry' is in the search related attributes, \n                    append the ID of that record to IDList\"\"\"\n                    if entry.lower() in key.lower():\n                        IDList.append(record)\n            \"\"\"Create an entry in the results dictionary with key of 'atype' and value of the list of matching record \n            IDs for that type\"\"\"\n            self.__results[atype] = IDList\n        return self.__results", "title": "" }, { "docid": "9a30bb24c9df73e306f337d6ed0255da", "score": "0.5104087", "text": "def get_filters():\n    print('Hello! Let\\'s explore some US bikeshare data!')\n    # TO DO: get user input for city (chicago, new york city, washington). 
HINT: Use a while loop to handle invalid inputs", "title": "" }, { "docid": "154c872f60c6fa1b1c966eafd40f8c77", "score": "0.51007843", "text": "def slsearch(tablename='', outfile='', freqrange=[84, 90], species=[''], reconly=False, chemnames=[''], qns=[''], intensity=[-1], smu2=[-1], loga=[-1], el=[-1], eu=[-1], rrlinclude=True, rrlonly=False, verbose=False, logfile='\"\"', append=False):\n if type(freqrange)==float: freqrange=[freqrange]\n if type(species)==str: species=[species]\n if type(chemnames)==str: chemnames=[chemnames]\n if type(qns)==str: qns=[qns]\n if type(intensity)==float: intensity=[intensity]\n if type(smu2)==float: smu2=[smu2]\n if type(loga)==float: loga=[loga]\n if type(el)==float: el=[el]\n if type(eu)==float: eu=[eu]\n\n#\n# The following is work around to avoid a bug with current python translation\n#\n mytmp = {}\n\n mytmp['tablename'] = tablename\n mytmp['outfile'] = outfile\n mytmp['freqrange'] = freqrange\n mytmp['species'] = species\n mytmp['reconly'] = reconly\n mytmp['chemnames'] = chemnames\n mytmp['qns'] = qns\n mytmp['intensity'] = intensity\n mytmp['smu2'] = smu2\n mytmp['loga'] = loga\n mytmp['el'] = el\n mytmp['eu'] = eu\n mytmp['rrlinclude'] = rrlinclude\n mytmp['rrlonly'] = rrlonly\n mytmp['verbose'] = verbose\n mytmp['logfile'] = logfile\n mytmp['append'] = append\n pathname='file://' + xmlpath( ) + '/'\n trec = casac.utils().torecord(pathname+'slsearch.xml')\n\n casalog.origin('slsearch')\n if trec.has_key('slsearch') and casac.utils().verify(mytmp, trec['slsearch']) :\n result = task_slsearch.slsearch(tablename, outfile, freqrange, species, reconly, chemnames, qns, intensity, smu2, loga, el, eu, rrlinclude, rrlonly, verbose, logfile, append)\n\n else :\n result = False\n return result", "title": "" }, { "docid": "7ccd4b63b38d3b33db89749cab4a6aac", "score": "0.51005465", "text": "def search():\n filename = request.args.get('filename')\n if filename is None:\n return render_template('search.html', filename=\"\")\n\n return render_template('search.html', filename=filename)", "title": "" }, { "docid": "fb2235c5b08f4503ba17ddb667e14125", "score": "0.5095346", "text": "def run_csv_input(self):\n self.txt.setText(\"\") # clear the display\n with open(self.csv_file) as csvfile:\n # The index of each field in the CSV file\n lat_idx, lon_idx, speed_idx = None, None, None\n \n reader = csv.reader(csvfile)\n headers = next(reader, None) \n \n for field_idx in range(len(headers)): \n if 'lat' in headers[field_idx].lower(): \n lat_idx = field_idx\n elif 'lon' in headers[field_idx].lower(): \n lon_idx = field_idx\n elif 'speed' in headers[field_idx].lower():\n speed_idx = field_idx\n \n # if we can't find lat or lon in the header, exit\n if lat_idx is None or lon_idx is None:\n self.txt.setText(\"cannot find lat/lon header fields in {}\".format(self.csv_file))\n return\n \n # read the CSV input sequentially. 
After the first line of coordinates has been read \n # we can start calculating distances between the current line and the previous.\n past_first = False\n # all of our lists for storing information...\n lats, lons, ms, nmis, bearings, speeds, travel_times, types, errors = [], [], [], [], [], [], [], [], []\n for row in reader: \n lats.append(convert_to_dd(row[lat_idx]))\n lons.append(convert_to_dd(row[lon_idx]))\n \n if speed_idx:\n try:\n speeds.append(float(row[speed_idx]))\n except Exception as err:\n print(\"could not parse speed from row: {}\".format(row))\n speeds.append(None)\n print(err)\n \n if past_first:\n start_lat, start_lon, stop_lat, stop_lon = lats[-2], lons[-2], lats[-1], lons[-1]\n if speed_idx:\n speed = speeds[-1]\n else:\n speed = None\n distance, nmi, bearing, system, error, travel_time = self.process(start_lat, start_lon, stop_lat, stop_lon, speed)\n ms.append(distance)\n nmis.append(nmi)\n bearings.append(bearings)\n types.append(system)\n travel_times.append(travel_time)\n errors.append(error)\n else:\n past_first = True\n\n self.txt.append(f\"\\nTotal distance: {sum(nmis):.3f} nmi\")\n if travel_times:\n self.txt.append(f\"Total travel time: {get_travel_time(sum(travel_times))}\")\n \n if self.output_file:\n with open(self.output_file, 'w') as outfile:\n output_header = \"start_latitude, start_longitude, end_latitude, end_longitude, type, distance (m), distance (nmi), bearing (degrees)\"\n if speed_idx:\n output_header += \", travel time\"\n outfile.write(output_header + \"\\n\")\n for i in range(len(ms)):\n output_csv_line = f\"{lats[i]}, {lons[i]}, {lats[i+1]}, {lons[i+1]}, {types[i]}, {ms[i]:.1f}, {nmis[i]:.3f}, {bearings[i]:.2f}\"\n if speed_idx:\n output_csv_line += f\", {get_travel_time(travel_times[i])}\"\n\n outfile.write(output_csv_line + \"\\n\")", "title": "" }, { "docid": "af1a292091f0c1a45a370eea0f573382", "score": "0.5093255", "text": "def search_string(self, field):\n self.display_list = []\n\n search_match = input(\"do you want to search {} for an exact match Y/[N]: \".format(field))\n if search_match.lower() == 'y':\n match = True\n else:\n match = False\n\n find = input(\"enter your search terms: \")\n\n for entry in self.active_log:\n if match:\n if re.match(find, entry[field], flags=re.IGNORECASE):\n self.display_list.append(entry)\n else:\n if re.search(find, entry[field], flags=re.IGNORECASE):\n self.display_list.append(entry)\n\n self.display()", "title": "" }, { "docid": "1120b36df7074597a5ecd5c2aec94083", "score": "0.5090677", "text": "def search(self):\r\n\t\t# there should be a grammar defined and some lexer/parser.\r\n\t\t# instead of this quick-and-dirty implementation.\r\n\r\n\t\tsafeEnvDict = {\r\n\t\t\t'freeSearch': self.freeSearch,\r\n\t\t\t'extentSearch': self.extentSearch,\r\n\t\t\t'indexSearch': self.indexSearch\r\n\r\n\t\t}\r\n\t\tfor col in self._dataFrame.columns:\r\n\t\t\tsafeEnvDict[col] = self._dataFrame[col]\r\n\r\n\t\ttry:\r\n\t\t\tsearchIndex = eval(self._filterString, {\r\n\t\t\t\t\t\t\t '__builtins__': None}, safeEnvDict)\r\n\t\texcept NameError as err:\r\n\t\t\treturn [], False\r\n\t\texcept SyntaxError as err:\r\n\t\t\treturn [], False\r\n\t\texcept ValueError as err:\r\n\t\t\t# the use of 'and'/'or' is not valid, need to use binary operators.\r\n\t\t\treturn [], False\r\n\t\texcept TypeError as err:\r\n\t\t\t# argument must be string or compiled pattern\r\n\t\t\treturn [], False\r\n\t\treturn searchIndex, True", "title": "" }, { "docid": "2a87f9fbd87860933bf8b0e9f4dc4384", "score": "0.50863165", "text": "def 
on_ok_click(self):\r\n # get line edit text\r\n export_csv_dir = self.export_csv_dir_entry.text().replace(\"\\\\\", \"/\")\r\n export_csv_name_entry_text = self.export_csv_name_entry.text()\r\n csv_ext = \".csv\" if not export_csv_name_entry_text.lower().endswith(\".csv\") else \"\"\r\n export_csv_path = os.path.join(export_csv_dir, export_csv_name_entry_text + csv_ext).replace(\"\\\\\", \"/\")\r\n\r\n dir_path_obj = Path(export_csv_dir)\r\n csv_path_obj = Path(export_csv_path)\r\n\r\n dir_exists = dir_path_obj.exists()\r\n dir_is_dir = dir_path_obj.is_dir()\r\n\r\n csv_exists = csv_path_obj.exists() and csv_path_obj.is_file()\r\n csv_format_valid = export_csv_path.lower().endswith(\".csv\") and not re.match(r'\\S*\\/ *.csv', export_csv_path)\r\n\r\n dir_valid = dir_exists and dir_is_dir\r\n csv_valid = csv_exists and csv_format_valid\r\n\r\n valid = dir_valid and csv_valid\r\n\r\n if valid:\r\n self.export_csv_dir = export_csv_dir\r\n self.export_csv_path = export_csv_path\r\n self.refresh_UI()\r\n if csv_exists:\r\n valid = display_yes_no_message(self, \"Provided .csv file already exists. Replace?\")\r\n if valid: # still\r\n if self.export_to_csv(self.export_csv_path):\r\n self.close()\r\n\r\n if not dir_valid:\r\n if not dir_exists:\r\n display_warning_message(self, \"Provided directory does not exist.\")\r\n elif not dir_is_dir:\r\n display_warning_message(self, \"Provided directory format is invalid.\")\r\n self.export_csv_dir = \"\"\r\n if not csv_valid:\r\n if dir_valid and not csv_exists and csv_format_valid and \\\r\n display_yes_no_message(self, \"Create file at \" + export_csv_path + \"?\"):\r\n try:\r\n csv_file = open(export_csv_path, \"w+\")\r\n csv_file.close()\r\n except IOError as error:\r\n display_warning_message(self, \"Failed to create provided .csv file: \" + export_csv_path)\r\n else:\r\n self.export_csv_dir = export_csv_dir\r\n self.export_csv_path = export_csv_path\r\n self.refresh_UI()\r\n if self.export_to_csv(self.export_csv_path):\r\n self.close()\r\n elif not csv_format_valid:\r\n display_warning_message(self, \"Be sure to specify a name for the .csv file.\")\r\n self.export_csv_path = \"\"\r\n\r\n self.refresh_UI()", "title": "" }, { "docid": "a3e7e96f6c627f440d7d370dbab4b3e6", "score": "0.50795966", "text": "def get_filters():\n\n # static strings to be used as prompt when request for input\n city_prompt = \"\\nPlease choose city(ies) (separated by comma) would you like to see data for: Chicago, New York City, Washington\\n>> \"\n month_prompt = \"\\nPlease choose which month(s) from January to June (separated by comma) or 'all' you would like to see data for:\\n>> \"\n dow_prompt = \"\\nPlease choose the day(s) of the week from Sunday to Saturday (separated by commaa) or 'all' you would like to see data for:\\n>> \"\n\n print(\"\\n\\n\", '*' * 20, \"Hello! Let's explore some US cities (Chicago, NYC, and Washington DC)bikeshare data!\", '*' * 20, \"\\n\")\n print(\"\\n>>>> Type 'end' at any time if you would like to exit the program. 
<<<<\\n\")\n print('=' * 80)\n\n # Collect user input for city\n while True:\n city = str(input(city_prompt))\n city = [i.strip().lower() for i in city.split(',')]\n\n # if input is end, terminate the program\n if 'end' in city:\n print('*' * 20, \"Thank you, goodbye.\", '-' * 20)\n raise SystemExit\n else:\n if list(filter(lambda x: x in CITY_DATA.keys(), city)) == city:\n user_confirmation = validate(city, 'city(ies)')\n if user_confirmation == 'Y':\n break\n else:\n print(\"\\nLet's try this again!\")\n else:\n print(\"\\n --- Please enter valid option. ---- \\n\\n\\n\")\n\n # Collect user input for Month\n print('=' * 80)\n while True:\n month = str(input(month_prompt))\n month = [i.strip().lower() for i in month.split(',')]\n # if input is end, terminate the program\n if 'end' in month:\n print('*' * 20, \"Thank you, goodbye.\", '-' * 20)\n raise SystemExit\n elif 'all' in month:\n month = months\n user_confirmation = validate(month, 'Month')\n if user_confirmation == 'Y':\n break\n else:\n print(\"\\nLet's try this again!\")\n else:\n if list(filter(lambda x: x in months, month)) == month:\n user_confirmation = validate(month, 'Month')\n if user_confirmation == 'Y':\n break\n else:\n print(\"\\nLet's try this again!\")\n else:\n print(\"\\n --- Please enter valid option. ---- \\n\\n\\n\")\n\n # Collect user input for Day(s) of the week\n print('=' * 80)\n while True:\n day = str(input(dow_prompt))\n day = [i.strip().lower() for i in day.split(',')]\n\n # if input is end, terminate the program\n if 'end' in day:\n print('*' * 20, \"Thank you, goodbye.\", '-' * 20)\n raise SystemExit\n elif 'all' in day:\n day = weekdays\n user_confirmation = validate(day, 'Day of the Week')\n if user_confirmation == 'Y':\n break\n else:\n print(\"\\nLet's try this again!\")\n else:\n if list(filter(lambda x: x in weekdays, day)) == day:\n user_confirmation = validate(day, 'Day of the Week')\n if user_confirmation == 'Y':\n break\n else:\n print(\"\\nLet's try this again!\")\n else:\n print(\"\\n --- Please enter valid option. 
---- \\n\")\n\n # Print Summary\n print('=' * 80)\n print(\"\\n\\n\", '=' * 40, \"Summary of your selection:\", '=' * 40, \"\\n\")\n print(\"City(ies): \", city, \"\\n\")\n print(\"Month(s): \", month, \"\\n\")\n print(\"Day(s) of the week: \", day, \"\\n\")\n print('=' * 100)\n return city, month, day", "title": "" }, { "docid": "76074f2b71145444004d7e469f2629a5", "score": "0.50766665", "text": "def gui_ask_save_csv(output_data: list):\n while True:\n try:\n # Build a dialogue box and make sure the window comes to the front of the desktop\n root = tk.Tk()\n root.withdraw()\n root.attributes(\"-topmost\",True)\n # Ask the user for the file name using a dialogue box\n output_file = tk.filedialog.asksaveasfilename(defaultextension=\".csv\", filetypes=( (\"Comma Separated\", \"*.csv\"),(\"All Files\", \"*.*\") ))\n # Kill the tk window\n root.destroy()\n\n # Open the file and dump the contents of result_table into it\n with open(output_file, \"w\", newline=\"\") as out_file: # newline=\"\" is required with python3 to avoid a \"double newline\"\n writer = csv.writer(out_file, delimiter=\",\") \n for row in output_data:\n writer.writerow(row)\n except Exception as error:\n print(error)\n print(\"\\n :: Choose a new location :: \")\n else:\n break\n\n print(\"\\n\\nOutput file can be found here:\\n \",output_file)\n\n print(\"\\n\\n\\nPress ENTER to continue.\")\n input()", "title": "" }, { "docid": "e98d88d90e3e66a93316614535a17d09", "score": "0.5076338", "text": "def quicksearch_options(button, event):\n if event.type != gtk.gdk.BUTTON_PRESS:\n return False\n menu=gtk.Menu()\n item=gtk.MenuItem(_(\"Launch search\"))\n item.connect('activate', self.do_quicksearch)\n if not self.quicksearch_entry.get_text():\n item.set_sensitive(False)\n menu.append(item)\n item=gtk.CheckMenuItem(_(\"Ignore case\"))\n item.set_active(config.data.preferences['quicksearch-ignore-case'])\n item.connect('toggled', lambda i: config.data.preferences.__setitem__('quicksearch-ignore-case', i.get_active()))\n menu.append(item)\n\n item=gtk.MenuItem(_(\"Searched elements\"))\n item.connect('activate', update_quicksearch_sources)\n menu.append(item)\n\n menu.show_all()\n menu.popup(None, None, None, 0, gtk.get_current_event_time())\n return True", "title": "" }, { "docid": "626cefd2c8d123a8ac032f4665b02d96", "score": "0.5059322", "text": "def get_filters():\n print('Hello! Let\\'s explore some US bikeshare data!')\n # get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n \n check = False \n\n while True:\n city = str(input(\"\\nChoose which cities you want to explore (chicago, new york city, washington): \").strip().lower())\n if city not in (\"chicago\", \"new york city\", \"washington\"):\n print(\"\\nSorry!That\\'s not an option. Please try again\")\n continue\n else:\n print(\"\\nGood choice! It looks like you want to see data for: '{}' \".format(city.title()))\n check_option()\n break\n # get user input for month (all, january, february, ... , june)\n \n while True:\n month = str(input(\"From JANUARY to JUNE!Type the name of the month you want to filter ? (Use commas to list more than one):\").strip().lower())\n \n if month not in (\"january\", \"february\", \"march\", \"april\", \"may\", \"june\", \"all\"):\n print(\"\\nSorry! That\\'s not an option. Please type in month name(or \\\"all\\\" to select all of them)\")\n continue\n else:\n print(\"\\nOK! 
Confirm that you have chosen to filter by: '{} \".format(month.title()))\n check_option()\n break\n \n while True:\n day = str(input(\"\\nChoose a day of the week and write to filter by:\").strip().lower())\n \n if day not in (\"monday\", \"tuesday\", \"wednesday\", \"thursday\", \"friday\", \"saturday\" , \"sunday\", \"all\"):\n print(\"Sorry. Please type in valid day (i.e. Saturday) or \\\"all\\\" of them to select everyday:\")\n continue \n else:\n print(\"\\nOK! Confirm that you have chosen to filter by: '{}' \".format(day.title()))\n check_option()\n break\n \n \n # get user input for day of week (all, monday, tuesday, ... sunday)\n print(\"\\nYou selected '{}' as city, '{}' as month, and '{}' as day. \\nFiltering by your choices....\".format(city.title(), month.title(), day.title()))\n print()\n print('-'*40)\n return city, month, day", "title": "" }, { "docid": "a534c18be9d806605c037dcb3fe228d9", "score": "0.50479585", "text": "def search(self, *args, **kwargs) -> Any:\n pass", "title": "" }, { "docid": "7756cb6df47aacde18ce9581d95a47f4", "score": "0.5045136", "text": "def take_file_inputs(inPointsCSV,inStatesCSV):\n \n#check file type: https://docs.python.org/2/library/mimetypes.html\n #print \"Ingesting CSV files\"\n \n with open(WORKDIR+INSTATESCSV) as file:\n state_reader(file)\n #print \"States complete\"\n poly_valid(statesList)\n\n with open(WORKDIR+INPOINTSCSV) as file:\n point_reader(file)\n #print \"Points complete\"\n point_valid(pointsList)", "title": "" }, { "docid": "d622273c1baec899bbc3b0e9ecad8532", "score": "0.50368327", "text": "def generate_search(self, req_dict):\n\n\t\tsearches = {}\n\n\t\tfor arg in self.required_get_args_choose_one:\n\t\t\tif arg in req_dict:\n\t\t\t\t(key, suffix) = self.split_arg(arg)\n\n\t\t\t\tif key in searches:\n\t\t\t\t\traise RestClientError(\"You can pass only one variation of %s in a request!\" % key)\n\n\t\t\t\tif key == 'street_number': searches[key] = StreetNumberSearch(req_dict[key + suffix])\n\t\t\t\telif key == 'postal_code': searches[key] = AccountPaySearch(key, 'POSTAL_ZIP_CODE', req_dict[key + suffix], False)\n\t\t\t\telif key == 'state_province': searches[key] = AccountPaySearch(key, key.upper(), req_dict[key + suffix], True)\n\t\t\t\telif key == 'city': searches[key] = AccountPaySearch(key, key.upper(), req_dict[key + suffix], True)\n\t\t\t\telif key == 'country': searches[key] = AccountPaySearch(key, key.upper(), req_dict[key + suffix], True)\n\t\t\t\telif key == 'affiliate_name': searches[key] = AffiliateNameSearch(req_dict[key + suffix], suffix)\n\t\t\t\telif key == 'username': searches[key] = UserNameSearch(key, 'ACCOUNT_USER', 'au', 'USER_NAME', req_dict[key + suffix], suffix, True)\n\t\t\t\telif key == 'account_number': searches[key] = AccountNumberSearch(key, 'ACCOUNT', 'a', key.upper(), req_dict[key + suffix], suffix, 'none')\n\t\t\t\telif key == 'account_did': searches[key] = DefaultSearch(key, 'DID_NUMBERS', 'dn', 'PHONE_NUMBER', req_dict[key + suffix], suffix, 'none')\n\t\t\t\telif key == 'contact_telephone': searches[key] = DefaultSearch(key, 'ACCOUNT_USER', 'au', key, req_dict[key + suffix], suffix, 'none')\n\t\t\t\telif key == 'email_address': searches[key] = DefaultSearch(key, 'ACCOUNT_USER', 'au', key, req_dict[key + suffix], suffix, 'lower')\n\t\t\t\telif key == 'affiliate_refer_code': searches[key] = DefaultSearch(key, 'ACCOUNT_SUBSCRIBE_INFO', 'asi', key, req_dict[key + suffix], suffix, 'none')\n\t\t\t\telif key == 'mac_address': searches[key] = DefaultSearch(key, 'DEVICE_UNIT', 'du', 'DEVICE_NAME', 
req_dict[key + suffix], suffix, 'none')\n\t\t\t\telif key == 'order_number': searches[key] = DefaultSearch(key, 'PRODUCT_ORDERED', 'po', key, req_dict[key + suffix], suffix, 'none')\n\t\t\t\telif key == 'last_name': searches[key] = DefaultSearch(key, 'ACCOUNT_USER', 'au', key, req_dict[key + suffix], suffix, 'lower')\n\t\t\t\telif key == 'account_name': searches[key] = DefaultSearch(key, 'ACCOUNT', 'a', key, req_dict[key + suffix], suffix, 'none')\n\t\t\t\telif key == 'first_name': searches[key] = DefaultSearch(key, 'ACCOUNT_USER', 'au', key, req_dict[key + suffix], suffix, 'lower')\n\t\t\t\telif key == 'partner_id': searches[key] = DefaultSearch(key, 'ACCOUNT', 'a', key, req_dict[key + suffix], suffix, 'none')\n\t\t\t\telse: raise RestClientError(\"Encountered a key, %s, where the search function is undefined!\" % key)\n\n\t\tsearch_criteria = SearchCriteria()\n\n\t\tfor key in searches:\n\t\t\tsearches[key].process(search_criteria)\n\n\t\t(search_criteria.lower_bound, search_criteria.upper_bound) = self.get_bounds(req_dict[\"offset\"], req_dict[\"limit\"])\n\n\t\treturn search_criteria", "title": "" }, { "docid": "1ee01c2f4515f3ec7a112c4bb628bbe0", "score": "0.50233895", "text": "def run():\r\n # running function introduction\r\n introduction()\r\n\r\n # running function web_scraper\r\n # retrieving filename\r\n filename = web_scraper()\r\n\r\n print(\"Search finished! Thank you for using the newegg.com web-scraper. \\nYour results have been written to a CSV file! \\nYour CSV file is called \" + str(filename) + \"! Your file directory is: \\n\" + os.getcwd() + \"\\\\products.csv\")", "title": "" }, { "docid": "fa80fa408ebe8cb462f131dd6be92cb1", "score": "0.50124", "text": "def search(filename, term, exact, display, max, species, verbose):\n configure_logging(verbose)\n LOG = get_logger()\n LOG.info(\"Search database: {}\".format(filename))\n LOG.debug(\"Term: {}\".format(term))\n LOG.debug(\"Exact: {}\".format(exact))\n LOG.debug(\"Format: {}\".format(display))\n LOG.debug(\"Max: {}\".format(max))\n LOG.debug(\"Species: {}\".format(species))\n\n search_database.DATABASE = filename\n\n maximum = max if max >= 0 else None\n\n tstart = time.time()\n result, status = search_database.search(filename, term, species, exact, False, maximum)\n tend = time.time()\n\n LOG.debug(\"Num Results: {}\".format(result.num_results))\n count = 0\n\n if status.error:\n print(\"Error occurred: {}\".format(status.message))\n sys.exit(-1)\n\n if len(result.matches) == 0:\n print(\"No results found\")\n sys.exit()\n\n headers = [\"ID\", \"SYMBOL\", \"POSITION\", \"MATCH_REASON\", \"MATCH_VALUE\"]\n\n if display in ('tab', 'csv'):\n delim = '\\t' if display == 'tab' else ','\n print(delim.join(headers))\n for match in result.matches:\n line = list()\n line.append(match.ensembl_gene_id)\n line.append(match.symbol)\n line.append(\"{}:{}-{}\".format(match.chromosome, match.position_start, match.position_end))\n line.append(match.match_reason)\n line.append(match.match_value)\n print(delim.join(map(str, line)))\n count += 1\n if count >= max > 0:\n break\n elif display == 'json':\n tbl = []\n for match in result.matches:\n line = list()\n line.append(match.ensembl_gene_id)\n line.append(match.symbol if match.symbol else '')\n line.append(\"{}:{}-{}\".format(match.chromosome, match.position_start, match.position_end))\n line.append(match.match_reason)\n line.append(match.match_value)\n tbl.append(dict(zip(headers, line)))\n count += 1\n if count >= max > 0:\n break\n print(json.dumps({'data': tbl}, indent=4))\n 
else:\n tbl = []\n for match in result.matches:\n line = list()\n line.append(match.ensembl_gene_id)\n line.append(match.symbol)\n line.append(\"{}:{}-{}\".format(match.chromosome, match.position_start, match.position_end))\n line.append(match.match_reason)\n line.append(match.match_value)\n tbl.append(line)\n count += 1\n if count >= max > 0:\n break\n print(tabulate(tbl, headers))\n\n LOG.info(\"Search time: {}\".format(format_time(tstart, tend)))", "title": "" }, { "docid": "247cbcfefc0cb91cee241d45a56958e6", "score": "0.5011512", "text": "def test_get_query_taxon_from_csv(): # ***Incomplete test\n ##########################\n # Arrange.\n query_filename = \"query_filename\"\n\n ##########################\n # Act.\n #x = get_query_taxon_from_csv(query_filename)\n\n ##########################\n # Assert.\n assert True == True # ***Temporary.", "title": "" }, { "docid": "583aa1e30a2b1820e7a89a9346c83333", "score": "0.5007091", "text": "def get_filters():\n print('\\nHello! Let\\'s explore some US bikeshare data!')\n # get user input for city (chicago, new york city, washington)\n city_index = get_user_input('city', CITY_OPTIONS, range(1, len(CITY_OPTIONS)))\n\n # get user input for month (all, january, february, ... , june)\n month_index = get_user_input('month', MONTH_OPTIONS, range(len(MONTH_OPTIONS)))\n\n # get user input for day of week (all, monday, tuesday, ... sunday)\n day_index = get_user_input('day of week', DAY_OPTIONS, range(len(DAY_OPTIONS)))\n\n print(LINE)\n return city_index, month_index, day_index", "title": "" }, { "docid": "2138909e7ab26b1357c522f8ecf8fed1", "score": "0.5002233", "text": "def openResults(filename,unusedInCsv=False):\n if(not unusedInCsv):\n # Opens the requested file and extracts the data\n with open(filename, \"r\") as file:\n reader = csv.reader(file)\n dividedParcels = list(reader)\n\n spaceList = dividedParcels[0]\n dividedParcels = dividedParcels[1:]\n return spaceList, dividedParcels\n else:\n # Opens the requested file and extracts the data\n with open(filename, \"r\") as file:\n reader = csv.reader(file)\n dividedParcels = list(reader)\n\n unusedParcels = dividedParcels[-1]\n dividedParcels = dividedParcels[1:-1]\n return unusedParcels, dividedParcels", "title": "" }, { "docid": "c661d52e114b4b288082c9d15e93428c", "score": "0.50004387", "text": "def main():\n if len(sys.argv) == 1:\n run(get_Rdt_jokes())\n elif len(sys.argv) == 2:\n format = sys.argv[1].split(\".\", 1)\n if len(format) != 2 or (len(format) == 2 and format[1] != \"csv\"):\n print(\"Jokes not provided in CSV file. \")\n sys.exit()\n else:\n run(read_csv(sys.argv[1]))\n else:\n print(\"Too many command line arguments. 
\")\n sys.exit()", "title": "" }, { "docid": "7cb38dbf27e9598ebf6ccee295e4fa64", "score": "0.49946225", "text": "def search(self, caseno=\"\", court=\"\", repno=\"\", supplier=\"\"):\n driver = self.driver\n #Click search tab\n ##driver.find_element_by_css_selector(\"span\").click()\n self.ccr_tabs(click_link=\"Search For Claims\")\n\n #Complete search fields\n if caseno:\n driver.find_element_by_id(\"caseNumber\").clear()\n driver.find_element_by_id(\"caseNumber\").send_keys(caseno)#\"T20132011\"\n if court:\n Select(driver.find_element_by_id(\"court\")).select_by_visible_text(court)#\"Basildon (461)\"\n if repno:\n driver.find_element_by_id(\"representationOrderNumber\").clear()\n driver.find_element_by_id(\"representationOrderNumber\").send_keys(repno)\n if supplier:\n driver.find_element_by_id(\"supplierID\").clear()\n driver.find_element_by_id(\"supplierID\").send_keys(supplier)\n\n #Click the search button\n driver.find_element_by_xpath(\"//input[@value='Search']\").click()\n\n #Wait for sarch results\n WebDriverWait(driver,10).until(lambda driver: \"Search Results\" in driver.page_source\n or \"No claims found\" in driver.page_source,driver)\n\n #If we've results examine them\n if \"Search Results\" in driver.page_source:\n print \"Search has results as follows:\"\n #Loop to move through any multi-page results\n keep_going = True\n while keep_going:\n #Examine dispalyed results\n self.search_results()\n #Is there a \"Next\" button\n next_buttons = [e for e in driver.find_elements_by_class_name(\"button\") if e.get_attribute(\"value\")==u\"Next\"]\n ##print \"nb\", next_buttons\n #If next button click it to advance to next page, otherwise exit loop\n if next_buttons:\n next_buttons[0].click()\n else:\n keep_going = False", "title": "" }, { "docid": "c1837316011e29e7a87aca61cc72704a", "score": "0.49846646", "text": "def search_place( data ):\n target= None\n print( \" < 1 > City, Town, Province, State or Country\" )\n print( \" < 2 > Latitude and Longitude\" )\n z= input( \"CHOICE? \" )\n if z == \"1\":\n target= search_name( data )\n elif z == \"2\":\n target= search_lat_lon( data )\n return target", "title": "" }, { "docid": "03c3c16ccc66ab0df633a71a66f9df52", "score": "0.49843073", "text": "def get_filters():\n print('Hello! Let\\'s explore some US bikeshare data!')\n # get user input for city (chicago, new york city, washington). 
HINT: Use a while loop to handle invalid inputs\n while True:\n try:", "title": "" }, { "docid": "dd0e961ac28d3ced5c803803af276d90", "score": "0.4975195", "text": "def search_by_url(self) -> None:\n url = input(Colors.p.value + \"Please enter URL:\\n\" + Colors.end.value)\n # check if the given url is valid\n if not check_valid_url(url):\n print_err(\"Incorrect url - check yourself!\")\n self.search_by_url()\n else:\n # present play options for the chosen series\n self.__player.goto(series_num=None, url=url)\n self.play_options()", "title": "" }, { "docid": "fcc2c4bd8878affe4a91e9048e3db511", "score": "0.4974744", "text": "def main():\n # takes a directory name as its command line argument\n dir_name = sys.argv[1]\n\n # creates stop_word hash table\n stop_table = HashTable()\n stop_table = import_stopwords(\"stop_words.txt\", stop_table)\n\n # create an instance of SearchEngine by passing the directory name\n search_engine = SearchEngine(dir_name, stop_table)\n\n # enter an infinite loop\n print(\"Enter 'q' to exit program\")\n print(\"Enter 's:{query}' to search\")\n while True:\n\n # prompt user for input\n raw_query = str(input(\"Enter query: \"))\n\n # if input is \"q\"\n if raw_query == \"q\":\n break\n\n elif raw_query[0:2] == \"s:\":\n search_engine.search(raw_query[2:])", "title": "" }, { "docid": "02f766fcbfb0df68ae94fd292860cecb", "score": "0.49710763", "text": "def execute_search():\n tw = twitter.Twitter(CATALOG_FILENAME, STOP_WORDS_FILENAME) \n logging.info(\"[Main] Initializing ...\")\n tw.load_tweets_and_load_index()\n logging.info(\"[Main] Initialized. %s docs loaded.\", \"{:,}\".format(tw.tweets_count()))\n\n query = None\n while True:\n query = input(QUERY_INPUT_MESSAGE)\n if query == '':\n continue\n if query == 'quit':\n break\n\n search_results = tw.search_tweets(query)\n print(search_results)", "title": "" }, { "docid": "62b78e4ea54af89a4907ffdec719aaea", "score": "0.49678212", "text": "def get_file_list(dir_path=\"data/simulation/sim_data/\",\n str_has=['sim'], str_inc=['0001', '0002'], ftype='.csv',\n interact=True):\n import os.path\n from os import listdir\n import tkinter\n from tkinter.filedialog import askdirectory\n root = tkinter.Tk() # this will help control file dialog boxes!\n\n # Check if the path specified includes at least 1 file of the file type\n success, err_msg = check_dir_path(dir_path, [ftype], 1, False)\n\n while not success and len(dir_path):\n # If the \"default\" directory failed the check,\n # Either raise that error, or ask for a different directory!\n # (Will exit If you X out of the dialog, to avoid getting stuck)\n if interact:\n root.lift()\n root.focus_force()\n dir_path = askdirectory(parent=root, title=err_msg,\n initialdir=dir_path)\n success, err_msg = check_dir_path(dir_path, [ftype], 1, False)\n else:\n root.destroy\n raise AssertionError(err_msg)\n if not len(dir_path):\n root.destroy()\n raise AssertionError(\"You Closed the Dialog Window Without a Folder!\")\n root.destroy()\n\n \"\"\"\n If we've gotten this far, we found files!\n So now, we will filter the list based on the parameters given, and return\n the result as a file list to open.\n \"\"\"\n full_dir = listdir(dir_path)\n files_wanted = []\n for file in full_dir:\n # For each file, decide if it passes\n for str_AND in str_has:\n # IF any of these fail, ignore the file.\n if str_AND in file:\n pass\n else:\n break\n else:\n # Only does this if all \"Required\" strings pass\n for str_OR in str_inc:\n # If ANY string is found in the file,\n # Add it to the list and then 
go to next file\n if str_OR in file:\n files_wanted.append(os.path.join(dir_path, file))\n break\n else:\n pass\n return tuple(files_wanted), dir_path", "title": "" }, { "docid": "8c2478d8324046e70ce705ab562bcfd0", "score": "0.49670327", "text": "def get_filters():\n print('Hello! Let\\'s explore some US bikeshare data!')\n\n #The following loop will get the user's choice of which city to display results.\n while True:\n #New Edit gives the user more choices to pick the city to ease typing\n city = input('Would you like to see data for Chicago/Ch, New York City/NYC, or Washington/WA: ').lower()\n if city not in CITY_DATA.keys() and city not in ['ch', 'nyc', 'wa']:\n print('this city is not available, please enter one of the three provided cities')\n continue\n else:\n if city == 'ch':\n city = 'chicago'\n elif city == 'nyc':\n city = 'new york city'\n elif city == 'wa':\n city = 'washington'\n city = CITY_DATA[city]\n break\n\n #The following loop will get the user choice whether to use filters or display unfiltered results.\n while True:\n filter = input('Would you like to filter the data by month or day, or not at all? \\nPlease chose(yes/no): ').lower()\n if filter == 'yes':\n filter = True\n elif filter == 'no':\n filter = False\n else:\n print('Please enter a valid answer!')\n continue\n break\n\n\n #the following loop will get the user choice for filters: whether by month, by day, or include both.\n while True:\n if filter:\n choice = input('What filter do you want to apply? please choose (month/day/both) ').lower()\n if choice not in ['month', 'day', 'both']:\n print('Please Enter a valid answer!')\n continue\n if choice == 'month':\n month = input('Which month - January, February, March, April, May, or June? ')\n if month not in months.keys():\n print('This month is invalid, Please Try again.')\n continue\n else:\n month = months[month]\n day = days\n break\n elif choice == 'day':\n day = input('Which day - Monday, Tuesday, Wednesday, Thursday, Friday, Saturday, or Sunday? ').lower()\n if day not in days.keys():\n print('This day is invalid. Please try again.')\n continue\n else:\n day = days[day]\n month = months\n break\n elif choice == 'both':\n month = input('Which month - January, February, March, April, May, or June? ').lower()\n if month not in months.keys():\n print('This month is invalid. Please try again')\n continue\n month = months[month]\n day = input('Which day - Monday, Tuesday, Wednesday, Thursday, Friday, Saturday, or Sunday? ').lower()\n if day not in days.keys():\n print('This day is invalid. Please try again')\n continue\n day = days[day]\n break\n else:\n day = days\n month = months\n break\n\n #Print Separator\n print('-'*40)\n #Return chosen values\n return city, month, day", "title": "" }, { "docid": "80ff6a6a9716a89fc825f4b27bcdf4c0", "score": "0.495757", "text": "def _run(self, *args, url=None):\n\n with Provider_Customers_Table() as _:\n pass\n\n url = url if url else self._get_urls()[0]\n Rel_File(self.path, self.csv_dir, url).parse_file()\n utils.delete_paths([self.csv_dir, self.path])\n\n # Fills these rov++ specific tables\n with ASes_Table(clear=True) as _as_table:\n _as_table.clear_table()\n _as_table.fill_table()\n # creates and closes table\n with AS_Connectivity_Table(clear=True) as _conn_table:\n _conn_table.fill_table()\n\n with Relationships_Table(clear=True) as db:\n db.fill_table()", "title": "" } ]
d1a63422da033794916a714b5fd0b99a
Given the following, generate a list of dict results split by fiscal years/quarters/months
[ { "docid": "68bc9b80b22032ac74bf4c74d2f56c9a", "score": "0.49230993", "text": "def bolster_missing_time_periods(filter_time_periods, queryset, date_range_type, columns):\n min_date, max_date = min_and_max_from_date_ranges(filter_time_periods)\n results = create_full_time_periods(min_date, max_date, date_range_type, columns)\n\n for row in queryset:\n for item in results:\n same_year = str(item[\"time_period\"][\"fy\"]) == str(row[\"fy\"])\n same_period = str(item[\"time_period\"][date_range_type]) == str(row[date_range_type])\n if same_year and same_period:\n for column_name, column_in_queryset in columns.items():\n item[column_name] = row[column_in_queryset]\n\n for result in results:\n result[\"time_period\"][\"fiscal_year\"] = result[\"time_period\"][\"fy\"]\n del result[\"time_period\"][\"fy\"]\n return results", "title": "" } ]
[ { "docid": "d277920cd02ddd135c01e630b1d38afa", "score": "0.6372415", "text": "def get_fiscalyear_revenue_timeseris_data(client_id:str)->List[Dict]:\n df = load_process_data(client_id)\n fiscalyears = df['date'].dt.to_period('A-MAR').astype(str).astype(int).unique().tolist()\n fiscalyears = sorted(fiscalyears, reverse=True)\n fy_names = list(map(lambda y: f'{y-1}-{y%1000}', fiscalyears))\n \n # return [get_yearly_quterly_montly_weekly_revenue(fy, df) for fy in fiscalyears]\n return {fy_names[i]:get_yearly_quterly_montly_weekly_revenue(fy, df) for i, fy in enumerate(fiscalyears)}", "title": "" }, { "docid": "f427af50c15fe673a67eea1e85ca63b3", "score": "0.61781615", "text": "def generate_dictionary_for_quarterwise_data(stock, columnName):\n result = {}\n stock.Date = pd.to_datetime(stock.Date)\n for index, row in stock.iterrows():\n try:\n q = (row.Date.month-1)//3 + 1\n year = row.Date.year\n month = row.Date.month\n res = result.get(year, {})\n # amount = re.findall(r\"\\d+.?\\d*\",row[\"Revenue\"])[0]\n amount = row[columnName]\n q = \"1q\" if 1 <= month <= 3 else \"2q\" if 4 <= month <= 6 else \"3q\" if 6 <= month <= 9 else \"4q\"\n val = res.get(q, [])\n val.append(float(amount))\n res[q] = val\n result[year] = res\n except:\n continue\n return result", "title": "" }, { "docid": "178ea43450a559fb8638aea5a018f058", "score": "0.60559785", "text": "def get_yearly_quterly_montly_weekly_revenue(fiscalyear, df):\n df2 = df[df['date'].dt.to_period('A-MAR')==fiscalyear].copy()\n yearly_revenue = df2['debit'].sum().round()\n fy_name = f'{fiscalyear-1}-{fiscalyear%1000}'\n \n quarterly_agg = df2.groupby([df2.date.dt.quarter], sort=False).agg({'debit' :sum}). \\\n rename(columns={'debit':'revenue'}).reset_index()\n quarterly_agg['quarter'] = list(range(1,5))\n quterly_revenue = quarterly_agg[['quarter', 'revenue']].round().to_dict(orient='records')\n \n monthly_agg = df2.groupby(df.date.dt.strftime('%b-%Y'), sort=False).agg({'debit' :sum}). \\\n reset_index(). \\\n rename(columns={'date':'month','debit':'monthly_revenue'})\n monthly_revenue = monthly_agg.round().to_dict(orient='records')\n \n weekly_agg = df2.groupby([df2.date.dt.week], sort=False).agg({'debit' :sum}). 
\\\n rename(columns={'debit':'weekly_revenue'}).reset_index()\n weekly_agg['week'] = list(range(1,53))\n weekly_revenue = weekly_agg[['week', 'weekly_revenue']].round().to_dict(orient='records')\n \n # return {'fiscalyear': fy_name,\n # 'data': {'annual_revenue': yearly_revenue,\n # 'quarterly_revenue': quterly_revenue,\n # 'monthly_revenue': monthly_revenue,\n # 'weekly_revenue': weekly_revenue}\n # }\n \n return {'weekly': weekly_revenue, \n 'monthly': monthly_revenue, \n 'quarterly': quterly_revenue}", "title": "" }, { "docid": "00802fde5706e77a6bab67351fd50154", "score": "0.60281336", "text": "def generate_dictionary_for_quarterwise_growthrate_data(data):\n gr_dic = {}\n keys = list(data.keys())\n array = [''] * (len(keys)*4)\n array_index = 0\n for key in data:\n lists = data.get(key)\n array_index += 4 - len(lists.keys())\n for lis in lists:\n if math.isnan(lists.get(lis)[0]):\n array[array_index] = ''\n else:\n array[array_index] = lists.get(lis)[0]\n array_index = array_index + 1\n if (array.count('')) > ((len(keys) * 4) / 2):\n return gr_dic\n\n for i in range(4, len(keys)*4, 4):\n res = [array[i], array[i+1], array[i+2], array[i+3]]\n avg = np.mean(list(filter(lambda i: isinstance(i, float), res)))\n if np.isnan(avg):\n pass\n else:\n array[i] = avg\n\n gr_array = [''] * (len(keys)*4)\n for i in range(0, len(keys)*4-1):\n x = array[i]\n y = array[i+1]\n if x == '' and y == '':\n continue\n if y == '' or y == 0:\n continue\n if x == '':\n gr_array[i] = 1\n else:\n gr_array[i] = (x - y) / y\n index = 0\n for key in data:\n gr_dic[key] = [gr_array[index], gr_array[index+1],\n gr_array[index+2], gr_array[index+3]]\n index = index + 4\n return gr_dic", "title": "" }, { "docid": "26a91c7045deb0a63f30a553d4bf294c", "score": "0.5766984", "text": "def of_year(cls, year):\n start = datetime(year, 1, 1)\n start_quarter = list(\n rrule(MONTHLY, interval=3, dtstart=start, count=4)\n )\n end_quarter = [\n date + relativedelta(months=3, days=-1) for date in start_quarter\n ]\n return [cls(*item) for item in list(zip(start_quarter, end_quarter))]", "title": "" }, { "docid": "ebc762741203c66ae9778ad90e4a8eea", "score": "0.5722563", "text": "def decode_periods(temporal_schema, start_date, end_date, time_step):\n requested_periods = {}\n if start_date is None:\n return requested_periods\n if isinstance(start_date, datetime.date):\n start_date = start_date.strftime('%Y-%m-%d')\n\n td_time_step = datetime.timedelta(days=time_step)\n steps_per_period = int(round(365./time_step))\n\n if end_date is None:\n end_date = datetime.datetime.now().strftime('%Y-%m-%d')\n if isinstance(end_date, datetime.date):\n end_date = end_date.strftime('%Y-%m-%d')\n\n if temporal_schema is None:\n periodkey = start_date + '_' + start_date + '_' + end_date\n requested_period = list()\n requested_period.append(periodkey)\n requested_periods[start_date] = requested_period\n return requested_periods\n\n if temporal_schema == 'M':\n start_date = datetime.datetime.strptime(start_date, '%Y-%m-%d')\n end_date = datetime.datetime.strptime(end_date, '%Y-%m-%d')\n delta = relativedelta(months=time_step)\n requested_period = []\n while start_date <= end_date:\n next_date = start_date + delta\n periodkey = str(start_date)[:10] + '_' + str(start_date)[:10] + '_' + str(next_date - relativedelta(days=1))[:10]\n requested_period.append(periodkey)\n requested_periods[start_date] = requested_period\n start_date = next_date\n return requested_periods\n\n # Find the exact start_date based on periods that start on yyyy-01-01\n firstyear = 
start_date.split('-')[0]\n new_start_date = datetime.datetime.strptime(start_date, '%Y-%m-%d')\n if temporal_schema == 'A':\n dbase = datetime.datetime.strptime(firstyear+'-01-01', '%Y-%m-%d')\n while dbase < new_start_date:\n dbase += td_time_step\n if dbase > new_start_date:\n dbase -= td_time_step\n start_date = dbase.strftime('%Y-%m-%d')\n new_start_date = dbase\n\n # Find the exact end_date based on periods that start on yyyy-01-01\n lastyear = end_date.split('-')[0]\n new_end_date = datetime.datetime.strptime(end_date, '%Y-%m-%d')\n if temporal_schema == 'A':\n dbase = datetime.datetime.strptime(lastyear+'-12-31', '%Y-%m-%d')\n while dbase > new_end_date:\n dbase -= td_time_step\n end_date = dbase\n if end_date == start_date:\n end_date += td_time_step - datetime.timedelta(days=1)\n end_date = end_date.strftime('%Y-%m-%d')\n\n # For annual periods\n if temporal_schema == 'A':\n dbase = new_start_date\n yearold = dbase.year\n count = 0\n requested_period = []\n while dbase < new_end_date:\n if yearold != dbase.year:\n dbase = datetime.datetime(dbase.year,1,1)\n yearold = dbase.year\n dstart = dbase\n dend = dbase + td_time_step - datetime.timedelta(days=1)\n dend = min(datetime.datetime(dbase.year, 12, 31), dend)\n basedate = dbase.strftime('%Y-%m-%d')\n start_date = dstart.strftime('%Y-%m-%d')\n end_date = dend.strftime('%Y-%m-%d')\n periodkey = basedate + '_' + start_date + '_' + end_date\n if count % steps_per_period == 0:\n count = 0\n requested_period = []\n requested_periods[basedate] = requested_period\n requested_period.append(periodkey)\n count += 1\n dbase += td_time_step\n if len(requested_periods) == 0 and count > 0:\n requested_periods[basedate].append(requested_period)\n else:\n yeari = start_date.year\n yearf = end_date.year\n monthi = start_date.month\n monthf = end_date.month\n dayi = start_date.day\n dayf = end_date.day\n for year in range(yeari,yearf+1):\n dbase = datetime.datetime(year,monthi,dayi)\n if monthi <= monthf:\n dbasen = datetime.datetime(year,monthf,dayf)\n else:\n dbasen = datetime.datetime(year+1,monthf,dayf)\n while dbase < dbasen:\n dstart = dbase\n dend = dbase + td_time_step - datetime.timedelta(days=1)\n basedate = dbase.strftime('%Y-%m-%d')\n start_date = dstart.strftime('%Y-%m-%d')\n end_date = dend.strftime('%Y-%m-%d')\n periodkey = basedate + '_' + start_date + '_' + end_date\n requested_period = []\n requested_periods[basedate] = requested_period\n requested_periods[basedate].append(periodkey)\n dbase += td_time_step\n return requested_periods", "title": "" }, { "docid": "9e94a8540c8064bbc321eabd2a95885c", "score": "0.5685768", "text": "def new_dictionary():\r\n years = {str(i) for i in range(1888, 2018)}\r\n future_dict = file_num()\r\n countries = countries_all()\r\n set1 = set()\r\n for i in countries:\r\n set1.add(str(\" \".join(i)))\r\n dict1 = dict()\r\n for element in future_dict:\r\n values = set()\r\n for j in element:\r\n if j in set1:\r\n values.add(j)\r\n if len(j) == 6:\r\n if str(j[1] + j[2] + j[3] + j[4]) in years:\r\n key = str(j[1] + j[2] + j[3] + j[4])\r\n if key in dict1:\r\n dict1[key].update(values)\r\n else:\r\n dict1[key] = values\r\n return dict1", "title": "" }, { "docid": "4614734baee919465ee37430332517a5", "score": "0.56512946", "text": "def names():\n\n year_extract = '201'\n avg_sql = (f'SELECT city, substr(start_date, INSTR(start_date, {year_extract}), 4) as yr, ' +\n 'avg(duration) as avg_trip ' +\n 'FROM trip ' +\n 'JOIN station ON trip.start_station_id = station.id ' +\n 'GROUP BY city, yr'\n )\n\n avg_city = 
pd.read_sql(avg_sql, engine2.connect())\n avg_city['avg_trip'] = avg_city['avg_trip']/60\n avg_city['avg_trip'] = avg_city['avg_trip'].round()\n avg_list = avg_city.to_dict('records')\n\n yr_2013 = []\n yr_2014 = []\n yr_2015 = []\n\n for item in avg_list:\n data = {}\n for k,v in item.items():\n if(k=='city'):\n data['axis'] = v\n if(k=='avg_trip'):\n data['value'] = v\n if(k=='yr'and v=='2013'):\n data['name'] = v\n yr_2013.append(data)\n elif(k=='yr'and v=='2014'):\n data['name'] = v\n yr_2014.append(data)\n elif(k=='yr'and v=='2015'):\n data['name'] = v\n yr_2015.append(data)\n \n all_yr = [yr_2015, yr_2014, yr_2013]\n\n\n return jsonify(all_yr)", "title": "" }, { "docid": "13401000b7a8392ec00dfa1233dc031f", "score": "0.56424075", "text": "def precipitation():\n# * Query for the dates and precipitation observations from the last year.\n# * Convert the query results to a Dictionary using `date` as the key and `prcp` as the value.\n# * Return the json representation of your dictionary.\n end_date_str = session.query(Measurement.date).order_by(Measurement.date.desc()).first().date\n dt_year = int(end_date_str[:4])\n dt_month = int(end_date_str[5:7])\n dt_day = int(end_date_str[8:10])\n end_date = dt.date(dt_year, dt_month, dt_day)\n start_date = end_date - dt.timedelta(days=365)\n\n#Select only the date and prcp values\n measurement_year = session.query(Measurement.date, Measurement.prcp).\\\n filter(Measurement.date >= start_date).\\\n filter(Measurement.date <= end_date).all()\n\n year_totals = []\n for result in measurement_year:\n year = {}\n year[\"date\"] = result[0]\n year[\"prcp\"] = result[1]\n year_totals.append(year)\n\n return jsonify(year_totals)", "title": "" }, { "docid": "0236d9ce3ec3917df7b8e2ccc7e430f6", "score": "0.5638206", "text": "def create_dividend(stock, corporate):\n corporate['Ex Date'] = pd.to_datetime(\n corporate['Ex Date'], errors='coerce')\n stock['Date'] = pd.to_datetime(stock['Date'], errors='coerce')\n\n dividend = corporate[corporate['Purpose'].str.contains(\"Dividend\")]\n result = {}\n for index, row in dividend.iterrows():\n try:\n year = row[\"Ex Date\"].year\n month = row[\"Ex Date\"].month\n amount = re.findall(r\"\\d+.?\\d*\", row[\"Purpose\"])[0]\n res = result.get(year, {})\n q = \"1q\" if 1 <= month <= 3 else \"2q\" if 4 <= month <= 6 else \"3q\" if 6 <= month <= 9 else \"4q\"\n val = res.get(q, [])\n val.append(float(amount))\n res[q] = val\n result[year] = res\n except:\n pass\n for year, quaters in result.items():\n for q, a in quaters.items():\n try:\n quaters[q] = sum(a)/len(a)\n except:\n pass\n result[year] = quaters\n divList = list()\n for index, row in stock.iterrows():\n try:\n year = row[\"Date\"].year\n month = row[\"Date\"].month\n q = \"1q\" if 1 <= month <= 3 else \"2q\" if 4 <= month <= 6 else \"3q\" if 6 <= month <= 9 else \"4q\"\n if result.get(year) != None:\n if result.get(year).get(q) != None:\n divList.append(result.get(year).get(q))\n else:\n divList.append(0)\n else:\n divList.append(0)\n except:\n pass\n stock[\"Dividend Value\"] = divList\n return stock", "title": "" }, { "docid": "ccce1eca2c258f31a2441f1ddf72cafa", "score": "0.5582218", "text": "def search_years(content: dict, start=0, step=0) -> dict:\n search_year = content['value']\n years_list = __get_years_(search_year, start, step)\n years = get_years_list(years_list)\n return years", "title": "" }, { "docid": "a46c3841d2c3dde7cd82f5d9bc7d700d", "score": "0.55810326", "text": "def precip_records():\n session = Session(engine)\n date = dt.datetime(2017, 8, 23) 
- dt.timedelta(days=365)\n results = session.query(Measurement.date, Measurement.prcp).filter(Measurement.date > date).all()\n session.close()\n year_precip = []\n for date, prcp in results:\n precip_dict = {}\n precip_dict[\"Date\"] = date\n precip_dict[\"Precipitation\"] = prcp\n year_precip.append(precip_dict)\n\n return jsonify(year_precip)", "title": "" }, { "docid": "4cd8735069f5206b1317466d442f7585", "score": "0.55157673", "text": "def grouping(data, separator='day'):\n output = []\n results = dict(sorted(data.items()))\n\n if separator == 'year':\n\n for years in results:\n\n months = dict(sorted(results[years].items()))\n year_mean = []\n first_day_date = None\n first_month = None\n\n for month in months:\n if first_day_date is None:\n first_day_date = list(months[month][::-1][0].items())[0][0]\n first_month = month\n year_mean += [float(day.popitem()[1]) for day in months[month]]\n\n date = '{}/{}/{}'.format(first_day_date, first_month, years)\n output.append((date, '{0:.2f}'.format(statistics.mean(year_mean))))\n\n elif separator == 'month':\n\n for years in results:\n months = dict(sorted(results[years].items()))\n\n for month in months:\n first_day_date = list(months[month][::-1][0].items())[0][0]\n mean = \"{0:.2f}\".format(statistics.mean([float(day.popitem()[1]) for day in months[month]]))\n date ='{}/{}/{}'.format(first_day_date,month,years)\n output.append((date,mean))\n\n elif separator == 'day':\n for years in results:\n months = dict(sorted(results[years].items()))\n for month in months:\n for day_data in months[month][::-1]:\n day = day_data.popitem()\n date = '{}/{}/{}'.format(day[0],month,years)\n output.append((date,day[1]))\n return output, separator", "title": "" }, { "docid": "d2d209866c3a74514aa3bc378d98b1fb", "score": "0.55133194", "text": "def get_quarterly_new_members(quarters, members):\n response = {}\n for pair in quarters:\n year = pair[0]\n quarter = pair[1]\n quarter_desc = '{}-Q{}'.format(year, quarter)\n date_range = REPORT_QUARTERS[quarter-1]\n start = '{}-{}'.format(year, date_range[0])\n if quarter == 4:\n year += 1\n end = '{}-{}'.format(year, date_range[1])\n response[quarter_desc] = members.count_new_households(start, end)\n return response", "title": "" }, { "docid": "893ec5a6517aeda76dae8cf830c8e8c6", "score": "0.54815346", "text": "def gen_financials():\n net_worth = random.randrange(5, 33) * 10000\n liquid_assets = net_worth / random.randrange(1, 10)\n annual_income = random.randrange(7, 42) * 5000\n\n financials = {'net_worth' : net_worth, 'liquid_assets' : liquid_assets,\n 'annual_income' : annual_income}\n\n return financials", "title": "" }, { "docid": "7f498f103f77b92e8475de8e5010918d", "score": "0.54718643", "text": "def construct_years_dict(hurricanes):\n years_dict = {}\n for hurricane in hurricanes:\n current_year = hurricanes[hurricane]['Year']\n if current_year not in years_dict:\n years_dict[current_year] = [hurricanes[hurricane]]\n else:\n years_dict[current_year].append(hurricanes[hurricane])\n return years_dict", "title": "" }, { "docid": "eda4726c2b968a7e6e4a9350cab631a4", "score": "0.5427099", "text": "def get_years(xlsx_files, filename_company):\n years_dict = {}\n\n for xl_path, page_ref in xlsx_files.items(): # loop through all xlsx files\n path, filename = os.path.split(xl_path) # split file and path\n work_book = openpyxl.load_workbook(xl_path) # load workbook\n work_sheet = work_book[page_ref] # take sheet reference\n flg = 1\n count = 0\n years = []\n for j in range(1, 10):\n\n \n for i in range(1, 10):\n if 
work_sheet.cell(row=j, column=i).value is not None:\n match_obj = re.search(\"^2\\d{3}$|^2\\d{3}\\s2\\d{3}\\s2\\d{3}$|^2\\d{3}\\s2\\d{3}$|^\\s2\\d{3}$|(^2\\d{3}){1}|[^,]\\d{3}$\", str(work_sheet.cell(row=j, column=i).value))\n \n \n if match_obj:\n match_obj_space=str(match_obj.group(0))\n \n if \" \" in match_obj_space: \n \n years.append(str.split(work_sheet.cell(row=j, column=i).value)) \n years= [val for sublist in years for val in sublist] \n flg=0\n break\n else:\n count += 1\n years.append(match_obj.group(0))\n\n if count == 3: \n flg=0\n break\n years.sort(reverse = True)\n years_dict[filename_company[filename]] = years\n if flg == 0:\n break\n op.writelines(str(years_dict))\n return years_dict", "title": "" }, { "docid": "dccb29b8d5a25577623024f45c7b8f73", "score": "0.54231036", "text": "def _aggregate_by_year(historical_data_dict: dict):\n\n converted_response = {}\n\n for datapoint in historical_data_dict:\n converted_response[datapoint['date'].year] = datapoint['value']\n\n return converted_response", "title": "" }, { "docid": "b5dd786d07e8395bc784dacf81e3396a", "score": "0.54199255", "text": "def get_yearly_report(self):\n ans = dict()\n\n for year in range(Settings.CF_OPENED_YEAR, date.today().year+1):\n current_year_stats = self._get_insights_for_filtered(lambda entity: entity.get_time().year == year)\n if current_year_stats:\n ans[year] = current_year_stats\n\n return ans", "title": "" }, { "docid": "9df85e2d186917920e58f893c23c2126", "score": "0.54189634", "text": "def _transform_financial_stmt(std_financials_list: list, tag_filter_list: list):\n results = {}\n\n for financial in std_financials_list:\n\n if (tag_filter_list is None or\n financial.data_tag.tag in tag_filter_list):\n results[financial.data_tag.tag] = financial.value\n\n return results", "title": "" }, { "docid": "d45fd795c4a8638d9679fef4b2d4e512", "score": "0.5417625", "text": "def parse_flight_data() -> Dict[str, List[Tuple[str, str, int]]]:\n data_paths = {\n 'test': 'data/test_flight_data.csv',\n '11-19': 'data/Nov2019_flight_data.csv',\n '12-19': 'data/Dec2019_flight_data.csv',\n '01-20': 'data/Jan2020_flight_data.csv',\n '02-19': 'data/Feb2019_flight_data.csv',\n '03-19': 'data/Mar2019_flight_data.csv',\n '04-19': 'data/Apr2019_flight_data.csv',\n '05-19': 'data/May2019_flight_data.csv',\n '06-19': 'data/Jun2019_flight_data.csv',\n '07-19': 'data/Jul2019_flight_data.csv',\n '08-19': 'data/Aug2019_flight_data.csv',\n }\n flight_data = dict()\n for month_year, data_path in data_paths.items():\n flight_data[month_year] = list()\n with open(data_path) as f:\n lines_to_skip = 1\n i = 0\n for line in f:\n if i < lines_to_skip:\n i += 1\n continue\n split_line = line.rstrip().split(',')\n try:\n int(split_line[11])\n except:\n raise Exception('Bad line: {}'.format(line))\n flight_data[month_year].append((split_line[4], split_line[8], int(split_line[11])))\n return flight_data", "title": "" }, { "docid": "19adcc20050b657c0d3e4e7a9dc0f423", "score": "0.5409015", "text": "def generate_dates():\n current_year = datetime.now().year\n current_date = datetime.now().strftime('%m%d')\n years = range(2015, current_year)\n quarters = [\"0331\", \"0630\", \"0930\", \"1231\"]\n all_dates = []\n for r in itertools.product(years, quarters):\n all_dates.append(str(r[0]) + r[1])\n for q in quarters:\n if q < current_date:\n all_dates.append(str(current_year) + q)\n logging.info(\"dates %s\", all_dates)\n return all_dates", "title": "" }, { "docid": "511bcca5f08f87abd6a82f7e3d6b5f45", "score": "0.54009223", "text": "def 
pullcalldates(self):\n datelist = cal.Calendar()\n dlist = datelist.yeardatescalendar(2020)\n itr = 0\n self.d = {}\n for i in dlist[:]:\n for j in i:\n for k in j:\n for u in k:\n itr += 1\n self.d[itr] = u\n return self.d", "title": "" }, { "docid": "b91547afd334dea2da6b47dcb529830f", "score": "0.53689575", "text": "def year_frequency_dict(year_dict, start, end):\n year_freq_dict = dict()\n for year in range(start, end+1):\n if year in year_dict:\n year_freq_dict[year] = year_dict[year][1]/year_dict[year][0]\n return year_freq_dict", "title": "" }, { "docid": "4f179399ff9f3a698f32b03553e41b1f", "score": "0.53654516", "text": "def _returnCommonYears(arrayFNameS, lngFromYear, lngToYear):\n # arrayDays=[]\n arrayIncr = []\n blnSpecialDates = 0\n\n for arrayFName in arrayFNameS:\n # strTempDates=''\n # if (arrayFName[\"shape_dates\"]!=\"\"):\n # strTempDates=arrayFName[\"shape_dates\"]\n\n strTemporalType = arrayFName[\"interval\"]\n\n if ((strTemporalType == \"10d\") or (strTemporalType == \"16d\") or (strTemporalType == \"15d\") or (strTemporalType == \"1d\") or (strTemporalType == \"1m\") or (strTemporalType == \"1y\")):\n arrayIncr.append(1)\n else:\n if (strTemporalType == \"10y\"):\n arrayIncr.append(10)\n else:\n blnSpecialDates = 1\n arrayIncr.append(-1)\n\n lngStepYear = 0\n arrayReturn = []\n if ((blnSpecialDates == 1) and (len(arrayIncr) == 1)):\n\n arrayYears = arrayFName[\"fixed\"].split('_')\n arrayReturn = []\n for strTemp in arrayYears:\n arrayReturn.append(int(strTemp[:4]))\n else:\n lngStepYear = 1\n arrayReturn = range(int(lngFromYear), int(lngToYear) + 1, lngStepYear)\n\n return arrayReturn", "title": "" }, { "docid": "436e894c98e7982a82491977a93e0158", "score": "0.5351873", "text": "def base_data(code, begin='0000-01-01', end='9999-12-31'):\n\n accesses = self.ratchet.general(code)\n\n del accesses['total']\n del accesses['code']\n\n if len(begin) == 4:\n begin += '-01' \n\n if len(end) == 4:\n end += '-12' \n\n empty_months_range = {'%02d' % x: None for x in range(1, 13)}\n data = {}\n for key, value in accesses.items():\n if key in ['html', 'abstract', 'pdf']:\n del value['total']\n for year, months in value.items():\n del months['total']\n if year[1:] >= begin[0:4] and year[1:] <= end[0:4]:\n ye = data.setdefault(year[1:], copy.copy(empty_months_range))\n for month, days in months.items():\n if year[1:]+'-'+month[1:] >= begin and year[1:]+'-'+month[1:] <= end:\n if ye[month[1:]] == None:\n ye[month[1:]] = 0\n ye[month[1:]] += days['total']\n\n return data", "title": "" }, { "docid": "a47d7468eaaa3a6899b851b2e33ffee2", "score": "0.5343377", "text": "def get_quarter(y, m):\n \n result = []\n quarter = calc_quarter(y, m)\n for i in quarter:\n result.append(get_month(i[0], i[1]))\n \n return result", "title": "" }, { "docid": "592fdd4156ea98e22baa8b18cea62c67", "score": "0.5334084", "text": "def process_2019_raw_into_monthly() -> List[float]:\n this_file_path = Path(os.path.realpath(__file__))\n this_file_dir = this_file_path.parent\n usage_2019_file = this_file_dir / 'resources' / 'electricity2019.json'\n with usage_2019_file.open() as f:\n usage_2019_data = json.loads(f.read())\n flat_kwh_per_day = []\n for raw_month in usage_2019_data:\n start_date = datetime.strptime(raw_month['StartDate'], '%m/%d/%Y')\n end_date = datetime.strptime(raw_month['EndDate'], '%m/%d/%Y')\n days_in_range = (end_date - start_date).days\n for i in range(days_in_range + 1):\n this_date = start_date + timedelta(i)\n if this_date.year == 2019:\n flat_kwh_per_day.append(raw_month['kWh'])\n 
day_index = -1\n month_usages = []\n for month in range(1, 13):\n num_days = monthrange(2019, month)[1]\n month_sum = 0\n for day in range(num_days):\n day_index += 1\n month_sum += flat_kwh_per_day[day_index]\n month_usages.append(round(month_sum / num_days, 2))\n return month_usages", "title": "" }, { "docid": "11c90bea257ebbb063743758609bc40c", "score": "0.5325994", "text": "def _span(yf: YearData, yt: YearData, pf: int, pt: int = None) -> Dict[YearData, Tuple[int, int]]:\n years = (x for x in YearData.__subclasses__() if yf.year <= x.year <= yt.year)\n return {year: (pf, pt) if pt else (pf, pf) for year in years}", "title": "" }, { "docid": "877309f7053e125a74df922899646848", "score": "0.53181916", "text": "def make_prices_vecs(data):\n\n\toutput = []\n\n\t# We have the same set of dates (keys) for all companies,\n\t# so we have to sort keys only once (keys from whatever element).\n\n\tsome_el = data[0]\n\tsorted_keys = some_el[1].keys()\n\tsorted_keys.sort()\n\n\tfor elem in data:\n\t\tdate_price_dict = elem[1]\n\t\toutput.append([date_price_dict[key] for key in sorted_keys])\n\t\n\treturn output;", "title": "" }, { "docid": "6cde7cb2a1e98f7a4b0cf0d650725f9e", "score": "0.5299571", "text": "def create_years_genre(sql_data: tuple, years: dict):\n # MB i need to rewrite that, but this is work well =)\n for sql_line in sql_data:\n year = sql_line[0]\n years[year]['count'] = sql_line[1]\n return years", "title": "" }, { "docid": "8476d5e2bfc0abf3d9ce2d0ba3e9a1e1", "score": "0.5297891", "text": "def __get_years_(search_year, start, step) -> list:\n sql_request = _sql_request_search_years(search_year)\n years = get_ids_by_request(sql_request, start, step)\n return years", "title": "" }, { "docid": "93f20cfe72eb4951826084a4b4fe0082", "score": "0.5296002", "text": "def _aggregate_by_year_month(historical_data: dict):\n if historical_data is None:\n return {}\n\n converted_response = {}\n\n # first pass assemble the basic return value\n for datapoint in historical_data:\n year = datapoint['date'].year\n month = datapoint['date'].month\n\n if year not in converted_response:\n converted_response[year] = {}\n if month not in converted_response[year]:\n converted_response[year][month] = []\n\n converted_response[year][month].append(datapoint['value'])\n\n # second pass calculate averages\n for year in converted_response.keys():\n for month in converted_response[year]:\n converted_response[year][month] = sum(\n converted_response[year][month]) / len(converted_response[year][month])\n\n return converted_response", "title": "" }, { "docid": "312ef24c9f0408eab7b1778d9c94782d", "score": "0.5292689", "text": "def query_infos(self, fromYear, toYear):\n connection = sqlite3.connect(\"weather.sqlite\")\n cur = connection.cursor()\n toYear = int(toYear) + 1\n dictOuter = {}\n for row in cur.execute(\"select * from samples where \\\n sample_date between ? 
and ?\",\n (str(fromYear)+'%', str(toYear)+'%')):\n print(f\"row {row}\")\n myMonth = datetime.datetime.strptime(row[1], '%Y/%m/%d').month\n dictOuter.setdefault(myMonth, []).append(row[5])\n print(dictOuter)\n return dictOuter\n connection.commit()\n connection.close()", "title": "" }, { "docid": "50cb5ad1bc6e9a24d490050a2073dbb7", "score": "0.528686", "text": "def special_by_dd(keys):\n def expand_year(df, dd_name):\n \"\"\" For jan1989 - sep1995 they wrote the year as a SINGLE DIGIT\"\"\"\n if 'HRYEAR' in df.columns:\n k = 'HRYEAR'\n else:\n k = k = 'HdYEAR'\n last_digit = df[k].dropna().unique()[0]\n if last_digit >= 10:\n last_digit = last_digit % 10\n base_year = int(dd_name[-4:-1]) * 10\n df[\"HRYEAR4\"] = base_year + last_digit\n df = df.drop(k, axis=1)\n return df\n\n def combine_age(df, dd_name):\n \"\"\"For jan89 and jan92 they split the age over two fields.\"\"\"\n df[\"PRTAGE\"] = df[\"AdAGEDG1\"] * 10 + df[\"AdAGEDG2\"]\n df = df.drop([\"AdAGEDG1\", \"AdAGEDG2\"], axis=1)\n return df\n\n def align_lfsr(df, dd_name):\n \"\"\"Jan1989 and Jan1999. LFSR (labor focrce status recode)\n had\n 1 = WORKING\n 2 = WITH JOB,NOT AT WORK\n 3 = UNEMPLOYED, LOOKING FOR WORK\n 4 = UNEMPLOYED, ON LAYOFF\n 5 = NILF - WORKING W/O PAY < 15 HRS;\n TEMP ABSENT FROM W/O PAY JOB\n 6 = NILF - UNAVAILABLE\n 7 = OTHER NILF\n newer ones have\n 1 EMPLOYED-AT WORK\n 2 EMPLOYED-ABSENT\n 3 UNEMPLOYED-ON LAYOFF\n 4 UNEMPLOYED-LOOKING\n 5 NOT IN LABOR FORCE-RETIRED\n 6 NOT IN LABOR FORCE-DISABLED\n 7 NOT IN LABOR FORCE-OTHER\n this func does several things:\n 1. Change 3 -> 4 and 4 -> 3 in the old ones.\n 2. Change 5 and 6 to 7.\n 2. Read retired from AhNLFREA == 4 and set to 5.\n 3. Read ill/disabled from AhNLFREA == 2 and set to 6.\n Group 7 kind of loses meaning now.\n \"\"\"\n # 1. realign 3 & 3\n status = df[\"AhLFSR\"]\n # status = status.replace({3: 4, 4: 3}) # chcek on ordering\n\n status_ = status.copy()\n status_[status == 3] = 4\n status_[status == 4] = 3\n status = status_\n\n # 2. Add 5 and 6 to 7\n status = status.replace({5: 7, 6: 7})\n\n # 3. ill/disabled -> 6\n status[df['AhNLFREA'] == 2] = 6\n\n df['PEMLR'] = status\n df = df.drop([\"AhLFSR\", \"AhNLFREA\"], axis=1)\n return df\n\n def expand_hours(df, dd_name):\n \"\"\"\n 89 and 92 have a question for hours and bins. I goto midpoint of bin.\n\n Roughly corresponds to PEERNHRO\n\n A-EMPHRS CHARACTER*002 . 
(0357:0358) LFSR=1 OR 2\n REASONS NOT AT WORK OR HOURS AT WORK\n -1 = NOT IN UNIVERSE\n WITH A JOB, BUT NOT AT WORK\n 01 = ILLNESS\n 02 = VACATION\n 03 = BAD WEATHER\n 04 = LABOR DISPUTE\n 05 = ALL OTHER\n AT WORK\n 06 = 1-4 HOURS\n 07 = 5-14 HOURS\n 08 = 15-21 HOURS\n 09 = 22-29 HOURS\n 10 = 30-34 HOURS\n 11 = 35-39 HOURS\n 12 = 40 HOURS\n 13 = 41-47 HOURS\n 14 = 48 HOURS\n 15 = 49-59 HOURS\n 16 = 60 HOURS OR MORE\n \"\"\"\n hours = df['AhEMPHRS']\n hours_dic = {1: np.nan, 2: np.nan, 3: np.nan, 4: np.nan, 5: np.nan,\n 6: 2, 7: 9.5, 8: 18, 9: 25.5, 10: 32, 11: 37, 13: 44,\n 15: 54}\n hours = hours.replace(hours_dic)\n df['PEERNHRO'] = hours\n df.drop(\"AhEMPHRS\", axis=1)\n return df\n\n def combine_hours(df, dd_name):\n \"\"\"\n For 89 and 92; \"AdHRS1\", \"AdHRS2\" combine to form \"PEHRACTT\"\n \"\"\"\n fst = df['AdHRS1']\n snd = df['AdHRS2']\n df['PEHRACTT'] = fst * 10 + snd\n df = df.drop([\"AdHRS1\", \"AdHRS2\"], axis=1)\n return df\n\n func_dict = {\"expand_year\": expand_year, \"combine_age\": combine_age,\n \"expand_hours\": expand_hours, \"align_lfsr\": align_lfsr,\n \"combine_hours\": combine_hours}\n to_apply = filter(lambda x: x in keys, func_dict)\n filtered = {k: func_dict[k] for k in to_apply}\n return filtered", "title": "" }, { "docid": "75f0165efd1a3dc3ddea0b2eed32f263", "score": "0.52836514", "text": "def get_spread(quotes, dates, contract):\r\n\r\n # for quarter1, quarter2 in zip(list(dates.keys()), list(dates.keys()[1:])):\r\n\r\n new_quotes = {}\r\n new_dates = {}\r\n\r\n dates_keys = list(dates.keys())\r\n\r\n step = 2 if contract == \"Quarters\" else 1\r\n\r\n for index in range(0, len(dates_keys) - 1, step):\r\n current_key = dates_keys[index] # Q1 key\r\n key_after = dates_keys[index + 1] # Q2 key\r\n\r\n first_date = dates[key_after][0] # Get the first date of the Q2 time series\r\n\r\n try:\r\n index_first_date = dates[current_key].index(first_date)\r\n key_spread = \"{}x{}\".format(current_key, key_after)\r\n\r\n new_dates[key_spread] = []\r\n new_quotes[key_spread] = []\r\n\r\n offset_1 = 0\r\n offset_2 = 0\r\n\r\n # we go through all the days in the Q2 days list and add the common days and spread into the new lists\r\n for index_days in range(len(dates[key_after])):\r\n if dates[current_key][index_first_date + index_days + offset_1] == dates[key_after][\r\n index_days + offset_2]:\r\n new_dates[key_spread].append(dates[key_after][index_days + offset_1])\r\n new_quotes[key_spread].append(\r\n quotes[current_key][index_first_date + index_days + offset_1] - quotes[key_after][\r\n index_days + offset_2])\r\n\r\n else:\r\n date_1 = dt.datetime.strptime(dates[current_key][index_first_date + offset_1], \"%Y-%m-%dT00:00:00Z\")\r\n date_2 = dt.datetime.strptime(dates[key_after][index_first_date + offset_2], \"%Y-%m-%dT00:00:00Z\")\r\n\r\n while date_1 != date_2 and offset_1 < 10 and offset_2 < 10:\r\n if date_1 > date_2:\r\n offset_2 += 1\r\n else:\r\n offset_1 += 1\r\n\r\n if date_1 != date_2:\r\n continue\r\n\r\n new_dates[key_spread].append(dates[key_after][index_days + offset_1])\r\n new_quotes[key_spread].append(\r\n quotes[current_key][index_first_date + index_days + offset_1] - quotes[key_after][\r\n index_days + offset_2])\r\n\r\n # check_date(new_dates,new_quotes,quotes,dates)\r\n except IndexError:\r\n continue\r\n except Exception as e:\r\n print(\"Exception : {}\".format(e.args))\r\n print(\"No overlap for {} and {}\".format(current_key, key_after))\r\n continue\r\n\r\n return new_dates, new_quotes", "title": "" }, { "docid": 
"34e1565e03681cddf71dfa83444c53bd", "score": "0.5273918", "text": "def filter_by_year(data: dict, year: int) -> dict:\n filtered_data = data | {\"places\": []}\n\n for place in data[\"places\"]:\n dataframes = []\n\n for dataframe in place[\"data\"]:\n if dataframe[\"startYear\"] <= year <= dataframe[\"endYear\"]:\n dataframes.append(dataframe)\n\n if dataframes:\n filtered_data[\"places\"].append(\n place | {\"data\": dataframes}\n )\n\n return filtered_data", "title": "" }, { "docid": "fac1df3ff8bc107aee7b78b500a92e76", "score": "0.5269259", "text": "def createDictionaries(data):\n buildDict(data, ['office', 'office_name', 'level'], 1, 0)\n buildDict(data, ['level', 'level_name'], 1, 0)\n buildDict(data, ['area', 'area_name'], 1, 0)\n buildDict(data, ['source', 'source_name'], 1, 0)\n buildDict(data, ['financier', 'financier_name'], 1, 0)\n buildDict(data, ['year', 'office', 'unit', 'unit_name'], 3, 2)\n buildDict(data, ['year', 'office', 'unit', 'line', 'line_name'], 4, 3)", "title": "" }, { "docid": "9cd48b904c3b346bea1e0db7e901623d", "score": "0.52553475", "text": "def get_aggregates(self, monattag: str = \"\") -> dict:\n aggregates = {}\n for e in [\"TXK\", \"TNK\", \"RSK\", \"SDK\", \"PM\", \"UPM\"]:\n d = {}\n if monattag == \"\":\n list_ = [self.create_timeline(e, t) for t in self.tagreihe]\n s = sum(list_) / len(list_)\n else:\n s = self.create_timeline(e, monattag)\n if s.count() == 0:\n d = {\n \"first_year\": \"\",\n \"last_year\": \"\",\n \"mean\": \"\",\n \"mean2010\": \"\",\n \"count\": 0,\n \"std\": \"\",\n \"median\": \"\",\n \"max\": \"\",\n \"max_year\": \"\",\n \"min_year\": \"\",\n \"min\": \"\",\n \"zerorain\": \"\",\n \"zerosun\": \"\",\n \"mitteldruck\": \"\",\n \"depressiondays\": \"\"\n }\n else:\n if \"2010\" in s.index:\n index_2010 = s.index.to_list().index(\"2010\")\n d[\"mean2010\"] = round(s[index_2010:][~s.isnull()].mean(), 0)\n else:\n d[\"mean2010\"]=\"\"\n d[\"first_year\"] = s[~s.isnull()].index[0]\n d[\"last_year\"] = s[~s.isnull()].index[-1]\n d[\"mean\"] = round(s[~s.isnull()].mean(), 0)\n \n d[\"count\"] = s[~s.isnull()].count()\n d[\"std\"] = round(s[~s.isnull()].std(), 0)\n d[\"median\"] = s[~s.isnull()].median()\n d[\"max\"] = round(s.max(), 1)\n d[\"max_year\"] = s.index[np.where(s == np.max(s))[0][0]]\n d[\"min_year\"] = s.index[np.where(s == np.min(s))[0][0]]\n d[\"min\"] = round(s.min(), 1)\n if e == \"RSK\":\n d[\"zerorain\"] = s[s == 0.0].count()\n if e == \"SDK\":\n d[\"zerosun\"] = s[s == 0.0].count()\n if e == \"PM\":\n mittelhoehe = sum([x.stationshoehe for x in self.stations]) / len(self.stations)\n d[\"mean_pressure\"] = round(1013.25 * (1 - (0.0065 * mittelhoehe) / 288.15)**5.255,0) #Barometric formula\n d[\"depression_days\"] = s[s < d[\"mean_pressure\"]].count()\n aggregates[e] = d.copy()\n self.aggregates = aggregates.copy()\n return aggregates", "title": "" }, { "docid": "b2a2bb7a48a10f4bc819a4e58cbffbc1", "score": "0.5249546", "text": "def calculate_years(self):\n\n\n for node in self.nodes.values():\n node.min_year = int(cfg.cfgfile.get('case', 'current_year'))\n attributes = vars(node) \n for att in attributes:\n obj = getattr(node, att)\n if inspect.isclass(type(obj)) and hasattr(obj, '__dict__') and hasattr(obj, 'data') and obj.data is True:\n try:\n min_year = min(obj.raw_values.index.get_level_values('year'))\n except:\n min_year = min(obj.raw_values.index.get_level_values('vintage'))\n if min_year < node.min_year:\n node.min_year = min_year \n if hasattr(node,'technologies'):\n for technology in 
node.technologies.values():\n for reference_sales in technology.reference_sales.values():\n try:\n min_year = min(reference_sales.raw_values.index.levels[util.position_in_index(reference_sales.raw_values, 'vintage')])\n except:\n min_year = node.min_year\n if min_year < node.min_year:\n node.min_year = min_year\n for sales in technology.sales.values():\n try:\n min_year = min(sales.raw_values.index.get_level_values( 'vintage'))\n except:\n min_year = node.min_year\n if min_year < node.min_year:\n node.min_year = min_year \n if hasattr(node,'stock') and node.stock.data is True:\n try:\n min_year = min(node.stock.raw_values.index.levels[util.position_in_index(node.stock.raw_values, 'year')])\n except:\n min_year = node.min_year\n if min_year < node.min_year:\n node.min_year = min_year \n \n node.years = range(node.min_year,\n int(cfg.cfgfile.get('case', 'end_year')) + 1,\n int(cfg.cfgfile.get('case', 'year_step')))\n node.vintages = copy.deepcopy(node.years)\n self.years = cfg.cfgfile.get('case','supply_years')", "title": "" }, { "docid": "48cd10b1f1140e4f5e91588c554e7e36", "score": "0.52380484", "text": "def get_data(airport, start, end):\n\n data = {\"Max Temperature\": {},\n \"Min Temperature\": {},\n \"Mean Temperature\": {},\n \"Dew Point\": {}}\n\n for k in data:\n for i in range(1, 13):\n m = str(i)\n data[k][m] = {\"max\": [], \"avg\": [], \"min\": []}\n\n for year in range(start, end+1):\n for i in range(1, 13):\n m = str(i)\n html = download(airport, year, i, 1)\n for k in data:\n parse(html, data[k][m], k)\n\n return data", "title": "" }, { "docid": "401263491e91c4244ae2ee0f2abad561", "score": "0.5211393", "text": "def generate_fiscal_date_range(min_date: datetime, max_date: datetime, frequency: str) -> list:\n if frequency == \"fiscal_year\":\n interval = 12\n elif frequency == \"quarter\":\n interval = 3\n else: # month\n interval = 1\n\n date_range = []\n current_date = min_date\n while current_date <= max_date:\n date_range.append(\n {\n \"fiscal_year\": generate_fiscal_year(current_date),\n \"fiscal_quarter\": generate_fiscal_quarter(current_date),\n \"fiscal_month\": generate_fiscal_month(current_date),\n }\n )\n current_date = current_date + relativedelta(months=interval)\n\n return date_range", "title": "" }, { "docid": "a1864749a750557ed32fd54e73955375", "score": "0.5210616", "text": "def extract_financials_dataset(\n work_dict,\n scrub_mode='sort-by-date'):\n return extract_dataset('financials', work_dict, scrub_mode=scrub_mode)", "title": "" }, { "docid": "e622fa0b8c59c8b851268303230e03db", "score": "0.52005756", "text": "def extract_box_office_data(quarter='q1'):\n box_URL = 'https://www.boxofficemojo.com/quarter/' + quarter + '/?grossesOption=calendarGrosses'\n\n # Load the cache, save in global variable\n CACHE_DICT = load_cache()\n url_text = make_url_request_using_cache(box_URL, CACHE_DICT)\n soup = BeautifulSoup(url_text, 'html.parser')\n listing_divs = soup.find_all('tr')\n listing_divs.remove(listing_divs[0])\n\n data_in_rows = []\n print('-' * 50) # separator\n print(quarter.upper() + ' Historical Box Office')\n print('-' * 50) # separator\n for listing_div in listing_divs:\n alist = listing_div.find_all('a')\n year = alist[0].string\n name = alist[1].string.title()\n rank_of_year_path = alist[0]['href']\n rank_of_year_link = \"https://www.boxofficemojo.com\" + rank_of_year_path\n print(\"The #1 release of \" + year + \" \" + quarter + \" is \" + name)\n\n money_list = listing_div.find_all('td', class_=\"a-text-right mojo-field-type-money\")\n cumulative_gross 
= int(money_list[0].string.replace(\"$\", \"\").replace(',', ''))\n per_release_average_gross = int(money_list[1].string.replace(\"$\", \"\").replace(',', ''))\n data_in_rows.append([year, quarter, name, cumulative_gross, per_release_average_gross, rank_of_year_link])\n\n return data_in_rows", "title": "" }, { "docid": "330ea92fecffd906f1700281dee552db", "score": "0.51993227", "text": "def dates(self):\n base = dateutil.parser.parse(\"19000101\")\n with Dataset(self.index_path) as data:\n min_date = base + dt.timedelta(days=data[\"time\"][:].min())\n max_date = base + dt.timedelta(days=data[\"time\"][:].max())\n dates = {}\n dates[\"max_year\"] = pd.Timestamp(max_date).year\n dates[\"min_year\"] = pd.Timestamp(min_date).year + 1 # for 12 month indices\n dates[\"max_month\"] = pd.Timestamp(max_date).month\n dates[\"years\"] = list(range(dates[\"min_year\"], dates[\"max_year\"] + 1))\n return dates", "title": "" }, { "docid": "37313b5e818111fb7f6c028c26edf36c", "score": "0.5198772", "text": "def parse_billing_data(billing_data):\n user_dict = collections.defaultdict(dict)\n currency = ''\n month = ''\n year = ''\n\n for row in billing_data:\n if len(row) < 4:\n continue\n if row[3] == 'AccountTotal':\n if not currency:\n currency = row[23]\n\n if not month or not year:\n date = row[6]\n month = date[5:7]\n year = date[0:4]\n\n acct_num = row[2]\n user_dict[acct_num]['name'] = row[9]\n user_dict[acct_num]['total'] = float(row[24])\n user_dict[acct_num]['currency'] = row[23]\n\n return user_dict", "title": "" }, { "docid": "cecc5fcf08bd8fb8fb2e9fc7f817cea5", "score": "0.51934886", "text": "def get_quarterly_event_counts(quarters, event_manager):\n response = {}\n for pair in quarters:\n year = pair[0]\n quarter = pair[1]\n quarter_desc = '{}-Q{}'.format(year, quarter)\n date_range = REPORT_QUARTERS[quarter-1]\n start = '{}-{}'.format(year, date_range[0])\n if quarter == 4:\n year += 1\n end = '{}-{}'.format(year, date_range[1])\n response[quarter_desc] = event_manager.event_group_counts(start, end)\n return response", "title": "" }, { "docid": "a08fcf10f5fb5f4e36fcf75a0417594a", "score": "0.51927936", "text": "def aggregate_over_year(self) -> dict:\n oy = {\n \"monattag\": [],\n \"TXK\": {\n \"mean\": [],\n \"std\": []\n },\n \"TNK\": {\n \"mean\": [],\n \"std\": []\n },\n \"RSK\": {\n \"mean\": [],\n \"std\": []\n },\n \"SDK\": {\n \"mean\": [],\n \"std\": []\n },\n \"PM\": {\n \"mean\": [],\n \"std\": []\n },\n \"UPM\": {\n \"mean\": [],\n \"std\": []\n }\n }\n for mt in self.monattag_generator():\n oy[\"monattag\"].append(mt)\n aggregates = {}\n aggregates = self.get_aggregates(mt)\n for e in [\"TXK\", \"TNK\", \"RSK\", \"SDK\", \"PM\", \"UPM\"]:\n oy[e][\"mean\"].append(aggregates[e][\"mean\"])\n oy[e][\"std\"].append(aggregates[e][\"std\"])\n return oy", "title": "" }, { "docid": "385415e3bcdab55d2af71624e297effb", "score": "0.51850444", "text": "def collect_table(worksheet: Worksheet, year: int, fund_name, row: int, column: int) -> object:\n\n data = {}\n\n for iterated_row in range(row, row + 21):\n row_label: str = worksheet.cell(row=iterated_row, column=column).value\n\n if not row_label:\n # Some of the years has less than 21 fields which mean we'll fail\n # with older files. 
Skipping on an empty row label.\n continue\n\n row_data = {}\n for iterated_column in range(column + 1, column + 1 + 24):\n # Getting the matching month of the current cell and resetting\n # the cell value.\n month_index = int(iterated_column / 2)\n\n if month_index not in months:\n continue\n\n month_name = months[month_index]\n\n if month_name not in row_data:\n row_data[month_name] = {\n 'התרומה לתשואה': '',\n 'שיעור מסך הנכסים': '',\n 'year': year,\n 'fund': fund_name,\n }\n\n cell_kwargs = {'row': iterated_row, 'column': iterated_column}\n cell_value = worksheet.cell(**cell_kwargs).value\n\n if cell_value:\n if type(cell_value) is not str:\n cell_value = format(cell_value * 100, '.2f')\n\n # Values in cell with even index number have a label and odd\n # indexed-cell have a different label.\n if iterated_column % 2 == 0:\n key = 'התרומה לתשואה'\n else:\n key = 'שיעור מסך הנכסים'\n row_data[month_name][key] = cell_value\n\n data[row_label.strip()] = row_data\n\n return data", "title": "" }, { "docid": "4409b92c616a592f7c726c76e48493cd", "score": "0.51814896", "text": "def quarter_dict_key(freq_start):\n return str(freq_start.year) + quarter_dict[freq_start.month]", "title": "" }, { "docid": "a0d836b487720e59e09858bf10db052a", "score": "0.5180538", "text": "def _get_data_pre2007(date): \r\n \r\n # build the url based on year\r\n url = '{}/Environmental_Data_{}.txt'.format(BASE_URL, date.year)\r\n print('Fetching online data for {} (full year)'.format(date.year))\r\n \r\n try:\r\n year_data = request.urlopen(url).read().decode(encoding='utf_8').split('\\n') \r\n except:\r\n raise ValueError(date) # error accessing website\r\n else:\r\n year_data.pop(0) # remove first item which contain column header info\r\n \r\n for line in year_data:\r\n \r\n elements = line.split()\r\n yield dict(Date = elements[0],\r\n Time = elements[1],\r\n Status = 'COMPLETE', # all data from pre2007 will be complete\r\n Air_Temp = elements[5],\r\n Barometric_Press = elements[7],\r\n Wind_Speed = elements[2])", "title": "" }, { "docid": "9651f20d619d3a1c3dbe76f2283bd8e8", "score": "0.51773274", "text": "def stations():\n\n year_extract = '201'\n avg_station_sql = (f'SELECT city, name, substr(start_date, INSTR(start_date, {year_extract}), 4) as yr, ' +\n 'avg(duration) as avg_trip ' +\n 'FROM trip ' +\n 'JOIN station ON trip.start_station_id = station.id ' +\n 'GROUP BY city, name, yr'\n )\n\n avg_station = pd.read_sql(avg_station_sql, engine2.connect())\n\n avg_station['avg_trip'] = avg_station['avg_trip']/60\n avg_station['avg_trip'] = avg_station['avg_trip'].round()\n avg_station_list = avg_station.to_dict('records')\n\n yrs_2013 = []\n yrs_2014 = []\n yrs_2015 = []\n\n for item in avg_station_list:\n data = {}\n for k,v in item.items():\n if(k=='name'):\n data['axis'] = v\n if(k=='avg_trip'):\n data['value'] = v\n if(k=='city'):\n data['city'] = v\n if(k=='yr'and v=='2013'):\n data['name'] = v\n yrs_2013.append(data)\n elif(k=='yr'and v=='2014'):\n data['name'] = v\n yrs_2014.append(data)\n elif(k=='yr'and v=='2015'):\n data['name'] = v\n yrs_2015.append(data)\n \n all_yr_station = [yrs_2015, yrs_2014, yrs_2013]\n\n\n return jsonify(all_yr_station)", "title": "" }, { "docid": "55f8534937949b37e823653ef59c45bb", "score": "0.5148466", "text": "def ageGroups4():\n name = \"ageGroups4\"\n groups = {\n AGE: {\n \"Under 4 years\" : list(range(0, 4)),\n \"4 to 7 years\" : list(range(4, 8)),\n \"8 to 11 years\": list(range(8, 12)),\n \"12 to 15 years\": list(range(12, 16)),\n \"16 to 19 years\": list(range(16, 20)),\n 
\"20 to 23 years\": list(range(20, 24)),\n \"24 to 27 years\": list(range(24, 28)),\n \"28 to 31 years\": list(range(28, 32)),\n \"32 to 35 years\": list(range(32, 36)),\n \"36 to 39 years\": list(range(36, 40)),\n \"40 to 43 years\": list(range(40, 44)),\n \"44 to 47 years\": list(range(44, 48)),\n \"48 to 51 years\": list(range(48, 52)),\n \"52 to 55 years\": list(range(52, 56)),\n \"56 to 59 years\": list(range(56, 60)),\n \"60 to 63 years\": list(range(60, 64)),\n \"64 to 67 years\": list(range(64, 68)),\n \"68 to 71 years\": list(range(68, 72)),\n \"72 to 75 years\": list(range(72, 76)),\n \"76 to 79 years\": list(range(76, 80)),\n \"80 to 83 years\": list(range(80, 84)),\n \"84 to 87 years\": list(range(84, 88)),\n \"88 to 91 years\": list(range(88, 92)),\n \"92 to 95 years\": list(range(92, 96)),\n \"96 to 99 years\": list(range(96, 100)),\n \"100 to 103 years\": list(range(100, 104)),\n \"104 to 107 years\": list(range(104, 108)),\n \"108 to 111 years\": list(range(108, 112)),\n \"112 to 115 years\": list(range(112, 116)),\n }\n }\n return name, groups", "title": "" }, { "docid": "aa947001fa1f5b68b4ececc83462919d", "score": "0.51367325", "text": "def get_relevant_text_bodies(subreddit_list, start_year, start_month, end_month, base_path):\n\n text_bodies = defaultdict(lambda: [])\n\n #this really should be done using bash brace expansion...\n\n base_path_full_non_top_level = base_path + \"non_top_level_comments/\"\n valid_file_paths_non_top_level = extract_pairs.list_file_appropriate_data_range(start_year,\n start_month, end_month,\n base_path_full_non_top_level)\n\n for file_path in valid_file_paths_non_top_level:\n with open(file_path, 'rb') as fop:\n # because that's what this file and others had for output\n csvreadr = csv.reader(fop, delimiter=',', quotechar='|')\n csvreadr.next()\n for line in csvreadr:\n try:\n comm = json.loads(line[0])\n except ValueError:\n print \"BADLY FORMATTED\", line\n continue\n if any (subreddit in comm[\"subreddit\"] for subreddit in subreddit_list):\n #body = re.search('\\\"body\\\":\\\"(.+?)\\\"(,\\\")|(})', line).group(1)\n body = comm[\"body\"] #these two options appear to the be the same speed.\n if body == \"[deleted]\" or body == \"[removed]\":\n continue\n\n subreddit = comm[\"subreddit\"]\n\n text_bodies[subreddit].append(body)\n\n\n # TODO: parameterize the end month here. 
cause what if it's two months or whatever\n base_path_full_top_level = base_path + \"top_level_comments/\"\n valid_file_paths_top_level = extract_pairs.list_file_appropriate_data_range(start_year,\n start_month, end_month-1,base_path_full_top_level)\n\n for file_path in valid_file_paths_top_level:\n with open(file_path, 'rb') as fop:\n # because that's what this file and others had for output\n csvreadr = csv.reader(fop, delimiter=',', quotechar='|')\n csvreadr.next()\n for line in csvreadr:\n try:\n parent = json.loads(line[0])\n child = json.loads(line[1])\n except ValueError:\n print \"BADLY FORMATTED\", line\n continue\n\n for comm in [parent, child]:\n if any (subreddit in comm[\"subreddit\"] for subreddit in subreddit_list):\n #body = re.search('\\\"body\\\":\\\"(.+?)\\\"(,\\\")|(})', line).group(1)\n body = comm[\"body\"] #these two options appear to the be the same speed.\n if body == \"[deleted]\" or body == \"[removed]\":\n continue\n\n subreddit = comm[\"subreddit\"]\n\n text_bodies[subreddit].append(body)\n return text_bodies", "title": "" }, { "docid": "4729b851910250e8d78e057da7e00078", "score": "0.512572", "text": "def get_story(cls, x):\n\n r = []\n t20100101 = datetime.datetime(2010, 1, 1, tzinfo=pytz.utc)\n t20120101 = datetime.datetime(2012, 1, 1, tzinfo=pytz.utc)\n t20130101 = datetime.datetime(2013, 1, 1, tzinfo=pytz.utc)\n t20140101 = datetime.datetime(2014, 1, 1, tzinfo=pytz.utc)\n t20141001 = datetime.datetime(2014, 10, 1, tzinfo=pytz.utc)\n t20150101 = datetime.datetime(2015, 1, 1, tzinfo=pytz.utc)\n # M5 2015/1/1〜\n r += [cls(name=x.name,\n genetic_id=x.id,\n currency_pair=x.currency_pair,\n span=Granularity.M5.value,\n test_start_at=t20150101,\n ai_id=x.ai_id)]\n # H1 2010/1/1〜\n r += [cls(name=x.name,\n genetic_id=x.id,\n currency_pair=x.currency_pair,\n span=Granularity.H1.value,\n test_start_at=t20100101,\n ai_id=x.ai_id)]\n # H1 2012/1/1〜\n r += [cls(name=x.name,\n genetic_id=x.id,\n currency_pair=x.currency_pair,\n span=Granularity.H1.value,\n test_start_at=t20120101,\n ai_id=x.ai_id)]\n # H1 2013/10/1〜\n r += [cls(name=x.name,\n genetic_id=x.id,\n currency_pair=x.currency_pair,\n span=Granularity.H1.value,\n test_start_at=t20130101,\n ai_id=x.ai_id)]\n # H1 2014/10/1〜\n r += [cls(name=x.name,\n genetic_id=x.id,\n currency_pair=x.currency_pair,\n span=Granularity.H1.value,\n test_start_at=t20140101,\n ai_id=x.ai_id)]\n # H1 2015/1/1〜\n r += [cls(name=x.name,\n genetic_id=x.id,\n currency_pair=x.currency_pair,\n span=Granularity.H1.value,\n test_start_at=t20150101,\n ai_id=x.ai_id)]\n # M5 2013/1/1〜\n # r += [cls(name=x.name,\n # genetic_id=x.id,\n # currency_pair=x.currency_pair,\n # span=Granularity.M5.value,\n # test_start_at=t20130101,\n # ai_id=x.ai_id)]\n # M5 2014/1/1〜\n r += [cls(name=x.name,\n genetic_id=x.id,\n currency_pair=x.currency_pair,\n span=Granularity.M5.value,\n test_start_at=t20140101,\n ai_id=x.ai_id)]\n # M1 2014/10/1〜\n # r += [cls(name=x.name,\n # genetic_id=x.id,\n # currency_pair=x.currency_pair,\n # span=Granularity.M1.value,\n # test_start_at=t20141001,\n # ai_id=x.ai_id)]\n # M1 2015/1/1〜\n r += [cls(name=x.name,\n genetic_id=x.id,\n currency_pair=x.currency_pair,\n span=Granularity.M1.value,\n test_start_at=t20150101,\n ai_id=x.ai_id)]\n return r", "title": "" }, { "docid": "bc9a53143ebcb7e3ac64b6e0fa303102", "score": "0.51230866", "text": "def make_quarters_request():\n args = {'command': 'quarters'}\n res = _make_ww_request(args)\n _check_ww_response(res)\n\n df = pd.DataFrame(res.json()['quarters'])\n df['filing_period'] = 
pd.to_datetime(df['filing_period'])\n return df.set_index('filing_period')", "title": "" }, { "docid": "ba1f8f5dca779bccb3c164a7808f39e4", "score": "0.5121964", "text": "def months(q):\n try:\n if q == 1:\n return ['jan', 'feb', 'mar']\n if q == 2:\n return ['apr', 'may', 'jun']\n if q == 3:\n return ['jul', 'aug', 'sep']\n if q == 4:\n return ['oct', 'nov', 'dec']\n else:\n raise\n except:\n print('Not a valid quarter. Enter 1, 2, 3, or 4')\n raise", "title": "" }, { "docid": "2a65fba93a8c3111ed54ea7b47cf12e7", "score": "0.51124316", "text": "def retrievecarbondata(periodstart, periodend, geometrycode):\n\n # Calculate base parameters from all area data\n # This uses numeric equations to calculate linear trends\n now = datetime.datetime.now()\n areadata = Data.objects.filter(geometrycode=geometrycode).order_by('year')\n if areadata.exists() is False: return {}\n baseparameters = calculatebaseparameters(areadata)\n\n # Create blank array for all years in requested range\n data = {}\n for year in range(int(periodstart), 1 + int(periodend)):\n year = str(year)\n data[year] = {'electricity': 0, 'gas': 0}\n\n # Populate blank array with existing data where it exists\n maxyear = 0\n for areadataitem in areadata:\n year = areadataitem.year\n if int(year) > maxyear: maxyear = int(year)\n if year in data:\n if areadataitem.type == 0: \n key = 'electricity'\n conversionfactor = converter_kg_electricity[year] / 1000\n if areadataitem.type == 1: \n key = 'gas'\n conversionfactor = converter_kg_gas[year] / 1000\n data[year][key] = int(conversionfactor * float(areadataitem.value))\n\n # For years outside provided data, estimate using base parameters\n if maxyear < int(periodend):\n extrastart = maxyear + 1\n if extrastart < int(periodstart): extrastart = int(periodstart)\n for year in range(extrastart, 1 + int(periodend)):\n predictionarray = makeprediction(baseparameters, year)\n data[year] = calculateemissions(predictionarray)\n \n return data", "title": "" }, { "docid": "2c019c1a3945dea060f8eec8d47b89d2", "score": "0.5101044", "text": "def get_years(type_: str, value_: str, page: int, step: int):\n years = factory.get_elem_list(Year, type_, value_, page, step)\n return years", "title": "" }, { "docid": "b8e48962e2d993cfb722569c1799f802", "score": "0.5093737", "text": "def barclay_hfi():\n df = pd.read_csv(\"data_parser/Barclay_HFI.csv\")\n df.columns = [\"Year\"] + [j+1 for j in range(12)] \n # df = df.drop([\"Temp\"], axis = 1)\n df.set_index(\"Year\", inplace = True)\n\n first_year = int(df.index[0])\n target_ret_dict = {}\n for i in range(df.shape[0]):\n for j in range(df.shape[1]):\n dt = datetime.datetime(first_year + i, 1+j, 1) + relativedelta(months = 1) - datetime.timedelta(days = 1) \n raw_val = df.iloc[i,j]\n if not df.isna().iloc[i,j]:\n ret = float(raw_val.replace(\"%\", \"\"))/100\n target_ret_dict[dt] = ret\n tdf = pd.Series(target_ret_dict)\n tdf = pd.DataFrame(tdf)\n tdf.columns = [\"Barclay_HFI\"]\n return target_ret_dict, tdf", "title": "" }, { "docid": "5dbd307b16cad6c073027873a54be501", "score": "0.5087606", "text": "def construct_all_holdings(self):\n\n\t\td = dict((k, v) for k, v in [(s, self.current_positions[s]) for s in self.symbol_list])\n\n\t\td['datetime'] = self.start_date\n\t\td['cash'] = self.initial_capital\n\t\tprint(d)\n\t\td['commission'] = 0.0 ## todo why do i need this\n\t\td['total'] = self.initial_capital\n\t\treturn [d]", "title": "" }, { "docid": "2a559e0283fc1f14ee9a0f3e3e9438ca", "score": "0.50796884", "text": "def precipitation():\n # Query all dates and 
thier temperature\n results = session.query(Measurement.date,Measurement.tobs).\\\n filter(func.strftime(\"%y\", Measurement.date) == \"2017\").all()\n\n # Create a dictionary from the row data and append to a list of last_year_tobs\n last_year_tobs = []\n for tobs in results:\n date_tobs_dict = {}\n date_tobs_dict[\"date\"] = Measurement.date\n date_tobs_dict[\"tobs\"] = Measurement.tobs\n \n last_year_tobs.append(date_tobs_dict)\n\n return jsonify(last_year_tobs)", "title": "" }, { "docid": "b3ee76d82e5b449f7e9d124e383fe59b", "score": "0.5077868", "text": "def get_birthday_structure_to_check(got_data):\n presents = dict()\n for month in got_data:\n for citizen in got_data[month]:\n key = str((citizen['citizen_id'], month))\n presents[key] = citizen['presents']\n\n return presents", "title": "" }, { "docid": "fa3f071e9c2e0db528ee1c9bb6db569e", "score": "0.5074117", "text": "def collect_year_growth(markup) -> List[Tuple[float, str]]:\n\n results = []\n rows = BeautifulSoup(\n markup,\n \"html.parser\",\n parse_only=SoupStrainer(\"table\", class_=\"table table-small\"),\n ).find_all(\"tr\")[1:]\n for row in islice(rows, 1, None):\n columns = row.find_all(\"td\")\n name = columns[0].text.strip()\n growth = float(columns[-2].find_all(\"span\")[-1].string.replace(\"%\", \"\"))\n results.append((growth, name))\n return results", "title": "" }, { "docid": "f44897ac90470cc73c81bca4b74acddf", "score": "0.50718236", "text": "def report_dates(start_date, end_date):\n # iterate over every year within date range\n for year in range(start_date.year, end_date.year + 1):\n # find the month range for the year\n month_range = range(1, 13)\n # start and end year cases\n if year == start_date.year:\n month_range = range(start_date.month, 13)\n elif year == end_date.year:\n month_range = range(1, end_date.month + 1)\n # single year case\n if start_date.year == end_date.year:\n month_range = range(start_date.month, end_date.month + 1)\n # iterate over every month in the year\n for month in month_range:\n # find the day range for the year\n day_range = (1, days_in_month(year, month))\n # start and end month cases\n if year == start_date.year and month == start_date.month:\n day_range = (start_date.day, days_in_month(year, month))\n elif year == end_date.year and month == end_date.month:\n day_range = (1, end_date.day)\n # single month case\n if start_date.year == end_date.year and start_date.month == end_date.month:\n day_range = (start_date.day, end_date.day)\n # create the sub reports\n yield (date(year, month, day_range[0]), date(year, month, day_range[1]))", "title": "" }, { "docid": "7b62385b35ba067a0d288bb5d337e9cd", "score": "0.5071464", "text": "def precipitation():\n session = Session(engine)\n last_date = str(session.query(Measurement.date).order_by(Measurement.date.desc()).first())\n last_date_dt = dt.datetime.strptime(last_date, \"('%Y-%m-%d',)\")\n year_ago = last_date_dt - dt.timedelta(days=365)\n precip_results = session.query(Measurement.date, Measurement.prcp).\\\n filter(func.DATE(Measurement.date) > year_ago).all()\n session.close()\n\n date = [result[0] for result in precip_results]\n precip = [result[1] for result in precip_results]\n precip_dict = dict(zip(date, precip))\n\n return jsonify(precip_dict)", "title": "" }, { "docid": "29ef7164fe7b57b60772278d03f49913", "score": "0.50678146", "text": "def _parse_entries(raw_lines):\n\n # First two lines are field names\n entries = []\n\n for group in _get_groups(raw_lines):\n # Refactor date lines onto start of following line and split tabs\n 
entry = {}\n first_values = group[0].split(\" \")\n entry[\"Transaction Status\"] = \" \".join(first_values[:2])\n entry[\"Date\"] = \" \".join(first_values[2:])\n\n entry[\"Description\"] = group[1]\n if \" \" in group[2]:\n entry[\"Amount\"], entry[\"Balance\"] = group[2].split(\" \")\n else:\n # No balance\n entry[\"Amount\"], entry[\"Balance\"] = group[2], \"\"\n\n for field in entry:\n entry[field] = format_value(entry[field], field, DATE_FORMAT)\n\n entries.append(entry)\n\n return entries", "title": "" }, { "docid": "8dcdea2f892615694a846a3ab194b2e2", "score": "0.5064139", "text": "def generate_demo_quarter_data(requests_in_quarter: pd.DataFrame, output_dir: str) -> None:\n def generate_json_demo_data(data_in_range: pd.DataFrame, range_: str, interval_: str) -> None:\n # write json file with demand and wait_time for given range and interval\n demand = compute_metric_on_intervals(data_in_range, interval_, 'demand').to_dict()\n wait_time = compute_metric_on_intervals(data_in_range, interval_, 'wait_time').to_dict()\n\n # convert all entries to strings, and round integers by two\n demand_key, wait_time_key = list(demand.keys())[0], list(wait_time.keys())[0]\n data = {\n list(demand.keys())[0]:\n {str(key): str(demand[demand_key][key])\n for key in demand[demand_key]},\n list(wait_time.keys())[0]:\n {str(key): str(round(wait_time[wait_time_key][key], 2))\n for key in wait_time[wait_time_key]}\n }\n interval_ = 'week' if interval_ == 'week_in_quarter' else interval_\n file_name = f'time_range={range_}&interval={interval_}.json'\n io_lib.create_json_file(file_path=os_lib.join_path(output_dir, file_name), contents=data)\n\n if len(set(requests_in_quarter['quarter'])) != 1:\n raise ValueError('Given data must contain only one quarter type (eg: \\'Fall 2015\\').')\n\n # single file (since single quarter) generated containing daily stats over a quarter\n quarter_term, quarter_year = requests_in_quarter['quarter'].iloc[0].split()\n generate_json_demo_data(data_in_range=requests_in_quarter,\n range_=f'quarter+{quarter_term}_{quarter_year}', interval_='week_in_quarter')\n\n # for each week in quarter, generate a file containing daily stats over a week range\n all_weeks_in_qtr = requests_in_quarter['week_in_quarter'].unique()\n for week_num in all_weeks_in_qtr:\n single_week_data = requests_in_quarter[requests_in_quarter['week_in_quarter'] == week_num]\n generate_json_demo_data(data_in_range=single_week_data,\n range_=f'week+{week_num}', interval_='day')\n\n # for each day in quarter, generate a file containing hourly stats over a day (24 hour) range\n all_recorded_datetimes = pd.Series(data=requests_in_quarter.index)\n dates_in_qtr = all_recorded_datetimes.apply(func=lambda dt: str(dt).split()[0]).unique()\n for date in dates_in_qtr:\n generate_json_demo_data(data_in_range=requests_in_quarter[date],\n range_=f'day+{date}', interval_='hour')", "title": "" }, { "docid": "a614f453fc6605d950c3ecebf58b44fa", "score": "0.50499946", "text": "def calc_quarter(y, m):\n \n # Previous / Next month's year number and month number\n prev_y = y\n prev_m = m - 1\n next_y = y\n next_m = m + 1 \n \n if m == 1:\n prev_m = 12\n prev_y = y - 1\n elif m == 12:\n next_m = 1\n next_y = y + 1\n \n return [(prev_y, prev_m), (y, m), (next_y, next_m)]", "title": "" }, { "docid": "e693893afae45446640c8f3283215749", "score": "0.503061", "text": "def get_buildtime(in_list, start_year, path_list):\n buildtime_dict = {}\n for index, row in in_list.iterrows():\n comm_date = date.parse(row['Grid Date'])\n start_date = 
[comm_date.year, comm_date.month, comm_date.day]\n delta = ((start_date[0] - int(start_year)) * 12 +\n (start_date[1]) +\n round(start_date[2] / (365.0 / 12)))\n if delta < 0:\n delta = 1\n for index, reactor in enumerate(path_list):\n name = row['Unit'].replace(' ', '_')\n country = row['Country']\n file_name = (reactor.replace(\n os.path.dirname(path_list[index]), '')).replace('/', '')\n if (name + '.xml' == file_name):\n buildtime_dict.update({name: (country, delta)})\n return buildtime_dict", "title": "" }, { "docid": "9db1e84c97594469c918acac53ca5f06", "score": "0.50304216", "text": "def list_qtr():\n\tqtr = ['Q1','Q2','Q3','Q4']\n\tfor y in range(2015,2025):\n\t\tfor q in qtr:\n\t\t\tif y == 2015 and q !='Q4':\n\t\t\t\tpass\n\t\t\telse:\n\t\t\t\tyield(str(y)+\"-\"+q)", "title": "" }, { "docid": "bddad394471640c6fa6737765bb3c79a", "score": "0.502508", "text": "def process_query(query: str, dataset: dict) -> dict:\n start_year, end_year, top_n = map(int, query.strip().split(','))\n logger.debug('got query \"%s,%s,%s\"', start_year, end_year, top_n)\n answer = defaultdict(int)\n for key, values in dataset.items():\n for value in values:\n if start_year <= value[0] <= end_year:\n answer[key] += value[1]\n\n answer = sorted(answer.items(), key=lambda x: (-x[1], x[0]))\n if len(answer) < top_n:\n logger.warning(\n 'not enough data to answer, found %s words out of %s for period \"%s,%s\"',\n len(answer), top_n, start_year, end_year\n )\n answer = [list(x) for x in answer[:top_n]]\n answer = {\"start\": start_year, \"end\": end_year, \"top\": answer}\n return answer", "title": "" }, { "docid": "2b4c277dcad4b9dd57edebe05fd3aed2", "score": "0.50178164", "text": "def map_ints_to_dmy(int_list):\n if int_list[1] > 31 or int_list[1] <= 0:\n return None\n\n over_12 = 0\n over_31 = 0\n under_1 = 0\n for i in int_list:\n if 99 < i < DATE_MIN_YEAR or i > DATE_MAX_YEAR:\n return None\n\n if i > 31:\n over_31 += 1\n if i > 12:\n over_12 += 1\n if i <= 0:\n under_1 += 1\n\n if over_31 >= 2 or over_12 == 3 or under_1 >= 2:\n return None\n\n possible_year_splits = [\n [int_list[2], int_list[:2]], # year last\n [int_list[0], int_list[1:]] # year first\n ]\n for [y, rest] in possible_year_splits:\n if DATE_MIN_YEAR <= y <= DATE_MAX_YEAR:\n dm = map_ints_to_dm(rest)\n if dm is not None:\n return {\n \"year\": y,\n \"month\": dm[\"month\"],\n \"day\": dm[\"day\"]\n }\n else:\n return None\n\n for [y, rest] in possible_year_splits:\n dm = map_ints_to_dm(rest)\n if dm is not None:\n y = two_to_four_digit_year(y)\n return {\n \"year\": y,\n \"month\": dm[\"month\"],\n \"day\": dm[\"day\"]\n }", "title": "" }, { "docid": "08d2960f1a0db429fa3eb84cab1e4eaf", "score": "0.501417", "text": "def get_months(self, language):\n # done via naive way as django's having tough time while aggregating on date fields\n entries = self.filter_by_language(language)\n dates = entries.values_list('publication_start', flat=True)\n dates = [(x.year, x.month) for x in dates]\n date_counter = Counter(dates)\n dates = set(dates)\n dates = sorted(dates, reverse=True)\n return [{'date': datetime.date(year=year, month=month, day=1),\n 'count': date_counter[year, month]} for year, month in dates]", "title": "" }, { "docid": "497d7f84b560524327016254f3f0a251", "score": "0.50086653", "text": "def dictify(dataframe):\n data = defaultdict(dict)\n data7 = defaultdict(dict)\n companies = set()\n invests = set()\n\n for idx, row in dataframe.iterrows():\n #Date filter\n try:\n dt = datetime.strptime(row['日期(time)'], \"%Y.%m.%d\")\n except ValueError 
as ve:\n print(ve, row['日期(time)'])\n\n company = row['公司(company)']\n companies.add(company)\n round_ = row['融资轮数(round)']\n invests_ = eval(row['投资机构(invests)'])\n assert (isinstance(invests_, list))\n\n if '投资方未透露' in invests_:\n invests_ = []\n\n if dt.year < 2017:\n if round_ not in data[company]:\n data[company][round_] = invests_\n invests = invests.union(invests_)\n else:\n if round_ not in data[company]:\n data7[company][round_] = invests_\n invests = invests.union(invests_)\n\n return data, data7, companies, invests", "title": "" }, { "docid": "d67cd23dd40cad97ec57b9061b10e1fc", "score": "0.49964508", "text": "def yearly_dates():\n start_date = datetime.date.today()\n return rr.rrule(\n rr.YEARLY,\n dtstart=start_date,\n count=1,\n byweekday=(rr.FR(4)),\n bymonth=(12)\n )", "title": "" }, { "docid": "86815376808e43e44b73c14a1be75480", "score": "0.49936873", "text": "def man2df(mandict: dict, year1: int = 1) -> pd.DataFrame:\n rows = []\n baseyear = year1 # roundabout\n for iofe in range(mandict[\"iofe\"]):\n for iyear in range(mandict[\"inyr\"]):\n year = iyear + baseyear\n scenyr = mandict[\"rotations\"][iyear][iofe][\"yearindex\"]\n ncrop = mandict[\"scens\"][scenyr - 1][\"ntype\"]\n tilseq = mandict[\"scens\"][scenyr - 1][\"tilseq\"]\n plant_date = None\n if mandict[\"surfeffects\"]:\n for surfeff in mandict[\"surfeffects\"][tilseq - 1][\"tills\"]:\n op = surfeff[\"op\"]\n if (\n mandict[\"operations\"][op - 1][\"scecomment\"].find(\n \"Planter\"\n )\n > -1\n ):\n doy = surfeff[\"mdate\"]\n plant_date = datetime.date(\n year, 1, 1\n ) + datetime.timedelta(days=doy - 1)\n rows.append(\n {\n \"year\": year,\n \"ofe\": iofe + 1,\n \"plant_date\": plant_date,\n \"crop_name\": mandict[\"crops\"][ncrop - 1][\"crpnam\"],\n }\n )\n return pd.DataFrame(rows)", "title": "" }, { "docid": "19e01f68ce527c424dc544f4eb2223ee", "score": "0.49803594", "text": "def fundings(bank):\n conn = engine.connect()\n loanPie_df = pd.read_sql(\"select ApprovalYear as FundingYear, sum(grossApproval) as TotalApproval from bank_data2 group by 1,2\", conn)\n all_fundings = loanPie_df.to_json(orient='records')\n\n return all_fundings", "title": "" }, { "docid": "05bc421beb0d0b0d9b1be964237322ea", "score": "0.49758768", "text": "def month_dict_constructor(line, grouped, list_group):\n id_, length, start, end = grouped.groups()\n ids, lengths, starts, ends = list_group\n ids.append(id_)\n lengths.append(int(length))\n starts.append(int(start))\n ends.append(int(end))\n return list_group", "title": "" }, { "docid": "dfca2bae9f227ee16be273afc25ce323", "score": "0.49752215", "text": "def scale_series(numerator, denominator):\n data = {}\n for date, value in numerator.items():\n if date in denominator:\n if denominator[date] > 0:\n data[date] = value / denominator[date]\n else:\n data[date] = 0\n else:\n try:\n numerator_year = get_year(date)\n for i in range(0, MAX_DENOMINATOR_BACK_YEAR + 1):\n year = str(numerator_year - i)\n if year in denominator:\n if denominator[year] > 0:\n data[date] = value / denominator[year]\n else:\n data[date] = 0\n break\n except ValueError:\n return {}\n return data", "title": "" }, { "docid": "480976a0fa9ccd7bc9d17fd3803e162f", "score": "0.49749124", "text": "def get_yearly_profit(self):\n\n data = (TradingDay.objects\n .filter(user=self.user)\n .filter(account=self.account)\n .annotate(year=Year(\"date_created\"))\n .values(\"year\")\n .annotate(total=Sum(\"profit\"))\n .order_by(\"year\"))\n\n data_dict = {}\n for year in data:\n try:\n total = round(float(year[\"total\"]), 2)\n 
except:\n total = 0\n data_dict[year[\"year\"]] = total\n\n return data_dict", "title": "" }, { "docid": "95350e455ff7fe7b6d35dcfded94f458", "score": "0.49613392", "text": "def range_year(some_data):\n print(\"Unique Year is {} \".format(some_data.FiscalYear.unique()))", "title": "" }, { "docid": "d3efc4ea4f829dfa9597ddccdfc0861d", "score": "0.49568775", "text": "def getSeasonalProbabilities(probability_collection, year, band_names, reduce_method='median', season_list = [['winter',-1,12,1,0,2,'end'],['spring',0,3,1,0,5,'end'],['summer',0,6,1,0,8,'end'],['fall',0,9,1,0,11,'end']], include_difference=True, year_difference=1, image_name='season_probs_{}'):\n season_changes = []\n year = int(year)\n for season_definition in season_list:\n season_name = season_definition[0]\n season_name = season_name.lower()\n \n season_start_year_position = season_definition[1]\n season_start_month = season_definition[2]\n season_start_day = season_definition[3]\n season_end_year_position = season_definition[4]\n season_end_month = season_definition[5]\n season_end_day = season_definition[6]\n \n season_start_year_firstYear = year+season_start_year_position\n season_end_year_firstYear = year+season_end_year_position\n \n if include_difference:\n season_start_year_secondYear = year+season_start_year_position+year_difference\n season_end_year_secondYear = year+season_end_year_position+year_difference\n \n if season_start_day == 'end':\n season_firstYear_start_day = calendar.monthrange(season_start_year_firstYear, int(season_start_month))[1]\n if include_difference:\n season_secondYear_start_day = calendar.monthrange(season_end_year_firstYear, int(season_start_month))[1]\n \n else:\n season_firstYear_start_day = season_start_day\n if include_difference:\n season_secondYear_start_day = season_start_day\n \n if season_end_day == 'end':\n season_firstYear_end_day = calendar.monthrange(season_end_year_firstYear, int(season_end_month))[1]\n if include_difference:\n season_secondYear_end_day = calendar.monthrange(season_start_year_secondYear, int(season_end_month))[1]\n \n else:\n season_firstYear_end_day = season_end_day\n if include_difference:\n season_secondYear_end_day = season_end_day\n \n season_firstYear_start = '{}-{}-{}'.format(season_start_year_firstYear, season_start_month, season_firstYear_start_day)\n season_firstYear_end = '{}-{}-{}'.format(season_end_year_firstYear, season_end_month, season_firstYear_end_day)\n \n if include_difference:\n season_secondYear_start = '{}-{}-{}'.format(season_start_year_secondYear, season_start_month, season_secondYear_start_day)\n season_secondYear_end = '{}-{}-{}'.format(season_end_year_secondYear, season_end_month, season_secondYear_end_day) \n \n if reduce_method=='mean':\n season_image = probability_collection.filterDate(season_firstYear_start,season_firstYear_end).reduce(ee.Reducer.mean()).rename(band_names)\n if include_difference:\n diff_image = getTemporalProbabilityDifference(probability_collection, season_firstYear_start, \n season_firstYear_end, season_secondYear_start, season_secondYear_end, reduce_method='mean').rename(band_names)\n else:\n season_image = probability_collection.filterDate(season_firstYear_start,season_firstYear_end).reduce(ee.Reducer.median()).rename(band_names)\n if include_difference:\n diff_image = getTemporalProbabilityDifference(probability_collection, season_firstYear_start, \n season_firstYear_end, season_secondYear_start, season_secondYear_end, reduce_method='median').rename(band_names)\n \n season_image = 
season_image.set('system:index','{}_start'.format(season_name))\n \n season_changes.append(season_image)\n \n if include_difference:\n diff_image = diff_image.set('system:index','{}_difference'.format(season_name))\n season_changes.append(diff_image) \n \n season_changes = ee.ImageCollection(season_changes) \n season_changes = season_changes.toBands()\n season_changes = season_changes.set('system:index',image_name.format(year))\n season_changes = season_changes.set('system:time_start',ee.Date(season_firstYear_start))\n season_changes = season_changes.set('system:time_end',ee.Date(season_firstYear_end))\n return season_changes", "title": "" }, { "docid": "14e65b1e68af69135b958ef514ca8f1d", "score": "0.49498585", "text": "def get_contracts_report_structure():\n return [\n {\n 'header': 'Contract Name',\n 'field': 'contract__description',\n 'type': '',\n 'value': ''\n },\n {\n 'header': 'Contract Number',\n 'field': 'contract__number',\n 'type': '',\n 'value': ''\n },\n {\n 'header': 'Contract Type',\n 'field': 'contract__type', # choicefield\n 'type': 'choice',\n 'value': 'CONTRACT_TYPES'\n },\n {\n 'header': 'Contract Start',\n 'field': 'contract__start_date',\n 'type': '',\n 'value': ''\n },\n {\n 'header': 'Contract End',\n 'field': 'contract__end_date',\n 'type': '',\n 'value': ''\n },\n {\n 'header': 'Product ID',\n 'field': 'item__ndc',\n 'type': '',\n 'value': ''\n },\n {\n 'header': 'Product Type',\n 'field': 'created_by',\n 'type': 'static',\n 'value': 'NDC'\n },\n {\n 'header': 'Product Name',\n 'field': 'item__description',\n 'type': '',\n 'value': ''\n },\n {\n 'header': 'Unit Measure',\n 'field': 'updated_by',\n 'type': 'static',\n 'value': 'EA'\n },\n {\n 'header': 'Unit Contract Price',\n 'field': 'price',\n 'type': '',\n 'value': ''\n },\n {\n 'header': 'Product Start',\n 'field': 'start_date',\n 'type': '',\n 'value': ''\n },\n {\n 'header': 'Product End',\n 'field': 'end_date',\n 'type': '',\n 'value': ''\n },\n {\n 'header': 'Status',\n 'field': 'status', # choicefield\n 'type': 'choice',\n 'value': 'STATUSES'\n }\n ]", "title": "" }, { "docid": "9131645c75deb9557b11be33a04a010f", "score": "0.4946772", "text": "def parse_encost(fpath: Path) -> Tuple[List[dict], int]:\n\n fname = fpath.with_suffix(\"\").name\n year = int(f\"20{fname.replace('encost', '')}\")\n print(f\"Start parsing {fpath=}, {year=}\")\n\n with open(fpath, \"r\") as f:\n content = f.read()\n lines = [x.strip() for x in content.splitlines()]\n\n region = None\n years = None\n escalation_start_year = None\n fuel = None\n prices = None\n results = []\n\n for i, line in enumerate(lines):\n if not line:\n region = None\n years = None\n # print(\"Reset.\\n\")\n continue\n\n if region is None:\n region = line\n # print(f\"{region=}\")\n continue\n\n if years is None:\n years = [int(x) for x in line.split(\" \")]\n if len(years) != NUMBER_YEARS:\n print(\n f\"Warning: expected {NUMBER_YEARS} years, \"\n f\"got {len(years)}: {years}\"\n )\n if years[0] != year:\n raise ValueError(\n f\"Given the name of the file {fpath.name=}, we assumed \"\n f\"we would find {year=} but got {years[0]}\"\n )\n if escalation_start_year is not None:\n if escalation_start_year != years[1]:\n raise ValueError(\"Inconsistent escalation start year\")\n\n escalation_start_year = years[1]\n continue\n\n if fuel is None:\n fuel = line\n prices = None\n continue\n\n if prices is None:\n prices = [float(x) for x in line.split(\" \")]\n if len(prices) != NUMBER_YEARS:\n print(\n f\"Warning: expected {NUMBER_YEARS} years, \"\n f\"got 
{len(prices)} prices: {prices}\"\n )\n\n escalations = [x / prices[0] for x in prices[1:]]\n this_dict = {\n \"region\": region,\n \"fuel\": fuel,\n \"escalations\": dict(zip(years[1:], escalations)),\n # 'prices': dict(zip(years, prices)),\n }\n results.append(this_dict)\n fuel = None\n\n return results, escalation_start_year", "title": "" }, { "docid": "231a11796b5774bef7ec6e1244036093", "score": "0.4946276", "text": "def yearly_return(stockList):\n for stock in stockList: # Iterate through list of stock dictionaries\n current_date = datetime.strptime(today,\"%m/%d/%Y\") # Create datetome object from string of current date\n purchase_date = datetime.strptime(stock['purchaseDate'],\"%m/%d/%Y\") # Create datetime object from string of sotck purchase data.\n no_days = (current_date-purchase_date).days # Calc number of days between two datetime objects. \n yearlyVal = 365.2425 # Constant year value\n yearlyReturn = (stock['percentage_yield']/(no_days/yearlyVal)) * 100 # Calculate the perctnage of yearly loss/earnings for each stock\n stock['yearly_return'] = yearlyReturn # Append new value tp yearly_return in stock dictionary.", "title": "" }, { "docid": "4faf88c15e5a3aec669db1e795f16bde", "score": "0.49412447", "text": "def precipitation():\n last_day = session.query(Measurement.date, Measurement.prcp).order_by(Measurement.date.desc()).first()[0]\n last_year = str(dt.datetime.strptime(last_day, \"%Y-%m-%d\") - dt.timedelta(days=366))\n\n precipitation = session.query(Measurement.date, Measurement.prcp).\\\n filter(Measurement.date >= last_year, Measurement.date <= last_day).\\\n order_by(Measurement.date).all()\n prcp_dict = {date: prcp for date, prcp in precipitation}\n return jsonify(prcp_dict)", "title": "" }, { "docid": "6c21ca33f32ed18b643781b52773f115", "score": "0.49346516", "text": "def create_country_yearly_data(country_data: List[CountryTemperature],\r\n nation: str, year: int, end_year: int) -> List[CountryTemperature]:\r\n country_only_data = [row for row in country_data if row.country == nation]\r\n data_so_far = []\r\n for i in range(year, end_year):\r\n sum_temperature = []\r\n for row in country_only_data:\r\n if int(row.date.strftime(\"%Y\")) < i:\r\n pass\r\n elif int(row.date.strftime(\"%Y\")) == i:\r\n sum_temperature.append(row.temperature)\r\n elif int(row.date.strftime(\"%Y\")) >= i:\r\n if isinstance(sum_temperature[0], float) is True:\r\n data_so_far.append(CountryTemperature(datetime.date(i, 1, 1),\r\n statistics.mean(sum_temperature), nation))\r\n break\r\n return data_so_far", "title": "" }, { "docid": "5069fe1c54a9e14d982862a8b5ea1c52", "score": "0.49336904", "text": "def get_date_endpoints(q=None, year=2018, kind='year'):\n if kind=='year':\n return (dt.date(year, 1, 1), dt.date(year, 12, 31))\n elif kind=='quarter':\n return get_quarter_start_end(q,year=year)", "title": "" }, { "docid": "d337754dc0ebdddf6127a503ddd7ddec", "score": "0.49279833", "text": "def fetch_subset(start_yr, end_yr):\r\n for yr in range(start_yr, end_yr+1):\r\n fetch_single(yr)", "title": "" }, { "docid": "a9f48b9e08c801f08b97784d2d300ae5", "score": "0.49251616", "text": "def get_crime_at_year(year, data):\n return {i: data[i] for i in data if str(year) in i}", "title": "" }, { "docid": "8c013745577336b557c09e90112490b4", "score": "0.49234694", "text": "def processGroup(data):\n fillAges(data)\n titles = findTitles(data)\n names = findLastNames(data)\n\n return {'titles': titles, 'names': names}", "title": "" }, { "docid": "ee19dfb7f1f4afbcc3a75e19ea1540d7", "score": "0.49151674", "text": 
"def get_all_records():\n season_records = {}\n round_records = {}\n\n round_scores = Tip.objects \\\n .exclude(afl_fixture__round__status='Scheduled') \\\n .exclude(afl_fixture__round__is_finals=True) \\\n .filter(afl_fixture__round__season=2014) \\\n .values('afl_fixture__round__season', 'afl_fixture__round', 'club') \\\n .annotate(\n round_total=Sum('total'),\n round_winners=Sum('winner_score'),\n round_margins=Sum('margin_score'),\n round_crowds=Sum('crowd_score'),\n round_bogs=Sum('bog_score')\n ) \\\n .order_by('afl_fixture__round__season', 'club', 'afl_fixture__round')\n\n # Get round records\n def _season_key(row):\n return (row['afl_fixture__round__season'], row['club'])\n\n# def _max(group, key):\n# '''\n# Return the maximum score and the round it was scored in the group\n# '''\n# sorted_group = sorted(group, key=)\n#\n# return (row['afl_fixture__round__season'], row['club'])\n#\n# def _season_min_key(row):\n# return (row['afl_fixture__round__season'], row['club'])\n\n for attr in ('total', 'winners', 'margins', 'crowds', 'bogs'):\n attr_key = 'round_%s' % attr\n scores = round_scores.order_by(\n 'afl_fixture__round__season',\n 'club',\n attr_key,\n 'afl_fixture__round'\n )\n for key, group in groupby(scores, key=_season_key):\n club_records = {}\n\n group = list(group)\n season = key[0]\n club = Club.objects.get(id=key[1])\n if club in club_records:\n club_records[club].update(\n {\n 'total_%s' % attr: sum(g[attr_key] for g in group),\n 'max_%s' % attr: max(g[attr_key] for g in group),\n 'min_%s' % attr: min(g[attr_key] for g in group),\n 'avg_%s' % attr: avg(g[attr_key] for g in group),\n }\n )\n else:\n club_records[club] = {\n 'total_%s' % attr: sum(g[attr_key] for g in group),\n 'max_%s' % attr: max(g[attr_key] for g in group),\n 'min_%s' % attr: min(g[attr_key] for g in group),\n 'avg_%s' % attr: avg(g[attr_key] for g in group),\n }\n# 'total_winners': sum(g['round_winners'] for g in group),\n# 'max_winners': max(g['round_winners'] for g in group),\n# 'min_winners': min(g['round_winners'] for g in group),\n# 'avg_winners': avg(g['round_winners'] for g in group),\n# 'total_margins': sum(g['round_margins'] for g in group),\n# 'max_margins': max(g['round_margins'] for g in group),\n# 'min_margins': min(g['round_margins'] for g in group),\n# 'avg_margins': avg(g['round_margins'] for g in group),\n# 'total_crowds': sum(g['round_crowds'] for g in group),\n# 'max_crowds': max(g['round_crowds'] for g in group),\n# 'min_crowds': min(g['round_crowds'] for g in group),\n# 'avg_crowds': avg(g['round_crowds'] for g in group),\n# 'total_bogs': sum(g['round_bogs'] for g in group),\n# 'max_bogs': max(g['round_bogs'] for g in group),\n# 'min_bogs': min(g['round_bogs'] for g in group),\n# 'avg_bogs': avg(g['round_bogs'] for g in group)\n\n # Winners bonus isn't included in tips scores from 2012 so adjust\n # total scores\n if attr == 'total':\n for g in group:\n if g['afl_fixture__round__season'] >= 2012:\n if g['round_winners'] == 54:\n club_records[club]['total_total'] += 10\n\n round_records[season] = club_records", "title": "" }, { "docid": "81d74af9fff9fb759fd7673c30a2799a", "score": "0.49082693", "text": "def stock_performance(start_year,end_year,stocks):\n \n # obtaining the TICKER symbols of the stocks\n stock = stocks\n \n # create a list to obtain all the returns of the stock\n all_returns = []\n\n # obtaining Quarterly returns using quarterly_returns() function\n stock_data = quarterly_returns(start_year,end_year,stocks)\n \n\n # for each TICKER symbol in stock \n for abbv in 
stock:\n data = stock_data[abbv]\n\n \n # creating pyfolio tearsheet\n# pf.create_returns_tear_sheet(data)\n\n # changing into numpy array for calculation\n data = np.array(data)\n \n # creating a list to remove the NaN and make it a list of float values \n val = []\n for i in data:\n if np.isnan(i):\n i = float(0)\n val.append(i)\n else:\n i = float(i)\n val.append(i)\n \n # normalising to 100\n norm = 100\n for i in range(len(val)):\n push = (1+val[i])*norm\n val[i] = push\n norm = push\n\n # adding the normalised returns of all stocks to the all_returns[] list\n all_returns.append(val)\n\n return all_returns", "title": "" }, { "docid": "d6fbcc994afc0988968438af3e806076", "score": "0.49082536", "text": "def get_year(self, year):\n return [item\n for month in range(1, 13)\n for item in self.get_month(year, month)]", "title": "" }, { "docid": "c091f15c8d7769426b1f979cbcb84098", "score": "0.4901673", "text": "def get_years(self):\n \n items = []\n\n # Get a datetime object\n now = datetime.datetime.now()\n currentYear = now.year\n startYear = 1950\n \n for eachYear in range(startYear, (currentYear + 5), 1):\n items.append( str(eachYear) ) \n\n return items", "title": "" }, { "docid": "c77ace9ed99b3f301ce91e0ba078e42f", "score": "0.48991683", "text": "def get_quarters(n=3):\n now = datetime.datetime.now()\n year = now.year\n quarter = pd.Timestamp(now).quarter\n quarters = [(year, quarter)]\n for i in range(n):\n if quarter == 1:\n quarter = 4\n year -= 1\n else:\n quarter -= 1\n quarters.append((year, quarter))\n quarters.reverse()\n return quarters", "title": "" }, { "docid": "d13f2926f48c21cdb18b6be729d2e9b4", "score": "0.4897494", "text": "def construct_all_holdings(self):\r\n d=dict((k,v) for k,v in [(s,0.0) for s in self.symbol_list])\r\n d['datetime']=self.start_date\r\n d['cash']=self.initial_capital\r\n d['commisson']=0.0\r\n d['total']=self.initial_capital\r\n \r\n return [d]", "title": "" }, { "docid": "6ac6b82e1ffe05a04c3fafb32c5465b0", "score": "0.48928243", "text": "def agg_by_state(fuel_codes, df_jan_to_nov, df_dec):\n\n # output of function = makes dictionary of states\n fuel_per_state = { }\n\n # note -- the retrieved row in only a copy, not the original. Dataframes are good for data manipulation (e.g. pivot table), but are not very mutable.\n for idx,row in enumerate(df_jan_to_nov.values):\n\n # get all the data, and insert into nested dict\n plant_name, state, fuel_type, mwh_gen = row[0], row[1], row[2], row[3:]\n # convert fuel code, to actual fuel name\n fuel_type_name = fuel_codes[fuel_type]\n\n starting_totals = [0,0,0,0,0,0,0,0,0,0,0,0]\n\n # only add to dict, if not \"State Incremental Fuel Level\":\n if plant_name != \"State-Fuel Level Increment\":\n\n # add to dict, summing the list per month:\n if state not in fuel_per_state:\n fuel_per_state[state] = {\n \"gas\":starting_totals,\n \"coal\":starting_totals,\n \"solar\":starting_totals,\n \"wind\":starting_totals,\n \"nuclear\": starting_totals,\n \"hydro\":starting_totals,\n \"other\":starting_totals\n }\n\n already_in_dict = fuel_per_state[state][fuel_type_name]\n replace_in_dict = [ (mwh_gen[i]+int(already_in_dict[i]) ) for i in range(len(mwh_gen)) ]\n\n fuel_per_state[state][fuel_type_name] = replace_in_dict\n\n # add the december fuel data. 
make sure to add the 12th month\n for idx,row in enumerate(df_dec.values):\n plant_name = row[0] # for each row, get the plant name\n\n # make sure we know the state, before we take the data\n if plant_name != \"State-Fuel Level Increment\":\n\n # get all the data, and insert into nested dict\n plant_name, state, fuel_type, mwh_gen = row[0], row[1], row[2], row[3:]\n # convert fuel code, to actual fuel name\n fuel_type_name = fuel_codes[fuel_type]\n\n # add to dict, as the 12th month in the list:\n in_dict = fuel_per_state[state][fuel_type_name]\n if len(in_dict) < 12:\n in_dict.append(int(mwh_gen))\n else:\n in_dict[11] += int(mwh_gen)\n fuel_per_state[state][fuel_type_name] = in_dict\n\n return fuel_per_state", "title": "" }, { "docid": "190d34f53bce60cd1b7f18050c4a0da7", "score": "0.4891378", "text": "def get_dated_items(self, year, month):\r\n date_field = self.get_date_field()\r\n date = _date_from_string(year, '%Y', month, self.get_month_format())\r\n\r\n # Construct a date-range lookup.\r\n first_day, last_day = _month_bounds(date)\r\n lookup_kwargs = {\r\n '%s__gte' % date_field: first_day,\r\n '%s__lt' % date_field: last_day,\r\n }\r\n\r\n allow_future = self.get_allow_future()\r\n qs = self.get_dated_queryset(allow_future=allow_future, **lookup_kwargs)\r\n date_list = self.get_date_list(qs, 'day')\r\n\r\n return (date_list, qs, {\r\n 'month': date,\r\n 'next_month': self.get_next_month(date),\r\n 'previous_month': self.get_previous_month(date),\r\n })", "title": "" } ]
3d6d60c4d0a2893d1edeb7e1161b1596
Pull ungoogledchromium repositories, run scripts and apply patches.
[ { "docid": "706f0331272d80f78e074f013adb38fa", "score": "0.6706228", "text": "def prepare(config):\n # Checkout ungoogled-chromium\n uc_git_origin = 'https://github.com/Eloston/ungoogled-chromium.git'\\\n if ungoogled_chromium_origin is None else ungoogled_chromium_origin\n git_maybe_checkout(\n uc_git_origin,\n 'ungoogled-chromium',\n branch=ungoogled_chromium_version, reset=True)\n if config.target_os == 'android':\n git_maybe_checkout(\n 'https://github.com/ungoogled-software/ungoogled-chromium-android.git',\n 'ungoogled-chromium-android',\n branch=ungoogled_chromium_android_version, reset=True)\n sp.check_call(['patch', '-p1', '--ignore-whitespace', '-i',\n os.path.join('ungoogled-chromium-android', 'patches', 'Other', 'ungoogled-main-repo-fix.patch'),\n '--no-backup-if-mismatch'])\n\n domain_substitution_cache_file = \"domsubcache.tar.gz\"\n if os.path.exists(domain_substitution_cache_file):\n os.remove(domain_substitution_cache_file)\n\n # ungoogled-chromium scripts\n # Do not check here because prune script return non-zero for non-existing files\n cwd = SRC_DIR\n uc_dir = 'ungoogled-chromium'\n utils_dir = os.path.join(uc_dir, 'utils')\n sp.run([os.path.join(utils_dir, 'prune_binaries.py'),\n SRC_DIR, filter_list_file(\n uc_dir, 'pruning.list',\n excludes=['buildtools/linux64/gn'])])\n sp.check_call([os.path.join(utils_dir, 'patches.py'),\n 'apply', 'src', os.path.join(uc_dir, 'patches')])\n sp.check_call([os.path.join(utils_dir, 'domain_substitution.py'),\n 'apply', '-r', os.path.join(uc_dir, 'domain_regex.list'),\n '-f', filter_list_file(uc_dir, 'domain_substitution.list'),\n '-c', domain_substitution_cache_file, 'src'])\n\n # ungoogled-chromium-android scripts\n if config.target_os == 'android':\n if os.path.exists(domain_substitution_cache_file):\n os.remove(domain_substitution_cache_file)\n\n uca_dir = 'ungoogled-chromium-android'\n sp.run([os.path.join(utils_dir, 'prune_binaries.py'),\n 'src', filter_list_file(uca_dir, 'pruning_2.list')])\n sp.check_call([os.path.join(utils_dir, 'patches.py'),\n 'apply', 'src', os.path.join(uca_dir, 'patches')])\n sp.check_call([os.path.join(utils_dir, 'domain_substitution.py'),\n 'apply', '-r', os.path.join(uc_dir, 'domain_regex.list'),\n '-f', filter_list_file(uca_dir, 'domain_sub_2.list'),\n '-c', domain_substitution_cache_file, 'src'])", "title": "" } ]
[ { "docid": "ecac29780f2dec0c3d136abb6f000c6c", "score": "0.57570237", "text": "def exec_main():\n\n try_get_lock()\n\n # Allow user to force a SHA1 from env, mostly for testing purpose without\n # putting pressure on Github API (anon is rate-limited to 60 reqs/hr from the\n # same IP address.\n try:\n sha_to_run = os.environ.get('ds_automation_force_sha').split(',')\n except:\n sha_to_run = [ ]\n\n # ensure we have a real dir name\n assert len(DEEPSPEECH_CLONE_PATH) > 3\n assert os.path.isabs(DEEPSPEECH_CLONE_PATH)\n\n # Creating holding directories if needed\n if not os.path.isdir(CACHE_DIR):\n os.makedirs(CACHE_DIR)\n\n if not os.path.isdir(DATA_DIR):\n os.makedirs(DATA_DIR)\n\n if len(sha_to_run) == 0:\n current = get_last_sha1()\n if len(current) == 40:\n print('Existing SHA1:', current, 'fetching changes')\n sha_to_run, rt = get_new_commits(current)\n if sha_to_run is not None and len(sha_to_run) is 0:\n print(\"No new SHA1, got HTTP status:\", rt)\n sys_exit_safe()\n elif sha_to_run is None:\n print(\"Something went badly wrong, unable to use Github compare\")\n sys_exit_safe()\n else:\n # Ok, we do not have an existing SHA1, let us get one\n print('No pre-existing SHA1, fetching refs')\n sha1, rt = get_current_sha1()\n if sha1 is None:\n print(\"No SHA1, got HTTP status:\", rt)\n sys_exit_safe()\n sha_to_run = [ sha1 ]\n else:\n print(\"Using forced SHA1 from env\")\n\n print(\"Will execute for\", sha_to_run)\n\n for sha in sha_to_run:\n if not ensure_git_clone(sha):\n print(\"Error with git repo handling.\")\n sys_exit_safe()\n\n print(\"Ready for\", sha)\n\n print(\"Let us place ourselves into the git clone directory ...\")\n root_dir = os.getcwd()\n os.chdir(DEEPSPEECH_CLONE_PATH)\n\n print(\"Starting GPU nvidia-smi monitoring\")\n gpu_usage_csv, gpu_usage_charts = ensure_gpu_usage(root_dir)\n gu = GPUUsage(csvfile=gpu_usage_csv)\n gu.start()\n\n print(\"Do the training for getting WER computation\")\n exec_wer_run()\n\n print(\"Producing GPU monitoring charts\")\n gu.stop()\n GPUUsageChart(source=gpu_usage_csv, basename=gpu_usage_charts)\n\n print(\"Save progress\")\n write_last_sha1(sha)\n\n print(\"Let us place back to the previous directory %s ...\" % root_dir)\n os.chdir(root_dir)\n\n print(\"Getting rid of git clone\")\n wipe_git_clone()\n\n release_lock()", "title": "" }, { "docid": "3c5be54f5511e7bd55288895dd7d66b4", "score": "0.57431793", "text": "def update_code():\n with cd('/srv/directedstudies'):\n sudo('git pull')", "title": "" }, { "docid": "dc8f6b8ddb8d77ac432c93cf3b8061a7", "score": "0.57357633", "text": "def _update(self):\n self._system.execute_command(\"git\", [\"pull\"], cwd=self.get_install_path(), verbose=True)\n self._install()", "title": "" }, { "docid": "dc8f6b8ddb8d77ac432c93cf3b8061a7", "score": "0.57357633", "text": "def _update(self):\n self._system.execute_command(\"git\", [\"pull\"], cwd=self.get_install_path(), verbose=True)\n self._install()", "title": "" }, { "docid": "894e9e90d2a206ddfafb9ffc4a6440a6", "score": "0.5680683", "text": "def site_updaterepos():\n cmd = [\"git\", \"submodule\", \"foreach\", \"--recursive\", \"git\", \"pull\", \"origin\", \"main\"]\n with Popen(cmd, env=os.environ) as proc:\n proc.wait()", "title": "" }, { "docid": "2159d9b3b5047cd35571d00d483fe1b5", "score": "0.5612964", "text": "def fetchRepo():\n # get covid19 data\n if os.path.isdir(COVID_19_DATA_DIR):\n os.system(f\"cd {COVID_19_DATA_DIR} & git pull\")\n else:\n os.system(f\"git clone {COVID_19_DATA_URL} {COVID_19_DATA_DIR}\")\n \n # get populaion data\n if 
os.path.isdir(POPULATION_DATA_DIR):\n os.system(f\"cd {POPULATION_DATA_DIR} & git pull\")\n else:\n os.system(f\"git clone {POPULATION_DATA_URL} {POPULATION_DATA_DIR}\")\n \n # get country to region map\n if os.path.isdir(COUNTRY_TO_REGION_MAPPING_DIR):\n os.system(f\"cd {COUNTRY_TO_REGION_MAPPING_DIR} & git pull\")\n else:\n os.system(f\"git clone {COUNTRY_TO_REGION_MAPPING_URL} {COUNTRY_TO_REGION_MAPPING_DIR}\")", "title": "" }, { "docid": "fff4fcb582800e47841f1ace3214240c", "score": "0.55779135", "text": "def update():\n with cd(site_path):\n run('git pull --all')\n reload()", "title": "" }, { "docid": "bbf80f8ecc65941a6ab5d8e192f0befc", "score": "0.5454635", "text": "def main():\n #gotoMYChrome()\n #returndockspot()\n gotochrome()\n copyurl()\n getsearchbar()\n pasteurl()", "title": "" }, { "docid": "a95012a42bda14b7c48c1c4af2e35a48", "score": "0.5423222", "text": "def _post_install():\n import subprocess\n from distutils import log\n log.set_verbosity(log.DEBUG)\n\n try:\n print(\"Downloading most resent gitignores from Github.com...\")\n pass\n except:\n log.warn(\"Post Install Failed.\")\n pass", "title": "" }, { "docid": "04ec5ac856415af6ead54d602f1f5c75", "score": "0.53963065", "text": "def dip_upgrade(names):\n for name in names:\n with settings.getapp(name) as app:\n try:\n app.repo.pull()\n except AttributeError:\n pass", "title": "" }, { "docid": "d6661b592ad5f70a24d38779f5047838", "score": "0.53792614", "text": "def puller(options):\n \n executor([\"git\", \"pull\"] + options)", "title": "" }, { "docid": "67db9a8df4e58978cd77cabcdd5b3e14", "score": "0.53673506", "text": "def perform_update():\n if os.path.islink(\"./data\"):\n os.rename(\"./data\", \"./data_updating\")\n try:\n os.makedirs(\"./data\")\n except OSError as e:\n print(\"System update error \", e)\n\n command = u\"git config core.filemode true\"\n subprocess.call(command.split())\n \n command = u\"git pull\"\n subprocess.call(command.split())\n \n if os.path.isdir(\"./data_updating\"):\n try:\n shutil.rmtree(\"./data\")\n os.rename(\"./data_updating\", \"./data\")\n except OSError as e:\n print(\"System update error \", e)", "title": "" }, { "docid": "e01b1e7c6a0334d936addaf94797a622", "score": "0.5357921", "text": "async def source(self, ctx, *, command: str = None):\r\n #chrome_options = Options()\r\n #chrome_options.add_argument(\"--headless\")\r\n #chrome_options.add_argument('--no-sandbox')\r\n #driver = webdriver.Chrome('/usr/lib/chromium-browser/chromedriver', options=chrome_options)\r\n #driver2 = webdriver.Chrome('/usr/lib/chromium-browser/chromedriver', options=chrome_options)\r\n source_url = 'https://github.com/proguy914629bot/LyricMaster'\r\n branch = '1.0.0'\r\n\r\n if command is None:\r\n return await ctx.send(source_url + f'\\n\\nHint: You can specify a specific command to view the source of that command, Like `lm?source ping`!')\r\n\r\n obj = self.bot.get_command(command.replace('.', ' '))\r\n if obj is None:\r\n return await ctx.send(f\"Command Named {command} Not Found!\")\r\n\r\n src = obj.callback.__code__\r\n module = obj.callback.__module__\r\n filename = src.co_filename\r\n\r\n lines, firstlineno = inspect.getsourcelines(src)\r\n location = os.path.realpath(filename).replace(\"\\\\\", '/')\r\n\r\n if \"home/gilb\" in location:\r\n await ctx.send(\"Command Found but Not Updated/Found in Github Repo. Here is the main link! 
{}!\".format(source_url))\r\n return\r\n\r\n #if \"/home/gilb/.local\" in location:\r\n # async with ctx.message.channel.typing():\r\n # finallocation = location.replace(\"/home/gilb/.local/lib/python3.8/site-packages/\", \"\")\r\n # finallocation2 = finallocation.replace(\" \", \"+\")\r\n # driver2.get(f\"https://www.google.com/search?q=pip+install+{finallocation2}\")\r\n #\r\n # pypipage = driver2.find_elements_by_css_selector(\"yuRUbf > a\").__getattribute__('href')\r\n #\r\n # print(pypipage)\r\n #\r\n # driver.get(f'{str(pypipage)}')\r\n #\r\n # pipcmd = driver.find_element_by_xpath('.//span[@class = \"package-header__pip-instructions\"]')[0]\r\n #\r\n # print(pipcmd)\r\n\r\n # finallocation = location.replace(\"/home/gilb/.local/lib/python3.8/site-packages/\", \"\")\r\n # finallocation2 = finallocation.replace(\" \", \"+\")\r\n # req = Request(f\"https://www.google.com/search?q=pip+install+{finallocation2}\", headers={'User-Agent': 'Mozilla/5.0'})\r\n # webpage = urlopen(req).read()\r\n # soup = BeautifulSoup(webpage, \"html.parser\")\r\n # for link in soup.findAll('a', attrs={'href', re.compile(\"^https://pypi.com/\")}):\r\n # href = print(link.get('href'))\r\n\r\n #async with ClientSession() as session:\r\n # async with session.get(f'{source_url}/blob/{branch}/{location}#L{firstlineno - 1}-L{firstlineno + len(lines) - 2}') as response:\r\n # if response.status != 200:\r\n # await ctx.send(\"Cannot connect to GitHub Servers. Try Again Later! Maybe check the Github Status. If not here is the main link! {}\".format(source_url))\r\n # return\r\n # else:\r\n # pass\r\n\r\n final_url = f'{source_url}/blob/{branch}/{location}#L{firstlineno - 1}-L{firstlineno + len(lines) - 2}'\r\n finalurl2 = final_url.replace('//home/gilb/LyricMaster/', '/')\r\n await ctx.send(finalurl2)", "title": "" }, { "docid": "ba6b9a072b5b886a8adcf7c78728baed", "score": "0.53354615", "text": "def sync(config):\n # Fetch & Sync Chromium\n # Copy PATH from current process and add depot_tools to it\n depot_tools_path = os.path.join(os.getcwd(), 'depot_tools')\n if not os.path.exists(depot_tools_path) or not os.path.isdir(depot_tools_path):\n raise FileNotFoundError(\"Cannot find depot_tools!\")\n _env = os.environ.copy()\n _env[\"PATH\"] = depot_tools_path + \":\" + _env[\"PATH\"]\n\n # Get chromium ref\n # Set src HEAD to version\n chromium_ref = set_revision(config)\n\n # Create .gclient file\n with open('.gclient', 'w', encoding='utf-8') as f:\n f.write(GCLIENT_CONFIG.replace(\"@@TARGET_OS@@\", \"'{}'\".format(config.target_os)))\n\n # Run gclient sync without hooks\n extra_args = []\n if config.reset:\n extra_args += ['--revision', 'src@' + chromium_ref, '--force', '--upstream', '--reset']\n if config.shallow:\n # There is a bug with --no-history when syncing third_party/wayland. 
See\n # https://bugs.chromium.org/p/chromium/issues/detail?id=1226496\n extra_args += ['--shallow']\n else:\n extra_args += ['--with_tags', '--with_branch_heads']\n\n sp.check_call(['gclient', 'sync', '--nohooks'] + extra_args, env=_env)\n\n # Run hooks\n sp.check_call(['gclient', 'runhooks'], env=_env)\n\n # If Debian/Ubuntu and install_deps, then run the script.\n # Note: requires sudo\n if config.install_build_deps:\n if config.target_os == 'android':\n script = 'install-build-deps-android.sh'\n else:\n script = 'install-build-deps.sh'\n distro_name = distro.linux_distribution(full_distribution_name=False)[0].lower()\n if distro_name == 'debian' or distro_name == 'ubuntu':\n warnings.warn(\"Note: installing dependencies requires root privilege!\",\n RuntimeWarning)\n sp.check_call(['sudo', os.path.join(SRC_DIR, 'build', script)])\n else:\n warnings.warn(\"Installing dependencies only works on Debian based systems, skipping.\",\n RuntimeWarning)", "title": "" }, { "docid": "f59fcbb73b07ac726f380a869c275510", "score": "0.5229842", "text": "def update():\n for install_type in VSCode:\n checkout = checkout_directory(install_type)\n if not checkout.exists():\n continue\n print(f\"UPDATING {checkout}\")\n print(\"Deleting files downloaded by the extension ...\")\n cleanup(checkout)\n print(\"Updating clone ...\")\n update_checkout(checkout)\n build(checkout)", "title": "" }, { "docid": "38deecf9c422fd2526e63f854a2dc97a", "score": "0.5225225", "text": "def _pull_and_resolve_conflicts(self):\n\n logging.info('Starting pull from {}'.format(self.git_url))\n\n yield from execute_cmd(['git', 'checkout', self.branch_name], cwd=self.repo_dir)\n yield from execute_cmd(['git', 'fetch'], cwd=self.repo_dir)\n yield from execute_cmd(['git', 'merge', '-Xours', 'origin/{}'.format(self.branch_name)], cwd=self.repo_dir)\n\n logging.info('Pulled from {}'.format(self.git_url))", "title": "" }, { "docid": "e523b739a907ac07930f9068581966b2", "score": "0.519865", "text": "def main(pr_ids, output_dir, upstream):\n check_for_cargo_benchcmp()\n check_github_api_env_vars()\n\n temp_dir = None\n if output_dir is None:\n temp_dir = TemporaryDirectory()\n output_dir = temp_dir.name\n else:\n os.makedirs(output_dir, exist_ok=True)\n\n if upstream is None:\n check_environment_variable('RBU_UPSTREAM_URL', 'if --upstream is not')\n upstream = os.environ['RBU_UPSTREAM_URL']\n\n for pr_id in pr_ids:\n benchmark_github_pr(pr_id, upstream, output_dir)\n\n # Cleanup if it's a temp dir\n if temp_dir is not None:\n temp_dir.cleanup()", "title": "" }, { "docid": "0d07a072b9a2a84f23e5275a9202a910", "score": "0.5186958", "text": "def update_fetch(self):\n Popen([\"mount\", \"-t\", \"devfs\", \"devfs\",\n \"{}/releases/{}/root/dev\".format(self.iocroot,\n self.release)]).communicate()\n copy(\"/etc/resolv.conf\",\n \"{}/releases/{}/root/etc/resolv.conf\".format(self.iocroot,\n self.release))\n\n # TODO: Check for STABLE/PRERELEASE/CURRENT/BETA if we support those.\n # TODO: Fancier.\n self.lgr.info(\"\\n* Updating {} to the latest patch level... 
\".format(\n self.release))\n\n os.environ[\"UNAME_r\"] = self.release\n os.environ[\"PAGER\"] = \"/bin/cat\"\n new_root = \"{}/releases/{}/root\".format(self.iocroot, self.release)\n if os.path.isfile(\"{}/etc/freebsd-update.conf\".format(new_root)):\n # 10.1-RELEASE and under have a interactive check\n if float(self.release.partition(\"-\")[0][:5]) <= 10.1:\n with NamedTemporaryFile(delete=False) as tmp_conf:\n conf = \"{}/usr/sbin/freebsd-update\".format(new_root)\n with open(conf) as update_conf:\n for line in update_conf:\n tmp_conf.write(re.sub(\"\\[ ! -t 0 \\]\", \"false\",\n line))\n\n os.chmod(tmp_conf.name, 0o755)\n Popen([tmp_conf.name, \"-b\", new_root, \"-d\",\n \"{}/var/db/freebsd-update/\".format(new_root), \"-f\",\n \"{}/etc/freebsd-update.conf\".format(new_root),\n \"fetch\"], stdout=PIPE, stderr=PIPE).communicate()\n os.remove(tmp_conf.name)\n else:\n Popen([\"freebsd-update\", \"-b\", new_root, \"-d\",\n \"{}/var/db/freebsd-update/\".format(new_root), \"-f\",\n \"{}/etc/freebsd-update.conf\".format(new_root),\n \"fetch\"], stdout=PIPE, stderr=PIPE).communicate()\n\n Popen([\"freebsd-update\", \"-b\", new_root, \"-d\",\n \"{}/var/db/freebsd-update/\".format(new_root), \"-f\",\n \"{}/etc/freebsd-update.conf\".format(new_root),\n \"install\"], stdout=PIPE, stderr=PIPE).communicate()\n\n try:\n # Why this sometimes doesn't exist, we may never know.\n os.remove(\"{}/releases/{}/root/etc/resolv.conf\".format(\n self.iocroot, self.release))\n except OSError:\n pass\n\n Popen([\"umount\", \"{}/releases/{}/root/dev\".format(\n self.iocroot, self.release)]).communicate()", "title": "" }, { "docid": "9f36ab8a04e43304346ad9e5a6e485e1", "score": "0.51862854", "text": "def DP():\n url = \"https://github.com/W1l50n2208\"\n os.startfile(url)", "title": "" }, { "docid": "dd0a3541ad9ec2a22e2c2a2e8af51117", "score": "0.51758665", "text": "def pull():\n local('git pull origin master')", "title": "" }, { "docid": "22a13d27f39ad2b52f407c1ae7dfdffe", "score": "0.5153826", "text": "def refresh_external_libs():\n for url, alias in settings.EXTERNAL_LIBRARIES:\n path = os.path.join(settings.EXTERNAL_LIBRARIES_ROOT, alias)\n if os.path.exists(path):\n pull(alias, url)\n logger.info(f\"Library '{alias}' updated from '{url}.\")\n else:\n clone(alias, url)\n logger.info(f\"Library '{alias}' cloned from '{url}.\")", "title": "" }, { "docid": "1e1d0ab50f4462c010f3c71831e3b69e", "score": "0.51401055", "text": "def repo_update():\n with cd(env.base_dir):\n run('git fetch --all && git reset --hard origin/master')", "title": "" }, { "docid": "dab8d08a20148424d77e201e82fb42b5", "score": "0.51230353", "text": "def pull(self):\n\n Program.runprogram(shlex.split(\"git pull\"), workdir=self.directory, use_sudo=self.use_sudo, user=self.sudo_user)", "title": "" }, { "docid": "21253266bc44016b41b6209492b502af", "score": "0.50979227", "text": "def pull(env, opts, args):\n remotes = remote.get_remotes(env)\n if args:\n filt = lambda rmt: rmt.name in args\n else:\n filt = lambda rmt: True\n for entry in filter(filt, remotes):\n external.rsync(entry.url, env.directory, backup_dir=env.backup)", "title": "" }, { "docid": "ae36b5c1a49ae955ce4fc10c854f840d", "score": "0.50876707", "text": "def soft_deploy():\n clear_compiled_python_files()\n git_pull()\n collectstatic()\n reload_http()", "title": "" }, { "docid": "11db07804dc60e5b5dd91eac8b424071", "score": "0.5043245", "text": "def main(repo_dirs, linewise, loop, delay, repos):\n\n if repo_dirs is not None:\n repos = repo_dirs.read().splitlines()\n if not repos:\n 
return\n\n while True:\n dirs = get_git_repos(repos)\n\n codes = []\n for d in dirs:\n chdir(d)\n\n status = get_git_status()\n if status == \"-\":\n continue\n\n code = get_git_code(d, status)\n if code:\n codes.append(code)\n\n if linewise:\n codes = \"\\n\".join(codes)\n else:\n codes = \" \".join(codes)\n click.echo(codes)\n\n if not loop:\n break\n time.sleep(delay)", "title": "" }, { "docid": "31b6c0e338a09af69a5cd589959b966d", "score": "0.5024227", "text": "async def pull(self, ctx):\n await ctx.typing()\n try:\n output = subprocess.check_output(\n ['git', 'pull']).decode()\n await ctx.send('```git\\n' + output + '\\n```')\n except Exception as e:\n return await ctx.send(str(e))\n\n _cogs = [f'cogs.{i}' for i in self.cog_re.findall(output)]\n active_cogs = [i for i in _cogs if i in self.client.extensions]\n if active_cogs:\n for cog_name in active_cogs:\n await ctx.invoke(self.client.get_command('reload'), cog_name)", "title": "" }, { "docid": "79de43382848d91c9f1f995cdc6d6672", "score": "0.5023246", "text": "def CM():\n url = \"https://github.com/Chrism1c\"\n os.startfile(url)", "title": "" }, { "docid": "0254aab07f47c30f36709b63bba55011", "score": "0.50232244", "text": "def main():\n last_url = \"\"\n config_file = read_config()\n while True:\n try:\n url = get_latest_url()\n if url != last_url: # update detected\n print(f\"Update detected!\")\n\n last_url = url\n data_location = config_file[\"downloaded_data_path\"]\n\n create_data_folder(data_location)\n download_file(url, data_location)\n\n print(\"Extraction completed.\")\n except Exception as e:\n print(f\"Error! {e}\")\n continue\n return 0", "title": "" }, { "docid": "e9d33f77a9c32d40eec20f4ffcb2b751", "score": "0.50189006", "text": "def downloadUpdateClick():\n lcd.clear()\n lcd.message(\"Downloading...\")\n lcd.setCursor(14, 0)\n lcd.ToggleBlink()\n os.system(\"mount -o rw,remount /\")\n try:\n response = subprocess.check_output([\"git\", \"pull\"])\n except:\n response = \"Update Error:\\nCheck Internet\"\n os.system(\"mount -o ro,remount /\")\n if response.strip() == \"Already up-to-date.\":\n message = \"No Update Found\"\n else:\n message = response.strip()\n lcd.ToggleBlink()\n lcdPrint(message, 2)", "title": "" }, { "docid": "db6ae95bae35a1c71e16eb21c9af1113", "score": "0.50038403", "text": "def main():\n setup_and_install_dependencies()\n run_lighthouse_checks()\n delete_reports()", "title": "" }, { "docid": "ad3c2780c7ddfe0aa19187141b4b6807", "score": "0.49998796", "text": "def pull(c, only_latest=False):\n images = [\n ('6.0', 'latest')\n ]\n if not only_latest:\n images.extend([\n ('5.0', 'stable'),\n ('7.0', 'testing'),\n ])\n for image, tag in images:\n c.run(f'docker pull readthedocs/build:{image}', pty=True)\n c.run(f'docker tag readthedocs/build:{image} readthedocs/build:{tag}', pty=True)", "title": "" }, { "docid": "7c88df71b7982d7dcd77552162f97020", "score": "0.4989056", "text": "def update(self, options):\n\n self._repo.ensure_lines(\".gitignore\", [\n \"workspace/\",\n \"*.egg-info/\",\n \"dist/*\",\n \"__pycache__\",\n \"*.pyc\",\n \".pytest_cache/\",\n \"build/*\"\n ])\n\n self._repo.ensure_template(\".editorconfig\", template=\"editorconfig\")\n self._repo.ensure_lines(\"requirements_build.txt\", [\n \"requests ~= 2.20\",\n \"twine ~= 1.12\",\n \"pycryptodome ~= 3.7\",\n \"pytest ~= 4.0\",\n \"wheel >= 0.32\"\n ], [\n r\"^requests\",\n r\"^twine\",\n r\"^pycryptodome\",\n r\"^pytest\",\n r\"^wheel\"\n ], multi=True)\n\n variables = {\n 'options': options,\n 'components': self._repo.components\n }\n\n 
self._repo.ensure_directory(self._repo.SCRIPT_DIR)\n self._repo.ensure_template(os.path.join(self._repo.MULTIPACKAGE_DIR, \"components.txt\"), template=\"components.txt\", overwrite=False)\n self._repo.ensure_template(os.path.join(self._repo.SCRIPT_DIR, \"release_by_name.py\"), \"release_by_name.py.tpl\", variables)\n self._repo.ensure_template(os.path.join(self._repo.SCRIPT_DIR, \"test_by_name.py\"), \"test_by_name.py.tpl\", variables)\n self._repo.ensure_template(os.path.join(self._repo.SCRIPT_DIR, \"components.py\"), \"components.py.tpl\", variables)\n self._repo.ensure_template(os.path.join(self._repo.SCRIPT_DIR, \"tag_release.py\"), \"tag_release.py.tpl\", variables)\n\n self._repo.ensure_script(os.path.join(self._repo.SCRIPT_DIR, \"shared_errors.py\"), \"shared_errors.py\")\n self._repo.ensure_script(os.path.join(self._repo.SCRIPT_DIR, \"release_notes.py\"), \"release_notes.py\")\n self._repo.ensure_script(os.path.join(self._repo.SCRIPT_DIR, \"release_component.py\"), \"release_component.py\")", "title": "" }, { "docid": "40bdeaeaa7a49a5dc2de9a4cb21c18ea", "score": "0.49812675", "text": "def pull(url, revision=None):\r\n do_bzr_cmd(cmd_pull, location=url, revision=revision)", "title": "" }, { "docid": "335ef2d96c80e15a44077504e3fa2e36", "score": "0.49721912", "text": "def git_f():\r\n webbrowser.open(\"https://github.com/Katerunner/Flights-Course-Work\")", "title": "" }, { "docid": "0f299d209b5897cbd5861ab8d8334063", "score": "0.49692765", "text": "async def pull(ctx, pip=None):\n dev = ctx.message.author\n if bot.botdev_role in dev.roles or bot.owner_role in dev.roles:\n await ctx.send(\"`Pulling changes...`\")\n call([\"git\", \"stash\", \"save\"])\n call([\"git\", \"pull\"])\n call([\"git\", \"stash\", \"clear\"])\n pip_text = \"\"\n if pip == \"-p\" or pip == \"--pip\" or pip == \"-Syu\":\n await ctx.send(\"`Updating python dependencies...`\")\n call([\"python3.6\", \"-m\", \"pip\", \"install\", \"--user\", \"--upgrade\", \"-r\",\n \"requirements.txt\"])\n pip_text = \" and updated python dependencies\"\n await ctx.send(\"Pulled changes{}! Restarting...\".format(pip_text))\n execv(\"./Brick.py\", argv)\n else:\n if \"pacman\" in ctx.message.content:\n await ctx.send(\"`{} is not in the sudoers file. This incident will be reported.`\".format(ctx.message.author.display_name))\n else:\n await ctx.send(\"Only bot devs and / or owners can use this command\")", "title": "" }, { "docid": "f0ef4a1a4bc0bf3eee043955cc29e5e7", "score": "0.49266657", "text": "def waitUncommentableSourceIfNecessary(self):\n url = 'http://download.csdn.net/my/downloads/1'\n maxMinutes = 11\n for i in range(0, maxMinutes):\n html = self.getUrlContent(self.sess, url)\n if html is None:\n break\n soup = BeautifulSoup(html)\n sourcelist = soup.findAll('span', attrs={'class' : 'btn-comment'})\n if sourcelist is None or len(sourcelist) == 0:\n print('None uncommentable source now!')\n break\n print('Waiting for uncommentable source count down %d minutes.' % (maxMinutes-i))\n time.sleep(60)", "title": "" }, { "docid": "22c97af99de78a03c3ea5e7da122ce6d", "score": "0.49146226", "text": "def main(self):\r\n req = urllib2.Request(raw_input(\"Enter a URL! 
\\n\"), headers={ 'User-Agent': 'Mozilla/5.0' })\r\n target = urllib2.urlopen(req) #getting the source code\r\n inter = target.read()\r\n inter = inter.decode(\"utf-8\") #decode unicode entities\r\n inter = inter.replace(\"<style type\", \"<s><style>\") #replace problematic tags\r\n inter = inter.replace(\"<!DOCTYPE\", \"<DOCTYP><\")\r\n inter = inter.replace(\"<!doctype\", \"<doctyp><\")\r\n inter = inter.replace(\"<![\", \"<[[\")\r\n inter = inter.replace(\"<!]\", \"<]]\")\r\n text = list(inter) #make a list containing every character\r\n logging.info(\"\\nPLEASE WAIT\\n\")\r\n logging.info(\"removing self.comments...\\n\" )\r\n self.removeComments(text) #comment removal\r\n logging.info(\"removing self.scripts...\\n\")\r\n self.removeScripts(text) #script removal\r\n logging.info(\"removing self.styles...\\n\")\r\n self.removeStyles(text) #style removal\r\n logging.info(\"extracting html and non-html characters...\\n\" )\r\n self.htmlSeparate(text) #separating the remaining parts\r\n logging.info(\"FINISHED! EVERY FILE IS IN THE SAME FOLDER AS THIS SCRIPT.\")", "title": "" }, { "docid": "d0c82e5a83232ad6d0e4609b19bbc277", "score": "0.49099478", "text": "def update_dependencies(dep_dict: dict, open_pull_requests: dict):\n file_queue = Queue(config.MAXIMUM_DEPENDENCIES)\n for dep_file in glob.glob('{}/**/ivy.xml'.format(config.REPO_PATH.replace('/', '')), recursive=True):\n file_queue.put(dep_file)\n threads = []\n for i in range(config.NETWORKING_THREADS_GITHUB):\n t = threading.Thread(\n target=update_run, name='th-{}'.format(i),\n kwargs={'file_queue': file_queue, 'dep_dict': dep_dict, 'open_pull_requests': open_pull_requests})\n threads.append(t)\n for t in threads:\n t.start()\n for t in threads:\n t.join()", "title": "" }, { "docid": "f8fd9b8fd0c8e07a9bc1024daaa3e478", "score": "0.48900807", "text": "def cli(from_url: str, force: bool, noleanup: bool, python_debug: bool) -> None:\n global cache_url, force_download, lean_upgrade, debug\n cache_url = from_url\n force_download = force\n lean_upgrade = not noleanup\n debug = python_debug", "title": "" }, { "docid": "7d854b9b8c5c46517fdc2bc891136328", "score": "0.48873034", "text": "def update():\n local('git pull origin master')\n local('pip install -r requirements.txt')\n\n kwarg = _check_env()\n with shell_env(**kwarg):\n local('python pillbox-engine/manage.py migrate')\n local('python pillbox-engine/manage.py collectstatic --noinput')\n local('python pillbox-engine/manage.py makeusers')", "title": "" }, { "docid": "c7ea35b864619c46883aca7fd214a868", "score": "0.48757318", "text": "def main():\n args = docopt(__doc__, version=VERSION)\n\n config = ConfigParser(interpolation=ExtendedInterpolation())\n # Set defaults for all sections\n config.read_dict(copy.copy(DEFAULT_OPTIONS))\n # Load configuration from a file. 
NOTE: ConfigParser doesn't warn if user\n # sets a filename which doesn't exist, in this case defaults will be used.\n try:\n config.read(args['--file'])\n except ParsingError as exc:\n sys.exit(str(exc))\n\n if args['--print']:\n for section in sorted(DEFAULT_OPTIONS):\n if section == 'pull' or section == 'DEFAULT':\n print(\"[{}]\".format(section))\n for key, value in sorted(DEFAULT_OPTIONS[section].items()):\n print(\"{k} = {v}\".format(k=key, v=value))\n print()\n sys.exit(0)\n if args['--print-conf']:\n for section in sorted(config):\n if section == 'pull' or section == 'DEFAULT':\n print(\"[{}]\".format(section))\n for key, value in sorted(config[section].items()):\n print(\"{k} = {v}\".format(k=key, v=value))\n print()\n sys.exit(0)\n\n try:\n configuration_check(config, 'pull')\n except ValueError as exc:\n sys.exit(str(exc))\n\n loglevel = (config.get('pull', 'loglevel') # pylint: disable=no-member\n .upper())\n log.setLevel(getattr(logging, loglevel, None))\n\n log.info('haproxystats-pull %s version started', VERSION)\n # Setup our event loop\n loop = asyncio.get_event_loop()\n executor = ThreadPoolExecutor(max_workers=config.getint('pull',\n 'workers'))\n # Register shutdown to signals\n\n def shutdown(signalname):\n \"\"\"Perform a clean shutdown.\n\n Arguments:\n signalname (str): Signal name\n \"\"\"\n tasks_running = False\n log.info('received %s', signalname)\n\n for task in asyncio.Task.all_tasks():\n if not task.done():\n tasks_running = True\n log.info('cancelling %s task', task)\n task.cancel()\n\n if not tasks_running:\n log.info('no tasks were running when %s signal received', signal)\n log.info('waiting for threads to finish any pending IO tasks')\n executor.shutdown(wait=True)\n sys.exit(0)\n\n loop.add_signal_handler(signal.SIGHUP, partial(shutdown, 'SIGHUP'))\n loop.add_signal_handler(signal.SIGTERM, partial(shutdown, 'SIGTERM'))\n\n # a temporary directory to store fetched data\n tmp_dst_dir = config['pull']['tmp-dst-dir']\n # a permanent directory to move data from the temporary directory. 
Data are\n # picked up by the process daemon from that directory.\n dst_dir = config['pull']['dst-dir']\n for directory in dst_dir, tmp_dst_dir:\n try:\n os.makedirs(directory)\n except OSError as exc:\n # errno 17 => file exists\n if exc.errno != 17:\n sys.exit(\"failed to make directory {d}:{e}\"\n .format(d=directory, e=exc))\n supervisor(loop, config, executor)", "title": "" }, { "docid": "d97a1759419d9157e06a0870ab250ba3", "score": "0.48682088", "text": "def git_pull(options):\n check_call([\"git\", \"pull\"])", "title": "" }, { "docid": "bd0e90d91aff718268b0a1d54259680d", "score": "0.4868125", "text": "def scrape_github():\n githubs = [\"/bitshares/bitshares-ui/master/app/api/apiConfig.js\"]\n if not GITHUB_MASTER:\n githubs += Nodes.github()\n # scrape from github\n urls = []\n http = \"https://\"\n raw = \"raw.githubusercontent.com\"\n if PROXY_GITHUB: # text proxy\n uri = http + PROXY + raw\n else:\n uri = http + raw\n for _, git in enumerate(githubs):\n url = uri + git\n urls.append(url)\n print(\"scraping github for Bitshares nodes...\")\n validated = []\n repos = []\n for url in urls:\n attempts = 3\n while attempts > 0:\n try:\n raw = requests.get(url, timeout=(6, 30)).text\n ret = validate(parse(clean(raw)))\n if ret:\n repos.append(url)\n del raw\n repo = url.replace(uri, \"\")\n repo = \"/\".join(repo.split(\"/\", 3)[:3])\n print((\"found %s nodes at %s\" % (len(ret), repo)))\n validated += ret\n attempts = 0\n except BaseException:\n print((\"failed to connect to %s\" % url))\n attempts -= 1\n return validated, repos", "title": "" }, { "docid": "328613caf0318b587caf1cd286245d83", "score": "0.48634103", "text": "def main():\n err_print(\"Booting up web driver...\")\n beige_books = webdriver.Chrome(WEBDRIVER_PATH) # Webdriver loaded from folder in case PATH not configured\n err_print(\"Loading main archive page...\")\n beige_books.get(ARCHIVE_URL)\n err_print(\"Loading XPATH and ID identifiers...\")\n search_button = beige_books.find_element_by_xpath(SEARCH_BUTTON_XPATH) # Easy button pressing\n selects = Select(beige_books.find_element_by_name(SELECT_ID)).options # Grab all available options\n err_print(\"Selecting all possible years...\\n\")\n reports = []\n curr_year = CURRENT_YEAR\n\n while curr_year >= END_YEAR: # While loop used becuase 'selects' change on every iteration and useful for indexing\n year = selects[index_from_year(curr_year)] # Loads the year into memory\n err_print(\"Grabbing all reports for \" + str(curr_year) + \"...\")\n year.click()\n search_button.click()\n links = beige_books.find_elements_by_xpath(HTML_LINK_XPATH) # Grabs all hyperlinks\n\n for link in links:\n link_html = link.get_attribute(\"href\") # Converts Selenium object to string URLs\n if contains(URL_IDENTIFIER + str(curr_year), link_html) and link_html not in reports: # Checks for dupes\n err_print(\"Grabbed \" + link_html + \"!\")\n reports.append(link_html)\n\n err_print(\"Finished \" + str(curr_year) + \"!\")\n curr_year -= 1 # Since the current year is the first item on the select, deincrement after every loop\n err_print(\"Refreshing parameters for \" + str(curr_year) + \"...\\n\")\n search_button = beige_books.find_element_by_xpath(SEARCH_BUTTON_XPATH) # Refresh needed because site changed\n selects = Select(beige_books.find_element_by_name(SELECT_ID)).options\n\n err_print(\"Closing web driver...\")\n beige_books.close() # Exits out of the website\n err_print(\"End of scrape!\")\n\n # START OF PARSING #\n\n err_print(\"Instantiating new data table...\")\n final_table = 
new_table() # Creates a new table to store data\n err_print(\"Parsing reports to table...\\n\")\n for report in reports: # Reads the 'text content' of all the scraped files and parses them into the final table\n err_print(\"Extracting data from \" + report + \"...\")\n site = soupify(report)\n final_table = parse(site, final_table)\n save(final_table) # Saves the table into a CSV\n err_print(\"End of parse!\")", "title": "" }, { "docid": "2dfd07bb8af2396a3d18e3aacc82fb84", "score": "0.48553193", "text": "def api_github_message():\n if rq.headers['Content-Type'] == 'application/json':\n my_info = json.dumps(rq.json)\n payload = json.loads(my_info)\n if not payload['action'] == 'closed':\n url = payload['pull_request']['url']\n url = url + '/files'\n comment_url = payload['pull_request']['comments_url']\n r = requests.get(url)\n a = r.json()\n #print(a)\n print(\"Received Pull Request for %d Changed Files\"%(len(a)))\n print(\"Initializing Linting Process\")\n i = 0\n x = []\n filelist = []\n for i in range(len(a)):\n raw = (a[i]['raw_url'])\n #print(raw)\n filename = (a[i]['filename']).split(\"/\")\n file = filename[-1]\n\n re = urllib.request.urlopen('%s' % raw)\n # returned_value = os.system(\"start \\\"\\\" %s\" %raw)\n data = re.read()\n dst = open(\"%s\" % file, \"wb\")\n dst.write(data)\n dst = open(\"%s\" % file, \"r\")\n print(\"Checking syntax errors for File: %d. %s\" %(i+1, file))\n\n file_ext = (file).split(\".\")\n print(\"File extension of the file: %s\" % file_ext[-1])\n if file_ext[-1] == 'py':\n print(\"Cloned file: %s deleted\" % file)\n print(\"Linting file : %s\" % file)\n cmd = 'pycodestyle %s' % file\n process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE).communicate(0)\n syntax_check = str(process)[2:-1]\n # p = str(p)\n # p = p[2:-1]\n if process == \"b''\":\n q = \"No syntax error found\"\n print(q)\n\n else:\n print(\"Syntax error found: %s\" % syntax_check)\n q = \"Syntax error found\"\n filelist.append(file)\n\n dst.close()\n os.remove(\"%s\" % file)\n s = \"%s %s\" % (q, syntax_check)\n else:\n print(\"No linter found for %s extension\" % file_ext[-1])\n s = \"No linter found for %s extension\" % file_ext[-1]\n x.append(s)\n i = i + 1\n if x is None:\n comment_body = \"Git Assist Prediction : No Syntax Errors Found.\"\n else:\n filelist = list( dict.fromkeys(filelist) )\n filelist = ', '.join(filelist)\n comment_body = \"Git Assist Prediction: To Be Rejected. 
Syntax Errors found in the following files: %s.\"%filelist\n\n headers = {'Content-Type': 'application/json'}\n myurl = '%s' % comment_url\n post_comment_data = '{\\\"body\\\":\\\"%s\\\"}' % comment_body\n post_comment_json = json.loads(post_comment_data)\n response = requests.post(myurl, auth=(\"iamthebj\", \"Kj9158852858#\"), headers=headers,\n json=post_comment_json)\n if response.status_code == 201:\n print(\"Successfully posted a comment\")\n else:\n print(\"Failed to post a response with http status code : \", response.status_code)\n return \"%s\" % x\n else:\n print(\"Closed pull request\")\n return \"Closed pull request\"", "title": "" }, { "docid": "2e1c079cc7c16bd70c74ca9e25b4115f", "score": "0.48452666", "text": "def post_pr_merge_sync(context):\n context.run(\"git switch main\")\n context.run(\"git pull\")", "title": "" }, { "docid": "aedbd6deb7f263ed35508c635b58f685", "score": "0.4842096", "text": "def _cleanup_and_commit(self, updates):\n self._clean_up_web_root()\n self._git_apply_changes(updates)", "title": "" }, { "docid": "fbbf83dd83765c46feafc7adae9a7847", "score": "0.48368776", "text": "def main():\n _, repo_root = commands.getstatusoutput('git rev-parse --show-toplevel')\n checks = imp.load_source('run_checks',\n repo_root + \"/devtools/run_checks.py\")\n exit(checks.main())", "title": "" }, { "docid": "d73ce5dde2baa9c8c010fc38457ea73f", "score": "0.4835393", "text": "def main():\n current_build_number = os.environ.get(\"BUILDKITE_BUILD_NUMBER\", None)\n if not current_build_number:\n raise Exception(\"Not running inside Buildkite\")\n current_build_number = int(current_build_number)\n\n for _ in range(5):\n latest_generation, latest_build_number = latest_generation_and_build_number()\n\n if current_build_number <= latest_build_number:\n eprint(\n (\n \"Current build '{0}' is not newer than latest published '{1}'. \"\n + \"Skipping publishing of binaries.\"\n ).format(current_build_number, latest_build_number)\n )\n break\n\n try:\n try_publish_binaries(current_build_number, latest_generation)\n except BinaryUploadRaceException:\n # Retry.\n continue\n\n eprint(\n \"Successfully updated '{0}' to binaries from build {1}.\".format(\n bazelci_builds_metadata_url(), current_build_number\n )\n )\n break\n else:\n raise Exception(\"Could not publish binaries, ran out of attempts.\")", "title": "" }, { "docid": "2929748d00c944809dadcc070aad7fb9", "score": "0.48351246", "text": "def hunt(urls, threads, exclude_flags, include_flags, interesting_extensions, interesting_files, interesting_keywords,\n stdout_flags, progress_enabled, timeout, max_depth, not_follow_subdomains, exclude_sources, proxies, delay,\n not_allow_redirects, limit, to_file, user_agent, cookies, headers):\n if exclude_flags and include_flags:\n raise BadOptionUsage('--exclude-flags and --include-flags are mutually exclusive.')\n welcome()\n urls = flat_list(urls)\n proxies = multiplier_args(proxies)\n if not urls:\n click.echo('•_•) OOPS! Add urls to analyze.\\nFor example: dirhunt http://domain/path\\n\\n'\n 'Need help? 
Then use dirhunt --help', err=True)\n return\n exclude_flags, include_flags = flags_range(exclude_flags), flags_range(include_flags)\n progress_enabled = (sys.stdout.isatty() or sys.stderr.isatty()) if progress_enabled is None else progress_enabled\n crawler = Crawler(max_workers=threads, interesting_extensions=interesting_extensions,\n interesting_files=interesting_files, interesting_keywords=interesting_keywords,\n std=sys.stdout if sys.stdout.isatty() else sys.stderr,\n progress_enabled=progress_enabled, timeout=timeout, depth=max_depth,\n not_follow_subdomains=not_follow_subdomains, exclude_sources=exclude_sources,\n not_allow_redirects=not_allow_redirects, proxies=proxies, delay=delay, limit=limit,\n to_file=to_file, user_agent=user_agent, cookies=cookies, headers=headers)\n crawler.add_init_urls(*urls)\n if os.path.exists(crawler.get_resume_file()):\n click.echo('Resuming the previous program execution...')\n try:\n crawler.resume(crawler.get_resume_file())\n except IncompatibleVersionError as e:\n click.echo(e)\n while True:\n choice = catch_keyboard_interrupt_choices(crawler.print_results, ['abort', 'continue', 'results'], 'a') \\\n (set(exclude_flags), set(include_flags))\n if choice == 'a':\n crawler.close(True)\n click.echo('Created resume file \"{}\". Run again using the same parameters to resume.'.format(\n crawler.get_resume_file())\n )\n return\n elif choice == 'c':\n crawler.restart()\n continue\n else:\n break\n crawler.print_urls_info()\n if not sys.stdout.isatty():\n output_urls(crawler, stdout_flags)\n if to_file:\n crawler.create_report(to_file)\n if not to_file and os.path.exists(crawler.get_resume_file()):\n # The resume file exists. Deleting...\n os.remove(crawler.get_resume_file())", "title": "" }, { "docid": "3f155df21b78df33d691f99311b89722", "score": "0.4811949", "text": "def main():\n logging.basicConfig(\n format='[%(asctime)s] %(levelname)s -- %(message)s',\n level=logging.DEBUG)\n\n parser = argparse.ArgumentParser(description='Synchronizes a github repository with a local repository.')\n parser.add_argument('--git-url', help='Url of the repo to sync', required=True)\n parser.add_argument('--branch-name', default='master', help='Branch of repo to sync')\n parser.add_argument('--repo-dir', default='./', help='Path to sync to')\n args = parser.parse_args()\n\n for line in GitAutoSync(\n args.git_url,\n args.branch_name,\n args.repo_dir\n ).pull_from_remote():\n print(line)", "title": "" }, { "docid": "68507aa3d67f277ccaab4c7150eec540", "score": "0.48108003", "text": "def install_core():\n commands = \"\"\"\nyum -y update\nyum -y groupinstall \"Development Tools\"\nyum install -y gvim xterm\nyum install -y python-pip python-virtualenv\npip install --upgrade pip\npip install virtualenvwrapper\npip install --upgrade google-api-python-client oauth2client pyasn1 --ignore-installed requests\npip install --no-cache-dir -U crcmod\n \"\"\".strip().split('\\n')\n run_commands(commands)", "title": "" }, { "docid": "4db17b04aa500fbb22d7910028433174", "score": "0.48068625", "text": "def setUp(self):\n self.driver = webdriver.Chrome(executable_path=\"E:\\chrome driver\\chromedriver_win32\\chromedriver.exe\")\n self.driver.maximize_window()\n self.base_url = \"https://github.com/\"", "title": "" }, { "docid": "61912d4944a8c6a13c140669b93d7114", "score": "0.48045424", "text": "def main(local):\n\n rewrite_baseurl(local)\n rewrite_manifest(local)", "title": "" }, { "docid": "357f5d3b03b1e434c6e649ebd2e1572a", "score": "0.4802946", "text": "def start():\n\n pr_builder = 
PullRequestBuilder()\n events.dispatcher.register_target(PullRequestListener(pr_builder))\n events.dispatcher.register_target(ManualPullRequestListener(pr_builder))\n events.dispatcher.register_target(IRCRebuildListener(pr_builder))\n utils.DaemonThread(target=pr_builder.run).start()\n\n collector = BuildStatusCollector()\n events.dispatcher.register_target(BBHookListener(collector))\n utils.DaemonThread(target=collector.run).start()", "title": "" }, { "docid": "86f1f4f5cfe273d4088f62ee618ed9a0", "score": "0.4802121", "text": "def fetch(args):\n\n # figure out which repos/revs we're hoping to update.\n # None is our internal, temp keyword representing the LATEST possible\n # rev.\n user_repo_revs = {} # repo -> version\n repo_paths = map(lambda x: x.local_path, REPOS)\n args_queue = collections.deque(args[:])\n\n while len(args_queue) > 0:\n current_arg = args_queue.popleft()\n\n # If the user provides repo revisions, it MUST be a specific repo.\n if current_arg in repo_paths:\n # the user might provide a revision.\n # It's a rev if it's not a repo.\n try:\n possible_rev = args_queue.popleft()\n except IndexError:\n # When no other args after the repo\n user_repo_revs[current_arg] = None\n continue\n\n if possible_rev in repo_paths:\n # then it's not a revision, it's a repo. put it back.\n # Also, assume user wants the repo we're currently working with\n # to be updated to the tip OR whatever.\n user_repo_revs[current_arg] = None\n args_queue.appendleft(possible_rev)\n continue\n elif possible_rev in ['-r', '--rev']:\n requested_rev = args_queue.popleft()\n user_repo_revs[current_arg] = requested_rev\n else:\n print \"ERROR: unclear arg %s\" % possible_rev\n return\n\n # determine which groupings the user wants to operate on.\n # example: `src` would represent all repos under src/\n # example: `data` would represent all repos under data/\n # example: `src/pygeoprocessing` would represent the pygeoprocessing repo\n repos = set([])\n for argument in args:\n if not argument.startswith('-'):\n repos.add(argument)\n\n def _user_requested_repo(local_repo_path):\n \"\"\"\n Check if the user requested this repository.\n Does so by checking prefixes provided by the user.\n\n Arguments:\n local_repo_path (string): the path to the local repository\n relative to the CWD. (example: src/pygeoprocessing)\n\n Returns:\n Boolean: Whether the user did request this repo.\n \"\"\"\n # check that the user wants to update this repo\n for user_arg_prefix in repos:\n if local_repo_path.startswith(user_arg_prefix):\n return True\n return False\n\n for repo in REPOS:\n LOGGER.debug('Checking %s', repo.local_path)\n\n # If the user did not request this repo AND the user didn't want to\n # update everything (by specifying no positional args), skip this repo.\n if not _user_requested_repo(repo.local_path) and len(repos) > 0:\n continue\n\n # does repo exist? If not, clone it.\n if not repo.ischeckedout():\n repo.clone()\n else:\n LOGGER.debug('Repository %s exists', repo.local_path)\n\n # is repo up-to-date? 
If not, update it.\n # If the user specified a target revision, use that instead.\n try:\n target_rev = user_repo_revs[repo.local_path]\n if target_rev is None:\n raise KeyError\n except KeyError:\n try:\n target_rev = repo.tracked_version()\n except KeyError:\n print 'ERROR: repo not tracked in versions.json: %s' % repo.local_path\n return 1\n\n repo.pull()\n repo.update(target_rev)", "title": "" }, { "docid": "a4a68f02b159b73a970b71c3bc12ac51", "score": "0.4798461", "text": "def test_pull():", "title": "" }, { "docid": "d28ddac3c4d5c5cc0337f9f01ad8552c", "score": "0.4795856", "text": "def needs_pull(self):\n for remote in self.repo.remotes:\n remote.fetch()\n\n for _ in self.repo.iter_commits('master..origin/master'):\n logger.info(\"Repo %s needs update\", self.repo_name)\n return True\n return False", "title": "" }, { "docid": "fbfd37f030eace6d5d07ff861498329b", "score": "0.47957903", "text": "def run_updates(self):\n updates = {}\n try:\n sites = self.get_sites_to_update()\n except DrupdatesError as update_status_error:\n raise DrupdatesUpdateError(20, update_status_error)\n if not sites['count']:\n return updates\n else:\n sites.pop('count')\n # Note: call Drush.call() without site alias as alias comes after dd argument.\n drush_dd = Drush.call(['dd', '@drupdates.' + self._site_name])\n self.site_web_root = drush_dd[0]\n # Create seperate commits for each project (ie module/theme)\n one_commit_per_project = self.settings.get('oneCommitPerProject')\n # Iterate through the site/sub-sites and perform updates, update files etc...\n sites_copy = copy.copy(sites)\n for site, data in sites.items():\n if 'modules' not in data:\n sites_copy.pop(site)\n continue\n modules = copy.copy(data['modules'])\n x = 0\n for project, descriptions in data['modules'].items():\n if self.settings.get('useMakeFile'):\n self.update_make_file(project, descriptions['current'], descriptions['candidate'])\n if one_commit_per_project:\n if x:\n build = Sitebuild(self._site_name, self.ssh, self.working_dir)\n build.build()\n self._update_code(site, [project])\n modules.pop(project)\n updates = self._build_commit_message(sites_copy, site, project)\n self._cleanup_and_commit(updates)\n x += 1\n if self.settings.get('buildSource') == 'make' and self.settings.get('useMakeFile'):\n self.utilities.make_site(self._site_name, self.site_dir)\n elif len(modules):\n self._update_code(site, modules.keys())\n if not one_commit_per_project:\n updates = self._build_commit_message(sites_copy)\n self._cleanup_and_commit(updates)\n return updates", "title": "" }, { "docid": "b2e9340ff3341af2f0de2b8361ab1366", "score": "0.47924095", "text": "def github_list_pull_requests(urls, numbers_only=False):\n pulls = github_get_pull_request_all(urls)\n formatted_pulls = []\n print \"Total pull count\", len(pulls)\n sys.stdout.write(\"Processing pulls...\")\n for pull in pulls:\n n = pull[\"number\"]\n sys.stdout.write(\" %d\" % n)\n sys.stdout.flush()\n pull_info = github_get_pull_request(urls, n)\n if not pull_info:\n # Pull request is an issue\n continue\n mergeable = pull_info[\"mergeable\"]\n if pull[\"head\"][\"repo\"]:\n repo = pull[\"head\"][\"repo\"][\"html_url\"]\n else:\n repo = None\n branch = pull[\"head\"][\"ref\"]\n created_at = pull[\"created_at\"]\n created_at = time.strptime(created_at, \"%Y-%m-%dT%H:%M:%SZ\")\n created_at = time.mktime(created_at)\n username = pull[\"user\"][\"login\"]\n user_info = github_get_user_info(urls, username)\n author = \"\\\"%s\\\" <%s>\" % (user_info.get(\"name\", \"unknown\"),\n 
user_info.get(\"email\", \"\"))\n branch_against = pull[\"base\"][\"ref\"]\n formatted_pulls.append({\n 'created_at': created_at,\n 'n': n,\n 'repo': repo,\n 'branch': branch,\n 'author': author,\n 'mergeable': mergeable,\n 'branch_against': branch_against,\n })\n formatted_pulls.sort(key=lambda x: x['created_at'])\n print \"\\nPatches that cannot be merged without conflicts:\"\n nonmergeable = []\n for pull in formatted_pulls:\n if pull['mergeable']:\n continue\n nonmergeable.append(int(pull['n']))\n if numbers_only:\n print pull['n'],\n else:\n print \"#%03d: %s %s (against %s)\" % (pull['n'], pull['repo'], pull['branch'], pull['branch_against'])\n print unicode(\" Author : %s\" % pull['author']).encode('utf8')\n print \" Date : %s\" % time.ctime(pull['created_at'])\n if numbers_only:\n print\n print\n print \"-\"*80\n print \"Patches that can be merged without conflicts:\"\n mergeable_list = []\n for pull in formatted_pulls:\n if not pull['mergeable']:\n continue\n mergeable_list.append(int(pull['n']))\n if numbers_only:\n print pull['n'],\n else:\n print \"#%03d: %s %s (against %s)\" % (pull['n'], pull['repo'], pull['branch'], pull['branch_against'])\n print unicode(\" Author : %s\" % pull['author']).encode('utf8')\n print \" Date : %s\" % time.ctime(pull['created_at'])\n if numbers_only:\n print\n print\n return nonmergeable, mergeable_list", "title": "" }, { "docid": "f149b4633005e2971d1cf63158e5e81c", "score": "0.47882986", "text": "def update_uwsgitool():\n url = 'https://github.com/kyan001/PyMyApps/raw/master/UwsgiTool/uwsgiTool.py'\n if cct.update_file(__file__, url):\n cct.run_cmd('{py} \"{f}\"'.format(py=cct.get_py_cmd(), f=__file__))\n cit.bye(0)", "title": "" }, { "docid": "de90c722d9dab0c46969c302e5d83152", "score": "0.4787962", "text": "def update():\n update_all_hometpl()\n update_ssh()\n update_debian()\n update_crontab()", "title": "" }, { "docid": "1d0c4a84678b5cf973e2b74fe99fd3f1", "score": "0.47873887", "text": "def download_old_versions(d):\n\n content_url = d.getVar('SWUPD_CONTENT_BUILD_URL', True)\n version_url = d.getVar('SWUPD_VERSION_BUILD_URL', True)\n current_format = int(d.getVar('SWUPD_FORMAT', True))\n deploy_dir = d.getVar('DEPLOY_DIR_SWUPD', True)\n www_dir = os.path.join(deploy_dir, 'www')\n\n if not content_url or not version_url:\n bb.warn('SWUPD_CONTENT_BUILD_URL and/or SWUPD_VERSION_BUILD_URL not set, skipping download of old versions for the initial build of a swupd update stream.')\n return\n\n # Avoid double // in path. At least twisted is sensitive to that.\n content_url = content_url.rstrip('/')\n version_url = version_url.rstrip('/')\n\n # Set up env variables with proxy information for use in urllib.\n export_proxies(d)\n\n # Find latest version for each of the older formats.\n # For now we ignore the released milestones and go\n # directly to the URL with all builds. 
The information\n # about milestones may be relevant for determining\n # how format changes need to be handled.\n latest_versions = {}\n for format in range(3, current_format + 1):\n try:\n url = '%s/version/format%d/latest' % (version_url, format)\n response = urllib.request.urlopen(url)\n version = int(response.read())\n latest_versions[format] = version\n formatdir = os.path.join(www_dir, 'version', 'format%d' % format)\n bb.utils.mkdirhier(formatdir)\n with open(os.path.join(formatdir, 'latest'), 'w') as latest:\n latest.write(str(version))\n except urllib.error.HTTPError as http_error:\n if http_error.code == 404:\n bb.debug(1, '%s does not exist, skipping that format' % url)\n else:\n raise\n except urllib.error.URLError as url_error:\n # Happens for file:// URLs.\n if isinstance(url_error.reason, OSError) and url_error.reason.errno == errno.ENOENT:\n bb.debug(1, '%s does not exist, skipping that format' % url)\n else:\n raise\n\n # Now get the Manifests of the latest versions and the\n # versions we are supposed to provide a delta for, as a starting point.\n # In addition, we also need Manifests that provide files reused by\n # these initial set of Manifests or get referenced by them.\n #\n # There's no integrity checking for the files. bsdtar is\n # expected to detect corrupted archives and https is expected\n # to protect against man-in-the-middle attacks.\n pending_versions = set(latest_versions.values())\n pending_versions.update([int(x) for x in d.getVar('SWUPD_DELTAPACK_VERSIONS', True).split()])\n fetched_versions = set([0])\n while pending_versions:\n version = pending_versions.pop()\n sub_versions = set()\n sub_versions.update(download_manifests(content_url, version,\n 'MoM',\n os.path.join(www_dir, str(version))))\n sub_versions.update(download_manifests(content_url, version,\n 'full',\n os.path.join(www_dir, str(version))))\n fetched_versions.add(version)\n pending_versions.update(sub_versions.difference(fetched_versions))\n\n latest_version_file = os.path.join(deploy_dir, 'image', 'latest.version')\n if not os.path.exists(latest_version_file):\n # We located information about latest version from online www update repo.\n # Now use that to determine what we are updating from. Doing this here\n # instead of swupd-image.bbclass has the advantage that we can do some\n # sanity checking very early in a build.\n #\n # Building a proper update makes swupd_create_fullfiles\n # a lot faster because it allows reusing existing, unmodified files.\n # Saves a lot of space, too, because the new Manifest files then merely\n # point to the older version (no entry in ${DEPLOY_DIR_SWUPD}/www/${OS_VERSION}/files,\n # not even a link).\n if not latest_versions:\n bb.fatal(\"%s does not exist and no information was found under SWUPD_CONTENT_BUILD_URL %s, cannot proceed without information about the previous build. 
When building the initial version, unset SWUPD_VERSION_BUILD_URL and SWUPD_CONTENT_BUILD_URL to proceed.\" % (latest_version_file, content_url))\n latest = sorted(latest_versions.values())[-1]\n bb.debug(2, \"Setting %d in latest.version file\" % latest)\n with open(latest_version_file, 'w') as f:\n f.write(str(latest))", "title": "" }, { "docid": "46cf41a81a9d1dc11b2cb58237a7aa27", "score": "0.47735083", "text": "async def update(self, ctx):\n\t\tlog.info(emb(f'Git pull called, Caller:[{ctx.author.id}][{ctx.author.name}]'))\n\t\tbash_cmd = \"git pull\"\n\t\tprocess = subprocess.Popen(bash_cmd.split(), stdout=subprocess.PIPE)\n\t\toutput, error = process.communicate()\n\t\toutput = output.decode(\"utf-8\")\n\t\tlog.info(output)\n\t\tawait ctx.send(emb(output))\n\n\t\tif error is not None:\n\t\t\tlog.error(error)", "title": "" }, { "docid": "66a998ae0a42f49efc4cc345bfaf3982", "score": "0.4769368", "text": "def graball(self):\n\n\t\tif self.debug:\n\t\t\tif self.debounce():\n\t\t\t\tfor uri in self.urilist:\n\t\t\t\t\tself.grabone(uri)\n\t\t\t\t\t\n\t\t\telse:\n\t\t\t\tprint('Not fetching because we fetched within the last second.')\n\n\t\t\tprint('Waiting...')\n\n\t\telse:\n\t\t\tif self.debounce():\n\t\t\t\tname = str(time.time()).split('.')[0] + '.html'\n\t\t\t\tfor uri in self.urilist:\n\t\t\t\t\tbasepath = op.normpath(self.outdirname + '/' + WebGetter.pathfromuri(uri) + '/')\n\n\t\t\t\t\treq = self.http.req(uri)\n\t\t\t\t\tflat = Inliner(req, uri, self.http).get()\n\n\t\t\t\t\tif self.checkhash(basepath, flat):\n\t\t\t\t\t\turidir = WebGetter.pathfromuri(uri)\n\t\t\t\t\t\tpath = self.outdirname + '/' + uridir + '/' + name\n\t\t\t\t\t\tfhandle = open(op.normpath(path), 'wb')\n\t\t\t\t\t\tfhandle.write(flat.encode('utf-8'))\n\t\t\t\t\t\tfhandle.close()", "title": "" }, { "docid": "74700b3ecfc9dd64c909eb44a17d2948", "score": "0.47403857", "text": "def main():\n with open(os.getenv('GITHUB_EVENT_PATH')) as f:\n event_info = json.loads(f.read())\n\n repo = setup_git(event_info['pull_request']['merge_commit_sha'])\n\n comment_body = ''\n new_tag = None\n if len(repo.tags) == 0:\n new_tag = 'v1.0.0'\n else:\n try:\n new_tag = semver_bump(repo)\n except ValueError:\n comment_body = 'latest tag does not conform to semver ([v]?MAJOR.MINOR.PATCH), failed to bump version'\n\n if new_tag is not None:\n create_and_push_tag(repo, event_info['pull_request']['merge_commit_sha'], new_tag)\n comment_body = f\"This PR has now been tagged as [{new_tag}](https://github.com/{os.getenv('GITHUB_REPOSITORY')}/releases/tag/{new_tag})\"\n\n comment_on_pr(event_info['number'], comment_body)", "title": "" }, { "docid": "e277e38259195fe86b0f884d8c056053", "score": "0.47390124", "text": "def run(self) -> None:\n try:\n resp = requests.get(self.url, auth=self.auth, timeout=self.timeout)\n if not resp.ok:\n logger.info(\n f\"{self.prog_name} : unable to fetch {self.url} : {resp.text}\"\n ) # pylint: disable=logging-fstring-interpolation\n return\n\n parser = MyParser()\n parser.feed(resp.text)\n if not parser.output_list:\n logger.info(\n f\"{self.prog_name} : no packages links detected in {resp.text}\"\n ) # pylint: disable=logging-fstring-interpolation\n return\n\n last_link = parser.output_list[-1]\n last_version_matches = re.search(r\"(?:(\\d+\\.[.\\d]*\\d+))\", last_link)\n if not last_version_matches:\n logger.info(\n f\"{self.prog_name} : no version found in string {last_link}\"\n ) # pylint: disable=logging-fstring-interpolation\n return\n\n last_version = last_version_matches.group(1)\n\n last_version_info = 
Version.parse(last_version)\n current_version_info = Version.parse(self.current_version)\n if current_version_info < last_version_info:\n pip_extra_index_url = \"\"\n pipx_extra_index_url = \"\"\n if self.domain != DEFAULT_PYPI:\n pip_extra_index_url = f\"--extra-index-url=https://{self.domain} \"\n pipx_extra_index_url = f\"--index-url=https://{self.domain} \"\n self.new_version_warning = click.style(\n f\"\"\"\n{self.prog_name} : new version {last_version} available (current version: {self.current_version})\nupgrade command :\n pip3 install -U {pip_extra_index_url}{self.prog_name}\nOR pipx upgrade {pipx_extra_index_url}{self.prog_name} (preferred)\"\"\",\n fg=\"bright_blue\",\n )\n except Exception as error: # pylint: disable=broad-except\n logger.exception(error)", "title": "" }, { "docid": "440da2e765acc69b4bac536335f4576a", "score": "0.47359017", "text": "def loadRoutine2():\n subprocess.call([\"notify-send\", \"i-am-bored\", \"Enjoy Terence Tao's blog today.\"])\n subprocess.call(\"xdg-open https://terrytao.wordpress.com/\", shell=True)", "title": "" }, { "docid": "6b70dbbf50edc573e553f11b9ece7e7b", "score": "0.47322673", "text": "def update():\n result = run(\"yum check-update --disablerepo='*artifactory' %s\" % (env.excludes), pty=True)\n if result.return_code == 100:\n \"\"\"Run yum update with exclusions\"\"\"\n print \"<font color=yellow>%s needs updating.</font>\" % env.host\n print (\"These are the excludes: %s\") % (env.excludes)\n sudo(\"yum -y update --disablerepo='*artifactory' %s\" % (env.excludes), pty=True)\n elif result.return_code == 0:\n print \"<font color=red>%s does not seem to need any updates, skipping...</font>\" % env.host\n elif result.return_code == 1:\n print \"<font color=red>%s returned an error</font>\" % env.host", "title": "" }, { "docid": "32c4eaddd5f9cef8039f530288385ffa", "score": "0.47188002", "text": "def hooks() -> None:\n proj().setup_git_hooks()", "title": "" }, { "docid": "aecca12f0a06a1025270e346d051060f", "score": "0.4714615", "text": "def run(self):\r\n if self.demands.refresh_metadata:\r\n self.base.cleanCli('expire-cache')\r\n if self.demands.sack_activation:\r\n lar = self.demands.available_repos\r\n self.base.fill_sack(load_system_repo='auto',\r\n load_available_repos=lar)\r\n self.base.plugins.run_sack()\r\n return self.command.run(self.base.extcmds)", "title": "" }, { "docid": "07c0d3e63e045d105b0319685d9360df", "score": "0.47138634", "text": "def main():\n clear_api()\n build()", "title": "" }, { "docid": "ac8878f32ef860b2ae5525c62ce21b59", "score": "0.4711336", "text": "def run():\n _rundocker('dev', env.docker_run_cmd)", "title": "" }, { "docid": "af26926769b9981117de6d540f6de3d9", "score": "0.46989152", "text": "def patch():\n\tCLIENT_PATH = VENV_PATH + 'src/grimoirelab-perceval/perceval/client.py'\n\tclient_patch(CLIENT_PATH).patch()\n\t\n\tINDEX_LOCK_PATH = VENV_PATH + 'src/grimoirelab-elk/grimoire_elk/elastic.py'\n\tindex_lock_patch(INDEX_LOCK_PATH).patch()\n\t\n\tGITHUB_PATH = VENV_PATH + 'src/grimoirelab-perceval/perceval/backends/core/github.py'\n\tgithub_patch(GITHUB_PATH).patch()", "title": "" }, { "docid": "0b2957d228db6b33144b45872f405ab4", "score": "0.4696051", "text": "def _pull_site_action(self, event):\n self._fetch_site()\n event.set_results({\"result\": \"site pulled\"})", "title": "" }, { "docid": "4f48ce82039e338adfac5ef776ec719d", "score": "0.46897972", "text": "def pull_code(local_folder, urlrepo=\"https://github.com/di-unipi-socc/DockerFinder.git\", branch=\"dfcustom\" ):\n Repo.clone_from(urlrepo, 
local_folder , branch=branch, depth=1)", "title": "" }, { "docid": "1b789dc76edef85501a6ba94a1737963", "score": "0.4687412", "text": "def _build_and_run_tests(self, cfg, update_step, bot_db, revision_hash,\n **kwargs):\n with_patch = 'With Patch' in kwargs.get('name') # pragma: no cover\n\n # We don't need to do a checkout if there's a patch applied, since that will\n # overwrite the local changes and potentially change the test results.\n if not with_patch: # pragma: no cover\n update_step = self._checkout_revision(update_step, bot_db, revision_hash)\n if not revision_hash:\n if update_step.presentation.properties:\n revision_hash = update_step.presentation.properties['got_revision']\n revision = build_state.BuildState(self, revision_hash, with_patch)\n # request build and wait for it only when the build is nonexistent\n if with_patch or not self._gsutil_file_exists(revision.build_file_path):\n revision.request_build()\n revision.wait_for()\n revision.download_build(update_step, bot_db)\n if self.m.chromium.c.TARGET_PLATFORM == 'android':\n self.m.chromium_android.adb_install_apk('ChromePublic.apk')\n\n return self._run_test(cfg, **kwargs)", "title": "" }, { "docid": "4d428df469aebae71ed5bb779804e6fb", "score": "0.46746722", "text": "def github_get_pull_request_all():\n return json.load(urlopen(format_repo('http://github.com/api/v2/json/pulls/{repo}')))", "title": "" }, { "docid": "6e84f59223e1d74711c47d74c4839735", "score": "0.4663502", "text": "def hotfix(url,products_dir='products'):\n with api.cd('%s/%s'%(api.env.path,products_dir)):\n with asbuildoutuser():\n #api.run(\"curl %s /tmp/hotfix.zip\"%url)\n #api.run(\"python -c \\\"import urllib; f=open('/tmp/hotfix.zip','w'); f.write(urllib.urlopen('%s').read()); f.close()\\\"\"%url)\n filename = os.path.basename(url)\n tmp = '/tmp/%s'%filename\n if not os.path.exists(tmp):\n f=open(tmp,'w')\n f.write(urllib.urlopen(url).read())\n f.close()\n api.put(tmp, tmp)\n try:\n api.run(\"unzip -o %s\"%tmp)\n except:\n api.run(\"\"\"python -c \"import zipfile;import urllib;import StringIO; zipfile.ZipFile(StringIO.StringIO(urllib.urlopen('%s').read())).extractall()\" \"\"\"%url)\n\n group = api.env['buildout-group']\n api.run(\"chgrp -R %s .\"%(group))\n api.run('rm %s'%tmp)", "title": "" }, { "docid": "15ed65a9571e5c34eacf4adbba65e157", "score": "0.46541414", "text": "def run_downloader(self):\n cmd = '''\n source import-stx\n stx -d shell --no-tty -c \"downloader -b -s -B std,rt\"\n '''\n ret = run_cmd(cmd)\n log.info(\"Downloader return code %s\", ret.returncode)\n if ret.returncode != 0:\n raise Exception(\"Error while downloading dependencies\")", "title": "" }, { "docid": "284b42169720f0c88e4a289815c5f73a", "score": "0.46497148", "text": "def git_pull():\n repo.git.pull('origin')", "title": "" }, { "docid": "72ed090ea9933838a34b9b64766667f2", "score": "0.46468872", "text": "def main() -> None:\n parser = argparse.ArgumentParser(description=main.__doc__)\n parser.add_argument(\"url\")\n args = parser.parse_args()\n\n if platform.system() == \"Darwin\":\n subprocess.call((\"open\", args.url))\n else:\n subprocess.call((\"chromium\", \"--new-window\", args.url))", "title": "" }, { "docid": "a182863158c0a8e227478cecf7a97071", "score": "0.46411553", "text": "def tools_upgrade(auth, ignore_apps=False, ignore_packages=False):\n from yunohost.app import app_upgrade\n\n failure = False\n\n # Retrieve interface\n is_api = True if msettings.get('interface') == 'api' else False\n\n if not ignore_packages:\n cache = apt.Cache()\n cache.open(None)\n 
cache.upgrade(True)\n\n # If API call\n if is_api:\n critical_packages = (\"moulinette\", \"moulinette-yunohost\",\n \"yunohost-admin\", \"yunohost-config-nginx\", \"ssowat\", \"python\")\n critical_upgrades = set()\n\n for pkg in cache.get_changes():\n if pkg.name in critical_packages:\n critical_upgrades.add(pkg.name)\n # Temporarily keep package ...\n pkg.mark_keep()\n # ... and set a hourly cron up to upgrade critical packages\n if critical_upgrades:\n msignals.display(m18n.n('packages_upgrade_critical_later',\n ', '.join(critical_upgrades)))\n with open('/etc/cron.d/yunohost-upgrade', 'w+') as f:\n f.write('00 * * * * root PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin apt-get install %s -y && rm -f /etc/cron.d/yunohost-upgrade\\n' % ' '.join(critical_upgrades))\n\n if cache.get_changes():\n msignals.display(m18n.n('upgrading_packages'))\n try:\n # Apply APT changes\n # TODO: Logs output for the API\n cache.commit(apt.progress.text.AcquireProgress(),\n apt.progress.base.InstallProgress())\n except Exception as e:\n failure = True\n logging.warning('unable to upgrade packages: %s' % str(e))\n msignals.display(m18n.n('packages_upgrade_failed'), 'error')\n else:\n msignals.display(m18n.n('done'))\n else:\n msignals.display(m18n.n('packages_no_upgrade'))\n\n if not ignore_apps:\n try:\n app_upgrade(auth)\n except Exception as e:\n failure = True\n logging.warning('unable to upgrade apps: %s' % str(e))\n msignals.display(m18n.n('app_upgrade_failed'), 'error')\n\n if not failure:\n msignals.display(m18n.n('system_upgraded'), 'success')\n\n # Return API logs if it is an API call\n if is_api:\n from yunohost.service import service_log\n return { \"log\": service_log('yunohost-api', number=\"100\").values()[0] }", "title": "" }, { "docid": "bdcfd24289cc3e52c462f3c51bb4a95f", "score": "0.46410316", "text": "def release(repo, tag, site_uri, sync_uri=None):\n build(repo, tag, site_uri, None, False)\n if (sync_uri):\n sync_site(sync_uri, site_uri)\n local('php /var/aegir/drush/drush.php @%s cache-clear all' % site_uri)\n local('php /var/aegir/drush/drush.php @%s cache-clear all' % site_uri)\n local('php /var/aegir/drush/drush.php @%s features-list' % site_uri)\n local('php /var/aegir/drush/drush.php --yes @%s features-revert-all' % site_uri)\n local('php /var/aegir/drush/drush.php @%s cache-clear all' % site_uri)", "title": "" }, { "docid": "185f862b7ef72826b0a3aa296d6c3a6b", "score": "0.46341094", "text": "def update():\n local('sudo git pull')\n local('sudo pyclean .')\n\n local('sudo ../.env/bin/pip install -q -r requirements.txt')\n local('sudo ../.env/bin/yoyo-migrate -b apply migrations/')\n\n local('sudo service uwsgi restart')", "title": "" }, { "docid": "3b3a297cc1961080794f9d33ab7ca04c", "score": "0.46320468", "text": "def fetch(ui, url, path, *args, **opts):\n if url.startswith(\"hg+ssh://\"):\n url = url[8:]\n else:\n url = url.replace(\"hg://\", \"http://\")\n try:\n cmd = locate_program(\"hg\", raise_error=True)\n except ValueError, e:\n raise VendorError(e)\n \n cmd += \" clone %s %s\" % (url, path)\n\n # exec cmd\n (child_stdin, child_stdout, child_stderr) = popen3(cmd)\n err = child_stderr.read()\n if ui.verbose >=2:\n ui.logger.info(child_stdout.read())\n if err:\n raise VendorError(str(err)) \n return", "title": "" }, { "docid": "e1e484ccfccc8043c2262ab705e6cfd0", "score": "0.4624194", "text": "def deploy():\n clear_compiled_python_files()\n git_pull()\n update_requirements()\n run_migrations()\n collectstatic()\n reload_http()", "title": "" }, { "docid": 
"e026b032939e72a71dba0fad5883f72a", "score": "0.46217918", "text": "def first_deploy():\n with cd(APPS_DIR):\n run(\"git clone %s\" % GITHUB_REPO)\n restart_apache()", "title": "" }, { "docid": "4181664b821ae34410ae8654c86d22d7", "score": "0.46207875", "text": "def fetch_runner(pkgnames, preprocessed=False):\n abspkgs = []\n aurpkgs = []\n allpkgs = []\n try:\n if preprocessed:\n allpkgs = pkgnames\n pkgnames = [p.name for p in allpkgs]\n else:\n print(':: ' + _('Fetching package information...'))\n for pkgname in pkgnames:\n pkg = None\n try:\n pkg = pkgbuilder.utils.info([pkgname])[0]\n except IndexError:\n try:\n DS.log.info('{0} not found in the AUR, checking in '\n 'repositories'.format(pkgname))\n syncpkgs = []\n for j in [i.pkgcache for i in DS.pyc.get_syncdbs()]:\n syncpkgs.append(j)\n syncpkgs = functools.reduce(lambda x, y: x + y,\n syncpkgs)\n abspkg = pyalpm.find_satisfier(syncpkgs, pkgname)\n pkg = pkgbuilder.package.ABSPackage.from_pyalpm(abspkg)\n\n except AttributeError:\n pass\n allpkgs.append(pkg)\n if not pkg:\n raise pkgbuilder.exceptions.PackageNotFoundError(\n pkgname, 'fetch')\n\n for pkg in allpkgs:\n if pkg.is_abs:\n abspkgs.append(pkg)\n else:\n aurpkgs.append(pkg)\n\n if abspkgs:\n print(_(':: Retrieving packages from asp...'))\n pm = pkgbuilder.ui.Progress(len(abspkgs))\n for pkg in abspkgs:\n pm.msg(_('retrieving {0}').format(pkg.name), True)\n rc = asp_export(pkg)\n if rc > 0:\n raise pkgbuilder.exceptions.NetworkError(\n _('Failed to retieve {0} (from ASP).').format(\n pkg.name), source='asp', pkg=pkg, retcode=rc)\n\n if aurpkgs:\n print(_(':: Retrieving packages from aur...'))\n pm = pkgbuilder.ui.Progress(len(aurpkgs))\n for pkg in aurpkgs:\n pm.msg(_('cloning {0}').format(pkg.packagebase), True)\n clone(pkg.packagebase)\n\n print(_('Successfully fetched: ') + ' '.join(pkgnames))\n except pkgbuilder.exceptions.PBException as e:\n print(':: ERROR: ' + str(e.msg))\n exit(1)", "title": "" }, { "docid": "bfa7b45ed98917204fc73742fbebb4b4", "score": "0.461377", "text": "def pull(remote: str = '') -> None:\n proj().pull(remote)", "title": "" }, { "docid": "fb2783b935a8b5d9cf3b638aabb0f2fb", "score": "0.46110207", "text": "def download_dependencies(projects, dependencies, branch, project_root):\n for dependency in dependencies:\n project = find_project_for_name(projects, dependency)\n if project is None:\n print(f'Skipping unknown dependency {dependency}')\n continue\n directory = project_root + '/' + project['path']\n run_command(f\"git fetch {project['remote']['name']} {branch}\",\n directory)", "title": "" }, { "docid": "0e4778835fa86fde2ce78706c57145f8", "score": "0.46105772", "text": "def setup_pristine_repositories():\n\n # these directories don't exist out of the box, so we may have to create them\n if not os.path.exists(main.general_wc_dir):\n os.makedirs(main.general_wc_dir)\n\n if not os.path.exists(main.general_repo_dir):\n os.makedirs(main.general_repo_dir) # this also creates all the intermediate dirs\n\n if not os.path.exists(main.other_dav_root_dir):\n os.makedirs(main.other_dav_root_dir)\n if not os.path.exists(main.non_dav_root_dir):\n os.makedirs(main.non_dav_root_dir)\n\n _setup_pristine_repo(main.greek_state,\n main.pristine_greek_repos_dir,\n main.greek_dump_dir,\n main.pristine_greek_repos_url)\n\n # NOTE: We don't use precooked trojan repositories.\n _setup_pristine_repo(main.trojan_state,\n main.pristine_trojan_repos_dir,\n main.trojan_dump_dir,\n main.pristine_trojan_repos_url,\n use_precooked=False)", "title": "" }, { "docid": 
"409bcfa9d0c2d1018b31d578567188f6", "score": "0.46065253", "text": "def github_get_pull_request_all(urls):\n return keep_trying(lambda: _query(urls.pull_list_url), urllib2.URLError,\n \"get list of all pull requests\")", "title": "" }, { "docid": "6523cd565db9f75ebfe8a2eb9b9e96cb", "score": "0.4598307", "text": "def sync(args=\"\"):\n\n spawn.process(f\"{config.aur_helper} --noconfirm -Sua {args}\")\n log.write(\"Synced AUR packages\")", "title": "" }, { "docid": "55399e63a9c7850c3a7286935e807a07", "score": "0.45883447", "text": "def _download_chrome_drivers():\n # Mac OS X\n if platform.system() == 'Darwin':\n remote_file = driver_urls['chrome_os_x']\n elif platform.system() == 'Linux':\n remote_file = driver_urls['chrome_ubuntu']\n elif platform.system() == 'Windows':\n remote_file = driver_urls['chrome_windows_10']\n else:\n raise ValueError('Unsupported OS specified: %s' % (platform.system()))\n _download_temp_file(remote_file['url'], remote_file['file_name'])", "title": "" }, { "docid": "d02629142812d403cf7a5c691f4768d0", "score": "0.45816785", "text": "def pull(keys, targets='all'):", "title": "" }, { "docid": "edfca4955955386a5f47f52a26256f3f", "score": "0.45725054", "text": "def _pull_predefined_dockerimages(self):\n dc = DockerClient()\n for url in list(self.remote_docker_image_urls.values()):\n # only pull if not present (speedup for development)\n if not FORCE_PULL:\n if len(dc.images.list(name=url)) > 0:\n LOG.debug(\"Image %r present. Skipping pull.\" % url)\n continue\n LOG.info(\"Pulling image: %r\" % url)\n # this seems to fail with latest docker api version 2.0.2\n # dc.images.pull(url,\n # insecure_registry=True)\n # using docker cli instead\n cmd = [\"docker\",\n \"pull\",\n url,\n ]\n Popen(cmd).wait()", "title": "" }, { "docid": "c4d5d13a4143133833bef60d61df731b", "score": "0.45721", "text": "def MP():\n url = \"https://github.com/m3ttiw\"\n os.startfile(url)", "title": "" }, { "docid": "ff6e80c163ea5c9b03631b43cae7ba1e", "score": "0.45707574", "text": "def DownloadChromium(channel):\n # Get the version for the current channel from omahaproxy\n platform_data = PLATFORM_MAPPING[sys.platform]\n omaha_platform = platform_data['omaha']\n version_lookup_url = VERSION_LOOKUP_URL % (omaha_platform, channel)\n response = urllib2.urlopen(version_lookup_url)\n version = response.readlines()[1].split(',')[2]\n\n # Get the base position for that version from omahaproxy\n base_pos_lookup_url = BASE_POS_LOOKUP_URL % version\n response = urllib2.urlopen(base_pos_lookup_url)\n base_pos = json.load(response)['chromium_base_position']\n\n # Find the build from that base position in cloud storage. If it's not found,\n # decrement base position until one is found.\n cloud_storage_lookup_url = CLOUD_STORAGE_LOOKUP_URL % (\n platform_data['prefix'], base_pos)\n download_url = None\n while not download_url:\n response = urllib2.urlopen(cloud_storage_lookup_url)\n prefixes = json.load(response).get('prefixes')\n if prefixes:\n download_url = CLOUD_STORAGE_DOWNLOAD_URL % (\n platform_data['prefix'], base_pos, platform_data['zip_prefix'])\n break\n base_pos = int(base_pos) - 1\n cloud_storage_lookup_url = CLOUD_STORAGE_LOOKUP_URL % (\n platform_data['prefix'], base_pos)\n\n print 'Approximating Chrome %s with chromium from base position %s.' 
% (\n version, base_pos)\n print 'Downloading from %s' % download_url\n\n tmpdir = tempfile.mkdtemp()\n zip_path = os.path.join(tmpdir, 'chrome.zip')\n with open(zip_path, 'wb') as local_file:\n local_file.write(urllib2.urlopen(download_url).read())\n zf = zipfile.ZipFile(zip_path)\n zf.extractall(path=tmpdir)\n return tmpdir, version", "title": "" } ]
a5654440ae5c404efcd3140429dcd2df
Executes 'query' in the textarea in response to the 'submit' button. Either the 'status' div or the cell's output region is updated, according to whether the output mode is set to 'html' or 'text'. An explicit COMMIT is required to modify a database file opened in either 'r' or 'w' mode. The pseudo-command ".schema" dumps the database's schema, and '.changes' gives the total number of changes to the database in this interactive session.
[ { "docid": "1aebb93a9022179b9fdb512f14d20a1d", "score": "0.64445883", "text": "def executeSQL(self, button):\n query = self.query.value\n self.history.append(query)\n self.histIdx = -1\n if (query.strip().lower().startswith('.schema')):\n table = query[7:].strip()\n if len(table):\n df = pandas.read_sql_query(self.getTable % table, self.db)\n else:\n df = pandas.read_sql_query(self.getSchema, self.db)\n if (self.output == 'html'):\n html = ''\n for i, sql in enumerate(df['sql']):\n if not (df['name'][i].startswith('sqlite_')):\n if (self.output == 'html'):\n html += self.schemaStyle % (sql.replace(' ', '&nbsp;').replace('\\n', '<br>'))\n else:\n print(sql)\n print()\n if (self.output == 'html'):\n self.status.value = '<div class=\"sqldiv\">%s</div>' % html\n self.result = None\n return\n if (query.strip().lower() == '.changes'):\n if (self.output == 'html'):\n self.status.value = self.defaultStyle % (\"SQL Total Changes = %d\" % self.db.total_changes)\n else:\n print(\"SQL Total Changes = %d\" % self.db.total_changes)\n self.result = None\n return\n elif (query.strip().lower() == 'commit'):\n self.db.commit()\n if (self.output == 'html'):\n self.status.value = self.defaultStyle % \"SQL Transaction committed\"\n else:\n print(\"SQL Transaction committed\")\n self.result = None\n return\n try:\n df = pandas.read_sql_query(query, self.db)\n if (self.output == 'html'):\n result = df.to_html(max_rows=50, classes='sqltable')\n self.status.value = self.tableStyle % result\n else:\n print(df)\n print()\n self.result = [[v for v in row.values] for i, row in df.iterrows()]\n except TypeError as error:\n if (self.output == 'html'):\n self.status.value = self.defaultStyle % \"SQL Command Succeeded\"\n else:\n print(\"SQL Command Succeeded\")\n pass\n self.result = None\n except Exception as error:\n if (self.output == 'html'):\n self.status.value = self.errorStyle % str(error)\n else:\n print(\"SQL Error: %s\" % str(error))\n pass\n self.result = None", "title": "" } ]
[ { "docid": "e21bbedcfe57e4dde89c501f78053c1d", "score": "0.5836899", "text": "def commit(message):\n settings = _load_settings()\n status = _load_settings(\".TDB\", check=[])\n settings.update(status)\n database = settings[\"database\"]\n client, msg = _connect(settings)\n click.echo(msg)\n sys.path.append(os.getcwd())\n schema_plan = __import__(\"schema\", globals(), locals(), [], 0)\n last_item = None\n documentation = {}\n for line in schema_plan.__doc__.split(\"\\n\"):\n if \"Title:\" in line:\n documentation[\"title\"] = line[6:].strip()\n last_tiem = documentation[\"title\"]\n elif \"Description:\" in line:\n documentation[\"description\"] = line[12:].strip()\n last_tiem = documentation[\"description\"]\n elif \"Authors:\" in line:\n documentation[\"authors\"] = line[8:].strip()\n last_tiem = documentation[\"authors\"]\n elif last_item is not None:\n last_item += \"\\n\" + line.strip()\n authors = documentation.get(\"authors\")\n if authors:\n authors = documentation[\"authors\"].split(\",\")\n authors = list(map(lambda x: x.strip(), authors))\n schema_obj = WOQLSchema(\n title=documentation.get(\"title\"),\n description=documentation.get(\"description\"),\n authors=authors,\n )\n for obj_str in dir(schema_plan):\n obj = eval(f\"schema_plan.{obj_str}\") # noqa: S307\n if isinstance(obj, woqlschema.TerminusClass) or isinstance(obj, enum.EnumMeta):\n if obj_str not in [\"DocumentTemplate\", \"EnumTemplate\", \"TaggedUnion\"]:\n schema_obj.add_obj(obj.__name__, obj)\n if message is None:\n message = \"Schema updated by Python client.\"\n schema_obj.commit(client, commit_msg=message, full_replace=True)\n click.echo(f\"{database} schema updated.\")", "title": "" }, { "docid": "20f4b7e800f85d30417dae67aa538cde", "score": "0.56011313", "text": "def sql_btn():\n content = None\n sql = None\n if request.method == 'POST':\n db = get_db()\n query = request.form['sql']\n query = \"\\n\".join(query.splitlines())\n #print(query)\n #pprint.pprint(query)\n sql = query.split('\\n')\n\n print(\"\")\n\n try:\n if sql[0].split(\" \")[0].upper() == \"SELECT\":\n print(\"{}\".format(\" \".join(sql)))\n content = db.execute(\" \".join(sql)).fetchall()\n else:\n print('{}'.format(\" \".join(sql)))\n db.execute(\" \".join(sql))\n db.commit()\n except BaseException as e:\n content = None\n print(str(e))\n flash(str(e))\n\n print(\"\")\n return render_template('sql.html', sql=sql, content=content)", "title": "" }, { "docid": "b21f8b9a80ccfb1a010fef94a8f80b55", "score": "0.5516456", "text": "def accept(self):\n self.parent.sqlcommand = self.sqlEdit.toPlainText()\n self.parent.errormessage = \"The data was successfully consolidated,\\nthe program will now close.\"\n self.close()", "title": "" }, { "docid": "e44149c528588baa6ceea6346e8d17cc", "score": "0.5497707", "text": "def handle_editor_command(self, text):\n while special.editor_command(text):\n filename = special.get_filename(text)\n query = (special.get_editor_query(text) or\n self.get_last_query())\n sql, message = special.open_external_editor(filename, sql=query)\n if message:\n # Something went wrong. 
Raise an exception and bail.\n raise RuntimeError(message)\n while True:\n try:\n text = self.prompt_app.prompt(default=sql)\n break\n except KeyboardInterrupt:\n sql = ''\n continue\n return text", "title": "" }, { "docid": "903985c0d2a908c9c4ce9527b832770f", "score": "0.5439685", "text": "def sqlcmd(self):\n self.update_sqlcmd = \"\\n\".join([i for i in [self.update_clause, \n self.set_clause, \n self.where_clause] if i])", "title": "" }, { "docid": "25ce078eb48a25c83fa01b4fb25a5c43", "score": "0.54361415", "text": "def test_execute_sql_query_selection(self):\n self.tab = self._add_new_query_tab()\n sql_query_string = 'SELECT name FROM streets LIMIT 3\\n UPDATE'\n self._prepare_query_text(sql_query_string)\n self.tab.run_query()\n self.assertTrue(self.tab.errors_panel.isVisible())\n self.assertIn('UPDATE', self.tab.errors_panel.toPlainText())\n\n # position cursor before `\\n` in the SQL query\n cur_pos = 32\n cur = self.tab.query.textCursor()\n cur.setPosition(0)\n cur.setPosition(cur_pos, QTextCursor.KeepAnchor)\n self.tab.query.setTextCursor(cur)\n self.assertEqual(self.tab.query.textCursor().selectedText(),\n sql_query_string[:cur_pos])\n\n self.tab.run_query()\n self.assertFalse(self.tab.errors_panel.isVisible())\n self.assertTrue(self.tab.table.isVisible())\n self.assertEqual(self.tab.table.table_data.number_layer_rows, 3)\n self.assertEqual(self.tab.table.table_data.columnCount(), 1)\n return", "title": "" }, { "docid": "0003c9ea1822aada63869891dd3bc090", "score": "0.53923744", "text": "def menu_update_query(run_args):\n run_args['command'] = input(\"input command:\")\n run_args['error'] = input(\"input error:\")\n run_args['query'] = get_query(run_args['command'], run_args['error'])", "title": "" }, { "docid": "50e971f298692a6fce4de3a911f7dfb3", "score": "0.53099227", "text": "def submitQuery(self):\n \n # Update query entries\n fpO1 = open('C:/Users/toExecute.sql',\"w\")\n data = fp2.read()\n fp2.close()\n nvlVal1 = self.queryUpdate1.get()\n nvlVal2 = self.queryUpdate2.get()\n nvlVal3 = self.queryUpdate3.get()\n nvlVal4 = nvlVal2[:4] + nvlVal2[5:7] + nvlVal2[8:10]\n print (nvlVal4)\n mydict = {'myNV1': nvlVal1, 'myNV2': nvlVal2, 'myNV3' : nvlVal3, 'myNV4' : nvlVal4}\n \n for key, value in mydict.items():\n data = data.replace(key, value) \n fpO1.write(data)\n fpO1.close()\n \n # Prepare the file and the query\n fpO1 = open(\"C:/Users/toExecute.sql\")\n outFile1 = open(\"C:/Users/query1.sql\", \"w\")\n outFile2 = open(\"C:/Users/query2.sql\", \"w\")\n outFile3 = open(\"C:/Users/query3.sql\", \"w\")\n outFile4 = open(\"C:/Users/query4.sql\", \"w\")\n buffer = []\n keepCurrentSet = True\n for line in fpO1:\n buffer.append(line)\n if line.startswith(\"/*A\"):\n #---- starts a new data set\n if keepCurrentSet:\n outFile1.write(\"\".join(buffer))\n #now reset our state\n keepCurrentSet = False\n buffer = []\n \n elif line.startswith(\"/*B\"):\n if keepCurrentSet:\n outFile2.write(\"\".join(buffer))\n keepCurrentSet = False\n buffer = []\n \n elif line.startswith(\"/*C\"):\n if keepCurrentSet:\n outFile3.write(\"\".join(buffer))\n keepCurrentSet = False\n buffer = []\n \n elif line.startswith(\"/*D\"):\n if keepCurrentSet:\n outFile4.write(\"\".join(buffer))\n keepCurrentSet = False\n buffer = []\n \n elif line.startswith(\";\"):\n keepCurrentSet = True\n \n fpO1.close()\n outFile1.close()\n outFile2.close()\n outFile3.close()\n outFile4.close()\n \n outFile1 = open(\"C:/Users/query1.sql\", \"r\")\n qury1 = outFile1.read()\n \n outFile2 = open(\"C:/Users/query2.sql\", \"r\")\n qury2 = 
outFile2.read()\n \n outFile3 = open(\"C:/Users/query3.sql\", \"r\")\n qury3 = outFile3.read()\n \n outFile4 = open('C:/Users/query4.sql', 'r')\n qury4 = outFile4.read()\n \n status = ' --- Connection successful --- Query successfully executed ---'\n global cursor1\n global cursor2\n global cursor3\n global cursor4\n global data1\n global data2\n global data3\n global data4\n global fields1, fields2, fields3, fields4\n global names1, names2, names3, traiames4\n \n # Select instance, Open connection, retrieve cursor and execute query\n if self.var.get() == 'IE301':\n connection = pymssql.connect(server ='FRER0973\\IO13',user = 'OMUser', password = '%Essilor1%', database = 'OrderDB')\n connection2 = pymssql.connect(server ='FRER0973\\ION3',user = 'OMUser', password = '%Essilor1%', database = 'OnRouteDB')\n \n cursor1 = connection.cursor()\n cursor1.execute(qury1)\n data1 = cursor1.fetchall()\n fields1 = cursor1.description # metadata from query\n names1 = [x[0] for x in cursor1.description]\n cursor1.close()\n \n cursor2 = connection.cursor()\n cursor2.execute(qury2)\n data2 = cursor2.fetchall()\n fields2 = cursor2.description # metadata from query\n names2 = [x[0] for x in cursor2.description]\n cursor2.close()\n \n cursor3 = connection.cursor()\n cursor3.execute(qury3)\n data3 = cursor3.fetchall()\n fields3 = cursor3.description # metadata from query\n names3 = [x[0] for x in cursor3.description]\n cursor3.close()\n \n cursor4 = connection2.cursor()\n cursor4.execute(qury4)\n data4 = cursor4.fetchall()\n fields4 = cursor4.description # metadata from query\n names4 = [x[0] for x in cursor4.description]\n cursor4.close()\n \n self.submitStatus.setentry(status)\n \n elif self.var.get() == 'IE401':\n connection = pymssql.connect(server='FRER0972\\IO14', user='OMUser', password='%Essilor1%', database='OrderDB')\n connection2 = pymssql.connect(server='FRER0972\\ION4', user='OMUser', password='%Essilor1%', database='OnRouteDB')\n \n cursor1 = connection.cursor()\n cursor1.execute(qury1)\n data1 = cursor1.fetchall()\n fields1 = cursor1.description # metadata from query\n names1 = [x[0] for x in cursor1.description]\n cursor1.close()\n \n cursor2 = connection.cursor()\n cursor2.execute(qury2)\n data2 = cursor2.fetchall()\n fields2 = cursor2.description # metadata from query\n names2 = [x[0] for x in cursor2.description]\n cursor2.close()\n \n cursor3 = connection.cursor()\n cursor3.execute(qury3)\n data3 = cursor3.fetchall()\n fields3 = cursor3.description # metadata from query\n names3 = [x[0] for x in cursor3.description]\n cursor3.close()\n \n cursor4 = connection2.cursor()\n cursor4.execute(qury4)\n data4 = cursor4.fetchall()\n fields4 = cursor4.description # metadata from query\n names4 = [x[0] for x in cursor4.description]\n cursor4.close()\n \n self.submitStatus.setentry(status)\n \n elif self.var.get() == 'IE402':\n connection = pymssql.connect(server='FRER1420\\IO14', user='OMUser', password='%Essilor1%', database='OrderDB')\n connection2 = pymssql.connect(server='FRER1420\\ION4', user='OMUser', password='%Essilor1%', database='OnRouteDB')\n \n cursor1 = connection.cursor()\n cursor1.execute(qury1)\n data1 = cursor1.fetchall()\n fields1 = cursor1.description # metadata from query\n names1 = [x[0] for x in cursor1.description]\n cursor1.close()\n \n cursor2 = connection.cursor()\n cursor2.execute(qury2)\n data2 = cursor2.fetchall()\n fields2 = cursor2.description # metadata from query\n names2 = [x[0] for x in cursor2.description]\n cursor2.close()\n \n cursor3 = connection.cursor()\n 
cursor3.execute(qury3)\n data3 = cursor3.fetchall()\n fields3 = cursor3.description # metadata from query\n names3 = [x[0] for x in cursor3.description]\n cursor3.close()\n \n cursor4 = connection2.cursor()\n cursor4.execute(qury4)\n data4 = cursor4.fetchall()\n fields4 = cursor4.description # metadata from query\n names4 = [x[0] for x in cursor4.description]\n cursor4.close()\n \n self.submitStatus.setentry(status)\n \n elif self.var.get() == 'IE501':\n connection = pymssql.connect(server='FRER1308\\IO14', user='OMUser', password='%Essilor1%', database='OrderDB')\n connection2 = pymssql.connect(server='FRER1308\\ION4', user='OMUser', password='%Essilor1%', database='OnRouteDB')\n \n cursor1 = connection.cursor()\n cursor1.execute(qury1)\n data1 = cursor1.fetchall()\n fields1 = cursor1.description # metadata from query\n names1 = [x[0] for x in cursor1.description]\n cursor1.close()\n \n cursor2 = connection.cursor()\n cursor2.execute(qury2)\n data2 = cursor2.fetchall()\n fields2 = cursor2.description # metadata from query\n names2 = [x[0] for x in cursor2.description]\n cursor2.close()\n \n cursor3 = connection.cursor()\n cursor3.execute(qury3)\n data3 = cursor3.fetchall()\n fields3 = cursor3.description # metadata from query\n names3 = [x[0] for x in cursor3.description]\n cursor3.close()\n \n cursor4 = connection2.cursor()\n cursor4.execute(qury4)\n data4 = cursor4.fetchall()\n fields4 = cursor4.description # metadata from query\n names4 = [x[0] for x in cursor4.description]\n cursor4.close()\n \n self.submitStatus.setentry(status)\n \n elif self.var.get() == 'IFF02':\n connection = pymssql.connect(server='FRER1344\\IO1F', user='OMUser', password='%Essilor1%', database='OrderDB')\n connection2 = pymssql.connect(server='FRER1344\\IONF', user='OMUser', password='%Essilor1%', database='OnRouteDB')\n \n cursor1 = connection.cursor()\n cursor1.execute(qury1)\n data1 = cursor1.fetchall()\n fields1 = cursor1.description # metadata from query\n names1 = [x[0] for x in cursor1.description]\n cursor1.close()\n \n cursor2 = connection.cursor()\n cursor2.execute(qury2)\n data2 = cursor2.fetchall()\n fields2 = cursor2.description # metadata from query\n names2 = [x[0] for x in cursor2.description]\n cursor2.close()\n \n cursor3 = connection.cursor()\n cursor3.execute(qury3)\n data3 = cursor3.fetchall()\n fields3 = cursor3.description # metadata from query\n names3 = [x[0] for x in cursor3.description]\n cursor3.close()\n \n cursor4 = connection2.cursor()\n cursor4.execute(qury4)\n data4 = cursor4.fetchall()\n fields4 = cursor4.description # metadata from query\n names4 = [x[0] for x in cursor4.description]\n cursor4.close()\n \n self.submitStatus.setentry(status)\n \n elif self.var.get() == 'New server':\n \n parser = RawConfigParser()\n parser.read(file_path2)\n myserver = parser.get('add_instance', 'server')\n myserver2 = parser.get('add_instance', 'server2')\n myuser = parser.get('add_instance', 'user')\n mypassword = parser.get('add_instance', 'password')\n mydatabase = parser.get('add_instance', 'dbname')\n \n connection = pymssql.connect(server = myserver, user = myuser, password = mypassword, database = mydatabase)\n connection2 = pymssql.connect(server = myserver2, user = myuser, password = mypassword, database = mydatabase2)\n \n cursor1 = connection.cursor()\n cursor1.execute(qury1)\n data1 = cursor1.fetchall()\n fields1 = cursor1.description # metadata from query\n names1 = [x[0] for x in cursor1.description]\n cursor1.close()\n \n cursor2 = connection.cursor()\n cursor2.execute(qury2)\n 
data2 = cursor2.fetchall()\n fields2 = cursor2.description # metadata from query\n names2 = [x[0] for x in cursor2.description]\n cursor2.close()\n \n cursor3 = connection.cursor()\n cursor3.execute(qury3)\n data3 = cursor3.fetchall()\n fields3 = cursor3.description # metadata from query\n names3 = [x[0] for x in cursor3.description]\n cursor3.close()\n \n cursor4 = connection2.cursor()\n cursor4.execute(qury4)\n data4 = cursor4.fetchall()\n fields4 = cursor4.description # metadata from query\n names4 = [x[0] for x in cursor4.description]\n cursor4.close()\n \n self.submitStatus.setentry(status)\n \n else:\n status = '--- Connection failed --- No query executed ---'\n self.submitStatus.setentry(status)\n \n self.master.withdraw()\n infoMessage = \"Please wait ! The export is running...\\nThe window will appear once exporting finish.\\nYou can continue to use your PC.\"\n showinfo(\"Running...\", infoMessage)\n \n \"\"\"Export query results to Excel file\"\"\" \n df = pd.DataFrame(list(data1), columns = names1)\n df2 = pd.DataFrame(list(data2), columns = names2)\n df3 = pd.DataFrame(list(data3), columns = names3)\n df4 = pd.DataFrame(list(data4), columns = names4)\n \n # Save a copy of the xls file for upload\n save_path = 'C:/temp/Results.xlsx' \n writer = pd.ExcelWriter(save_path, engine='xlsxwriter')\n list_dfs = [df, df2, df3, df4]\n for n, df in enumerate(list_dfs):\n df.to_excel(writer,'sheet%s' % n)\n writer.save()\n \n src_filename = 'C:/temp/Results.xlsx'\n dest_filename = 'C:/temp/Template_Report.xlsx'\n \n wb1 = openpyxl.load_workbook(dest_filename)\n wb2 = openpyxl.load_workbook(src_filename)\n \n ws2 = wb2.get_sheet_by_name('sheet0')\n ws22 = wb2.get_sheet_by_name('sheet1')\n ws222 = wb2.get_sheet_by_name('sheet2')\n ws2222 = wb2.get_sheet_by_name('sheet3')\n \n sh11_count = (ws222.max_row - 1)\n \n #ws0 = wb1.worksheets[0]\n ws1 = wb1.worksheets[1]\n ws11 = wb1.worksheets[2]\n ws111 = wb1.worksheets[3]\n ws1111 = wb1.worksheets[4]\n \n sh0_count = ws2.max_column\n sh1_count = ws22.max_column\n sh2_count = ws222.max_column\n sh3_count = ws2222.max_column\n \n for i in range(1, sh0_count):\n col_d = ws2.columns[i] # 0-indexing\n for idx, cell in enumerate(col_d, 1):\n ws1.cell(row = idx, column = i).value = cell.value #1-indexing\n ws1['A1'] = 'Request ID'\n ws1['B1'] = 'Order Entry'\n ws1['C1'] = 'Customer Number'\n ws1['D1'] = 'DN Number'\n \n ws1['P1'] = 'Creation Hour'\n ws1['Q1'] = 'Allocation Hour'\n ws1['R1'] = 'Diff Crea/Allo (in Second)'\n ws1['S1'] = 'Round Allocation Time (in Minute)'\n \n for j in range(1, sh1_count):\n col_d = ws22.columns[j] # 0-indexing\n for idx, cell in enumerate(col_d, 1):\n ws11.cell(row = idx, column = j).value = cell.value #1-indexing\n ws11['A1'] = 'Order Entry'\n ws11['B1'] = 'Customer Number'\n ws11['C1'] = 'Request ID'\n ws11['D1'] = 'DN Number'\n ws11['E1'] = 'Product Code'\n ws11['F1'] = 'Product Label'\n ws11['G1'] = 'Status'\n ws11['H1'] = 'Reason'\n ws11['I1'] = 'Lab'\n ws11['J1'] = 'Creation Date'\n ws11['K1'] = 'Severity'\n ws11['L1'] = 'Error Message'\n ws11['M1'] = 'Detailed Error Message'\n \n for k in range(1, sh2_count):\n col_d = ws222.columns[k] # 0-indexing\n for idx, cell in enumerate(col_d, 1):\n ws111.cell(row = idx, column = k).value = cell.value #1-indexing\n ws111['A1'] = 'Order Entry'\n ws111['B1'] = 'Customer Number'\n ws111['C1'] = 'Request ID'\n ws111['D1'] = 'DN Number'\n ws111['G1'] = 'Customer Reference'\n ws111['H1'] = 'Status'\n ws111['I1'] = 'Reason'\n ws111['J1'] = 'Lab'\n ws111['K1'] = 'Creation Date'\n 
\n for l in range(1, sh3_count):\n col_d = ws2222.columns[l] # 0-indexing\n for idx, cell in enumerate(col_d, 1):\n ws1111.cell(row = idx, column = l).value = cell.value #1-indexing\n ws1111['B1'] = 'Request ID'\n ws1111['C1'] = 'Error Message'\n ws1111['D1'] = 'Error Value'\n \n \n '''for rowNumP in range(2, (ws1.get_highest_row()) + 1):\n if ws1.cell(row = rowNumP, column = 10).value is not None:\n CreationDT = ws1.cell(row = rowNumP, column = 10).value\n CreationT = str(CreationDT)\n Cre = re.search(' (.+?)\\.', CreationT).group(1)\n CreT = datetime.datetime.strptime(Cre, \"%H:%M:%S\")\n CreationTime = CreT.strftime(\"%I:%M:%S %p\")\n ws1.cell(row = rowNumP, column = 16).value = CreationTime\n creTime = time.strptime(CreationTime.split(' ')[0],'%H:%M:%S')\n CrSec = datetime.timedelta(hours=creTime.tm_hour,minutes=creTime.tm_min,seconds=creTime.tm_sec).total_seconds()\n \n if ws1.cell(row = rowNumP, column = 11).value is not None:\n AllocationDT = ws1.cell(row = rowNumP, column = 11).value\n AllocationT = str(AllocationDT)\n Alo = re.search('(?<= )\\S+', AllocationT).group(0)\n OK = re.search(r'\\.', Alo)\n if OK:\n AloT = datetime.datetime.strptime(Alo, \"%H:%M:%S.%f\")\n AllocationTime = AloT.strftime(\"%I:%M:%S %p\")\n ws1.cell(row = rowNumP, column = 17).value = AllocationTime\n aloTime = time.strptime(AllocationTime.split(' ')[0],'%H:%M:%S')\n alSec = datetime.timedelta(hours=aloTime.tm_hour,minutes=aloTime.tm_min,seconds=aloTime.tm_sec).total_seconds()\n ws1.cell(row = rowNumP, column = 18).value = alSec - CrSec\n ws1.cell(row = rowNumP, column = 19).value = round(((ws1.cell(row = rowNumP, column = 18).value)/60), 1)\n \n else:\n AloT = datetime.datetime.strptime(Alo, \"%H:%M:%S\")\n AllocationTime = AloT.strftime(\"%I:%M:%S %p\")\n ws1.cell(row = rowNumP, column = 17).value = AllocationTime\n aloTime = time.strptime(AllocationTime.split(' ')[0],'%H:%M:%S')\n alSec = datetime.timedelta(hours=aloTime.tm_hour,minutes=aloTime.tm_min,seconds=aloTime.tm_sec).total_seconds()\n ws1.cell(row = rowNumP, column = 18).value = alSec - CrSec\n ws1.cell(row = rowNumP, column = 19).value = round(((ws1.cell(row = rowNumP, column = 18).value)/60), 1)\n \n \n if ws1.cell(row = rowNumP, column = 1).value is not None and ws1.cell(row = rowNumP, column = 19).value is None:\n ws1.cell(row = rowNumP, column = 19).value = 0.00'''\n \n wb1.save('C:/temp/ReportUP.xlsx')\n \n # Show the tool window again \n status = \"--- Successfully exported to the final reports file --- Look at C:\\Users ---\"\n self.submitStatus.setentry('')\n self.submitStatus.setentry(status)\n self.master.deiconify()\n infoMessage = \"Allocation rate = %d commands\" % sh11_count\n showinfo(\"Finish\", infoMessage)", "title": "" }, { "docid": "862259078688ff55fff89a2532cf968b", "score": "0.5165891", "text": "def fm_edit(self):\n return self._commit(\"edit\")", "title": "" }, { "docid": "ae85c4117e5e5a27a836b03e16888b9c", "score": "0.51652116", "text": "def act_query(query):\n connection = conn\n cursor = connection.cursor()\n num_affected_rows = cursor.execute(query)\n cursor.close()\n connection.commit()\n return num_affected_rows", "title": "" }, { "docid": "6fa3c136ecd401824890d8232e62061b", "score": "0.515516", "text": "def _prepare_query_text(self, sql_query):\n self.tab.gdb = self.local_gdb\n sql_query_string = sql_query\n self.tab.query.setText(sql_query_string)\n return", "title": "" }, { "docid": "0bfa1a48b30594357ad7d7c6116285e2", "score": "0.5114265", "text": "def _run_query(self, query):\n try:\n result = 
self.cursor.execute(query)\n except:\n messagebox.showerror(\"Error\",\"\"\"\nSomething went wrong executing the query.\n\nCheck if you are connected to the database.\n\nIf you are trying to manipulate data, check if you filled in the fields correctly.\n\nIf you were using delete or update you must select a register on the table before.'\"\"\")\n\n self.connection.commit()\n return result", "title": "" }, { "docid": "516f72418c9d0c4ad35c8b4a751a446c", "score": "0.5096106", "text": "def real_process(text):\n prod = parser(text, utcnow=common.utcnow())\n if not common.dbwrite_enabled():\n return\n if prod.warnings:\n common.email_error(\"\\n\".join(prod.warnings), text)\n df = DBPOOL.runInteraction(prod.sql)\n df.addCallback(got_data)\n df.addErrback(common.email_error, text)", "title": "" }, { "docid": "87b8033fa8192771d155bd96aaecfc69", "score": "0.50843114", "text": "def modify(self, SQL, params='', verbose=True):\n try:\n \n # Make sure the database isn't locked\n self.conn.commit()\n \n if SQL.lower().startswith('select'):\n print('Use self.query method for queries.')\n else:\n self.list(SQL, params)\n self.conn.commit()\n if verbose:\n print('Number of records modified: {}'.format(self.list(\"SELECT changes()\").fetchone()[0] or '0'))\n except:\n print(\"Could not execute: \"+SQL)", "title": "" }, { "docid": "9d52685d54bbc8d787972983ad9064f3", "score": "0.507759", "text": "def docmd(self, fmt, *args):\n if not self.gdb.accepting_cmd():\n self.gdb.console_print(\n \"gdb busy: command discarded, please retry\\n\")\n return False\n\n self.gdb.gdb_busy = True\n self.result = ''\n return self.send(fmt, *args)", "title": "" }, { "docid": "477d56fe945bb8eb90a93a42b0e61ba9", "score": "0.50667006", "text": "def _commit(self):\r\n self.conn.commit()\r\n self.changed = False", "title": "" }, { "docid": "97a3289e1cb047263b16b7dd0126ca7d", "score": "0.5056425", "text": "def submit_query(self, query):\n return self.app.post('/query_database/', data=dict(\n query=query\n ), follow_redirects=True)", "title": "" }, { "docid": "56989304d6f6c38df5f879e37448494d", "score": "0.50355613", "text": "def cmd(ctx, url, key, secret, export_format, table_format, sq, folder, create, echo):\n client = ctx.obj.start_client(url=url, key=key, secret=secret)\n\n p_grp = ctx.parent.parent.command.name\n apiobj = getattr(client, p_grp)\n\n with ctx.obj.exc_wrap(wraperror=ctx.obj.wraperror):\n data = apiobj.saved_query.update_folder(\n sq=sq, folder=folder, create=create, echo=echo, as_dataclass=True\n )\n ctx.obj.echo_ok(f\"Successfully updated Saved Query {data.name}\")\n\n click.secho(EXPORT_FORMATS[export_format](data=data, table_format=table_format))\n ctx.exit(0)", "title": "" }, { "docid": "0a4a5faa9747af799d7a9388f7c58af2", "score": "0.5008634", "text": "def mysql(self, cursor, query):\n if isinstance(query, basestring):\n query = [query]\n for q in query:\n cursor.execute(q)\n q.replace('\\n', ' ')\n self.print_log('Executed: {}'.format(q))", "title": "" }, { "docid": "f710ed780a16500ee0ba33da855f6ca2", "score": "0.49840534", "text": "def opModify(self, sql):\n insert_num = self.cur.execute(sql) # execute sql\n self.coon.commit()\n return insert_num", "title": "" }, { "docid": "2be7523496aeffd2f8e5ae94f3cf4701", "score": "0.49723747", "text": "def prompt(skipfile, skipall, query, chunk):\n newpatches = None\n if skipall is not None:\n return skipall, skipfile, skipall, newpatches\n if skipfile is not None:\n return skipfile, skipfile, skipall, newpatches\n while True:\n resps = messages[b'help'][operation]\n # 
IMPORTANT: keep the last line of this prompt short (<40 english\n # chars is a good target) because of issue6158.\n r = ui.promptchoice(b\"%s\\n(enter ? for help) %s\" % (query, resps))\n ui.write(b\"\\n\")\n if r == 8: # ?\n for c, t in ui.extractchoices(resps)[1]:\n ui.write(b'%s - %s\\n' % (c, encoding.lower(t)))\n continue\n elif r == 0: # yes\n ret = True\n elif r == 1: # no\n ret = False\n elif r == 2: # Edit patch\n if chunk is None:\n ui.write(_(b'cannot edit patch for whole file'))\n ui.write(b\"\\n\")\n continue\n if chunk.header.binary():\n ui.write(_(b'cannot edit patch for binary file'))\n ui.write(b\"\\n\")\n continue\n # Patch comment based on the Git one (based on comment at end of\n # https://mercurial-scm.org/wiki/RecordExtension)\n phelp = b'---' + _(\n b\"\"\"\nTo remove '-' lines, make them ' ' lines (context).\nTo remove '+' lines, delete them.\nLines starting with # will be removed from the patch.\n\nIf the patch applies cleanly, the edited hunk will immediately be\nadded to the record list. If it does not apply cleanly, a rejects\nfile will be generated: you can use that when you try again. If\nall lines of the hunk are removed, then the edit is aborted and\nthe hunk is left unchanged.\n\"\"\"\n )\n (patchfd, patchfn) = pycompat.mkstemp(\n prefix=b\"hg-editor-\", suffix=b\".diff\"\n )\n ncpatchfp = None\n try:\n # Write the initial patch\n f = util.nativeeolwriter(os.fdopen(patchfd, 'wb'))\n chunk.header.write(f)\n chunk.write(f)\n f.write(\n b''.join(\n [b'# ' + i + b'\\n' for i in phelp.splitlines()]\n )\n )\n f.close()\n # Start the editor and wait for it to complete\n editor = ui.geteditor()\n ret = ui.system(\n b\"%s \\\"%s\\\"\" % (editor, patchfn),\n environ={b'HGUSER': ui.username()},\n blockedtag=b'filterpatch',\n )\n if ret != 0:\n ui.warn(_(b\"editor exited with exit code %d\\n\") % ret)\n continue\n # Remove comment lines\n patchfp = open(patchfn, 'rb')\n ncpatchfp = stringio()\n for line in util.iterfile(patchfp):\n line = util.fromnativeeol(line)\n if not line.startswith(b'#'):\n ncpatchfp.write(line)\n patchfp.close()\n ncpatchfp.seek(0)\n newpatches = parsepatch(ncpatchfp)\n finally:\n os.unlink(patchfn)\n del ncpatchfp\n # Signal that the chunk shouldn't be applied as-is, but\n # provide the new patch to be used instead.\n ret = False\n elif r == 3: # Skip\n ret = skipfile = False\n elif r == 4: # file (Record remaining)\n ret = skipfile = True\n elif r == 5: # done, skip remaining\n ret = skipall = False\n elif r == 6: # all\n ret = skipall = True\n elif r == 7: # quit\n raise error.CanceledError(_(b'user quit'))\n return ret, skipfile, skipall, newpatches", "title": "" }, { "docid": "8278ced69daf481d385aafa53c4373e4", "score": "0.49688333", "text": "def test_sql_code_styling(self):\n self.tab = self._add_new_query_tab()\n sql_query_string = 'SELECT name FROM streets LIMIT 3'\n self._prepare_query_text(sql_query_string)\n\n cur = self.tab.query.textCursor()\n cur.setPosition(0)\n cur.setPosition(6, QTextCursor.KeepAnchor)\n self.tab.query.setTextCursor(cur)\n return", "title": "" }, { "docid": "66ae32f923589713677394732ebbd070", "score": "0.49606788", "text": "def commit(self):\n self.buffer.commit()", "title": "" }, { "docid": "f87c382a88f3d488b69292b8ed3085a2", "score": "0.4956843", "text": "def command_sql(self, event):\n conn = database.obj.get_conn()\n\n try:\n tbl = MessageTable(codeblock=False)\n\n with conn.cursor() as cur:\n start = time.time()\n cur.execute(event.codeblock.format(e=event))\n dur = time.time() - start\n if not 
cur.description:\n return event.msg.reply('_took {}ms - no result_'.format(\n int(dur * 1000)))\n tbl.set_header(*[desc[0] for desc in cur.description])\n\n for row in cur.fetchall():\n tbl.add(*row)\n\n result = tbl.compile()\n if len(result) > 1900:\n return event.msg.reply(\n '_took {}ms_'.format(int(dur * 1000)),\n attachments=[('result.txt', result)])\n\n event.msg.reply('```' + result + '```\\n_took {}ms_\\n'.format(\n int(dur * 1000)))\n except psycopg2.Error as e:\n event.msg.reply('```{}```'.format(e.pgerror))", "title": "" }, { "docid": "b716b577b4adc31a962470803b29ae1d", "score": "0.4952748", "text": "def commitdata(self):\n try:\n self.db.execute(self.sqlcommand)\n self.conn.commit()\n except Exception as e:\n self.setVisible(False)\n self.errormessage = \"Error Data Commit\\n\" + \"Error committing the data for: \" + \\\n self.enstr + \" \" + self.rustr + \"\\nError: \" + str(e) + \"\\nCommand: \" + self.sqlcommand\n errorgui = ErrorDisplay(self)\n errorgui.activateWindow()\n errorgui.exec()\n self.setVisible(True)\n self.activateWindow()", "title": "" }, { "docid": "f5f4cc0fe27c1e914fd6b04f89f3b14f", "score": "0.49368232", "text": "def _execute_sql(self, sql, user_gdb=None):\n self.tab = self.ui.tab_widget.currentWidget()\n if user_gdb:\n self.tab.gdb = Geodatabase(user_gdb)\n else:\n self.tab.gdb = self.local_gdb\n self.tab.query.setPlainText(sql)\n self.tab.run_query()\n return", "title": "" }, { "docid": "6803e96604a7d4f40b6bae3f39138526", "score": "0.49363384", "text": "def commit(self):\r\n return self.conn.commit()", "title": "" }, { "docid": "5e8e074b324b9c1f8a8ea2349c3c2093", "score": "0.49327952", "text": "def command_edit_answer(update, context):\n\n chat_data = context.chat_data\n\n text = (\n '___DESCRIPTION___: You are in command editting mode. 
'\n 'You can either send new messages to add them '\n 'or delete your previous messages.'\n )\n update.message.reply_text(\n text=text,\n reply_markup=keyboards.edit_asnwer_markup(),\n parse_mode='MARKDOWN',\n )\n command = chat_data['cmd_instance']\n chat_data['edit_actions_log'] = []\n chat_data['msgs_count'] = command.message_set.count()\n chat_data['msgs_last_id'] = 0\n chat_data['msgs_to_delete'] = []\n\n inform_number_of_commands(update, context)\n return states.SEND_EDIT_MESSAGE", "title": "" }, { "docid": "729b45eda5b7b240df53452d7a76e7f7", "score": "0.49326035", "text": "def __init__(self, dbfilename, initialize='', mode='r', output='html'):\n if (output.lower() in ['html', 'text']):\n self.output = output.lower()\n else:\n print('Unsupported output format: \"%s\", use one of \"html\" or \"text\"' % output)\n self.result = None\n self.history = []\n self.histIdx = 0\n buttonRow = []\n self.query = ipywidgets.Textarea(description=\"SQL:\", value=initialize, \n layout=ipywidgets.Layout(width=\"600px\", height=\"100px\", font_weight=\"bold\"))\n self.query.color = \"#000040\"\n\n self.executeButton = ipywidgets.Button(description=\"Execute\")\n self.executeButton.width = \"100px\"\n self.executeButton.margin = \"10px 5px 10px 72px\"\n self.executeButton.on_click(self.executeSQL)\n buttonRow.append(self.executeButton)\n \n self.submitButton = ipywidgets.Button(description=\"Submit\")\n self.submitButton.width = \"100px\"\n self.submitButton.margin = \"10px 5px 10px 72px\"\n self.submitButton.on_click(self.executeSQL)\n buttonRow.append(self.submitButton)\n \n self.prevButton = ipywidgets.Button(description=\"prev\")\n self.prevButton.width = \"50px\"\n self.prevButton.margin = \"10px 5px\"\n self.prevButton.on_click(self.prevQuery)\n buttonRow.append(self.prevButton)\n \n self.nextButton = ipywidgets.Button(description=\"next\")\n self.nextButton.width = \"50px\"\n self.nextButton.margin = \"10px 5px\"\n self.nextButton.visible = False\n self.nextButton.on_click(self.nextQuery)\n buttonRow.append(self.nextButton)\n\n if (self.output.lower() == 'html'):\n self.status = ipywidgets.HTML()\n\n self.container = ipywidgets.VBox(children=[self.query, ipywidgets.HBox(children=buttonRow), self.status])\n if isinstance(dbfilename, sqlite3.Connection):\n self.db = dbfilename\n if (self.output == 'html'):\n self.status.value = 'Using existing database connection'\n else:\n print('Using existing database connection')\n elif ('r' in mode) and os.path.isfile(dbfilename):\n self.db = sqlite3.connect(dbfilename)\n if (self.output == 'html'):\n self.status.value = 'Connected to database: <em>&quot;%s&quot;</em>' % dbfilename\n else:\n print('Connected to database: \"%s\"' % dbfilename)\n elif ('w' in mode):\n if os.path.isfile(dbfilename):\n os.remove(dbfilename)\n self.db = sqlite3.connect(dbfilename)\n if (self.output == 'html'):\n self.status.value = 'Created database: <em>&quot;%s&quot;</em>' % dbfilename\n else:\n print('Created database: \"%s\"' % dbfilename)\n else:\n if (self.output == 'html'):\n self.status.value = self.errorStyle % (\"Database &quot;%s&quot; does not exist\" % dbfilename)\n else:\n print('Connected to database: \"%s\"' % dbfilename)\n self.query.close()\n self.submitButton.close()\n display(self.container)", "title": "" }, { "docid": "1322df4aef4a5a154205979db32c3e97", "score": "0.49315444", "text": "def commit():", "title": "" }, { "docid": "7389315399befd2c27903b213802e9d4", "score": "0.48991475", "text": "def execute(self, _, db_conn):\n\n 
self.create_additional_input_column(db_conn)\n\n failed = set([])\n cursor = db_conn.cursor()\n update_sql = \"UPDATE input SET %s = ? WHERE identifier = ?\" % (\n self.column_name,\n )\n\n # Read the input documents\n annotation_set = self.get_input_set(db_conn)\n\n for count, (identifier, text) in enumerate(annotation_set):\n # Annotate each input document\n value = self.produce_annotation(identifier, text, db_conn)\n if value is None:\n failed.add(identifier)\n logging.debug(\"Annotation failed: %d\", identifier)\n # Save the annotation\n cursor.execute(update_sql, (value, identifier))\n if count % 20 == 0:\n logging.debug(\"Annotation %.2f%% complete\", count * 100.0 / len(annotation_set))\n\n if len(failed) != 0:\n logging.info(\"Failed to annotate %d entries\", len(failed))\n\n logging.info(\"Committing changes...\")\n db_conn.commit()\n return True, db_conn", "title": "" }, { "docid": "f97374807375d7034db1c5f89e01e4b5", "score": "0.489561", "text": "def Commit(self):\n self.cursor.commit()", "title": "" }, { "docid": "6645904bec6fd3c25fc288abcd501e49", "score": "0.4893254", "text": "def submitQuery(self, query):\r\n return True", "title": "" }, { "docid": "ce1cc1d12dcb8ff4baaf12c7c167d458", "score": "0.48835176", "text": "def commit(self):", "title": "" }, { "docid": "ce1cc1d12dcb8ff4baaf12c7c167d458", "score": "0.48835176", "text": "def commit(self):", "title": "" }, { "docid": "ce1cc1d12dcb8ff4baaf12c7c167d458", "score": "0.48835176", "text": "def commit(self):", "title": "" }, { "docid": "41ef534238e9c6f7b3718c606c670884", "score": "0.4880506", "text": "def GetQuery(command):\n\n QueryTemplate = {\n \"update\":\"UPDATE {table} SET {column} = {value} WHERE {condition}\",\n \"delete\":\"DELETE FROM {table} WHERE {condition}\",\n \"select\":\"SELECT {columns} FROM {table} WHERE \",\n \"select_all\":\"SELECT {columns} FROM {table}\",\n \"get_years\":\"SELECT DISTINCT DATENAME({Year_Month},{column}) AS YEARS FROM {table}\",\n \"get_months\":\"SELECT DISTINCT DATENAME({Year_Month},{column}) AS MONTHS FROM {table}\"\n }\n return QueryTemplate.get(command)", "title": "" }, { "docid": "63943be5230060cc4bee018993c6c9b6", "score": "0.48783872", "text": "def commit(self):\n self.conn.commit()", "title": "" }, { "docid": "59cf71675fb17558c89f11e70d15636c", "score": "0.4872711", "text": "def commit(self):\r\n return self.adaptor.commit()", "title": "" }, { "docid": "632f218c0df7a062b4899bdec09ecf55", "score": "0.48614815", "text": "def commit ( self ):\n pass", "title": "" }, { "docid": "7e82ffb53d8f55ec3b5aa781159310e6", "score": "0.48404896", "text": "def commit(self):\n return _xapian.WritableDatabase_commit(self)", "title": "" }, { "docid": "2e7ba78df5c9d9e87911f4b0b3ba3c8b", "score": "0.48258275", "text": "def run_query(parameters):\n conn = parameters['conn'] \n cur = conn.cursor()\n cur.execute(parameters['query'])\n conn.commit();", "title": "" }, { "docid": "f54bf55db6bb92b99ad52a7497cde8b5", "score": "0.48236316", "text": "def step_impl(context):\n context.tranql_query = context.text", "title": "" }, { "docid": "c48c68ad456b8ade00ca74a67c9965ff", "score": "0.48208627", "text": "def checksql(self):\n self.setVisible(False)\n sql = SQLTest(self)\n sql.activateWindow()\n sql.exec()\n self.setVisible(True)\n self.activateWindow()", "title": "" }, { "docid": "574b65737f8fb7c8a542d8a54088ef84", "score": "0.48044884", "text": "def format_database(self):\n qr = self.cursor.executescript(\"\"\"\ndelete from Packages;\ndelete from Gits;\ndelete from Repositories;\n\nupdate sqlite_sequence set 
seq=0 where name=\"Packages\";\nupdate sqlite_sequence set seq=0 where name=\"Gits\";\nupdate sqlite_sequence set seq=0 where name=\"Repositories\";\n \"\"\")\n del qr", "title": "" }, { "docid": "dcd20e1a9f3dda73b3ab9236af24cc21", "score": "0.4791975", "text": "def set_query(self,query,data):\n self.cursor.execute(query,data)\n self.db.commit()", "title": "" }, { "docid": "bfa83370a6ba0e42e17a62010866eda8", "score": "0.47914323", "text": "def popup_submit(self, event, columns, table, row_id, text_list): # !!!!! new code added by Jetsun\n\n db = SqliteQueries(\"database.db\")\n entry_values = []\n for i in range(len(text_list)):\n entry_values.append(text_list[i].get(0.0, \"end\"))\n db.update(table, columns, tuple(entry_values), 'userID = %s' % str(row_id))\n self.popup.destroy()\n\n # refresh results\n if self.btn_click == \"show_all_record\":\n self.show_all_record()\n elif self.btn_click == \"search_record\":\n self.search_record()\n else:\n pass", "title": "" }, { "docid": "a4e670eb6c4863a82b334de7ad8d5033", "score": "0.47911483", "text": "def commit(self, a=False, m=None, amend=False):", "title": "" }, { "docid": "6023ebc7f2d64dd69fbe952ea3982c3d", "score": "0.47899255", "text": "def query(self, cmd):\n data = {\n 'code': 'from google.appengine.ext import db\\n' + cmd,\n 'module_name': 'default',\n 'xsrf_token': self.xsrf_token,\n }\n return self.post('console', data, url=self.admin_url)", "title": "" }, { "docid": "d5a629b3dbe487d4f60163d64bc31fdd", "score": "0.4766869", "text": "def edit(self, command, prompt):\n instruction, target = command.split(None)\n self.sock.send(\"Graphique print {}\".format(target).encode())\n size_of_file = self.sock.recv(BUFFER_SIZE).decode('utf-8')\n print(size_of_file)\n if size_of_file == \"Directory\":\n self.editor.replace(\n \"0.0\", tk.END, \"{}{} is a directory, so I've cd you into it\".format(prompt, target))\n elif size_of_file == \"AccessError\":\n self.editor.replace(\n \"0.0\", tk.END, \"{} You can't access{}\".format(prompt, target))\n else:\n content = self.sock.recv(int(size_of_file)).decode('utf-8')\n print(content)\n self.editing = True\n self.filename = target\n self.editor.replace(\n \"0.0\", tk.END, \"{}\".format(content))\n # page1.pop", "title": "" }, { "docid": "5d712464dbce2d83a3c2a05ae4e86f4f", "score": "0.47487596", "text": "def commit(self):\n if self.con:\n self.con.commit()", "title": "" }, { "docid": "3def92f430044d5807e461dfa65287dd", "score": "0.4745809", "text": "def commit(self) -> None:", "title": "" }, { "docid": "68f4e98ec6aaa5f2e9288541c2437de8", "score": "0.47407138", "text": "def execute(self,sql):\r\n #print sql \r\n cursor = self.db.cursor() \r\n cursor.execute(sql)\r\n self.db.commit()\r\n cursor.close()", "title": "" }, { "docid": "e060a5bae2d22bdd13d4f96c96e1024b", "score": "0.47224945", "text": "def insert(self):\n MAX_RESULTS = 10\n query = \"\"\n t = None\n self.detailWindow.clear() #clear detail window\n self.detailWindow.refresh()\n self.HelpWindow.clear()\n self.HelpWindow.addstr(self.INSERT_TIP)\n self.HelpWindow.refresh()\n #Insert mode\n self.modeWindow.addstr(0, 0, self.INSERT_NAME)\n self.modeWindow.refresh()\n #Clear any previous queries\n self.queryWindow.clear()\n self.queryWindow.refresh()\n while True:\n t = self.getChar()\n if t == self.ESC:\n break #dont clear results, save it for selecting through\n else:\n if t == self.BACKSPACE and len(query) > 0:\n query = query[:-1]\n elif t != self.ENTER:\n query+=t\n self.queryWindow.clear()\n self.queryWindow.addstr(query)\n\n self.searchResults = 
[] #empty the results list from before\n self.selectCursor = -1 #select nothing\n self.search(query) #mutate self.results\n\n self.resultsWindow.clear()\n for j in xrange(len(self.searchResults)):\n if j > MAX_RESULTS:\n break\n #add results, each on its own line\n self.resultsWindow.addstr(j, 1, self.searchResults[j].cleanName())\n self.queryWindow.refresh()\n self.resultsWindow.refresh()\n self.modeWindow.addstr(0, 0, self.SELECT_NAME)\n self.modeWindow.refresh()\n self.HelpWindow.clear()\n self.HelpWindow.refresh()", "title": "" }, { "docid": "996d1219ea3ada3d4b362b3bb3031e48", "score": "0.4721329", "text": "def save(self):\r\n if self.instance is None:\r\n raise CQLEngineException(\"DML Query intance attribute is None\")\r\n assert type(self.instance) == self.model\r\n\r\n # organize data\r\n value_pairs = []\r\n values = self.instance._as_dict()\r\n\r\n # get defined fields and their column names\r\n for name, col in self.model._columns.items():\r\n val = values.get(name)\r\n if val is None:\r\n continue\r\n value_pairs += [(col.db_field_name, val)]\r\n\r\n # construct query string\r\n field_names = zip(*value_pairs)[0]\r\n field_ids = {n: uuid4().hex for n in field_names}\r\n field_values = dict(value_pairs)\r\n query_values = {field_ids[n]: field_values[n] for n in field_names}\r\n\r\n qs = []\r\n if self.instance._has_counter or self.instance._can_update():\r\n qs += [\"UPDATE {}\".format(self.column_family_name)]\r\n qs += [\"SET\"]\r\n\r\n set_statements = []\r\n # get defined fields and their column names\r\n for name, col in self.model._columns.items():\r\n if not col.is_primary_key:\r\n val = values.get(name)\r\n if val is None:\r\n continue\r\n if isinstance(col, (BaseContainerColumn, Counter)):\r\n # remove value from query values, the column will\r\n # handle it\r\n query_values.pop(field_ids.get(name), None)\r\n\r\n val_mgr = self.instance._values[name]\r\n set_statements += col.get_update_statement(\r\n val, val_mgr.previous_value, query_values)\r\n\r\n else:\r\n set_statements += [\r\n '\"{}\" = %({})s'.format(col.db_field_name, field_ids[col.db_field_name])]\r\n qs += [', '.join(set_statements)]\r\n\r\n qs += ['WHERE']\r\n\r\n where_statements = []\r\n for name, col in self.model._primary_keys.items():\r\n where_statements += ['\"{}\" = %({})s'.format(col.db_field_name,\r\n field_ids[col.db_field_name])]\r\n\r\n qs += [' AND '.join(where_statements)]\r\n\r\n # clear the qs if there are no set statements and this is not a\r\n # counter model\r\n if not set_statements and not self.instance._has_counter:\r\n qs = []\r\n\r\n else:\r\n qs += [\"INSERT INTO {}\".format(self.column_family_name)]\r\n qs += [\"({})\".format(', '.join(['\"{}\"'.format(f)\r\n for f in field_names]))]\r\n qs += ['VALUES']\r\n qs += [\"({})\".format(', '.join(['%(' + field_ids[f] + ')s' for f in field_names]))]\r\n\r\n qs = ' '.join(qs)\r\n\r\n # skip query execution if it's empty\r\n # caused by pointless update queries\r\n if qs:\r\n if self._batch:\r\n self._batch.add_query(qs, query_values)\r\n else:\r\n execute(qs, query_values)\r\n\r\n # delete nulled columns and removed map keys\r\n qs = ['DELETE']\r\n query_values = {}\r\n\r\n del_statements = []\r\n for k, v in self.instance._values.items():\r\n col = v.column\r\n if v.deleted:\r\n del_statements += ['\"{}\"'.format(col.db_field_name)]\r\n elif isinstance(col, Map):\r\n del_statements += col.get_delete_statement(\r\n v.value, v.previous_value, query_values)\r\n\r\n if del_statements:\r\n qs += [', '.join(del_statements)]\r\n\r\n qs += 
['FROM {}'.format(self.column_family_name)]\r\n\r\n qs += ['WHERE']\r\n where_statements = []\r\n for name, col in self.model._primary_keys.items():\r\n field_id = uuid4().hex\r\n query_values[field_id] = field_values[name]\r\n where_statements += [\r\n '\"{}\" = %({})s'.format(col.db_field_name, field_id)]\r\n qs += [' AND '.join(where_statements)]\r\n\r\n qs = ' '.join(qs)\r\n\r\n if self._batch:\r\n self._batch.add_query(qs, query_values)\r\n else:\r\n execute(qs, query_values)", "title": "" }, { "docid": "87d3979a3a145ed1df1502cf6c2092f7", "score": "0.47211018", "text": "def endsql(self):\n self.sqldata = None\n self.adjdict.clear()\n self.tagtext = ''\n self.declension = list()\n self.contpage = True\n self.adjpages = list([False, False])\n self.shortadj = True\n self.adjcommand = \"\"\n self.shortadjcommand = \"\"\n tagval = None\n tagval = self.listDisplay.selectedItems()\n if tagval:\n self.tagtext = tagval[0].text()\n self.sqldict[self.finalkey] = self.tagtext\n if self.wclass == \"noun\":\n self.setVisible(False)\n decline = DeclEntry(self)\n decline.activateWindow()\n decline.exec()\n if self.cancel or len(self.declension) == 0:\n self.setVisible(True)\n self.activateWindow()\n self.wordclasses()\n return\n basecommand = self.sqlcommand\n self.sqlcommand = \"\"\n for x in self.declension:\n cols = \"(\"\n data = \"(\"\n cols += \"variety, \"\n data += \"\\'\" + x[0] + \"\\', \"\n cols += \"type, \"\n data += \"\\'\" + x[1] + \"\\', \"\n cols += \"runame, \"\n data += \"\\'\" + x[2] + \"\\', \"\n cols += \"name, \"\n data += \"\\'\" + x[3] + \"\\', \"\n cols += \"gender, \"\n data += \"\\'\" + x[4] + \"\\', \"\n cols += \"declension, \"\n data += \"\\'\" + x[5] + \"\\', \"\n cols += \"wordcase, \"\n data += \"\\'\" + x[6] + \"\\', \"\n cols += \"animate) VALUES\"\n data += \"\\'\" + x[7] + \"\\');\\n\"\n self.sqlcommand += basecommand + cols + data\n elif self.wclass == \"adjective\":\n basecommand = self.sqlcommand\n self.sqldict[self.finalkey] = \"\\'\" + self.sqldict[self.finalkey] + \"\\'\"\n if self.tagtext == \"descriptive\":\n self.setVisible(False)\n while self.contpage:\n self.sqldict[\"variety\"] = \"\\'descriptive\\'\"\n decline = AdjDeclEntry(self)\n decline.activateWindow()\n decline.exec()\n self.setVisible(True)\n if self.cancel:\n self.activateWindow()\n self.wordclasses()\n return\n self.sqldict[\"variety\"] = \"\\'short adjective\\'\"\n adjgui = ShortAdj(self)\n adjgui.activateWindow()\n adjgui.exec()\n self.setVisible(True)\n if self.cancel:\n self.activateWindow()\n self.wordclasses()\n return\n self.sqlcommand = self.adjcommand\n if self.shortadj:\n self.sqlcommand += self.shortadjcommand\n self.activateWindow()\n elif self.tagtext == \"comparative\":\n engcomp = \"\"\n self.completed = True\n while len(engcomp) == 0 and self.completed:\n engcomp, self.completed = QInputDialog.getText(self, \"English Comparative\", \"Comparative Adjective Value:\")\n if engcomp and not engcomp[0] in self.morphs.enall:\n QMessageBox.warning(self, \"Non-English Character\", \"A non-english character was entered for an english word.\")\n engcomp = \"\"\n if self.completed:\n self.sqldict[\"name\"] = \"\\'\" + engcomp + \"\\'\"\n else:\n self.wordclasses()\n return\n rucomp = \"\"\n self.completed = True\n while len(rucomp) == 0 and self.completed:\n rucomp, self.completed = QInputDialog.getText(self, \"Russian Comparative\", \"Russian Comparative Adjective:\")\n if rucomp and not rucomp[0] in self.morphs.ruall:\n QMessageBox.warning(self, \"Non-Cyrillic Character\", \"A 
non-cyrillic character was entered for an russian word.\")\n rucomp = \"\"\n if self.completed:\n self.sqldict[\"declension\"] = \"\\'\" + rucomp + \"\\'\"\n else:\n self.wordclasses()\n return\n self.sqldict[\"runame\"] = \"\\'\" + self.rustr + \"\\'\"\n self.sqldict[\"gender\"] = \"\\'masculine\\'\"\n self.sqldict[\"wordcase\"] = \"\\'nominative\\'\"\n self.sqldict[\"animate\"] = \"\\'inanimate\\'\"\n self.sqldict[\"variety\"] = \"\\'comparative\\'\"\n cols = \"(\"\n data = \"(\"\n for y in self.sqldict:\n cols += y + \", \"\n data += self.sqldict[y] + \", \"\n cols = cols[:-2] + \") VALUES\"\n data = data[:-2] + \");\\n\"\n self.sqlcommand = basecommand + cols + data\n elif self.tagtext == \"superlative\":\n self.setVisible(False)\n self.sqldict[\"variety\"] = \"\\'superlative\\'\"\n decline = AdjDeclEntry(self)\n decline.activateWindow()\n decline.exec()\n self.setVisible(True)\n self.activateWindow()\n if self.cancel:\n self.wordclasses()\n return\n self.sqlcommand = self.adjcommand\n self.noexit = True\n self.finalize()\n else:\n QMessageBox.warning(self, \"No Item Selected\", \"You must select an item from the list or cancel.\")", "title": "" }, { "docid": "12b3badc60659ea49cadcbc978c0a23d", "score": "0.47151494", "text": "def query(self, arg):\r\n self.cur.execute(arg)\r\n self.conn.commit()\r\n return self.cur", "title": "" }, { "docid": "74e0af94cc91216d18fcfd76ba4b93e7", "score": "0.4710647", "text": "def schema(self,sql_str):\n\t\tself.cursor_db.executescript(sql_str)\n\t\tself.conn_db.commit()", "title": "" }, { "docid": "263af540ef2b92ceb9aa2f6247a4cfa9", "score": "0.4702192", "text": "async def sql(self, ctx):\n await util.command_group_help(ctx)", "title": "" }, { "docid": "24235ee1765db78e6af90d4fd98ed242", "score": "0.46943846", "text": "def QueryHowDoI(Query, num_answers, full_text):\n output = search_book(Query)\n print(\"You asked: \" + Query)\n print(\"_______________________________________\")\n print(output)\n exit_code = 3", "title": "" }, { "docid": "ba52805d2ba853af5cbf75f113249a58", "score": "0.46941972", "text": "def executescript(self,sql):\r\n #print sql \r\n cursor = self.db.cursor() \r\n cursor.executescript(sql)\r\n self.db.commit()\r\n cursor.close()", "title": "" }, { "docid": "94b8d1eae799d06b25aca2f018d3bcb0", "score": "0.46883547", "text": "def execute_query(connection, query):\n cursor = connection.cursor()\n try:\n cursor.execute(query)\n connection.commit()\n print(\"Query executed successfully\")\n except Error as e:\n print(f\"The error '{e}' occurred\")", "title": "" }, { "docid": "9e4011a30e6abc7fdbfacbfa5c7acdc4", "score": "0.46793383", "text": "def query_expense():\n\n if request.method == 'POST':\n sql = request.form[\"sql\"]\n\n error = None\n\n if not sql:\n error = \"query is required\"\n\n if error is None:\n obj = sqlOperation()\n results = obj.searchQuery(sql, session['user_id'])\n table = readResults(results)\n table.border = True\n return render_template('expense/table.html', table = table)\n else:\n flash(error)\n\n return render_template('expense/sql.html')", "title": "" }, { "docid": "327080e49104d0ff94b811b21ec72c31", "score": "0.4672157", "text": "def execute_query(cur, conn, query):\n if __debug__:\n print(\"Executing ================================ Query\")\n print(query)\n cur.execute(query)\n conn.commit()", "title": "" }, { "docid": "d3cc13e77e83190eac3f815196259ad7", "score": "0.46710664", "text": "def make_commit(self):\n self.sat.commit()", "title": "" }, { "docid": "47cbe5a8067ddc3bcbe3eb7e49248bf8", "score": 
"0.46670425", "text": "def commit_and_close(self):\r\n request = input('Save the database? ')\r\n if request.upper() == 'YES':\r\n self.database.commit()\r\n self.database.close()", "title": "" }, { "docid": "90caca07874299609a6876e8682485fe", "score": "0.4665845", "text": "def do_query(self) -> None:\n ...", "title": "" }, { "docid": "4a642ff48f0322ca6f2c791ce5b3c50f", "score": "0.4661898", "text": "def test_preserve_whitespace(self):\n query = \"DUMP\\nSCHEMA\\n\\n;\"\n for fragment in query.split(\"\\n\"):\n self.query(fragment)\n self.assertEqual(self.engine.last_query, query)", "title": "" }, { "docid": "eff0e15b2ed4296ae907d69c80527baa", "score": "0.46522677", "text": "def _commit_handler(self, cmd):\n current_prompt = self.device.find_prompt().strip()\n terminating_char = current_prompt[-1]\n # Look for trailing pattern that includes '#' and '>'\n pattern1 = r\"[>#{}]\\s*$\".format(terminating_char)\n # Handle special username removal pattern\n pattern2 = r\".*all username.*confirm\"\n patterns = r\"(?:{}|{})\".format(pattern1, pattern2)\n output = self.device.send_command_expect(cmd, expect_string=patterns)\n loop_count = 50\n new_output = output\n for i in range(loop_count):\n if re.search(pattern2, new_output):\n # Send confirmation if username removal\n new_output = self.device.send_command_timing(\n \"\\n\", strip_prompt=False, strip_command=False\n )\n output += new_output\n else:\n break\n # Reset base prompt in case hostname changed\n self.device.set_base_prompt()\n return output", "title": "" }, { "docid": "d9b156bec756de2c09ce1745f7de9acb", "score": "0.46498632", "text": "def handleCustomQuery(self):\n\n # Get query\n print(\"Enter query below\")\n query = input()\n # Process documents\n processedQuery = self.preprocessQueries([query])[0]\n\n # Read documents\n docs_json = json.load(open(args.dataset + \"cran_docs.json\", 'r'))[:10]\n docs = [item[\"body\"] for item in docs_json]\n # Process documents\n processedDocs = self.preprocessDocs(docs)", "title": "" }, { "docid": "b56fcf3bc68efa86ff38670cf2ece9a5", "score": "0.46482247", "text": "def commit(self):\r\n self.connection.commit()", "title": "" }, { "docid": "49237b79f48e9d644736b826c1d234be", "score": "0.46452644", "text": "def _update_rows(self, query):\n self.db.get_connection()\n with self.db.conn.cursor() as cur:\n cur.execute(query)\n self.db.conn.commit()\n logger.debug(f'Query is: {query}.')\n logger.info(f\"{cur.rowcount} rows affected.\")", "title": "" }, { "docid": "d272e7c24acb4c3fde76e91e946d8640", "score": "0.46423507", "text": "def custom_query(self, con, cur, query):\n\n try:\n cur.execute(query)\n con.comit()\n print('\\n\\n QUERY SUCCESSFULL \\n\\n')\n except Exception as error:\n print('\\n Error by executing YOUR QUERY. 
'\n '\\n Server said: {}'.format(error)\n )", "title": "" }, { "docid": "58601b82f844e31a8aa211e13ab5c275", "score": "0.46373543", "text": "def qsqldb( sqldb, sql_cmd, outfilename=None ):\n\t#\n\t# Create the output file if specified\n\tif outfilename:\n\t\toutfile = open(outfilename, \"wb\")\n\t\tcsvout = csv.writer(outfile, quoting=csv.QUOTE_NONNUMERIC)\n\t#\n\t# Execute SQL\n\tcurs = sqldb.cursor()\n\tcurs.execute(sql_cmd)\n\t#\n\t# Write output to file or console\n\tif outfilename:\n\t\tdatarows = curs.fetchall()\n\t\theaders = [ item[0] for item in curs.description ]\n\t\tcsvout.writerow(headers)\n\t\tfor row in datarows:\n\t\t\tcsvout.writerow(list(row))\n\t\toutfile.close()\n\telse:\n\t\tprint pp(curs)", "title": "" }, { "docid": "011eca2001844c9633087ed0db67ddba", "score": "0.4634989", "text": "def color_q(query_object):\n return color_sql(compile_query(query_object))", "title": "" }, { "docid": "5360a7de8c72610842cce277150526a9", "score": "0.46349132", "text": "def db_action(sql_action: str):\n conn = psycopg2.connect(db_url)\n curs = conn.cursor()\n curs.execute(sql_action)\n conn.commit()\n curs.close()\n conn.close()", "title": "" }, { "docid": "6bc835fcbd42e91ccdded3ea90ed3b91", "score": "0.46291798", "text": "def _commit(self, action):\n self._dbparams_append((\"-max\", self._maxret))\n\n url = self._url % self.__dict__\n data = urlencode(self._dbdata + self._dbparams) + \"&-\" + action\n self._dbparams = []\n # use POST to submit data\n request = Request(url, bytestr(data))\n request.add_header('User-Agent', b'Fmkr.py')\n # authorization header\n auth = b\"Basic \" + encodestr(\"%s:%s\" % (self._dbuser,\n self._dbpasswd))[:-1]\n request.add_header(\"Authorization\", auth)\n\n try:\n fd = urlopen(request)\n except HTTPError as e:\n raise FMError(str(e))\n except URLError as e:\n raise FMError(\"URL Error: %s\" % str(e.reason))\n\n results = FMPXMLResult()\n results.httpinfo = fd.info()\n results.url = url + \"?\" + data\n # hide logon information\n #if self._dbuser and self._dbpasswd:\n # results.url = results.url.replace(\n # \"//\", \"//%s:%s@\" % (self._dbuser, self._dbpasswd), 1)\n doc = minidom.parse(fd)\n fd.close()\n\n # <PRODUCT BUILD=\"06/14/2006\" NAME=\"FileMaker Web Publishing Engine\"\n # VERSION=\"8.0.4.128\"/>\n try:\n attrs = doc.getElementsByTagName(\"PRODUCT\")[0].attributes\n results.product.update(attrs.items())\n except Exception:\n pass\n\n # <DATABASE DATEFORMAT=\"MM/dd/yyyy\" LAYOUT=\"data entry\" NAME=\"Test\"\n # RECORDS=\"68\" TIMEFORMAT=\"HH:mm:ss\"/>\n try:\n attrs = doc.getElementsByTagName(\"DATABASE\")[0].attributes\n results.database.update(attrs.items())\n except Exception:\n pass\n\n # <ERRORCODE>0</ERRORCODE>\n try:\n results.errorcode = int(\n doc.getElementsByTagName(\"ERRORCODE\")[0].firstChild.data)\n except Exception:\n results.errorcode = -1\n\n if results.errorcode:\n try:\n errormsg = FMError.codes[results.errorcode]\n except KeyError:\n errormsg = \"Unknown error code\"\n raise FMError(\"FileMaker Error #%s: %s\" % (\n results.errorcode, errormsg), results.errorcode)\n\n # <METADATA>\n fields = doc.getElementsByTagName(\"METADATA\")[0].childNodes\n metadata = results.metadata\n for field in fields:\n metadata.append(FMField(field.attributes))\n\n # <RESULTSET>\n try:\n resultset = doc.getElementsByTagName(\"RESULTSET\")[0]\n except Exception:\n pass\n else:\n escrslt = self._escrslt\n unctype = unicode_t\n for row in resultset.childNodes:\n d = {}\n d[\"MODID\"] = int(row.attributes[\"MODID\"].value)\n d[\"RECORDID\"] = 
int(row.attributes[\"RECORDID\"].value)\n for md, cn in zip(metadata, row.childNodes):\n if escrslt and md.dtype == unctype:\n convert_type = escape_unicode\n else:\n convert_type = md.dtype\n if md.maxrepeat == 1:\n de = cn.firstChild.firstChild\n d[md.name] = convert_type(de.data) if de else None\n else:\n a = []\n for c in cn.childNodes:\n value = c.firstChild\n if value:\n a.append(convert_type(value.data))\n else:\n break\n d[md.name] = a\n results.resultset.append(d)\n return results", "title": "" }, { "docid": "f875ebf0d965df05e4a6d4969b277f21", "score": "0.4614563", "text": "def edit(self, query=None, **kwargs):\n pass", "title": "" }, { "docid": "7c40551d211449ed1dec5f69af03ee40", "score": "0.4592555", "text": "def formatted_queries():\n\n id = get_id() # Could be a SQLi response..\n\n query1 = f\"SELECT * FROM users WHERE id = {id}\"\n\n query2 = \"SELECT * FROM users WHERE id = {0}\" % id\n\n query3 = \"SELECT * FROM users WHERE id = {0}\".format(id)\n\n query4 = f\"UPDATE users SET is_admin = 1 WHERE id = {id}\"\n\n query5 = f\"DELETE FROM users WHERE id = {id}\"\n\n query6 = f\"INSERT INTO users (id) VALUES ( id = {id} )\"\n\n query7 = f\"SELECT * FROM users WHERE id = {id}\"", "title": "" }, { "docid": "d7f78cf09994c8f069f7146171f320bb", "score": "0.4590935", "text": "def commit(self):\n self.connection.commit()", "title": "" }, { "docid": "d7f78cf09994c8f069f7146171f320bb", "score": "0.4590935", "text": "def commit(self):\n self.connection.commit()", "title": "" }, { "docid": "ec13f5ddfb3792a28f440c6d6f1f53f2", "score": "0.45908436", "text": "def commit_rst(self):\n self.state_machine.insert_input(self.rst_lines, \"\")\n self.rst_lines = []", "title": "" }, { "docid": "982819eb11651908f894cfcdf9739fe1", "score": "0.45901954", "text": "def meta_command(connection, user_input):\n user_parts = user_input.split()\n replaced = ''\n\n # .tables: print list of tables in database\n if user_parts[0].lower() == '.tables' or user_parts[0].lower() == '.table':\n replaced = 'SELECT name FROM sqlite_master WHERE type = \"table\";'\n\n # .schema: print details about a tables column\n elif user_parts[0].lower() == '.schema':\n if len(user_parts) == 1:\n replaced = 'SELECT sql FROM sqlite_master WHERE type = \"table\";'\n elif len(user_parts) == 2:\n s = 'SELECT sql FROM sqlite_master WHERE type = \"table\" AND name = \"{[1]}\";'\n replaced = s.format(user_parts)\n else:\n print('Usage: .schema [table]')\n\n # .dump: print SQL commands to recreate table\n # ToDo: http://www.sqlitetutorial.net/sqlite-dump/\n elif user_parts[0].lower() == '.dump':\n if len(user_parts) == 1:\n for line in connection.iterdump():\n print(line)\n elif len(user_parts) == 2:\n replaced = 'SELECT * from {[1]};'.format(user_parts)\n else:\n print('Usage: .dump [table]')\n\n else:\n print('That sqlite3 meta command has not been implimented')\n\n return replaced", "title": "" }, { "docid": "fe5bb448d4e729664a53d52a337c233e", "score": "0.4587876", "text": "def execute_query(self, query):\n sql_blacklist = ['drop', 'delete']\n if any(word.casefold() in query.casefold() for word in sql_blacklist):\n print(\"No queries with {0} will be executed\".format([word for word in sql_blacklist if word.casefold() in query.casefold()][0]))\n return\n dbcursor = self.connection.cursor()\n dbcursor.execute(query)\n result = dbcursor.fetchall()\n return result", "title": "" }, { "docid": "9bc2e40b428d4a5fb47134018f0c3fdf", "score": "0.45863706", "text": "def edit():", "title": "" }, { "docid": "08b6eaae8567df11341309842b1e20bd", "score": 
"0.4586093", "text": "def commit(self):\n self.connect.commit()", "title": "" }, { "docid": "d19fa45f4ed36207f1de5bd22ff54b60", "score": "0.4576854", "text": "def submit_textarea():\n post_content = request.form[\"content\"]\n author = request.form[\"author\"]\n\n post_object = {\n 'author': author,\n 'content': post_content,\n }\n\n # Submit a transaction\n new_tx_address = \"{}/new_transaction\".format(CONNECTED_NODE_ADDRESS)\n\n requests.post(new_tx_address,\n json=post_object,\n headers={'Content-type': 'application/json'})\n\n return redirect('/')", "title": "" }, { "docid": "1aa8b5c5b06e5e1f7c60d8fbe04ef6d0", "score": "0.45657098", "text": "def commit(self):\n pass", "title": "" }, { "docid": "1aa8b5c5b06e5e1f7c60d8fbe04ef6d0", "score": "0.45657098", "text": "def commit(self):\n pass", "title": "" }, { "docid": "1aa8b5c5b06e5e1f7c60d8fbe04ef6d0", "score": "0.45657098", "text": "def commit(self):\n pass", "title": "" }, { "docid": "0c63c83926920827115ebe9e62d22751", "score": "0.45615292", "text": "def submit_textarea():\n post_object = {\n \"contents\": transaction.to_dict(),\n \"signature\": transaction.signature\n }\n\n # Submit a transaction\n if request.method == 'GET':\n new_tx_address = \"http://127.0.0.1:8000/new_transaction\"\n else:\n port_nb = request.get_json()\n new_tx_address = \"http://127.0.0.1:\" + str(port_nb) + \"/new_transaction\"\n\n requests.post(new_tx_address,\n json=post_object,\n headers={'Content-type': 'application/json'})\n\n return \"Success\", 201", "title": "" }, { "docid": "6bf63136f22ed1813f702a3eefc3f733", "score": "0.45585993", "text": "def execute(self, req, data = {}):\n res = self.cursor().execute(req, data)\n if not('SELECT' in req) and self.auto_commit:\n self.commit()\n return res", "title": "" }, { "docid": "6cfce166a7e5f5199782bd00a7aba9f4", "score": "0.45553318", "text": "def format_queries(queries):\n output = '-- Queries %s\\n' % ('-'*(LINE_BIG-11))\n\n for query in queries:\n output += '-- %s %s\\n' % (query, '-'*(LINE_SMALL-4-len(query)))\n output += queries[query]\n output += '\\n;%s' % ('\\n'*3)\n\n return output", "title": "" }, { "docid": "4ae4fdd9487cc167bf18577c58d5679e", "score": "0.4553439", "text": "def commit(self):\n self.get_connection().commit()", "title": "" }, { "docid": "64ff9a9219abb92fdd8c79dd0c4f78a5", "score": "0.4548623", "text": "def write():\n db = bottle.request.forms.get('db')\n return \"Not implemented.\"", "title": "" }, { "docid": "f9a38c1baa64adf90927da1ddc9c6ce5", "score": "0.45434672", "text": "def get_sql(self, undo=False):\n return self.undo if undo else self.do", "title": "" }, { "docid": "a9ccd280dc1c1812786911cc0667553c", "score": "0.4541497", "text": "def query(self, qu):\n return self.cur.execute(qu)", "title": "" }, { "docid": "51da7ec25e38198334a0e58eaf1d0ce6", "score": "0.45397788", "text": "def test_query_button_clicked_query_parse_error(self, widget):\n\n widget.query.text = \"ans(study_id, term, tfidf):-neurosynth_default_mode_study_id(study_id),neurosynth_pcc_study_id(study_id),neurosynth_study_tfidf(study_id, term, tfidf\"\n widget._on_query_button_clicked(None)\n assert widget.result_viewer.layout.visibility == \"hidden\"\n assert widget.error_display.layout.visibility == \"visible\"\n assert len(widget.query.marks) == 1\n assert len(widget.query.text_marks) == 1", "title": "" }, { "docid": "914f9be4724a27528047407f06d521f3", "score": "0.45382926", "text": "def commit(self) -> None:\n self.con.commit()", "title": "" }, { "docid": "64dd4aba698f62aeb80815ea901e267d", "score": "0.45381135", 
"text": "def update_row(self,query,info):\n \n with self.conn.cursor() as cur:\n try:\n cur.execute(query,info)\n self.conn.commit()\n cur.close()\n except Exception as e:\n print(e)\n cur.execute(\"ROLLBACK\")\n self.conn.commit()\n cur.close()\n raise HTTPException(status_code=400, detail=\"Information given was invalid\")\n return \"Row updated\"", "title": "" } ]
cab3e85aee2ae1056716a2a86c357d5f
Prefect task to finalize the downscaling run.
[ { "docid": "b68008b92ad4c8e52eef51cd0120f6e8", "score": "0.0", "text": "def finalize(run_parameters: RunParameters = None, **paths):\n _finalize(run_parameters, kind='runs', **paths)", "title": "" } ]
[ { "docid": "f1f9202e19312af090558d9300f3c976", "score": "0.6260884", "text": "def teardown(self, task, job_config):", "title": "" }, { "docid": "1a1c2f4fd22c23c94eab7333d3e99aee", "score": "0.616505", "text": "def finalize(self):\n self.logger.info(\"Please wait while finalizing the operation.. Thank you\")\n self.save_checkpoint()\n # self.summary_writer.export_scalars_to_json(\"{}all_scalars.json\".format(self.config.summary_dir))\n # self.summary_writer.close()\n # self.dataloader.finalize()", "title": "" }, { "docid": "f48c5d6a1e1470da3127bfc0e9aa1b28", "score": "0.6017869", "text": "def _post_compute(self):\n pass", "title": "" }, { "docid": "1b8d8d660d524b84f3277c7b155d3143", "score": "0.5813739", "text": "def finalize_timepoint(self):\n pass", "title": "" }, { "docid": "3c942b1700e1adfd749cbf41f64b3a53", "score": "0.5796718", "text": "def ExecuteFinalize(self):\n pass", "title": "" }, { "docid": "62002f7cf997a569e1213856931a3f88", "score": "0.5784241", "text": "def finished_task(self):\n self.mutex.lock()\n if self.cache is not None:\n args, kwargs = self.cache\n self.algorithm.set_parameters(*args, **kwargs)\n self.cache = None\n self.clean_later = False\n if self._image is not None:\n self.algorithm.set_image(self._image)\n self._image = None\n if self._mask is not None:\n self.algorithm.set_mask(self._mask)\n self._mask = None\n if self.rerun[0]:\n self.rerun = False, QThread.InheritPriority\n super().start(self.rerun[1])\n elif self.clean_later:\n self.algorithm.clean()\n self.clean_later = False\n self.mutex.unlock()", "title": "" }, { "docid": "830c5eb3d65b9e5e31d11a1627d080c7", "score": "0.5719745", "text": "def cleanup_run(self, *kargs, **kwargs):\n pass", "title": "" }, { "docid": "2e3a5a0288c231d4c513b377e4d5fb62", "score": "0.571714", "text": "def finalize(self) -> None:\n ...", "title": "" }, { "docid": "11c4e7194d1e1fb4df64103a852f6c91", "score": "0.571261", "text": "def finalize(self):\n pass # currently nothing to do here", "title": "" }, { "docid": "cacc113ca7766fb5b2680238e76e50e6", "score": "0.5700527", "text": "def after_main_loop(self):\n for task in self.session:\n if isinstance(task,GsnowpackApplication) and task.execution.returncode == 0:\n # Only consider successfully terminated tasks\n _update_original_sno_files(task)\n \n if self.params.www and task.get_images():\n try:\n _publish_data(task, self.params.www)\n except OSError, osx:\n continue", "title": "" }, { "docid": "362527538ef01a8b8b55ebb82dc3d379", "score": "0.5681788", "text": "def _cleanup(self):\n functions.DAQmxWaitUntillTaskDone(self.taskHandle, -1) # second argument needs checking\n functions.DAQmxStopTask(self.taskHandle)\n functions.DAQmxClearTask(self.taskHandle)", "title": "" }, { "docid": "76a61d8f24431362d3da8862938ff2b9", "score": "0.5672402", "text": "def postRunCleanup(self):\n self.logDesc(\"Post Run Cleanup\")", "title": "" }, { "docid": "76a61d8f24431362d3da8862938ff2b9", "score": "0.5672402", "text": "def postRunCleanup(self):\n self.logDesc(\"Post Run Cleanup\")", "title": "" }, { "docid": "76a61d8f24431362d3da8862938ff2b9", "score": "0.5672402", "text": "def postRunCleanup(self):\n self.logDesc(\"Post Run Cleanup\")", "title": "" }, { "docid": "76a61d8f24431362d3da8862938ff2b9", "score": "0.5672402", "text": "def postRunCleanup(self):\n self.logDesc(\"Post Run Cleanup\")", "title": "" }, { "docid": "bf88fa429f38f638ea5e532b91157ede", "score": "0.567161", "text": "def _finalize(self):\n self._finalized = True", "title": "" }, { "docid": "c8fb1c29151dffdcb6f53cf35689fb91", 
"score": "0.5633229", "text": "def finalize(self):\n # Finalize output training\n self.pca_cell.finalize()\n\n # Finalized\n self.finalized = True", "title": "" }, { "docid": "0b87f5501b23a309b84bb80544899bf2", "score": "0.5624566", "text": "def _finalize(self):\n pass", "title": "" }, { "docid": "b755d6fd430ce98ac87075348ab8f444", "score": "0.56239605", "text": "def finalize(self):\n pass", "title": "" }, { "docid": "b755d6fd430ce98ac87075348ab8f444", "score": "0.56239605", "text": "def finalize(self):\n pass", "title": "" }, { "docid": "b755d6fd430ce98ac87075348ab8f444", "score": "0.56239605", "text": "def finalize(self):\n pass", "title": "" }, { "docid": "b755d6fd430ce98ac87075348ab8f444", "score": "0.56239605", "text": "def finalize(self):\n pass", "title": "" }, { "docid": "b21a454931fb57f8c91f36e515708891", "score": "0.5617566", "text": "def finalize(args):\n return Update.within_job(args.metric_value, args.test)", "title": "" }, { "docid": "86247afd98fd0af6087c5dd18db7d104", "score": "0.560529", "text": "def finalize(self):", "title": "" }, { "docid": "86247afd98fd0af6087c5dd18db7d104", "score": "0.560529", "text": "def finalize(self):", "title": "" }, { "docid": "86247afd98fd0af6087c5dd18db7d104", "score": "0.560529", "text": "def finalize(self):", "title": "" }, { "docid": "86247afd98fd0af6087c5dd18db7d104", "score": "0.560529", "text": "def finalize(self):", "title": "" }, { "docid": "86247afd98fd0af6087c5dd18db7d104", "score": "0.560529", "text": "def finalize(self):", "title": "" }, { "docid": "86247afd98fd0af6087c5dd18db7d104", "score": "0.560529", "text": "def finalize(self):", "title": "" }, { "docid": "86247afd98fd0af6087c5dd18db7d104", "score": "0.560529", "text": "def finalize(self):", "title": "" }, { "docid": "3d64d460f2254138857b91899c2743ac", "score": "0.55869454", "text": "def _finalize_run(self):\n self._run_information[self.v_crun]['completed'] = 1\n while len(self._new_links):\n name_pair, child_parent_pair = self._new_links.popitem(last=False)\n parent_node, _ = child_parent_pair\n _, link = name_pair\n parent_node.f_remove_child(link)\n\n while len(self._new_nodes):\n _, child_parent_pair = self._new_nodes.popitem(last=False)\n parent, child = child_parent_pair\n child_name = child.v_name\n parent.f_remove_child(child_name, recursive=True)", "title": "" }, { "docid": "61252ecafd86555516b9a9d5895b23a6", "score": "0.55836266", "text": "def finalize(self):\n raise NotImplementedError", "title": "" }, { "docid": "2a7fec7bee8790abae08451ee7fdfce5", "score": "0.5562341", "text": "def finalize(self):\n DLOG.verbose(\"Default thread worker finalize called for %s.\"\n % self._name)", "title": "" }, { "docid": "bf65c549e98de6bf12769f27ee86daef", "score": "0.55509955", "text": "def process_cleanup(self):", "title": "" }, { "docid": "bd4d8d96d9e46015bc6eb726ab5550db", "score": "0.5543055", "text": "def finalize(self):\n\t\tutils.forgetMemoized(self)\n\t\tfor proc in itertools.chain(self.metaMakers, self.dataFunctions):\n\t\t\tutils.forgetMemoized(proc)\n\t\tutils.forgetMemoized(self.descriptorGenerator)\n\t\tif self.dataFormatter:\n\t\t\tutils.forgetMemoized(self.dataFormatter)\n\t\tself.breakCircles()\n\t\tself.run = None", "title": "" }, { "docid": "889a8b06b7b30bcd6e4cdf26169d99e5", "score": "0.5526978", "text": "def finalize(self):\r\n pass", "title": "" }, { "docid": "8fac4d8af648ac685f80e96bdd900336", "score": "0.5520506", "text": "def finalize(self) -> None:\n total_execution_time = time.time() - self.start_all\n print(f\"TOTAL EXECUTION TIME: 
{total_execution_time:.10f} sec\")\n self.merge_outputs()", "title": "" }, { "docid": "af187d5fd35793273ce85350f4a1b426", "score": "0.551647", "text": "def _set_finish(self):\n\n run_info_dict = self._run_information[self.v_crun]\n timestamp_run = run_info_dict['timestamp']\n\n run_summary = self._summarize_explored_parameters()\n\n finish_timestamp_run = time.time()\n\n findatetime = datetime.datetime.fromtimestamp(finish_timestamp_run)\n startdatetime = datetime.datetime.fromtimestamp(timestamp_run)\n\n runtime_run = str(findatetime - startdatetime)\n\n run_info_dict['parameter_summary'] = run_summary\n run_info_dict['completed'] = 1\n run_info_dict['finish_timestamp'] = finish_timestamp_run\n run_info_dict['runtime'] = runtime_run", "title": "" }, { "docid": "2e16f4b7d7f12ada9a8554e2f19c8f5f", "score": "0.551429", "text": "def finalize(self):\n self.classifier.finalize()", "title": "" }, { "docid": "7689206054dd79b00f3bcb0c3541994d", "score": "0.5514289", "text": "def finalize():", "title": "" }, { "docid": "7689206054dd79b00f3bcb0c3541994d", "score": "0.5514289", "text": "def finalize():", "title": "" }, { "docid": "28596baa6965789722574d07e7fcc5de", "score": "0.5513951", "text": "def _on_task_run(self):\n # self.publish_desired_twist_power(self.twist) # change to ALlocateVelocityTask\n # self.publish_desired_twist(self.twist.linear.x,\n # self.twist.linear.y,\n # self.twist.linear.z,\n # self.twist.angular.x,\n # self.twist.angular.y,\n # self.twist.angular.z\n # )\n\n self.rotate_task.run()\n\n if not self.on_finish_segment and task_utils.at_pose(\n self.state.pose.pose, self.target_pose, float(\"inf\"), self.seg_rads / 2):\n self.current_segment += 1\n self.target_pose = task_utils.add_poses([self.target_pose, self.angle_pose])\n rospy.loginfo(\"Now on segment \" + str(self.current_segment))\n self.output[\"rads_turned\"] += self.seg_rads\n self.output[\"points_scored\"] = int(self.output[\"rads_turned\"] / (math.pi / 2)) * self.nintey_points\n\n # rospy.loginfo(\"1 clear\")\n\n if not self.on_finish_segment and self.current_segment == self.num_segments:\n self.on_finish_segment = True\n\n # rospy.loginfo(\"2 clear\")\n\n if self.on_finish_segment and task_utils.at_pose(\n self.state.pose.pose, self.target_pose, float(\"inf\"), self.seg_rads):\n rospy.loginfo(\"Turn Complete!\\n\")\n self.output[\"rads_turned\"] = self.angle\n self.output[\"points_scored\"] = int(self.output[\"rads_turned\"] / (math.pi / 2)) * self.nintey_points\n rospy.loginfo(\"Stopping turn!\")\n self.rotate_task.finish()\n self.finish()\n\n # rospy.loginfo(\"all clear, self.finished is {}\".format(self.finished))", "title": "" }, { "docid": "06bf10d20f94cf9ff247be4bcbdec8d0", "score": "0.54889214", "text": "def post(self, executor):\n tasks.PauseExecutor(executor)", "title": "" }, { "docid": "8d6efe452ecfa67fd710d9e222e40bce", "score": "0.5461304", "text": "def process_cleanup(self):\n pass", "title": "" }, { "docid": "0a230a62e94fc9dfcdd364db0f8380a7", "score": "0.5441202", "text": "def finalize(self):\r\n pass", "title": "" }, { "docid": "0a230a62e94fc9dfcdd364db0f8380a7", "score": "0.5441202", "text": "def finalize(self):\r\n pass", "title": "" }, { "docid": "0a230a62e94fc9dfcdd364db0f8380a7", "score": "0.5441202", "text": "def finalize(self):\r\n pass", "title": "" }, { "docid": "2d77d14eafa02c7ba5801786154098db", "score": "0.54286355", "text": "def fin():\n u_libs.testflow.teardown(\n \"Release hosts from CPU load: %s\", load_to_resources.values()\n )\n 
sla_helpers.stop_load_on_resources(load_to_resources.values())", "title": "" }, { "docid": "b7ccf732c99c3ca2a038cedca21f40c6", "score": "0.54172254", "text": "def finalizeActualSampling(self, jobObject, model, myInput):\n self.localFinalizeActualSampling(jobObject, model, myInput)", "title": "" }, { "docid": "0def583b5a6ab21e79adb4eaf7955145", "score": "0.54120964", "text": "def work(self):\n # HACK: overwrite the classes' self._do_stage() method with our own\n try:\n ds_orig = self._do_stage\n # Functools.partial() should be more jit friendly than lambda\n self._do_stage = functools.partial(self.__do_stage, _ds=ds_orig)\n super(Deinstall, self).work()\n finally:\n self._do_stage = ds_orig", "title": "" }, { "docid": "e63a5463d9c114c73aaf0a1499e5bcda", "score": "0.53938115", "text": "def finalize(self):\n assert self.num_frames[\"data\"] > 0\n # Note: self.num_frames could be greater than self.data.get_num_timesteps() in case of chunking.\n for key, value in self.results.items():\n if key != \"ctc_priors\":\n self.results[key] *= self.epoch_norm_factor_for_result(key)\n self.score = dict([(key,value) for (key, value) in self.results.items() if key.startswith(\"cost:\")])\n self.error = dict([(key,value) for (key, value) in self.results.items() if key.startswith(\"error:\")])\n self.finalized = True", "title": "" }, { "docid": "8fb2d6423fbda66b2e1512adbbdf061f", "score": "0.53866833", "text": "def scale_down(self, inactive_additional_hosts):\n Logger.info('Start grid engine SCALING DOWN for %s hosts.' % len(inactive_additional_hosts))\n self.scale_down_orchestrator.scale_down(inactive_additional_hosts)", "title": "" }, { "docid": "9e44e7830810a6a48e0088572ed0f45c", "score": "0.5382066", "text": "def unset_pre_scale_done(self):\n key = self._qualifyKey(NodeDecorator.PRE_SCALE_DONE)\n self._ss_client.setRuntimeParameter(key, 'false')", "title": "" }, { "docid": "ccacdce455feebe1cb069f0af4ea1d10", "score": "0.5380459", "text": "def re_run(self, task):\n # Basically delete all and run again.\n pass", "title": "" }, { "docid": "a7c6dd07df633726c2cf43fb0c947083", "score": "0.53783774", "text": "def finalize(self):\n self._finalize.set()", "title": "" }, { "docid": "25227499a45aa95dba795d37bba9d00c", "score": "0.5373788", "text": "def finalize_run(self, run_state):\n failure_message = run_state.info.get('failure_message', None)\n if 'exitcode' not in run_state.info:\n run_state.info['exitcode'] = None\n if not failure_message and run_state.is_killed:\n run_state.info['failure_message'] = run_state.info['kill_message']\n run_state.info['finalized'] = False\n return run_state._replace(\n stage=LocalRunStage.FINALIZING, info=run_state.info, run_status=\"Finalizing bundle\"\n )", "title": "" }, { "docid": "137f67628d91a43d8c5f15a914658c0a", "score": "0.53666997", "text": "def end_run(self):\n # not necessary\n pass", "title": "" }, { "docid": "89f6e94d212db843d55c683e31be1f3a", "score": "0.5364355", "text": "def tear_down(self):\n\n self.run_etl_job_cutoff_statement()", "title": "" }, { "docid": "5e8d1f6d6f7c906109daf8ca24c47868", "score": "0.5319805", "text": "def finalize(self, loss, optimizer):\n self.loss, self.optimizer = loss, optimizer\n self.finalized = True", "title": "" }, { "docid": "5e8d1f6d6f7c906109daf8ca24c47868", "score": "0.5319805", "text": "def finalize(self, loss, optimizer):\n self.loss, self.optimizer = loss, optimizer\n self.finalized = True", "title": "" }, { "docid": "410c19aa571e63a03bd1b2ebbc7d9abe", "score": "0.5315652", "text": "def onComplete(self, task): #$NON-NLS-1$\r", 
"title": "" }, { "docid": "ae1295a08dfffc2d01d25f836820ae5b", "score": "0.53156406", "text": "def end_run(\n self,\n *,\n state: gca_execution.Execution.State = gca_execution.Execution.State.COMPLETE,\n ):\n self.update_state(state)", "title": "" }, { "docid": "e16a7f732648c81cab0445eb6ee93b03", "score": "0.5315085", "text": "def shutdown_worker(self):\n parallel_sampler.terminate_task(scope=self.algo.scope)", "title": "" }, { "docid": "9c6d6a40ade20a4f828ec67fd85ec522", "score": "0.53096217", "text": "def _postprocess(self):\n if not super()._postprocess():\n return False\n\n try:\n torch.distributed.destroy_process_group()\n except BaseException as e:\n self._result.set_return_code(ReturnCode.DISTRIBUTED_SETTING_DESTROY_FAILURE)\n logger.error(\n 'Post process failed - benchmark: {}, mode: {}, message: {}.'.format(\n self._name, self._args.mode, str(e)\n )\n )\n return False\n\n return True", "title": "" }, { "docid": "e900b0d994ae9ead66862686c7ff340e", "score": "0.53038293", "text": "def distribute_task(self, task: object):", "title": "" }, { "docid": "324167904bb0fd9961af605d4ff719c6", "score": "0.5303111", "text": "def _perform_task(self):\n self._get_zeropoint_index()\n self._execute_compensation(self.parameters['zeropoint_index'])", "title": "" }, { "docid": "4c9711549439bdce3d72d459b5c09943", "score": "0.5297899", "text": "def force_finalize_on_wait(self):\n self._finalize_on_wait = True", "title": "" }, { "docid": "72fdcb6fa8ec3782a5e3b73fd8cac27a", "score": "0.52747715", "text": "def cleanup_resources_from_task_run(self, task_run: \"TaskRun\", server_url: str) -> None:\n raise NotImplementedError()", "title": "" }, { "docid": "f69a7bfbe06d86b75b6c33b6afb7651e", "score": "0.5271502", "text": "def finalize(self, ctx, shard_state):\n return", "title": "" }, { "docid": "57c62082184e724d28fe4637f12e3f4c", "score": "0.52652365", "text": "def set_pre_scale_done(self):\n key = self._qualifyKey(NodeDecorator.PRE_SCALE_DONE)\n self._ss_client.setRuntimeParameter(key, NodeDecorator.PRE_SCALE_DONE_SUCCESS)", "title": "" }, { "docid": "e9f59b28975b092865edcd08669ae3df", "score": "0.5247657", "text": "def release(self) -> None:\n self.thr._task_logs.pop()", "title": "" }, { "docid": "aba89a93093941599f217863e283975e", "score": "0.5245691", "text": "def clean(self):\n\n self.instance.send_event('Cleaning job..')\n result = self.instance.execute_operation('croupier.interfaces.lifecycle.cleanup',\n kwargs={\"name\": self.name})\n\n self.instance.send_event('.. 
job cleaned')\n\n return result.task", "title": "" }, { "docid": "614831f18af27faa4291875dfbb04635", "score": "0.5243402", "text": "def signal_shutdown(self):\n self.log.info('done')\n self.metrics.set_ready(False)", "title": "" }, { "docid": "a519d358eaa90f8e61aaf99e6edf2218", "score": "0.5241345", "text": "async def cleanup(self) -> None:\n ...", "title": "" }, { "docid": "2c37034d842407eba3a2b0b2b7f115e2", "score": "0.5238297", "text": "def shutdown(self):\n\n logger.debug('Job done!')", "title": "" }, { "docid": "4c32973dcd51b17ae863a713ec46839a", "score": "0.5237835", "text": "def unthrottle_downlad(self):", "title": "" }, { "docid": "4c160a823daf70543e27c2a69e0ab952", "score": "0.52340615", "text": "def task(self):\n pass", "title": "" }, { "docid": "9d60d7c941ed7102fa6c93128493449f", "score": "0.52276", "text": "def finish(self):\n pass", "title": "" }, { "docid": "9d60d7c941ed7102fa6c93128493449f", "score": "0.52276", "text": "def finish(self):\n pass", "title": "" }, { "docid": "9d60d7c941ed7102fa6c93128493449f", "score": "0.52276", "text": "def finish(self):\n pass", "title": "" }, { "docid": "a5b49537b8138bc93349fe17c35c80ce", "score": "0.5226661", "text": "def task5(self):\n \n pass", "title": "" }, { "docid": "e305a9594cb2ca413d43265a9fe70d46", "score": "0.5226587", "text": "def cog_unload(self):\n\t\tself.bg_task.cancel()\n\t\tprint(\"DNfeed cog unloaded\")", "title": "" }, { "docid": "42e5291df190d8641277d415270e513d", "score": "0.5218882", "text": "def run_post_processing(self):\n self.update_post_processing_solutions()\n\n self.exports.t = self.t\n self.exports.write(self.label_to_function, self.dt)", "title": "" }, { "docid": "cd340b26acbd6546f487de4662edef9a", "score": "0.5206462", "text": "def run(task):", "title": "" }, { "docid": "92b36027dcd9d76470e8cb2c3a3874f2", "score": "0.5203779", "text": "def _transition_from_CLEANING_UP(self, run_state):\n bundle_uuid = run_state.bundle['uuid']\n if run_state.container_id is not None:\n while True:\n try:\n finished, _, _ = docker_utils.check_finished(run_state.container)\n if finished:\n run_state.container.remove(force=True)\n break\n except docker.errors.APIError:\n logger.error(traceback.format_exc())\n time.sleep(1)\n\n for dep in run_state.bundle['dependencies']:\n self.dependency_manager.release(bundle_uuid, (dep['parent_uuid'], dep['parent_path']))\n\n child_path = os.path.join(run_state.bundle_path, dep['child_path'])\n try:\n remove_path(child_path)\n except Exception:\n logger.error(traceback.format_exc())\n\n if run_state.has_contents:\n return run_state._replace(\n stage=LocalRunStage.UPLOADING_RESULTS,\n run_status='Uploading results',\n container=None,\n )\n else:\n return self.finalize_run(run_state)", "title": "" }, { "docid": "3926745fcb5cb231035e4bde257b5088", "score": "0.52021843", "text": "def finalize(self, interrupted=False):\n pass", "title": "" }, { "docid": "34f768bb2cb461e71485054bed5fa47b", "score": "0.5201078", "text": "def end(self):\n print(\"AWS Task is finished.\")", "title": "" }, { "docid": "cfcdd31365a214fb0d91a0e1b45e9c32", "score": "0.5194666", "text": "def task2(self):\n\n pass", "title": "" }, { "docid": "f6d1ab194209e043b7d54acd082a0d7f", "score": "0.5188646", "text": "def finish(self):\n raise NotImplementedError", "title": "" }, { "docid": "0301c6cb545a9f4a7d8fd16ea4609f2f", "score": "0.5180248", "text": "def finalize(self):\n if self.output_stream:\n self.output_stream.close()\n else:\n pass", "title": "" }, { "docid": "b8743289f2b4ce07ce5fdac530990fdd", "score": "0.5179197", "text": 
"def task4(self):\n\n pass", "title": "" }, { "docid": "0d913aab6696403e0033cf4853d38456", "score": "0.51692045", "text": "def __finishRun(self):\n print('finish run called')\n os.remove(self.tempFile)\n self.tempFile = None", "title": "" }, { "docid": "23690bd51b3e1892f54d195ecc51e190", "score": "0.5165639", "text": "def _on_task_run(self):\n pass", "title": "" }, { "docid": "f1046f0ac918044bb35f75ac46bfc1be", "score": "0.5162351", "text": "def localFinalizeActualSampling(self, jobObject, model, myInput):", "title": "" }, { "docid": "72662e6d3fd64cc7dbc52833f5fb03d4", "score": "0.5157304", "text": "def finalize(self, kwargs):\n pass", "title": "" }, { "docid": "a4552d35dc0f2fe541672962e9f069f8", "score": "0.51567674", "text": "def _finalize(self, store_meta_data=True):\n self._is_run = False\n self.f_set_crun(None)\n if store_meta_data:\n self.f_store(only_init=True)", "title": "" }, { "docid": "3e2cac8b1b8e4b0ee34bf6179e463d24", "score": "0.5147796", "text": "def tearDown(self):\n del self.task\n del self.scheduler", "title": "" }, { "docid": "a3e9f0d6d9f088050d244043cac27296", "score": "0.51415724", "text": "def _complete_inst_progress():\n global __INST_PROGRESS__\n if not self._inst_uuid in __INST_PROGRESS__:\n LOG.error(_(\"The progress updater has not started\"\n \" for virtual machine %s.\") % self._inst_uuid)\n raise pvcex.IBMPowerVMProgressUpdateError(uuid=self._inst_uuid)\n\n # before clean up, make sure we set the progress to 100\n if __INST_PROGRESS__[self._inst_uuid] != 100:\n conductor.API().instance_update(self._context, self._inst_uuid,\n progress=round(100))\n del __INST_PROGRESS__[self._inst_uuid]", "title": "" }, { "docid": "fa491f1b220293d322be55d6a8153bb9", "score": "0.5140171", "text": "def test_downscale(runner):\n src_path = os.path.join(TESTDATADIR, 'wtk/ri_100_wtk_2012.h5')\n truth_path = os.path.join(TESTDATADIR, 'wtk/rechunk_3hr.h5')\n var_attrs = create_var_attrs(src_path, t_chunk=(7 * 24))\n\n with tempfile.TemporaryDirectory() as td:\n rechunk_path = os.path.join(td, 'rechunk.h5')\n attrs_path = os.path.join(td, 'var_attrs.json')\n var_attrs.to_json(attrs_path)\n\n result = runner.invoke(main, ['-src', src_path,\n '-dst', rechunk_path,\n '-vap', attrs_path,\n '-res', '3h'])\n msg = ('Failed with error {}'\n .format(traceback.print_exception(*result.exc_info)))\n assert result.exit_code == 0, msg\n\n check_rechunk(truth_path, rechunk_path)\n\n LOGGERS.clear()", "title": "" }, { "docid": "a95b86610c0cf37174d265820795e625", "score": "0.5130314", "text": "def finalize(self, loss, optimizer):\n self.loss, self.optimizer = loss, optimizer\n self.finalized = True", "title": "" }, { "docid": "a95b86610c0cf37174d265820795e625", "score": "0.5130314", "text": "def finalize(self, loss, optimizer):\n self.loss, self.optimizer = loss, optimizer\n self.finalized = True", "title": "" } ]
8c0e015fc5660b31466a762e418555be
Updates the total amount of posts made
[ { "docid": "45fe69c7411c9f44665fd644334b8242", "score": "0.0", "text": "def updatepost(id):\n file_name = 'posts.json'\n posts = json.loads(openfile(file_name))\n posts['ids'].append(id)\n writefile(file_name, posts)", "title": "" } ]
[ { "docid": "c75a06323bbbf01792c1d8e98464103f", "score": "0.6925383", "text": "def total_posts():\n return Post.published.count()", "title": "" }, { "docid": "777b5802ad52cea96062f0865321a171", "score": "0.6705593", "text": "def update_totals(self):\r\n result = self.votes.aggregate(\r\n score=Sum('delta'), total_votes=Count('id'))\r\n self.total_votes = result['total_votes'] or 0\r\n self.delta_votes = result['score'] or 0\r\n self.save()", "title": "" }, { "docid": "ab8a56bbb0de96bea7cc6aa55ea53a5c", "score": "0.66430277", "text": "def total_posts(self):\n return self.user.post_set.count()", "title": "" }, { "docid": "911e2128072b6c9098dbdc116b496a1d", "score": "0.65555024", "text": "def post_count(self, obj):\r\n return obj.post_set.count()", "title": "" }, { "docid": "a2b73d04c56ecde527bedfd11cc7a9d8", "score": "0.65479416", "text": "def post_detail(request, pk):\n post = get_object_or_404(Post, pk=pk)\n post.views += 1\n post.save()\n return render(request,'postdetail.html',{'post': post})", "title": "" }, { "docid": "22114b15d14a0ce0deefb90b24882fb6", "score": "0.6526367", "text": "def __update_total(self):\n\n self.total = sum(self.scores)", "title": "" }, { "docid": "d93e3e30acde6dedd488e5d394482fd8", "score": "0.64719266", "text": "def post(self):\n update_posts()\n build()", "title": "" }, { "docid": "4d578fb7193418522264a50847283967", "score": "0.6455365", "text": "def update_story_count_for_author(request):\n obj = get_key_or_none(request)\n if not obj:\n raise Http404\n logging.debug(\"Updating story count for Author %s\" % obj)\n obj.story_count = obj.get_story_count()\n obj.put()\n return HttpResponse('ok!')", "title": "" }, { "docid": "acd956fb54bd18589073e7e815824057", "score": "0.64391446", "text": "def post_detail(request, pk):\n post = get_object_or_404(Post, pk=pk)\n post.views += 1\n post.save()\n return render(request, \"postdetail.html\", {'post':post})", "title": "" }, { "docid": "04fe2fd00c789d49e974013fd293b98a", "score": "0.63979346", "text": "def update_total_clicks():\n clicks = 0\n for domain in Domain.query():\n clicks = clicks + domain.clickcount\n memcache.set('clicks_total', clicks)", "title": "" }, { "docid": "9077418211fadf240aade6be346a4a9a", "score": "0.63957876", "text": "def update_posts(self):\n\n if self.ids.limit_input.text == '' or \\\n self.ids.limit_input.text == 'Limit':\n self._limit = 50\n else:\n self._limit = self.ids.limit_input.text\n\n if self.ids.uid_input.text == '' or \\\n self.ids.uid_input.text == 'User ID':\n self._uid = None\n else:\n self._uid = self.ids.uid_input.text\n\n if self.ids.tag_input.text == '' or \\\n self.ids.tag_input.text == 'Tag':\n self._tag = None\n else:\n self._tag = self.ids.tag_input.text\n try:\n self.ids.display.text = feed(self._limit, self._uid, self._tag)\n except:\n pass", "title": "" }, { "docid": "2672e8d8d4da29a87b8211e74ed63616", "score": "0.6394472", "text": "def post_detail(request, pk):\n\n post = get_object_or_404(Post, pk=pk)\n post.views += 1\n post.save()\n return render(request, \"postdetail.html\", {'post': post})", "title": "" }, { "docid": "55817e4c7cb2b867d45f1be2c8639337", "score": "0.6338622", "text": "def get_account_posts_counter(account: Account) -> int:\n return account.post_author_set.all().count()", "title": "" }, { "docid": "18c8869c26284efafb3961429fa6b86f", "score": "0.6323842", "text": "def update_entry_category_visits_count(sender, instance, **kwargs):\n for category in instance.category_set.all():\n category.total_visits_count += 1\n category.save()", "title": "" }, { 
"docid": "5f3da43108c9b90e9ac7b0a2a47163e0", "score": "0.63052577", "text": "def post_detail(request, pk):\n post = get_object_or_404(Post, pk=pk)\n post.views += 1\n post.save()\n return render(request, 'post_detail.html', {'post': post})", "title": "" }, { "docid": "731f73ca4d82f74bcfd4203f060a827d", "score": "0.6238773", "text": "def increase_count(post_id):\n\n votes=None\n table = db.Solution\n\n try:\n solution = table.find_one({'_id': int(post_id)})\n votes = solution['voteup']\n\n except Exception as e:\n print('Error finding element : {}'.format(e))\n\n new_vote = int(votes)+1\n\n try:\n db.Solution.update_one(\n {'_id': int(post_id)},\n {\"$set\":{\"voteup\" : new_vote}\n }\n )\n except Exception as e:\n print('Error updating count: {}'.format(e))", "title": "" }, { "docid": "062d0cb04e6d1d77972c74a0f25d0677", "score": "0.6186989", "text": "def update_tweets_total(self):\n\t\tself.statuses = 0\n\t\tfor k,v in self.followed_users.items():\n\t\t\tself.statuses += len(v.tweets)", "title": "" }, { "docid": "6e461b468ce0177a51404964d37e9b2e", "score": "0.61827165", "text": "def testUpdateEntryCount(self):\n\t\tshutil.copy(TEST.fixture(u'flickr_tag_feed.xml'), self.feedurl)\n\t\tfeed = Feed.Load(self.store, self.feedurl)\n\t\toldcount = feed.entries.count()\n\t\t\n\t\tshutil.copy(TEST.fixture(u'flickr_tag_feed_updated.xml'), self.feedurl)\n\t\tfeed = Feed.FindByUrl(self.store, self.feedurl)\n\t\tfeed.update(self.store)\n\t\tself.assertEquals(feed.entries.count(), oldcount)", "title": "" }, { "docid": "3cab530679bfbf18b09c91ac7d0ad979", "score": "0.6157481", "text": "def reset_upvotes():\n logging.info(\"Reset upvotes in process...\")\n Post.objects.all().update(upvotes_amount=0)\n logging.info(\"Reset upvotes successfully done!\")", "title": "" }, { "docid": "dcfd95838da0668400fd566d1128f8dd", "score": "0.6132344", "text": "def posts(self, obj):\n return obj.brainbankpost_set.count()", "title": "" }, { "docid": "ba320810388c3da00426b331520cee0c", "score": "0.6128701", "text": "def update_total(self, size=1):\n self.total += size", "title": "" }, { "docid": "09a1f2a680b546e4e47111756c151a9a", "score": "0.60911375", "text": "def update_total(self):\n self.order_total = self.orderitems.aggregate(Sum('orderitem_total'))['orderitem_total__sum'] or 0\n self.save()", "title": "" }, { "docid": "db2caf6584b677cf15dbb3108350fff4", "score": "0.6086138", "text": "async def update_posts(self, _):\n await self.parse_posts()\n return web.json_response(dict(message=\"Updated!\"))", "title": "" }, { "docid": "93a8393287e0b93f2e70c5b903d861d3", "score": "0.60603285", "text": "def update_count(self):\n self.button_clicks += 1\n self.button[\"text\"] = \"total Clicks: \" + str (self.button_clicks)", "title": "" }, { "docid": "ba74e9f8c061ffdcf458ac5f98185759", "score": "0.60562676", "text": "def count_posts(self):\n return self.ultra_blog_posts.all().count()", "title": "" }, { "docid": "2c1e7474ca97b575c0c327dc2469cf0c", "score": "0.6028252", "text": "def ex_increase_count(post_id):\n votes=None\n table = db.Experience\n\n try:\n Experience = table.find_one({'_id': int(post_id)})\n votes = Experience['voteup']\n\n except Exception as e:\n print('Error finding element : {}'.format(e))\n\n new_vote = int(votes)+1\n\n try:\n db.Experience.update_one(\n {'_id': int(post_id)},\n {\"$set\":{\"voteup\" : new_vote}\n }\n )\n except Exception as e:\n print('Error updating count: {}'.format(e))", "title": "" }, { "docid": "6262345df33d28f6aa66e72c91684770", "score": "0.6026869", "text": "def 
increment_total_requests(self):\n self.total_requests += 1", "title": "" }, { "docid": "4db265ef9af7a65a73d785acace42f42", "score": "0.6017058", "text": "def _update_counts(self, date, job, entry):\n if job.status == 'COMPLETED':\n entry.completed_count += 1\n entry.total_count += 1\n elif job.status == 'FAILED':\n entry.failed_count += 1\n entry.total_count += 1\n elif job.status == 'CANCELED':\n entry.canceled_count += 1\n entry.total_count += 1\n\n if job.error:\n if job.error.category == 'SYSTEM':\n entry.error_system_count += 1\n elif job.error.category == 'DATA':\n entry.error_data_count += 1\n elif job.error.category == 'ALGORITHM':\n entry.error_algorithm_count += 1", "title": "" }, { "docid": "6ad05c4b046214eb11f6227577a2e673", "score": "0.59979635", "text": "def count_votes(self):\n dic = Counter(self.votes.values_list(\"value\", flat=True))\n Event.objects.filter(id=self.id).update(total_votes=dic[True] - dic[False])\n self.refresh_from_db()", "title": "" }, { "docid": "f6d11ac649e588a957e69e63596fda3b", "score": "0.5995157", "text": "def increase_total_hit(self):\n self.client.incr(\"total_hit\")", "title": "" }, { "docid": "20089a2eca5ac38524a1d8627ee078ee", "score": "0.5994227", "text": "def add_post(self, postId, *args, **kwargs):\n if postId not in self.posts:\n self.posts.append(postId)\n self.post_count = str(len(self.posts))\n self = super().update(\n {'posts': self.posts, 'post_count': self.post_count})\n return self.save()\n return self", "title": "" }, { "docid": "d11c6c29ed348f5f310805b78727d7e4", "score": "0.59911263", "text": "def sum_posts(kinesis_actors):\n total_records = 0\n for actor in kinesis_actors:\n total_records += actor.total_records\n return total_records", "title": "" }, { "docid": "d11c6c29ed348f5f310805b78727d7e4", "score": "0.59911263", "text": "def sum_posts(kinesis_actors):\n total_records = 0\n for actor in kinesis_actors:\n total_records += actor.total_records\n return total_records", "title": "" }, { "docid": "e1d58bb482dead2ee396609bade44113", "score": "0.59740806", "text": "def update_total(order):\n if not isinstance(order, Order):\n raise Http404(\"{} is not an instance of Order.\".format(order))\n else: \n items = order.items.all()\n order.total = 0\n for item in items:\n order.total += item.price\n order.save()", "title": "" }, { "docid": "5886a6a9d5e00ca4a3c6ad4701282e5c", "score": "0.59644246", "text": "def update(self, count=1):\n self._last_update = datetime.now()\n self._count += count", "title": "" }, { "docid": "35b45568ecf53c1c893f9873b7745990", "score": "0.5955138", "text": "def update_total(self, delta: int) -> None:\n with self.lock:\n self.total_count += delta\n self._update_progress()", "title": "" }, { "docid": "292f4b5f7ca1e835000bb1725318025d", "score": "0.5932628", "text": "def update_count(self):\n self.bttn_clicks += 1\n self.bttn[\"text\"]=\"Total Clicks: \" + str(self.bttn_clicks)\n if self.bttn_clicks>=100:\n self.bttn[\"text\"] = \"You're such a loser... 
Total Clicks: \" + str(self.bttn_clicks)", "title": "" }, { "docid": "4da32d4eede53987a80c306af3e01493", "score": "0.5909062", "text": "def update_on_save(sender, instance, created, **kwargs):\n instance.purchase.update_total()", "title": "" }, { "docid": "c1cce5ff5525059dd154e84fb0b11c73", "score": "0.58914053", "text": "def update_total_votes(self):\n sql = \"\"\"UPDATE pollit_poll SET total_votes = v.votes\n FROM (SELECT poll_id, SUM(votes) as votes FROM pollit_pollchoice \n GROUP BY poll_id) v WHERE pollit_poll.id = v.poll_id\"\"\"\n from django.db import connection, transaction\n cursor = connection.cursor()\n cursor.execute(sql)\n transaction.commit_unless_managed()", "title": "" }, { "docid": "39c27ea6e575dbb34fdb8192bcf8aa8a", "score": "0.58754885", "text": "def update(self):\n self.occurrences += 1", "title": "" }, { "docid": "9fe9648c88e5ab72c8081f3429fc9348", "score": "0.5863087", "text": "def update_collections_total():\n\n data = (CollectionCount.objects.values('collection_id')\n .annotate(sum=Sum('count')))\n\n ts = [tasks.update_collections_total.subtask(args=[chunk])\n for chunk in chunked(data, 50)]\n group(ts).apply_async()", "title": "" }, { "docid": "93a16f552da648c3b67a3e6a1edda3dd", "score": "0.5837815", "text": "def update_total(self):\n self.order_total = self.lineitems.aggregate(\n Sum('lineitem_total'))['lineitem_total__sum'] or 0\n self.item_quantity_count = self.lineitems.aggregate(\n Sum('quantity'))['quantity__sum']\n self.combo_quantity_count = self.lineitems.aggregate(\n Sum('combo_quantity'))['combo_quantity__sum']\n self.order_count = 0\n if self.item_quantity_count:\n self.order_count += self.item_quantity_count\n if self.combo_quantity_count:\n self.order_count += self.combo_quantity_count\n self.delivery_fee = settings.DELIVERY_FEE\n self.grand_total = float(\n self.order_total) + self.delivery_fee - float(self.discount)\n self.save()", "title": "" }, { "docid": "13cc6b8bbe07e2caae54a388dd479c49", "score": "0.58350796", "text": "def update_on_save(sender, instance, created, **kwargs):\n # update_total obtained from checkout.models\n instance.order.update_total()", "title": "" }, { "docid": "4f414d90dd41c77d938ae6dce433d7e9", "score": "0.5832133", "text": "def set_post_counts(self, mock_request, threads_count=1, comments_count=1):\n self._set_mock_request_data(mock_request, {\n \"threads_count\": threads_count,\n \"comments_count\": comments_count,\n })", "title": "" }, { "docid": "5a38943061292f58712533f48e646aca", "score": "0.5791238", "text": "def rate_post(self, postid, token, uid):\n get_response = requests.get(base_url + '/posts')\n posts_data = json.loads(get_response.text)\n data = {}\n data['uid'] = uid\n data['token'] = token\n for item in posts_data:\n if postid == item[u'postid']:\n data['postid'] = item[u'postid']\n else:\n continue\n requests.post(base_url + '/upvotes', data)", "title": "" }, { "docid": "f47532e83de052d6bc121655142dce2f", "score": "0.5785656", "text": "def testAddEntryCount(self):\n\t\tshutil.copy(TEST.fixture(u'flickr_tag_feed.xml'), self.feedurl)\n\t\tfeed = Feed.Load(self.store, self.feedurl)\n\t\toldcount = feed.entries.count()\n\t\t\n\t\tshutil.copy(TEST.fixture(u'flickr_tag_feed_added.xml'), self.feedurl)\n\t\tfeed = Feed.FindByUrl(self.store, self.feedurl)\n\t\tfeed.update(self.store)\n\t\tself.assertEquals(feed.entries.count(), oldcount + 1)", "title": "" }, { "docid": "653f9b2e5d0a62e36ed4e93a46cea6e5", "score": "0.5783948", "text": "def update_count(self, delta: int) -> None:\n with self.lock:\n self.count += 
delta\n self._update_progress()", "title": "" }, { "docid": "febf542a4b3656f6d5c0a1eefc3fe180", "score": "0.5775445", "text": "def _update(self, count=True, forced=False):", "title": "" }, { "docid": "e1d84110cc2bf2af27e6acaf8ba055d2", "score": "0.5759842", "text": "def total_likes(self):\n rate=0\n likes= Like.objects.filter(post=self)\n for i in likes:\n if i.rate==True:\n rate+=1\n else:\n rate-=1\n return rate", "title": "" }, { "docid": "2ae46f3e9802f1fc168e4d83a1ad0425", "score": "0.57247436", "text": "def total_ans(self):\n\n return Answer.objects.filter(post=self).count()", "title": "" }, { "docid": "355d495bea38961430f21b96da6deb9a", "score": "0.5716899", "text": "def upvote_posts(self):\n list_upvotes = []\n string_upvotes = ''\n final_list = []\n token = GlobalData._user_model.get_token()\n post_getter = PostMessageInterface()\n uid = GlobalData._user_model.get_id()\n if self.ids.upvotes_input.text == '' or \\\n self.ids.upvotes_input.text == \\\n '[MAX 3 ENTRIES] Example: 1 3 20':\n list_upvotes = []\n else:\n string_upvotes = self.ids.upvotes_input.text\n list_upvotes = string_upvotes.split()\n\n for i in range(0, len(list_upvotes)):\n final_list.append(int(list_upvotes[i]))\n\n final_list.sort(key=int)\n for i in range(0, len(final_list)):\n if final_list[i] in self._current_ids:\n post_getter.rate_post(final_list[i], token, uid)\n else:\n continue\n\n self.update_posts()", "title": "" }, { "docid": "a15bd5585973247b3cfde8901bfda19c", "score": "0.5715961", "text": "def get_votes(self):\n return sum([\n Upvote.query.filter_by(post_id=post.id , vote=1).count()\n for post in self.posts\n ])", "title": "" }, { "docid": "8c81ae835106c558797b453fcafb7150", "score": "0.570416", "text": "def increment_views(self):\n self.views += 1\n self.save()", "title": "" }, { "docid": "e5fa75c6e475e081ffbee53109db9a06", "score": "0.56977785", "text": "def updateScore(self, total_score):\n self.score = self.score + total_score", "title": "" }, { "docid": "fdae4260a8be2d18ef02166242f76c60", "score": "0.56964254", "text": "def update_story_count_for_all_authors(request):\n logging.info(\"Updating story count for all Authors\")\n [taskqueue.add(\n url = '/_update_story_count_for_author/',\n params = {'key' : i.key()},\n method='GET'\n ) for i in Author.all()]\n return HttpResponse('ok!')", "title": "" }, { "docid": "8ea92e43e191f2154b390ffde13ca1af", "score": "0.56854033", "text": "def decrease_count(post_id):\n votes = None\n table = db.Solution\n\n try:\n solution = table.find_one({'_id': int(post_id)})\n votes = solution['votedown']\n\n except Exception as e:\n print('Error finding element : {}'.format(e))\n\n new_vote = int(votes) + 1\n try:\n db.Solution.update_one(\n {'_id': int(post_id)},\n {\"$set\": {\"votedown\" : new_vote}\n }\n )\n except Exception as e:\n print('Error updating count: {}'.format(e))", "title": "" }, { "docid": "781e0f517c6d04ab5cc3a230d7e6dda7", "score": "0.56802917", "text": "def updateTotal(self):\n self.totalPoints += self.roundPoints\n #self.roundPoints = 0", "title": "" }, { "docid": "b46ee23cd21874928be89fafcbfdccc8", "score": "0.5668422", "text": "def update(self):\n sql = 'SELECT path, content FROM posts WHERE updated = 1'\n rows = self.execute(sql).fetchall()\n # posts that were not changed\n paths = [path for path, content in rows if not Post(path).diff(content)]\n self.update_records(paths=paths, mapping={'updated': 0})\n # posts that were changed\n self._delete_updated()\n posts = [Post(path) for path, content in rows if Post(path).diff(content)]\n for post 
in posts:\n post.update_time()\n self._load_post(post)", "title": "" }, { "docid": "da4344666f34a3931e84131b51b7c9da", "score": "0.56624925", "text": "def post_count(self):\r\n return sum([item._method == 'POST' for item in self])", "title": "" }, { "docid": "e4f1fb77c6c16ca657d199910c501c23", "score": "0.5653265", "text": "def upserted_count(self):\n ...", "title": "" }, { "docid": "e2c7232a07509cf43d73712ff679168d", "score": "0.5652402", "text": "def get_post_count(user):\n count = Post.objects.filter(publisher=user).count()\n return count", "title": "" }, { "docid": "8c898c8eff1c44a6f5734c2931913018", "score": "0.56517", "text": "def update_on_save(sender, instance, created, **kwargs):\n instance.booking.calculate_total()", "title": "" }, { "docid": "87f261721986ffe2f1c86b40e303a8b3", "score": "0.56443864", "text": "def update_on_delete(sender, instance, **kwargs):\n instance.purchase.update_total()", "title": "" }, { "docid": "b624c59ce2878c301a0f696e54092094", "score": "0.56439525", "text": "def update_total(self):\n\n for item in self.item_list:\n if item == \"white cake\":\n self.total += Cake.white_cake_price\n elif item == \"crepe cake\":\n self.total += Cake.crepe_cake_price\n elif item == \"birthday cake\":\n self.total += Cake.birthday_cake_price\n\n print(f\"{self.username}'s total: ${round(self.total, 2)}\")\n return round(self.total, 2)", "title": "" }, { "docid": "02885200240c9f3b26d131ffbc350397", "score": "0.5628756", "text": "def postUpdate():", "title": "" }, { "docid": "d0e728fa74d1bcc93bc27f3326e30311", "score": "0.5622293", "text": "def upvote(self, request, pk):\n post = get_object_or_404(Post, id=pk)\n\n post.upvotes = F(\"upvotes\") + 1 # for atomicity\n post.save(update_fields=[\"upvotes\"])\n post.refresh_from_db()\n\n return Response(PostSerializer(post).data)", "title": "" }, { "docid": "d7b977d17b904ad88afc3e89ca123f62", "score": "0.56076914", "text": "def update_total_progress(self, progress):\n self.total_progress += progress", "title": "" }, { "docid": "e4157c956c8c9d2bc1cf722b515cb9b4", "score": "0.5598298", "text": "def on_update(self, count):\n self._logger.info(\"Update count : %s \" % count)", "title": "" }, { "docid": "4db84d93a86176cc62473c64c2c99c66", "score": "0.5596598", "text": "def update_score(self, points):\n self.score += points", "title": "" }, { "docid": "e87761f0ed81f5599a822196e036ecc1", "score": "0.5590453", "text": "def tally(self, score):\n self._score += score\n self._count += 1", "title": "" }, { "docid": "0bbb09c379f32452064c5dfa91f55d89", "score": "0.5589981", "text": "def update_total(self):\n\n self.order_total = self.lineitems.aggregate(\n Sum('lineitem_total'))['lineitem_total__sum'] or 0\n\n self.delivery_cost = settings.TRANSPORT_COST\n self.grand_total = self.order_total + self.delivery_cost\n self.save()", "title": "" }, { "docid": "b088d8c6f5c3f6bed63bbf72345ef64f", "score": "0.5589249", "text": "def update_score(self):\r\n self.__score += 1", "title": "" }, { "docid": "4248276be008e84c3c0597e0b35ee1d1", "score": "0.5587861", "text": "def customCount(num):\n\tglobal total\n\tglobal backup\n\tglobal app\n\ttry:\n\t\tvalue = int(app.getEntry(\"n=\"))\n\texcept TypeError:\n\t\treturn\n\tbackup = total\n\ttotal += value\n\tapp.setLabel(\"total\", \"Total: \" + str(total))", "title": "" }, { "docid": "8bb711e4b13c9a9ef8da9f03d4ec2df6", "score": "0.55869377", "text": "def counter_view(request):\n PushTheButtonsChannels.new_push_available(\"asd\",\"asd\", \"123\")\n if request.method == 'POST':\n Counter.objects.create()\n if 
request.method == 'PATCH':\n counter = Counter.objects.last()\n counter.count = counter.count + 1\n counter.save()\n return get_last_counter_response()", "title": "" }, { "docid": "222d04bfc918892c1eb54d06282b2c78", "score": "0.55812633", "text": "def update_total_marks(self):\n\t\tmarks = 0.0\n\t\tquestions = self.fixed_questions.all()\n\t\tfor question in questions:\n\t\t\tmarks += question.points\n\t\tfor question_set in self.random_questions.all():\n\t\t\tquestion_set.marks = question_set.questions.first().points\n\t\t\tquestion_set.save()\n\t\t\tmarks += question_set.marks * question_set.num_questions\n\t\tself.total_marks = marks\n\t\tself.save()", "title": "" }, { "docid": "d1bc1943021730b0e3bd6bb06f632bed", "score": "0.5566658", "text": "def ex_decrease_count(post_id):\n votes = None\n table = db.Experience\n\n try:\n Experience = table.find_one({'_id': int(post_id)})\n votes = Experience['votedown']\n\n except Exception as e:\n print('Error finding element : {}'.format(e))\n\n new_vote = int(votes) + 1\n try:\n db.Experience.update_one(\n {'_id': int(post_id)},\n {\"$set\": {\"votedown\" : new_vote}\n }\n )\n except Exception as e:\n print('Error updating count: {}'.format(e))", "title": "" }, { "docid": "9558738a93d6b5af00eeb0a7a27a060c", "score": "0.5549557", "text": "def updateGames(self):\n self.games += 1", "title": "" }, { "docid": "1a0b89cfaa9e8a5e4039eadd68d3ec84", "score": "0.55487174", "text": "def test_post_stats(self):\n user_id = 0\n posts = ['Hello World!', 'I love Flask!', 'Some other very interesting post']\n for post in posts:\n add_post(user_id, post)\n\n with self.client:\n response = self.client.get(f'/posts/stats')\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 200)\n today = date.today().strftime(\"%d/%-m/%Y\")\n self.assertEqual(data['data']['stats'][today], len(posts))", "title": "" }, { "docid": "6130dcdeebab710223371db47b080784", "score": "0.553675", "text": "def update_api_hit(self):\n self.api_hits = self.api_hits + 1\n self.save()", "title": "" }, { "docid": "8a34fd35b98b793987a754695e71d116", "score": "0.5535996", "text": "def total_count(self, total_count):\n\n self._total_count = total_count", "title": "" }, { "docid": "08e0c9262ad8883007a2d390125aa625", "score": "0.5535078", "text": "def update_on_delete(sender, instance, **kwargs):\n instance.order.update_total()", "title": "" }, { "docid": "e12b1d8d5935dd10257937f045d81cb2", "score": "0.5535001", "text": "def total_blogs(self, total_blogs):\n\n self._total_blogs = total_blogs", "title": "" }, { "docid": "fe5d2cad24d42e0ba864bb761d029426", "score": "0.5534335", "text": "def update(self):\n\t\tself.n += 1", "title": "" }, { "docid": "847a8d1ce32cb853b59a79219b280a83", "score": "0.5526297", "text": "def modified_count(self):\n ...", "title": "" }, { "docid": "847a8d1ce32cb853b59a79219b280a83", "score": "0.5526297", "text": "def modified_count(self):\n ...", "title": "" }, { "docid": "0ba79465f91353922f6ddbcdd04dfcfb", "score": "0.55221945", "text": "def increment(self):\r\n self.count += 1", "title": "" }, { "docid": "28d54b694345a06562fc9d8feec686e1", "score": "0.5516208", "text": "def increment(self):\n self.count += 1", "title": "" }, { "docid": "1bc0b08b736ef0dd33187b89c627bf67", "score": "0.5514694", "text": "def increment_hits(self, id):\n self.filter(pk=id).update(hits=F('hits') + 1, last_used=now())", "title": "" }, { "docid": "1e8e0f2c32748f9a602583794c26c34c", "score": "0.55146694", "text": "async def _update_feed_update_entries(self, 
feed_external_ids: Set[str]) -> int:\n update_external_ids = self._managed_external_ids.intersection(feed_external_ids)\n count_updated = len(update_external_ids)\n await self._update_entities(update_external_ids)\n return count_updated", "title": "" }, { "docid": "70eca883709f657874a4ac2ad9f98283", "score": "0.55144423", "text": "def add(self, count: int = 1) -> int:\n self.quantity = self.quantity + count\n return count", "title": "" }, { "docid": "36779f08f3c995678519f1dc4801cfdd", "score": "0.55022866", "text": "def getTotalCount(self) -> int:\n ...", "title": "" }, { "docid": "e8fe17ceb3c0738d9bfaa85f7f2e7054", "score": "0.54985416", "text": "def post_detail(request, id):\n post = get_object_or_404(Post, pk=id)\n post.views += 1 # clock up the number of post views\n post.save()\n return render(request, \"blog/postdetail.html\", {'post': post})", "title": "" }, { "docid": "94497cf07ca978c32750b508a84f73e4", "score": "0.5487382", "text": "async def counter(self, ctx, member: discord.Member, points: int):\n await self.config.member(member).counter.set(points)\n await ctx.send(\n \"Done. {} now has {} total messages counted.\".format(member.mention, points)\n )", "title": "" }, { "docid": "0b7e654c29319acfccd77629fa89554e", "score": "0.5475299", "text": "def increment(self, count: int):", "title": "" }, { "docid": "4db47fbf5700ac5c9ff017f47483eb97", "score": "0.5469122", "text": "def total_count(self):\n return self._total_count", "title": "" }, { "docid": "14aacbf367fa06c610d076d907d2fe8f", "score": "0.5464134", "text": "def update_on_delete(sender, instance, **kwargs):\n instance.booking.calculate_total()", "title": "" }, { "docid": "98787b872693add01f03eced9ed4afc3", "score": "0.54535794", "text": "def update_progress(self, tray: int, count: int, total: int) -> None:\n if tray == -1:\n for i in range(6):\n self.objects[i]['progress'].setMaximum(total)\n self.objects[i]['progress'].setValue(count)\n else:\n self.objects[tray]['progress'].setMaximum(total)\n self.objects[tray]['progress'].setValue(count)", "title": "" }, { "docid": "40797848835c3fb6e8735b704d6caa66", "score": "0.5452677", "text": "def upvotepost(post_id):\n\n post_id = int(post_id)\n vote_value = 1\n validateVote(post_id, current_user, vote_value)\n return redirect(redirect_url())", "title": "" }, { "docid": "ac97307c0ca8443a1c2f5e5f5615a2ed", "score": "0.5448473", "text": "def process_post(self, message):\n if self.person.online == True:\n like_total = 0\n\n for topic in message.topics:\n if topic in self.interests:\n like_total += self.interests[topic]\n\n like_total = self.facets.process_post(message, like_total, self.person)\n self.model.logger.log(0, \"%r had reaction of %d to %r\" % (self.person, like_total, message))\n self.repost_decide(message)\n return like_total\n return None", "title": "" }, { "docid": "8dd23c975f929f2953c28543f8f28138", "score": "0.5446579", "text": "def getTotalCount(self):\n return sum(self._tallies)", "title": "" }, { "docid": "7833fa2b5a045589244d72e896036fbe", "score": "0.5439383", "text": "def update_posts_on_tick(self):\r\n for market in self.map.markets:\r\n if market.product < market.product_capacity:\r\n market.product = min(market.product + market.replenishment, market.product_capacity)\r\n for storage in self.map.storages:\r\n if storage.armor < storage.armor_capacity:\r\n storage.armor = min(storage.armor + storage.replenishment, storage.armor_capacity)", "title": "" } ]
c513c30ba32a62d232eab81145ddc9f0
Moves the aliens to the right.
[ { "docid": "f2ac8bb5224bf272097534b659d3890c", "score": "0.72090906", "text": "def alien_right(self):\n for alien_col in self._aliens:\n for alien in alien_col:\n if alien != None:\n alien.x = alien.x + ALIEN_H_WALK", "title": "" } ]
[ { "docid": "323e0c73ed9e7793c55a6f67e0c32085", "score": "0.80261123", "text": "def move_aliens_right(self):\n self.x += ALIEN_H_WALK", "title": "" }, { "docid": "5ea375b75597fc52cec7d2c55d54c02d", "score": "0.75233734", "text": "def _moveRight(self,list):\n for row in list:\n for alien in row:\n if alien != None:\n alien.right += ALIEN_H_WALK", "title": "" }, { "docid": "e3eb2dd7b58ff345a9fd371393b81189", "score": "0.7446899", "text": "def move_right(self):\n self.x += 1", "title": "" }, { "docid": "9de8b3cf988a87d2c56f4c085f4cbe86", "score": "0.7301645", "text": "def turn_right(self):\n self.perform_action(actions.RIGHT)", "title": "" }, { "docid": "227afe5371c2997e2b523a503cb8ae02", "score": "0.72393227", "text": "def turnRight(self):\n pass", "title": "" }, { "docid": "1a2ab0935826bd3514be1b20c7c1befe", "score": "0.7196056", "text": "def go_right(self):\n self.change_x = 4\n self.direction = \"R\"", "title": "" }, { "docid": "2360cae620e7f20d2c4a4003ba0b8eb6", "score": "0.71806276", "text": "def go_right(self):\n self.change_x = 6\n self.direction = \"R\"", "title": "" }, { "docid": "1b19aba17d17d42c113e8eb733163ff1", "score": "0.7167389", "text": "def turn_right(self):\n self.direction = self.direction.right()", "title": "" }, { "docid": "4a07b891f9b47ebe4e70bcb28a679373", "score": "0.7163237", "text": "def shift_right(self):\n self.x_pos += self.rightward_shift", "title": "" }, { "docid": "0d90d906c0cbb3afdf92bd2a9da05e5d", "score": "0.7150836", "text": "def go_right(self):\r\n \r\n \r\n \r\n self.change_x = 3\r\n self.direction = \"R\"\r\n \r\n self.walking = True", "title": "" }, { "docid": "930cfbffdddca7cd122577c884c53f1c", "score": "0.7147136", "text": "def go_right(self):\n self.change_x = 5\n self.direction = \"R\"", "title": "" }, { "docid": "bad9c702503382a02d29119dfa20f329", "score": "0.71310544", "text": "def move_right(self):\n self.steps += \"r\"", "title": "" }, { "docid": "0618d474c53b4bdb3be08ef390554fca", "score": "0.7077225", "text": "def turnRight(self):", "title": "" }, { "docid": "0618d474c53b4bdb3be08ef390554fca", "score": "0.7077225", "text": "def turnRight(self):", "title": "" }, { "docid": "612c330fa2fbbde179b2f7ba0538e6d9", "score": "0.7049387", "text": "def turn_right(self):\n if self._direction == 'NORTH':\n self._direction = 'EAST'\n return\n if self._direction == 'SOUTH':\n self._direction = 'WEST'\n return\n if self._direction == 'EAST':\n self._direction = 'SOUTH'\n return\n if self._direction == 'WEST':\n self._direction = 'NORTH'\n return", "title": "" }, { "docid": "46906bda119f30f91e5aaa85efd6e110", "score": "0.69984835", "text": "def moveRightby1(self):\n self.addTarget([(self.x + 96, self.y)])\n self.setIndex(self.index + 1)", "title": "" }, { "docid": "b78d790ca73f44055ceeb97f27b053bd", "score": "0.6969685", "text": "def turn_right(self):\n\t\tif self.direction == self.north:\n\t\t\tself.direction = self.east\n\t\telif self.direction == self.east:\n\t\t\tself.direction = self.south\n\t\telif self.direction == self.south:\n\t\t\tself.direction = self.west\n\t\telif self.direction == self.west:\n\t\t\tself.direction = self.north", "title": "" }, { "docid": "a1024a96e65dec7d7e9b5994b9b657f7", "score": "0.696408", "text": "def go_right(self):\n self.vel_x = 6", "title": "" }, { "docid": "c640b59f64dbbc6daf290d291e4afdf7", "score": "0.69594675", "text": "def move_right(self):\n for block in self.blocks:\n block.move_right()", "title": "" }, { "docid": "d35075befd22d44fa2f96c4d6c2a8192", "score": "0.6941765", "text": "def go_right(self):\n 
self.change_x = self.speed\n self.direction = 'r'", "title": "" }, { "docid": "8ad8ce92c424f79671957649daafc8b7", "score": "0.6869159", "text": "def move_aliens_left(self):\n self.x -= ALIEN_H_WALK", "title": "" }, { "docid": "5686caf1348d4097f372939773f23cda", "score": "0.68576485", "text": "def rotate_right(self):\r\n\r\n if self.direction == 'N':\r\n self.direction = 'E'\r\n elif self.direction == 'S':\r\n self.direction = 'W'\r\n elif self.direction == 'W':\r\n self.direction = 'N'\r\n else:\r\n self.direction = 'S'", "title": "" }, { "docid": "cf92511412746a2538d37d8b31e536b6", "score": "0.6848221", "text": "def go_right(self):\n self.change_x = 6", "title": "" }, { "docid": "cf92511412746a2538d37d8b31e536b6", "score": "0.6848221", "text": "def go_right(self):\n self.change_x = 6", "title": "" }, { "docid": "ef9a6d5d050167c3cecd638d946de488", "score": "0.67971265", "text": "def move_right(self) -> None:\n if self.velocity == (-VELOCITY_NORM, 0):\n return\n self.velocity = (VELOCITY_NORM, 0)", "title": "" }, { "docid": "f5c6602a1d12f6a801b106136f304924", "score": "0.67806274", "text": "def move_right(self):\n self.score += 0.02\n if self.map[self.hero.position[1]][self.hero.position[0] + 1] == Service.wall:\n return\n self.hero.right()\n self.interact()", "title": "" }, { "docid": "71bd4c5d9c461e0cca51af2a169d67c5", "score": "0.67345506", "text": "def right():\n current_face_index = directions.index(robot_location[2])\n robot_location[2] = directions[current_face_index + 1] \\\n if current_face_index < 3 else directions[0]", "title": "" }, { "docid": "4a37a07e302c03fdc0842a92ea8e61d2", "score": "0.6722755", "text": "def turn_right():\n turn_left()\n turn_left()\n turn_left()", "title": "" }, { "docid": "cbc160aaff44ca34af42e3e07002bb5c", "score": "0.6699034", "text": "def move_right(self):\n return self.__move_right", "title": "" }, { "docid": "fe9cce2596eb36179ac95da76520c2d3", "score": "0.669735", "text": "def move_right(self, ship):\n pygame.event.post(Event(pygame.KEYDOWN, key=pygame.K_RIGHT))\n gf.check_events(self.ai_settings, self.screen, self.stats,\n self.play_button, self.ship, self.aliens, self.bullets)\n self.ship.update()", "title": "" }, { "docid": "b99ce1237605e2f1604eb72443b923d7", "score": "0.6645299", "text": "def _do_turn_right(self):\n self.board_state.turn_right()", "title": "" }, { "docid": "76c8306b51fc254fc01ca2bbc1b94e3c", "score": "0.6589981", "text": "def move_right(self):\n self.lcd_byte(\n self.LCD_CURSORSHIFT | self.LCD_DISPLAYMOVE | self.LCD_MOVERIGHT,\n self.LCD_CMD)", "title": "" }, { "docid": "0c88c920ee7b47ca0d5fb0297db02929", "score": "0.65388656", "text": "def turn_right(self):\n if self.head_direction_x == 1:\n self.head_direction_x = 0\n self.head_direction_y = 1\n elif self.head_direction_y == 1:\n self.head_direction_x = -1\n self.head_direction_y = 0\n elif self.head_direction_x == -1:\n self.head_direction_x = 0\n self.head_direction_y = -1\n else:\n self.head_direction_x = 1\n self.head_direction_y = 0", "title": "" }, { "docid": "e19cce7580b45de3a2260f26088677f4", "score": "0.65274364", "text": "def right(self):\n if RIGHT in self.possible_moves():\n self.state = self.__right()\n self.__fill_random_field()", "title": "" }, { "docid": "7ae7b5c61602c9774b1f30050e11c36c", "score": "0.6526534", "text": "def turn_right(self, node):\n dirs = ['N', 'E', 'S', 'W']\n return dirs[(dirs.index(node.direction)+1) % len(dirs)]", "title": "" }, { "docid": "cf1d3e90f51611bce4a796985713b7da", "score": "0.65255666", "text": "def right(self):\n right()", 
"title": "" }, { "docid": "60588b2825d9ae014b60d09f034f5204", "score": "0.65144587", "text": "def moveRight(self):\n # Check if not too close to the rightside wall, and not moving too fast\n if self.x < 497 and self.xVelocity < 8:\n self.xVelocity += 1", "title": "" }, { "docid": "4113759d5ab6c2c5934b7313459033e5", "score": "0.6506617", "text": "def move_right(self):\n # global continue_up, continue_down, continue_right, continue_left\n self.continue_up = 0\n self.continue_down = 0\n self.continue_left = 0\n if self.current_line.text and self.current_line.number == self.lines.total:\n self.lines.add() # create emtpy line\n\n try: # if tab, move 4 spaces\n if self.current_line.x - 6 < self.current_line.indentation and \\\n self.current_line.text[self.current_line.x - 6:self.current_line.x - 6 + 4] == ' ' and \\\n self.current_line.y == self.current_line.end_y:\n self.current_line.x += 4\n return\n except BareException:\n pass\n\n if self.config['cursor_acceleration']:\n move_rate = min(self.config['cursor_max_horizontal_speed'], int(self.continue_right / 10.0) + 1)\n else:\n move_rate = 1\n self.continue_right += 1\n self.current_line.x += move_rate", "title": "" }, { "docid": "100faba998257426442eb38177fcde17", "score": "0.6504694", "text": "def robot_right(self):\r\n\t self.x = self.x + 1\r\n\t if self.x > 9:\r\n\t\t self.x = 9", "title": "" }, { "docid": "9813eee101b294e426a70bf04f35a7f0", "score": "0.649412", "text": "def right(self):\n for col in self.tiles:\n for row in reversed(col):\n if row:\n row.slide(self, (0,1))", "title": "" }, { "docid": "e344f4b6f2514e59960bfb695221465a", "score": "0.64802814", "text": "def right(self):\n return self.place(self.direction, (self.facing + 0.5)%2, self.grid)", "title": "" }, { "docid": "eeeffd80450377fa380ff319da45b9bc", "score": "0.6478905", "text": "def MoveRight(self):\n self.location += pygame.Vector2(5, 0)\n self.ConfirmMove()", "title": "" }, { "docid": "9d25c0372f8b94b11008b2109d2b9240", "score": "0.64629257", "text": "def rightDirection(*args, **kwargs):\n \n pass", "title": "" }, { "docid": "ccf24a7f7fcd6c0155816859aafd2514", "score": "0.64615583", "text": "def go_right(self):\r\n self.change_x = 6\r\n if current_level_no == 5:\r\n self.change_x -=12\r\n self.direction=\"R\"", "title": "" }, { "docid": "9260803436090954f1bfd40b816254ff", "score": "0.6449436", "text": "def go_right(self): \n self.change_x = 6", "title": "" }, { "docid": "7d7858553e417b209e086e6792172642", "score": "0.64476615", "text": "def go_right(self):\n if self.location.x != (Game.SIZE - 1):\n self.location.x += 1\n else:\n self.location.x = 0\n return self.location.x", "title": "" }, { "docid": "7e0f498b98dea694caef9e24ebebc6ff", "score": "0.64418066", "text": "def turn_right(self) -> \"Direction\":\n index = DIRECTIONS.index(self)\n return DIRECTIONS[(index + 1) % len(DIRECTIONS)]", "title": "" }, { "docid": "cb7639abb6583b271ee4efd9036e0288", "score": "0.64379036", "text": "def move_aliens_down(self):\n self.y -= ALIEN_V_WALK/ALIEN_ROWS", "title": "" }, { "docid": "045b2a9756e75a1a4d68d6ea9aaab914", "score": "0.643437", "text": "def move_right(self):\r\n self.speedX = 10", "title": "" }, { "docid": "169c0f42ef3e2aa663d1c430dbfa48a4", "score": "0.64147055", "text": "def step_right(self):\n if self.o == AgentOrientation.UP:\n return AgentPosition(self.x + 1, self.y, self.o)\n if self.o == AgentOrientation.RIGHT:\n return AgentPosition(self.x, self.y + 1, self.o)\n if self.o == AgentOrientation.DOWN:\n return AgentPosition(self.x - 1, self.y, self.o)\n if self.o == 
AgentOrientation.LEFT:\n return AgentPosition(self.x, self.y - 1, self.o)", "title": "" }, { "docid": "4b46264681641ccbf3e5f8dd0583575b", "score": "0.6411701", "text": "def alien_down(self):\n for alien_col in self._aliens:\n for alien in alien_col:\n if alien != None:\n alien.y = alien.y - ALIEN_V_WALK\n if self._direction == 2:\n self._direction = -1\n elif self._direction == -2:\n self._direction = 1", "title": "" }, { "docid": "29b721f45f8b16749cee62f63de26db5", "score": "0.63975936", "text": "def indent_right(self):\n self._indent_num += 2", "title": "" }, { "docid": "c361c8fa89b710e8228edc1df27edea2", "score": "0.63844", "text": "def right(self):\n\n self._robot_check()\n self.hRobotEngine.rot('Right')", "title": "" }, { "docid": "10db3fcd9109bf2987b31a10a7ad1a36", "score": "0.6371906", "text": "def move_right(self):\n self._time += 1\n if self._position < len(self._list) - 1:\n self._position += 1\n return True\n else:\n return False", "title": "" }, { "docid": "6f38a970a46fa00141531fe2bd3390c9", "score": "0.6366915", "text": "def keyboard_right(self, _):\n self.state = g.update_keyboard(g.KeyPress.RIGHT, self.state)", "title": "" }, { "docid": "6fe34597ab11f596e3451cadd6c5d9a5", "score": "0.6365901", "text": "def turn_around():\n\tturn_right()\n\tturn_right()", "title": "" }, { "docid": "2a86b8fbe811428a2a432301541ef65d", "score": "0.63468903", "text": "def move_player_right(state):\n state[GameConstants.MOVE] = min(state[GameConstants.MOVE] + 1, 1)", "title": "" }, { "docid": "78d2d53037254123a1ef6da6b28a1d95", "score": "0.6317262", "text": "def right(self):\n self.set_motor_dps(self.MOTOR_LEFT, self.get_speed())\n self.set_motor_dps(self.MOTOR_RIGHT, 0)", "title": "" }, { "docid": "759e5787551ab335f93548ecd20f69d9", "score": "0.6305845", "text": "def push_right(self):\n excepted_turn_speed = self._turn + self._joystick_bottom_radius*self.boost\n max_turn_speed = self._joystick_bottom_radius*self._speed_limit\n self._turn = min(excepted_turn_speed, max_turn_speed)\n self._actual_joystick_position += Coordinates(self._turn, self._forward)\n self.callback(self._actual_joystick_position)", "title": "" }, { "docid": "af49b7e2c6f8fd56f6faa8a811b23cd9", "score": "0.62648904", "text": "def turtle_right():\n\tx = turtle.xcor()\n\tx += 50\n\tturtle.setx(x)", "title": "" }, { "docid": "94ea3b57c85006405fc2ef352a709f1c", "score": "0.62537396", "text": "def follow_right_side(self, direction):\n\n if self.look(direction+1)[0] == -1: # if can't go right\n\n if self.look(direction + 3)[0] == 0: # if can go left\n direction = direction + 3 # go left\n elif not self.look(direction)[0] == 0:\n direction = direction + 2 # go back\n else:\n direction = direction + 1 # go right\n\n return self.re_direct(direction)", "title": "" }, { "docid": "f42d32fe3469b3519d4147113d363b2f", "score": "0.6248863", "text": "def set_right(self, right=True):\r\n self.right = right", "title": "" }, { "docid": "a54cc189dadc7d8d6662a27baf3221f3", "score": "0.62386817", "text": "def move_right(temp):\r\n if pos[0]<475:\r\n screen.move(item, 20,0)", "title": "" }, { "docid": "a54cc189dadc7d8d6662a27baf3221f3", "score": "0.62386817", "text": "def move_right(temp):\r\n if pos[0]<475:\r\n screen.move(item, 20,0)", "title": "" }, { "docid": "424281e83bc3ab831b143142c7af7449", "score": "0.62359005", "text": "def move_left(self):\n self.x -= 1", "title": "" }, { "docid": "6ff31c69586cee06ae4b3e046ff80aa2", "score": "0.6217734", "text": "def rotate_right(self):\n old = self.current_variant_grid.copy()\n self.current_variant_grid = 
[]\n\n for col in range(10):\n new_row = ''\n for row in old[::-1]: # Go through rows backwards.\n new_row += row[col]\n self.current_variant_grid.append(new_row)\n\n self.current_variant += 'R'\n self.snapshot_variant()", "title": "" }, { "docid": "82783ba063b715ce0113c3acf4a20687", "score": "0.61786914", "text": "def turn_left_right(self, right=True):\n if self.head_direct['x'] is 0:\n t_dir = self.head_direct['dir']\n if right:\n self.head_direct['x'] = self.speed\n self.head_direct['dir'] = 'e'\n else:\n self.head_direct['x'] = -self.speed\n self.head_direct['dir'] = 'w'\n self.head_direct['chng'] = t_dir+self.head_direct['dir'] \n self.head_direct['y'] = 0\n self.change = True", "title": "" }, { "docid": "2bd86e818eac181de1a055ff49cbdbd8", "score": "0.6168088", "text": "def right(self, angle):\n self.car.turn(RoboHatEduCar.CMD_TURN_RIGHT, angle=angle)", "title": "" }, { "docid": "96a77de9a07da9646fe8eb60dc7e7cc5", "score": "0.6162483", "text": "def do_right_turn(robot_name):\n global current_direction_index\n\n current_direction_index += 1\n turtleboi.right(90)\n if current_direction_index > 3:\n current_direction_index = 0\n\n return True, ' > '+robot_name+' turned right.'", "title": "" }, { "docid": "947c6d013a6239137bd607b4daf076e1", "score": "0.61555636", "text": "def move_right_to_position(self, x):\n while self._center_block.x != x:\n self.move_right()", "title": "" }, { "docid": "0bbc5211490608ce07bcf3df89593d68", "score": "0.6153763", "text": "def right(self):\n return Direction((self.value + 1) % 4)", "title": "" }, { "docid": "fe7dfef740518dfe1e2acab98b91aad5", "score": "0.6148186", "text": "def move_player_right_stop(state):\n state[GameConstants.MOVE] -= 1", "title": "" }, { "docid": "d6184b8779df2dd90bfcf57e83c54226", "score": "0.61448944", "text": "def rotateRight(self):\n\t\tnewBoard = [[row[i] for row in self._boardArray] for i in range(self._size)]\n\t\tself._boardArray = newBoard", "title": "" }, { "docid": "938968d4b30446bb8b3b2cca91c76773", "score": "0.61303926", "text": "def setMovingRight(self, value):\n self.__moving_right = value", "title": "" }, { "docid": "a21ca367a9120f7fee760ea08626fd23", "score": "0.6128651", "text": "def _right(self,i):\n return 2 * i + 2", "title": "" }, { "docid": "386b1eb0ff285fa230adbcc9f3b477ea", "score": "0.6119397", "text": "def set_move_right(self, b):\n self.__move_right = b", "title": "" }, { "docid": "e7e5552ceac6c36188e36c510c4a80c4", "score": "0.61143905", "text": "def setRight(self, r):\n self.__right = r", "title": "" }, { "docid": "569e3ee45d769811fbbe8b2cdf436166", "score": "0.61020255", "text": "def right(self, i):\n return 2 * i + 2", "title": "" }, { "docid": "06b2689c3ea26f35ea6dbe2a58956efe", "score": "0.6097891", "text": "def OnRightEvent(self, event):\n self.click = 'RIGHT'\n self.ProcessClick(event)", "title": "" }, { "docid": "68f33c67435e74412d6c1e1a5101f0ac", "score": "0.6093975", "text": "def right(self, right):\n\n self._right = right", "title": "" }, { "docid": "68f33c67435e74412d6c1e1a5101f0ac", "score": "0.6093975", "text": "def right(self, right):\n\n self._right = right", "title": "" }, { "docid": "6c0a48360c694ff04de55549522f2bbc", "score": "0.60925967", "text": "def move_right(self, board, direction='d'):\n orignal_x = self.x_pos\n oendx = self.endx\n orignal_y = self.y_pos\n oendy = self.endy\n self.y_pos += 1\n self.endy += 1\n return_value_move = move_maadi(self, board, direction)\n if return_value_move == 1:\n board.bufferboard[orignal_x:oendx, orignal_y:oendy] = \"\"\n 
board.bufferboard[self.x_pos:self.endx, self.y_pos:self.endy] = self.struct\n return True\n elif return_value_move == 0:\n self.x_pos = orignal_x\n self.endx = oendx\n self.y_pos = orignal_y\n self.endy = oendy\n return False", "title": "" }, { "docid": "0952dd58425ec7c0bca27fde2131a8ff", "score": "0.6087421", "text": "def right(self):\n return self.place(self.location, (self.facing + 0.5) % 2, self.table)", "title": "" }, { "docid": "127a18beb9b6833ccc59e9d05e618455", "score": "0.60837144", "text": "def move_right(self, step_size):\n # begin homework 2 : problem 2\n # Flip the coin...\n rand = np.random.uniform(0.0, 1.0) # Get random float between 0-1\n # Determine whether to move left, right, or stay put\n if rand < self.prob_move_right_if_right:\n# print('Move right')\n step_size = step_size\n elif rand < (self.prob_move_left_if_right+self.prob_move_right_if_right): # Check if should really move left\n# print('Move left')\n step_size = -step_size\n else:\n# print('Stay put')\n step_size = 0\n # end homework 2 : problem 2\n return self._move_(step_size)", "title": "" }, { "docid": "4e3468b6ec616593a838455c2091a47b", "score": "0.60663515", "text": "def right(piece, size=1):\n\t\treturn Move(piece, abs(size), 0)", "title": "" }, { "docid": "16a4385439c0c45d7db32cd987931ed3", "score": "0.6044891", "text": "def move_all_right(self, start_col, how_much = 1):\n for shift_num in range(0, self.width - start_col - how_much ):\n for row_num in range(0, self.height):\n self.move_elem( [row_num, self.width - 1 - shift_num - how_much] , [row_num, self.width - 1 - shift_num])", "title": "" }, { "docid": "d03bf9b331a09788e4e16d2597e67bcd", "score": "0.60428387", "text": "def set_right(self, right):\n self.right = right\n self.update()", "title": "" }, { "docid": "d03bf9b331a09788e4e16d2597e67bcd", "score": "0.60428387", "text": "def set_right(self, right):\n self.right = right\n self.update()", "title": "" }, { "docid": "58ef0b26d046f777757ed4a26f1f2920", "score": "0.60296077", "text": "def shift_right(self):\n self.left.append(self.head)\n if self.right:\n self.head = self.right.pop()\n else:\n self.head = self.blank_symbol", "title": "" }, { "docid": "3ce59e538e2085bc686cdb328451cabe", "score": "0.6025403", "text": "def right(self):\n return self.translate(1, 0)", "title": "" }, { "docid": "d0dcc8740211ed54050b3e27d7eae5b6", "score": "0.6023065", "text": "def right(self):\n self.lband[0:0] = self.rband[0:1] # insert first element of r to l\n self.rband[0:1] = [] # remove first element of r\n if self.rband == []:\n self.rband = [0]", "title": "" }, { "docid": "2d4a2d2d52b3369a3c5cb35459fc48be", "score": "0.6022119", "text": "def RightActions(self):\n if self.direction == 'U':\n self.actions += ['R', 'F']\n elif self.direction == 'L':\n self.actions += ['R', 'R', 'F']\n elif self.direction == 'D':\n self.actions += ['L', 'F']\n else:\n self.actions.append('F')", "title": "" }, { "docid": "f4554cbac68699825c74445b2730a4ea", "score": "0.6017823", "text": "def _right(self):\n self._band.right()", "title": "" }, { "docid": "6a5875835d4a5481fa3f5c125163cb14", "score": "0.59816206", "text": "def right(self, right: List[RightElement]):\n\n self._right = right", "title": "" }, { "docid": "3abc7b40ecd2c7b20a6c291c85a9546b", "score": "0.59738564", "text": "def move_right(self):\n copy = self.config[:]\n arg = copy.index(0)\n if (arg+1)%3 == 0:\n return None\n copy[arg] = copy[arg+1]\n copy[arg+1] = 0\n return PuzzleState(copy,self.n,parent = self,action=\"Right\",cost = self.cost+1)", "title": "" }, { "docid": 
"c28ad6b8732a92f7d08a22e090fc7947", "score": "0.5971885", "text": "def turn_around():\n for i in range(2):\n turn_left()", "title": "" }, { "docid": "32f44f9965eae252e4935966b466eced", "score": "0.59695727", "text": "def sh_right(self):\n if self.pointer == len(self.buf)-1:\n self.buf.append(0)\n self.pointer += 1", "title": "" }, { "docid": "1c0057cfa247bd991827d5bce2a04c27", "score": "0.5965794", "text": "def set_right(self, right):\n self.right = right", "title": "" }, { "docid": "a50a8d225af790037fecdab3adc6d262", "score": "0.59386617", "text": "def rotate_right(self):\n self.state = np.rot90(self.state, 3)\n return self", "title": "" }, { "docid": "19a619d522415a4b1da396efea9f1aa6", "score": "0.5929927", "text": "def turnRight(self, angle=None):\n self.parent.turnRight(angle)", "title": "" } ]
71f970be8e50de1a97900e71ccd59979
Specifies the clear text password used to encrypt traffic. This field will not be displayed.
[ { "docid": "08db6c2f6f5e31900a83b8ab61939029", "score": "0.6029469", "text": "def privacy_password(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"privacy_password\")", "title": "" } ]
[ { "docid": "8175e7a43331357572d476380ddec002", "score": "0.7123115", "text": "def passwordInClear(self):", "title": "" }, { "docid": "0dacc1dd60134b989efcff3f93d26faa", "score": "0.69737387", "text": "def password(self):\n raise AttributeError('password: write-only field')", "title": "" }, { "docid": "9c5629f9e0fcad538c12ddfc799a02be", "score": "0.6960299", "text": "def password(self, value):\n self._password = value", "title": "" }, { "docid": "4b9baffd9451afbbaabb2830e4decc1c", "score": "0.6940211", "text": "def get_cleartext_password(self):\n if self.umkp:\n return self.umkp.get_cleartext_password()", "title": "" }, { "docid": "47586b03b3c7fde85048ae3d27f5a439", "score": "0.68972176", "text": "def password(self, value):\n self._auth_password = value", "title": "" }, { "docid": "a4dc1c884ec6e0d2b28b6dd626368e49", "score": "0.68680084", "text": "def password(self):\n del self._password", "title": "" }, { "docid": "0517788eb27ae947d53043fb5fcf3273", "score": "0.6854975", "text": "def password(self):\n raise AttributeError('la contrasena no es valida.')", "title": "" }, { "docid": "d324132960eeb88f2296651ede8a86c7", "score": "0.6772423", "text": "def password(self):\n raise AttributeError(\"For security purposes, 'password' is not a readable attribute.\")", "title": "" }, { "docid": "e43a352067a26710a23e139fa84efbd8", "score": "0.6769146", "text": "def PASSWORD(self):\n return \"PASSWORD\"", "title": "" }, { "docid": "dc63dcc84b26a71d79a5d9a7956bae8a", "score": "0.6766925", "text": "def show_pwd_encrypt(self):\r\n if self.ent_encrypt_pwd[\"show\"] == \"*\":\r\n self.ent_encrypt_pwd[\"show\"] = \"\"\r\n self.ent_encrypt_confirm[\"show\"] = \"\"\r\n else:\r\n self.ent_encrypt_pwd[\"show\"] = \"*\"\r\n self.ent_encrypt_confirm[\"show\"] = \"*\"", "title": "" }, { "docid": "dcb920963e64c088d7cef666a2eeec7c", "score": "0.67080224", "text": "def set_unusable_password(self):\n\t\tself._password = make_password(None)", "title": "" }, { "docid": "e1ca76ece58a4bf305535aee50a00999", "score": "0.66501784", "text": "def password(self) -> str:\n return pulumi.get(self, \"password\")", "title": "" }, { "docid": "c6cc0cdf3a5f9ebeeb93fe02c13c9c4e", "score": "0.6592658", "text": "def password (self): \n return self.cfg['password']", "title": "" }, { "docid": "fc5c36efb52ed3129386db42a913d0aa", "score": "0.6578044", "text": "def set_Password(self, value):\n InputSet._set_input(self, 'Password', value)", "title": "" }, { "docid": "76ed14dfcc14b7ef6ad411e4175e58bb", "score": "0.65679425", "text": "def password(self) -> Optional[str]:\n return pulumi.get(self, \"password\")", "title": "" }, { "docid": "76ed14dfcc14b7ef6ad411e4175e58bb", "score": "0.65679425", "text": "def password(self) -> Optional[str]:\n return pulumi.get(self, \"password\")", "title": "" }, { "docid": "76ed14dfcc14b7ef6ad411e4175e58bb", "score": "0.65679425", "text": "def password(self) -> Optional[str]:\n return pulumi.get(self, \"password\")", "title": "" }, { "docid": "76ed14dfcc14b7ef6ad411e4175e58bb", "score": "0.65679425", "text": "def password(self) -> Optional[str]:\n return pulumi.get(self, \"password\")", "title": "" }, { "docid": "76ed14dfcc14b7ef6ad411e4175e58bb", "score": "0.65679425", "text": "def password(self) -> Optional[str]:\n return pulumi.get(self, \"password\")", "title": "" }, { "docid": "76ed14dfcc14b7ef6ad411e4175e58bb", "score": "0.65679425", "text": "def password(self) -> Optional[str]:\n return pulumi.get(self, \"password\")", "title": "" }, { "docid": "445bb3c353e689747ce70a12f99381bc", "score": "0.6555305", 
"text": "def password(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"password\")", "title": "" }, { "docid": "445bb3c353e689747ce70a12f99381bc", "score": "0.6555305", "text": "def password(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"password\")", "title": "" }, { "docid": "445bb3c353e689747ce70a12f99381bc", "score": "0.6555305", "text": "def password(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"password\")", "title": "" }, { "docid": "445bb3c353e689747ce70a12f99381bc", "score": "0.6555305", "text": "def password(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"password\")", "title": "" }, { "docid": "445bb3c353e689747ce70a12f99381bc", "score": "0.6555305", "text": "def password(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"password\")", "title": "" }, { "docid": "0ef4b55353ce362b779e8e5a6209c716", "score": "0.655324", "text": "def password(self, password):\n\n self._password = password", "title": "" }, { "docid": "0ef4b55353ce362b779e8e5a6209c716", "score": "0.655324", "text": "def password(self, password):\n\n self._password = password", "title": "" }, { "docid": "0ef4b55353ce362b779e8e5a6209c716", "score": "0.655324", "text": "def password(self, password):\n\n self._password = password", "title": "" }, { "docid": "0ef4b55353ce362b779e8e5a6209c716", "score": "0.655324", "text": "def password(self, password):\n\n self._password = password", "title": "" }, { "docid": "0ef4b55353ce362b779e8e5a6209c716", "score": "0.655324", "text": "def password(self, password):\n\n self._password = password", "title": "" }, { "docid": "323a7fc577f10a4898e76c85c4d5ea17", "score": "0.6546978", "text": "def password(self) -> str:\n return STCObject(self.proto_handle)['Password']", "title": "" }, { "docid": "bee02f2039fb7f918d55b51c30f2f8da", "score": "0.65108085", "text": "def password(self):\n return self.__password", "title": "" }, { "docid": "bee02f2039fb7f918d55b51c30f2f8da", "score": "0.65108085", "text": "def password(self):\n return self.__password", "title": "" }, { "docid": "d2626513d592e0c82d0334b874fcb39f", "score": "0.65080214", "text": "def PASSWORD(self):\n return TYPE.NEO4J + SETTING.DELIMITER + SETTING.PASSWORD", "title": "" }, { "docid": "8ba4de5ff7d9909453b9efa9501fbeca", "score": "0.65079844", "text": "def set_password(self, password):\n\t\tself.password = encrypt_password(password)", "title": "" }, { "docid": "2dec8374fa60f1aa05926b758aad27bf", "score": "0.6503956", "text": "def setPassword(self, p):\n self.__password = p", "title": "" }, { "docid": "ecaa271f3f58228753c8f0a5ef293f49", "score": "0.6494732", "text": "def setpassword(self, pwd):\r\n self.pwd = pwd", "title": "" }, { "docid": "6ea16c7b2358cc2cf694ad4920f60665", "score": "0.6491275", "text": "def password(self):\n return self._password", "title": "" }, { "docid": "6ea16c7b2358cc2cf694ad4920f60665", "score": "0.6491275", "text": "def password(self):\n return self._password", "title": "" }, { "docid": "6ea16c7b2358cc2cf694ad4920f60665", "score": "0.6491275", "text": "def password(self):\n return self._password", "title": "" }, { "docid": "6ea16c7b2358cc2cf694ad4920f60665", "score": "0.6491275", "text": "def password(self):\n return self._password", "title": "" }, { "docid": "1d85f61bde4a0038c134e2fe5411f1bc", "score": "0.6488742", "text": "def password(self):\n raise AttributeError('password is not a readable attribute.')", "title": "" }, { "docid": "1d85f61bde4a0038c134e2fe5411f1bc", "score": "0.6488742", "text": "def password(self):\n raise AttributeError('password is not a 
readable attribute.')", "title": "" }, { "docid": "1d85f61bde4a0038c134e2fe5411f1bc", "score": "0.6488742", "text": "def password(self):\n raise AttributeError('password is not a readable attribute.')", "title": "" }, { "docid": "1d85f61bde4a0038c134e2fe5411f1bc", "score": "0.6488742", "text": "def password(self):\n raise AttributeError('password is not a readable attribute.')", "title": "" }, { "docid": "1d85f61bde4a0038c134e2fe5411f1bc", "score": "0.6488742", "text": "def password(self):\n raise AttributeError('password is not a readable attribute.')", "title": "" }, { "docid": "1d85f61bde4a0038c134e2fe5411f1bc", "score": "0.6488742", "text": "def password(self):\n raise AttributeError('password is not a readable attribute.')", "title": "" }, { "docid": "7c4f232d692d4d256e3969ba56be43eb", "score": "0.6462508", "text": "def password(self):\n raise AttributeError(\"Password is not a readable attribute\")", "title": "" }, { "docid": "61b605f0d2948d93a7a928521ff2b963", "score": "0.64580625", "text": "def password(self):\n raise AttributeError('password is not a readable attribute')", "title": "" }, { "docid": "90ad07e2cf05c2672cc371e0f19ccae6", "score": "0.64372694", "text": "def set_password(self, password):\n self.password = password", "title": "" }, { "docid": "e3f7af3f79279da5481ea2fb1e719ef0", "score": "0.64292514", "text": "def password(self):\n\t\traise AttributeError('password is not a readable attribute!')", "title": "" }, { "docid": "128bbfd919ccf95b2e052c0f1981d447", "score": "0.6421736", "text": "def password(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"password\")", "title": "" }, { "docid": "11b069bb3ddb50a205910d92f2d99d9c", "score": "0.6420136", "text": "def password(self):\n\n return self._password", "title": "" }, { "docid": "81f7209463c890fa5f23dfb9a7b8aadf", "score": "0.6414876", "text": "def password(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"password\")", "title": "" }, { "docid": "81f7209463c890fa5f23dfb9a7b8aadf", "score": "0.6414876", "text": "def password(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"password\")", "title": "" }, { "docid": "81f7209463c890fa5f23dfb9a7b8aadf", "score": "0.6414876", "text": "def password(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"password\")", "title": "" }, { "docid": "57d360238bddf3eafd97ea805e1a05e7", "score": "0.64063025", "text": "def password(self):\r\n raise AttributeError('password is not a readable attribute.')", "title": "" }, { "docid": "e28714a55ae3032a9de3160b1fd8a99b", "score": "0.63976896", "text": "def show_password(ctx, new_value):\n ctx.obj.show_password = new_value\n ctx.obj.update_conf(CONF_PATH)\n ok_msg(\"Conf updated\")", "title": "" }, { "docid": "c6fb9c90c501790fc0a2bf58a2f23226", "score": "0.63892806", "text": "def set_Password(self, value):\n super(GetBookmarkInputSet, self)._set_input('Password', value)", "title": "" }, { "docid": "7927a56e129e7e30763ce8562c963c98", "score": "0.636576", "text": "def password(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"password\")", "title": "" }, { "docid": "7927a56e129e7e30763ce8562c963c98", "score": "0.636576", "text": "def password(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"password\")", "title": "" }, { "docid": "7927a56e129e7e30763ce8562c963c98", "score": "0.636576", "text": "def password(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"password\")", "title": "" }, { "docid": "7927a56e129e7e30763ce8562c963c98", "score": 
"0.636576", "text": "def password(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"password\")", "title": "" }, { "docid": "7927a56e129e7e30763ce8562c963c98", "score": "0.636576", "text": "def password(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"password\")", "title": "" }, { "docid": "7927a56e129e7e30763ce8562c963c98", "score": "0.636576", "text": "def password(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"password\")", "title": "" }, { "docid": "1a17ab4b015edc84a8a0acda579de480", "score": "0.63433045", "text": "def password(self, value):\n\t\tself._password = value\n\t\tself.passwordHash = hashlib.sha224(value).hexdigest()", "title": "" }, { "docid": "911c3786396a0cba35f1009812c5f41d", "score": "0.6325003", "text": "def show_pwd_decrypt(self):\r\n if self.ent_decrypt_pwd[\"show\"] == \"*\":\r\n self.ent_decrypt_pwd[\"show\"] = \"\"\r\n else:\r\n self.ent_decrypt_pwd[\"show\"] = \"*\"", "title": "" }, { "docid": "a2012becbcd9897edafaf6b115e4c066", "score": "0.6324489", "text": "def password(self) -> str:\n\n return self.__decoder(self.__password)", "title": "" }, { "docid": "e8f861cfc2c39baf8c79443100a1b71d", "score": "0.6287318", "text": "def setModemPass(self, string):\n string = self.comms.setModemModePassword(string, unitCode=self.device_id)\n self.modem_pass = string\n return string", "title": "" }, { "docid": "07cb2ff1f0b506ba62a6c6c052ebeb75", "score": "0.62814856", "text": "def Password(self):\n if self.force_auto_sync:\n self.get('Password')\n return self._Password", "title": "" }, { "docid": "16ab615bd09f3cbf1c92049502438e15", "score": "0.62741816", "text": "def remove_password(self):\n self.password.text = \"\"\n if not self.isUserAdmin():\n self.runAsAdmin()\n else:\n username = self.get_username()\n password = \"\"\n self.set_password(username, password)", "title": "" }, { "docid": "9fdf83a6257aae4494023cf09d2b7cfd", "score": "0.61588955", "text": "def get_password(self):\r\n return self.password", "title": "" }, { "docid": "f2ce94a8daf875118f366671242dbd25", "score": "0.6132136", "text": "def keyword_preference_window_remove_password(self):\n self.Preferences_window_clear_password()\n self.Preferences_window_clear_confirm_password()", "title": "" }, { "docid": "15211e583e03390fb9087da0413fde75", "score": "0.6121864", "text": "def getPassword():", "title": "" }, { "docid": "cb137d00eb901a20fd39d10675d88832", "score": "0.6116795", "text": "def the_password(text: str) -> \"Enter\":\n return Enter.the_secret(text)", "title": "" }, { "docid": "d858021e608ad55f733b063849eda8b3", "score": "0.61085296", "text": "def password(self, new_password: str) -> NoReturn:\n\n self.__password = self.__encoder(new_password)", "title": "" }, { "docid": "b0506a2a80685c0d3e1c690512aa71e4", "score": "0.61043006", "text": "def set_password(self, ppass):\n self.pw = crypt.crypt(ppass, crypt.mksalt())", "title": "" }, { "docid": "966eac0b58414ebe1a96695926d031b5", "score": "0.60975856", "text": "def password_plain(self, password_plain):\n self.password = generate_password_hash(password_plain)", "title": "" }, { "docid": "aaa3965330baf27e3f93406cd0b241d2", "score": "0.60685927", "text": "def _set_password(self, password):\n\t\tself._password = self.__encrypt_password(password)", "title": "" }, { "docid": "680d8769794d42cd5e39ffd5ebf9beb8", "score": "0.6066814", "text": "def get_password(self):\n\n return str(self.password)", "title": "" }, { "docid": "ef61a02e0b855bffe61520e89ab79bdd", "score": "0.60601413", "text": "def password(self, 
password):\n if self._configuration.client_side_validation and password is None:\n raise ValueError(\"Invalid value for `password`, must not be `None`\") # noqa: E501\n\n self._password = password", "title": "" }, { "docid": "65b9eedc6273d545c41d86362fe22a3b", "score": "0.60594463", "text": "def non_autoscale_password(self):\n return self.get_raw('non_autoscale_password')", "title": "" }, { "docid": "be4cc0296825c714d079bdcaa5cf2213", "score": "0.60508627", "text": "def set_haproxy_stat_password(self):\n if not self.get_state('haproxy.stat.password'):\n password = ''.join([\n random.choice(string.ascii_letters + string.digits)\n for n in range(32)])\n self.set_state('haproxy.stat.password', password)", "title": "" }, { "docid": "c7da1288757d71d93a107440196848b4", "score": "0.6035899", "text": "def create_password(self):\n\n all_chars = LETTERS\n if not self._no_digits:\n all_chars += DIGITS\n if not self._no_symbols:\n all_chars += SYMBOLS\n\n self._password = ''.join(secrets.choice(all_chars)\n for i in range(self._length))\n return self._password", "title": "" }, { "docid": "87651f8acf71d1b9d350cd0165b8a3e4", "score": "0.60280085", "text": "def get_args_secure(cls):\n return [\"password\"]", "title": "" }, { "docid": "87651f8acf71d1b9d350cd0165b8a3e4", "score": "0.60280085", "text": "def get_args_secure(cls):\n return [\"password\"]", "title": "" }, { "docid": "4e990a76ed27ef988f7faf47efac6839", "score": "0.6025506", "text": "def change_password(self):\n self.dialbox = dialogbox.Dialog(self.gui, 'change_password')", "title": "" }, { "docid": "a24ba60cb287944380866339735d8bce", "score": "0.6016152", "text": "def admin_password(self):\n return self._configFile.get(Config.PASS, None)", "title": "" }, { "docid": "fdce9373dc9d6ea616f91fe464ffdb7b", "score": "0.60037684", "text": "def db_pass():\n if not env.db_pass:\n env.db_pass = getpass(\"Enter the database password: \")\n return env.db_pass", "title": "" }, { "docid": "44c585389c128c6e1dee4bdf55e38c86", "score": "0.59936064", "text": "def change_password(self):\n if self.password.text:\n if not self.isUserAdmin():\n self.runAsAdmin()\n else:\n username = self.get_username()\n password = self.password.text\n self.set_password(username, password)\n self.password.text = \"\"", "title": "" }, { "docid": "335994194983329ad42f22ebd6449498", "score": "0.59891045", "text": "def catalog_admin_password(self) -> Optional[pulumi.Input['SecureStringArgs']]:\n return pulumi.get(self, \"catalog_admin_password\")", "title": "" }, { "docid": "bb2a3806ae2dfda6d7a78db39874b927", "score": "0.5978722", "text": "def getPassword(self): \n return self.__password", "title": "" }, { "docid": "1f9594c00c391c61e72e9e273bdee8b5", "score": "0.5974075", "text": "def set_password(self, raw_password):\r\n self.password = make_password(raw_password)", "title": "" }, { "docid": "9bff18ec5d640626a704b2eed29ea740", "score": "0.59662074", "text": "def password(self, password):\n\t\tself.password_hash = generate_password_hash(password)", "title": "" }, { "docid": "c31987115530ecab8bbf8ab2997015c2", "score": "0.59635246", "text": "def Password(request):\n schema = schemaish.Structure()\n schema.add('Password', schemaish.String())\n\n form = formish.Form(schema, 'form')\n form['Password'].widget = formish.Password()\n return form", "title": "" }, { "docid": "ad393f94a243976f570e15be7352472f", "score": "0.5963103", "text": "def test_RemovesPasswordFromConnectionSettings(self):\n assert 'password' not in self.rpc.config['connection_settings']", "title": "" }, { "docid": 
"0a22efd1b4b217a87f2dbada83c3a4e7", "score": "0.5957486", "text": "def get_password(self):\r\n\t\treturn self.__password", "title": "" }, { "docid": "7fb7ffd2d0d5ffcb6e4ce1c850e5ebab", "score": "0.5954629", "text": "def set_pw(cls, pw):\n return pw", "title": "" }, { "docid": "e0476ff3c847cc6dee1b8f1e3c0d800c", "score": "0.5954385", "text": "def send_password(self, password=None):\n _pw = password or self.password\n data_set = messages.DataSet(value=_pw)\n cmd = messages.CommandMessage(command=\"P\", command_type=\"1\", data_set=data_set)\n logger.info(\"Sending password to meter\")\n self.transport.send(cmd.to_bytes())", "title": "" } ]
ffd0299e38b55062b10879e459715b0a
generates two qubit identity equal circuit with given length
[ { "docid": "12e2084757eca5a7228e3ce921786841", "score": "0.7121084", "text": "def two_qubit_circuit(length: int, qubit_one: int, qubit_two: int):\n\n p = Program()\n\n for j in range(int(length/2)):\n theta = 2 * np.pi * random.random()\n gate_list = [RZ(theta, qubit_one), RX(np.pi / 2, qubit_one), RX(- np.pi / 2, qubit_one),\n CZ(qubit_one, qubit_two),\n RZ(theta, qubit_two), RX(np.pi / 2, qubit_two), RX(- np.pi / 2, qubit_two), CZ(qubit_two, qubit_one)]\n new_gate = random.choice(gate_list)\n p.inst(new_gate)\n\n p += p.dagger()\n\n return Program('PRAGMA PRESERVE_BLOCK') + p + Program('PRAGMA END_PRESERVE_BLOCK')", "title": "" } ]
[ { "docid": "3bbdc1b872fee8bfda6527b96be9eb4e", "score": "0.6130477", "text": "def test_two_qubit_gates_with_symbols(gate: cirq.Gate, expected_length: int):\n q0 = cirq.GridQubit(5, 3)\n q1 = cirq.GridQubit(5, 4)\n original_circuit = cirq.Circuit(gate(q0, q1))\n converted_circuit = original_circuit.copy()\n cgoc.ConvertToSqrtIswapGates().optimize_circuit(converted_circuit)\n assert len(converted_circuit) <= expected_length\n\n # Check if unitaries are the same\n for val in np.linspace(0, 2 * np.pi, 12):\n assert _unitaries_allclose(\n cirq.resolve_parameters(original_circuit, {'t': val}),\n cirq.resolve_parameters(converted_circuit, {'t': val}))", "title": "" }, { "docid": "323d4e692dd9d99e1d7cfeb0ac233584", "score": "0.5952881", "text": "def test_multi_cregs(self):\n\n # ┌───┐ ░ ┌─┐\n # qr_0: ──■────────────┤ X ├─░─┤M├─────────\n # ┌─┴─┐ ┌───┐└─┬─┘ ░ └╥┘┌─┐\n # qr_1: ┤ X ├──■──┤ H ├──■───░──╫─┤M├──────\n # └───┘┌─┴─┐└───┘ ░ ║ └╥┘┌─┐\n # qr_2: ──■──┤ X ├───────────░──╫──╫─┤M├───\n # ┌─┴─┐└───┘ ░ ║ ║ └╥┘┌─┐\n # qr_3: ┤ X ├────────────────░──╫──╫──╫─┤M├\n # └───┘ ░ ║ ║ ║ └╥┘\n # c: 2/════════════════════════╩══╬══╩══╬═\n # 0 ║ 1 ║\n # ║ ║\n # d: 2/═══════════════════════════╩═════╩═\n # 0 1\n qr = QuantumRegister(4, \"qr\")\n cr1 = ClassicalRegister(2, \"c\")\n cr2 = ClassicalRegister(2, \"d\")\n circuit = QuantumCircuit(qr, cr1, cr2)\n circuit.cx(qr[0], qr[1])\n circuit.cx(qr[2], qr[3])\n circuit.cx(qr[1], qr[2])\n circuit.h(qr[1])\n circuit.cx(qr[1], qr[0])\n circuit.barrier(qr)\n circuit.measure(qr[0], cr1[0])\n circuit.measure(qr[1], cr2[0])\n circuit.measure(qr[2], cr1[1])\n circuit.measure(qr[3], cr2[1])\n\n coupling = CouplingMap([[0, 1], [0, 2], [2, 3]]) # linear [1, 0, 2, 3]\n property_set = {}\n with self.assertWarnsRegex(DeprecationWarning, r\"^The class.*is deprecated\"):\n actual = BIPMapping(coupling, objective=\"depth\")(circuit, property_set)\n self.assertEqual(5, actual.depth())\n\n CheckMap(coupling)(actual, property_set)\n self.assertTrue(property_set[\"is_swap_mapped\"])", "title": "" }, { "docid": "24bdea335c37bba2886a805f49781e64", "score": "0.5805034", "text": "def _generate_circuit_sequence(\n repeated_circuit: circuits.Circuit,\n different_circuit: circuits.Circuit,\n length: int,\n position: int,\n):\n if position >= length:\n raise ValueError(f\"Position {position} should be < {length}\")\n\n return circuits.Circuit(\n list(\n chain.from_iterable(\n [\n (\n repeated_circuit if i != position else different_circuit\n ).operations\n for i in range(length)\n ]\n )\n )\n )", "title": "" }, { "docid": "fa7f56dbb30c6bdaf883cbce48f124de", "score": "0.57560784", "text": "def test_two_qubit_gates(gate: cirq.Gate, expected_length: int):\n q0 = cirq.GridQubit(5, 3)\n q1 = cirq.GridQubit(5, 4)\n original_circuit = cirq.Circuit(gate(q0, q1))\n converted_circuit = original_circuit.copy()\n cgoc.ConvertToSqrtIswapGates().optimize_circuit(converted_circuit)\n cig.SQRT_ISWAP_GATESET.serialize(converted_circuit)\n assert len(converted_circuit) <= expected_length\n assert _unitaries_allclose(original_circuit, converted_circuit)", "title": "" }, { "docid": "2a79052d835fdfd4a228aebfa73656ce", "score": "0.5681587", "text": "def test_two_qubit_synthesis_to_directional_cx_multiple_registers(self):\n # TODO: should make check more explicit e.g. 
explicitly set gate\n # direction in test instead of using specific fake backend\n backend = FakeVigo()\n conf = backend.configuration()\n qr0 = QuantumRegister(1)\n qr1 = QuantumRegister(1)\n coupling_map = CouplingMap(conf.coupling_map)\n triv_layout_pass = TrivialLayout(coupling_map)\n qc = QuantumCircuit(qr0, qr1)\n qc.unitary(random_unitary(4, seed=12), [qr0[0], qr1[0]])\n unisynth_pass = UnitarySynthesis(\n basis_gates=conf.basis_gates,\n coupling_map=None,\n backend_props=backend.properties(),\n pulse_optimize=True,\n natural_direction=False,\n )\n pm = PassManager([triv_layout_pass, unisynth_pass])\n qc_out = pm.run(qc)\n\n unisynth_pass_nat = UnitarySynthesis(\n basis_gates=conf.basis_gates,\n coupling_map=None,\n backend_props=backend.properties(),\n pulse_optimize=True,\n natural_direction=True,\n )\n\n pm_nat = PassManager([triv_layout_pass, unisynth_pass_nat])\n qc_out_nat = pm_nat.run(qc)\n self.assertEqual(Operator(qc), Operator(qc_out))\n self.assertEqual(Operator(qc), Operator(qc_out_nat))", "title": "" }, { "docid": "f6cc3e5608df757c3201d742ce1693eb", "score": "0.55983335", "text": "def add_singleQ_based_twoQ_clifford(index, gate_seq_1, gate_seq_2, **kwargs):\n index_1 = index % 24 # randomly sample from single qubit cliffords (24)\n # randomly sample from single qubit cliffords (24)\n index_2 = (index // 24) % 24\n add_singleQ_clifford(index_1, gate_seq_1, )\n add_singleQ_clifford(index_2, gate_seq_2)", "title": "" }, { "docid": "4ee3f5a4b0be0a70dffcf1f4e7a33b74", "score": "0.5594737", "text": "def test_qubit_identity(self, rep, tol):\n\n p = 0.543\n\n dev = qml.device(\"default.tensor\", wires=1, representation=rep)\n\n @qml.qnode(dev)\n def circuit(x):\n \"\"\"Test quantum function\"\"\"\n qml.RX(x, wires=0)\n return qml.expval(qml.Identity(0))\n\n assert np.isclose(circuit(p), 1, atol=tol, rtol=0)", "title": "" }, { "docid": "bc4b90d4734a2e6e379901635eed1604", "score": "0.5589917", "text": "def circuit_identity(n):\n if n <= 0:\n return CircuitZero\n if n == 1:\n return CIdentity\n return Concatenation(*((CIdentity,) * n))", "title": "" }, { "docid": "eb1c3298d327352f75f0c189de7ffc25", "score": "0.55772567", "text": "def add_CNOT_like_twoQ_clifford(index, gate_seq_1, gate_seq_2, **kwargs):\n index_1 = index % 3 # randomly sample from S1 (3)\n index_2 = (index // 3) % 3 # randomly sample from S1 (3)\n # randomly sample from single qubit cliffords (24)\n index_3 = (index // 3 // 3) % 24\n # randomly sample from single qubit cliffords (24)\n index_4 = (index // 3 // 3 // 24) % 24\n\n generator = kwargs.get('generator', 'CZ')\n if generator == 'CZ':\n add_singleQ_S1(index_1, gate_seq_1)\n add_singleQ_S1_Y2p(index_2, gate_seq_2)\n gate_seq_1.append(Gate.I)\n gate_seq_2.append(Gate.CZ)\n add_singleQ_clifford(index_3, gate_seq_1)\n add_singleQ_clifford(index_4, gate_seq_2)\n\n elif generator == 'iSWAP':\n add_singleQ_S1(index_1, gate_seq_1)\n add_singleQ_S1_Z2p(index_2, gate_seq_2)\n gate_seq_1.append(Gate.I)\n gate_seq_2.append(Gate.iSWAP)\n gate_seq_1.append(Gate.X2p)\n gate_seq_2.append(Gate.I)\n gate_seq_1.append(Gate.I)\n gate_seq_2.append(Gate.iSWAP)\n add_singleQ_clifford(index_3, gate_seq_1)\n add_singleQ_clifford(index_4, gate_seq_2)", "title": "" }, { "docid": "555890a62921b9d25189a6f87a3d6ae4", "score": "0.5503414", "text": "def initialize_entangled_qubits(final_measure=True):\n\n circuits = []\n qr = QuantumRegister(3)\n if final_measure:\n cr = ClassicalRegister(3)\n regs = (qr, cr)\n else:\n regs = (qr,)\n\n # Initialize |000+111> -> |000+110>\n circuit = 
QuantumCircuit(*regs)\n circuit.h(0)\n circuit.cx(0, 1)\n circuit.cx(0, 2)\n circuit.initialize([1, 0], [0])\n if final_measure:\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n circuits.append(circuit)\n\n # Initialize |000+111> -> |010+111>\n circuit = QuantumCircuit(*regs)\n circuit.h(0)\n circuit.cx(0, 1)\n circuit.cx(0, 2)\n circuit.initialize([0, 1], [1])\n if final_measure:\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n circuits.append(circuit)\n\n # Initialize |000+111> -> |000+011>\n circuit = QuantumCircuit(*regs)\n circuit.h(0)\n circuit.cx(0, 1)\n circuit.cx(0, 2)\n circuit.initialize([1, 0], [2])\n if final_measure:\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n circuits.append(circuit)\n\n # Initialize |000+111> -> |000+100>\n circuit = QuantumCircuit(*regs)\n circuit.h(0)\n circuit.cx(0, 1)\n circuit.cx(0, 2)\n circuit.initialize([1, 0, 0, 0], [0, 1])\n if final_measure:\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n circuits.append(circuit)\n\n # Initialize |000+111> -> |100+110>\n circuit = QuantumCircuit(*regs)\n circuit.h(0)\n circuit.cx(0, 1)\n circuit.cx(0, 2)\n circuit.initialize([0, 0, 0, 1], [0, 2])\n if final_measure:\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n circuits.append(circuit)\n\n # Initialize |000+111> -> |000+100+010+110+100+101+011+111>\n circuit = QuantumCircuit(*regs)\n circuit.h(0)\n circuit.cx(0, 1)\n circuit.cx(0, 2)\n circuit.initialize([0.5, 0.5, 0.5, 0.5], [1, 2])\n if final_measure:\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n circuits.append(circuit)\n\n # Initialize |000+111> -> |100>\n circuit = QuantumCircuit(*regs)\n circuit.h(0)\n circuit.cx(0, 1)\n circuit.cx(0, 2)\n circuit.initialize([0, 0, 0, 0, 0, 0, 0, 1], [0, 1, 2])\n if final_measure:\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n circuits.append(circuit)\n\n return circuits", "title": "" }, { "docid": "1adfaa1bdaa7fe465a968736e6f218c9", "score": "0.55020934", "text": "def add_SWAP_like_twoQ_clifford(index, gate_seq_1, gate_seq_2, **kwargs):\n index_1 = index % 24 # randomly sample from single qubit cliffords (24)\n # randomly sample from single qubit cliffords (24)\n index_2 = (index // 24) % 24\n generator = kwargs.get('generator', 'CZ')\n if generator == 'CZ':\n gate_seq_1.append(Gate.I)\n gate_seq_2.append(Gate.Y2p)\n gate_seq_1.append(Gate.I)\n gate_seq_2.append(Gate.CZ)\n gate_seq_1.append(Gate.Y2p)\n gate_seq_2.append(Gate.Y2m)\n gate_seq_1.append(Gate.I)\n gate_seq_2.append(Gate.CZ)\n gate_seq_1.append(Gate.Y2m)\n gate_seq_2.append(Gate.Y2p)\n gate_seq_1.append(Gate.I)\n gate_seq_2.append(Gate.CZ)\n add_singleQ_clifford(index_1, gate_seq_1)\n add_singleQ_clifford(index_2, gate_seq_2)\n\n elif generator == 'iSWAP':\n gate_seq_1.append(Gate.I)\n gate_seq_2.append(Gate.X2m)\n gate_seq_1.append(Gate.I)\n gate_seq_2.append(Gate.iSWAP)\n gate_seq_1.append(Gate.X2m)\n gate_seq_2.append(Gate.I)\n gate_seq_1.append(Gate.I)\n gate_seq_2.append(Gate.iSWAP)\n gate_seq_1.append(Gate.I)\n gate_seq_2.append(Gate.X2m)\n gate_seq_1.append(Gate.I)\n gate_seq_2.append(Gate.iSWAP)\n add_singleQ_clifford(index_1, gate_seq_1)\n add_singleQ_clifford(index_2, gate_seq_2)", "title": "" }, { "docid": "7cf0ffcfd080f5773adbdaa2f8ae599d", "score": "0.55015194", "text": "def q1():\n # number of different sides and length\n length = 5\n sid = 2\n\n # first set can be 2, next 2, next 2 and so on for length 5 or 2 ** 5\n tot = sid ** length\n print(\"Question 1: {}\".format(tot))", "title": "" }, { "docid": "674c673c1fbbc7d5b3c23d58de205daa", "score": "0.54571664", "text": "def 
test_coupling_map_unequal_durations(self, opt):\n qr = QuantumRegister(2)\n circ = QuantumCircuit(qr)\n circ.append(random_unitary(4, seed=1), [1, 0])\n backend = FakeVigo()\n tqc = transpile(\n circ,\n backend=backend,\n optimization_level=opt,\n translation_method=\"synthesis\",\n layout_method=\"trivial\",\n )\n tqc_index = {qubit: index for index, qubit in enumerate(tqc.qubits)}\n self.assertTrue(\n all(\n (\n (0, 1) == (tqc_index[instr.qubits[0]], tqc_index[instr.qubits[1]])\n for instr in tqc.get_instructions(\"cx\")\n )\n )\n )", "title": "" }, { "docid": "5bd6725709b5a3d2bdf9f8060c66da20", "score": "0.5422984", "text": "def test_repeat(self):\n\n features = list(range(3))\n\n expected_names = self.QUEUES[2][1] + self.QUEUES[2][1]\n expected_wires = self.QUEUES[2][2] + self.QUEUES[2][2]\n\n op = qml.IQPEmbedding(features, wires=range(3), n_repeats=2)\n tape = op.expand()\n\n for i, gate in enumerate(tape.operations):\n assert gate.name == expected_names[i]\n assert gate.wires.labels == tuple(expected_wires[i])", "title": "" }, { "docid": "9a875a97b59aa82a53a642aa562ae753", "score": "0.53944117", "text": "def circuit_built(circuit):", "title": "" }, { "docid": "8de663ad5b0ec5be3ad0a32df0e334f3", "score": "0.5392747", "text": "def test_two_qubit_synthesis_to_directional_cx_from_coupling_map(self):\n # TODO: should make check more explicit e.g. explicitly set gate\n # direction in test instead of using specific fake backend\n backend = FakeVigo()\n conf = backend.configuration()\n qr = QuantumRegister(2)\n coupling_map = CouplingMap([[0, 1], [1, 2], [1, 3], [3, 4]])\n triv_layout_pass = TrivialLayout(coupling_map)\n qc = QuantumCircuit(qr)\n qc.unitary(random_unitary(4, seed=12), [0, 1])\n unisynth_pass = UnitarySynthesis(\n basis_gates=conf.basis_gates,\n coupling_map=coupling_map,\n backend_props=backend.properties(),\n pulse_optimize=True,\n natural_direction=False,\n )\n pm = PassManager([triv_layout_pass, unisynth_pass])\n qc_out = pm.run(qc)\n\n unisynth_pass_nat = UnitarySynthesis(\n basis_gates=conf.basis_gates,\n coupling_map=coupling_map,\n backend_props=backend.properties(),\n pulse_optimize=True,\n natural_direction=True,\n )\n\n pm_nat = PassManager([triv_layout_pass, unisynth_pass_nat])\n qc_out_nat = pm_nat.run(qc)\n # the decomposer defaults to the [1, 0] direction but the coupling\n # map specifies a [0, 1] direction. 
Check that this is respected.\n self.assertTrue(\n all(((qr[1], qr[0]) == instr.qubits for instr in qc_out.get_instructions(\"cx\")))\n )\n self.assertTrue(\n all(((qr[0], qr[1]) == instr.qubits for instr in qc_out_nat.get_instructions(\"cx\")))\n )\n self.assertEqual(Operator(qc), Operator(qc_out))\n self.assertEqual(Operator(qc), Operator(qc_out_nat))", "title": "" }, { "docid": "8bbc7c72a7b76021aff856dc1731feec", "score": "0.53244096", "text": "def test_pk_gen_2():\n dev = LedgerQRL()\n\n answer = dev.send(INS_TEST_PK_GEN_2, 0, 0)\n assert answer is not None\n assert len(answer) == 32\n leaf = binascii.hexlify(answer).upper()\n print(leaf)\n assert leaf == \"98E68D7AB40D358B5B0F4DF4C86AAE78B444BD50248C02773CF1965FAEA092AE\"\n sys.stdout.flush()\n\n answer = dev.send(INS_TEST_PK_GEN_2, 0, 20)\n assert len(answer) == 32\n leaf = binascii.hexlify(answer).upper()\n print(leaf)\n assert leaf == \"8E66C0B26238BC9E12804A83AEF0429E9A666266001A826B5025889B45AE86A3\"\n sys.stdout.flush()\n\n answer = dev.send(INS_TEST_PK_GEN_2, 0, 40)\n assert len(answer) == 32\n leaf = binascii.hexlify(answer).upper()\n print(leaf)\n assert leaf == \"D2BAD383B25900503A34FA126ABB19D3AAC6FC110F431929C7EB18E613E101F8\"\n sys.stdout.flush()", "title": "" }, { "docid": "99db10fad448c8f59c7aa0e21059c839", "score": "0.53199863", "text": "def get2qb():\n circuit = QuantumCircuit(2, 2)\n return circuit", "title": "" }, { "docid": "b7b2414ebc8a5f74312a9d4c95b67b43", "score": "0.53060704", "text": "def add_iSWAP_like_twoQ_clifford(index, gate_seq_1, gate_seq_2, **kwargs):\n generator = kwargs.get('generator', 'CZ')\n index_1 = index % 3 # randomly sample from S1_Y2p (3)\n index_2 = (index // 3) % 3 # randomly sample from S1_X2p(3)\n # randomly sample from single qubit cliffords (24)\n index_3 = (index // 3 // 3) % 24\n # randomly sample from single qubit cliffords (24)\n index_4 = (index // 3 // 3 // 24) % 24\n\n if generator == 'CZ':\n add_singleQ_S1_Y2p(index_1, gate_seq_1)\n add_singleQ_S1_X2p(index_2, gate_seq_2)\n gate_seq_1.append(Gate.I)\n gate_seq_2.append(Gate.CZ)\n gate_seq_1.append(Gate.Y2p)\n gate_seq_2.append(Gate.X2m)\n gate_seq_1.append(Gate.I)\n gate_seq_2.append(Gate.CZ)\n add_singleQ_clifford(index_3, gate_seq_1)\n add_singleQ_clifford(index_4, gate_seq_2)\n\n elif generator == 'iSWAP':\n add_singleQ_S1(index_1, gate_seq_1)\n add_singleQ_S1(index_2, gate_seq_2)\n gate_seq_1.append(Gate.I)\n gate_seq_2.append(Gate.iSWAP)\n add_singleQ_clifford(index_3, gate_seq_1)\n add_singleQ_clifford(index_4, gate_seq_2)", "title": "" }, { "docid": "4a2625c52202f35104d3cd6a1a88d1df", "score": "0.52617085", "text": "def nqubit_1pauli(pauli, i, n):\n #create identity padding\n iden1 = [qto.identity(2) for j in range(i)]\n iden2 = [qto.identity(2) for j in range(n-i-1)]\n\n #combine into total operator list that is in proper order\n oplist = iden1 + [pauli] + iden2\n\n #create final operator by using tensor product on unpacked operator list\n operator = qt.tensor(*oplist)\n\n return operator", "title": "" }, { "docid": "73a940fb8fa0a5cf42ad2861d4b28cf2", "score": "0.52558017", "text": "def your_circuit(oracle):\n # phase kickback trick\n yield cirq.X(q2), cirq.H(q2)\n\n # equal superposition over input bits\n yield cirq.H(q0), cirq.H(q1)\n\n # query the function\n yield oracle\n\n # interference to get result, put last qubit into |1>\n yield cirq.H(q0), cirq.H(q1), cirq.H(q2)\n\n # a final OR gate to put result in final qubit\n yield cirq.X(q0), cirq.X(q1), cirq.CCX(q0, q1, q2)\n yield cirq.measure(q2)", "title": "" }, { 
"docid": "1660f52f1e84c14cd43ef6be66e688f4", "score": "0.5244218", "text": "def make_bits_equal(self):\n difference = abs(len(self.num1) - len(self.num2))\n if len(self.num1) == len(self.num2):\n print(\"Equal\")\n elif len(self.num1) > len(self.num2):\n for i in range(difference):\n self.num2 = \"0\" + self.num2\n elif len(self.num2) > len(self.num1):\n for i in range(difference):\n self.num1 = \"0\" + self.num1", "title": "" }, { "docid": "8284513df19115b1059f755f1cc8acb7", "score": "0.524299", "text": "def eq0405():", "title": "" }, { "docid": "d84487e86c7583353a631f30b77e43be", "score": "0.52325124", "text": "def pad_with_identity(circuit, k, n):\n circuit_n = circuit.cdim\n combined_circuit = circuit + circuit_identity(n)\n permutation = (list(range(k)) + list(range(circuit_n, circuit_n + n)) +\n list(range(k, circuit_n)))\n return (CPermutation.create(invert_permutation(permutation)) <<\n combined_circuit << CPermutation.create(permutation))", "title": "" }, { "docid": "f3380dd50ea745a606bb71d9b52b24b6", "score": "0.5230486", "text": "def __generate_from_4x_to_5x() -> Problem:\n a = random.choice(range(41, 50))\n b = random.choice(range(50, a + 10))\n return __extend_no_carry_borrow(a, b - a)", "title": "" }, { "docid": "e605058e07bb0a9699116ac3d0d96c16", "score": "0.5218623", "text": "def nqubit_2pauli(ipauli, jpauli, i, j, n):\n #create identity padding\n iden1 = [qto.identity(2) for m in range(i)]\n iden2 = [qto.identity(2) for m in range(j-i-1)]\n iden3 = [qto.identity(2) for m in range(n-j-1)]\n\n #combine into total operator list\n oplist = iden1 + [ipauli] + iden2 + [jpauli] + iden3\n\n # apply tensor product on unpacked oplist\n operator = qt.tensor(*oplist)\n\n return operator", "title": "" }, { "docid": "ecd7c33cc95b0801d7e5ac104f8ce4ac", "score": "0.5214872", "text": "def test_prep_circuit_large2(self):\n # Input vector (normalized)\n vec = np.array(list(np.ones(32)) + list(np.zeros(32)))\n\n # Make a tree from the vector\n tree = BinaryTree(vec)\n\n # Do the state preparation circuit\n qreg = QuantumRegister(6)\n circ = QuantumCircuit(qreg)\n tree.preparation_circuit(circ, qreg)\n\n # Do the swaps to get the ordering of amplitudes to match with the input vector\n for ii in range(len(qreg) // 2):\n circ.swap(qreg[ii], qreg[-ii - 1])\n\n # Check that the circuit produces the correct state\n state = np.real(self.final_state(circ))\n\n # Note: The output state has an additional ancilla needed to do the multi-controlled-Y rotations,\n # so we discard the additional (zero) amplitudes when comparing to the input vector\n self.assertTrue(np.allclose(state[:len(vec)], vec / np.linalg.norm(vec, ord=2)))", "title": "" }, { "docid": "8966041d2e44968c492bd204b7fb99c7", "score": "0.5196272", "text": "def test_two_qubit_synthesis_to_directional_cx_from_coupling_map_natural_false(self):\n # TODO: should make check more explicit e.g. 
explicitly set gate\n # direction in test instead of using specific fake backend\n backend = FakeVigo()\n conf = backend.configuration()\n qr = QuantumRegister(2)\n coupling_map = CouplingMap([[0, 1], [1, 2], [1, 3], [3, 4]])\n triv_layout_pass = TrivialLayout(coupling_map)\n qc = QuantumCircuit(qr)\n qc.unitary(random_unitary(4, seed=12), [0, 1])\n unisynth_pass = UnitarySynthesis(\n basis_gates=conf.basis_gates,\n coupling_map=coupling_map,\n backend_props=backend.properties(),\n pulse_optimize=True,\n natural_direction=False,\n )\n pm = PassManager([triv_layout_pass, unisynth_pass])\n qc_out = pm.run(qc)\n\n unisynth_pass_nat = UnitarySynthesis(\n basis_gates=conf.basis_gates,\n coupling_map=coupling_map,\n backend_props=backend.properties(),\n pulse_optimize=True,\n natural_direction=False,\n )\n\n pm_nat = PassManager([triv_layout_pass, unisynth_pass_nat])\n qc_out_nat = pm_nat.run(qc)\n # the decomposer defaults to the [1, 0] direction but the coupling\n # map specifies a [0, 1] direction. Check that this is respected.\n self.assertTrue(\n all(((qr[1], qr[0]) == instr.qubits for instr in qc_out.get_instructions(\"cx\")))\n )\n self.assertTrue(\n all(((qr[1], qr[0]) == instr.qubits for instr in qc_out_nat.get_instructions(\"cx\")))\n )\n self.assertEqual(Operator(qc), Operator(qc_out))\n self.assertEqual(Operator(qc), Operator(qc_out_nat))", "title": "" }, { "docid": "5b8eb254dc562353375700500e67926e", "score": "0.5195183", "text": "def joint_card(c1,c2):\n return '{' + ('1' if c1[1] == c2[1] == '1' else '0') + ':' + ('1' if c1[3] == c2[3] == '1' else 'M') + '}'", "title": "" }, { "docid": "357ec7d9764c28c00274f7e4e36a698e", "score": "0.5186929", "text": "def test_QI2():\n # Tests checked against Mathematica noteboook `costingsf.nb`\n # Arguments are otherwise random\n assert QI2(1234, 5678) == (32, 64, 5519)\n assert QI2(7120, 1340111) == (4, 32768, 204052)", "title": "" }, { "docid": "f6ff814b48ebf1f000d51185762015d7", "score": "0.5177073", "text": "def test_prep_ctrl_random_twoq(self):\n for _ in range(50):\n vec = np.random.randn(4)\n tree = BinaryTree(vec)\n\n register = QuantumRegister(2)\n ctrl_reg = QuantumRegister(2)\n circ = QuantumCircuit(register, ctrl_reg)\n ctrl_key = 0\n\n tree.preparation_circuit(circ, register, control_register=ctrl_reg, control_key=ctrl_key)\n\n circ.swap(register[0], register[1])\n\n state = np.real(TestBinaryTree.final_state(circ))\n\n self.assertTrue(np.allclose(state[:len(vec)], vec / np.linalg.norm(vec, ord=2)))", "title": "" }, { "docid": "392daa1854fbb4d394bb86c933599999", "score": "0.517426", "text": "def __generate_from_5x_to_4x() -> Problem:\n a = random.choice(range(50, 58))\n b = random.choice(range(a - 9, 50))\n return __extend_no_carry_borrow(a, b - a)", "title": "" }, { "docid": "819858c64ba08ec36e573850c9e7a4b9", "score": "0.51735836", "text": "def create_data(self, length):\n i = np.random.randint(2 ,size=length) * 2 - 1\n q = np.random.randint(2 ,size=length) * 2 - 1\n return i, q", "title": "" }, { "docid": "7bfd9a613df6c7abacc5fdd812ce900d", "score": "0.51712984", "text": "def test_prepare_negative_amplitudes_two_qubits2(self):\n # Generate all sign configurations\n one_neg = set(permutations((-1, 1, 1, 1)))\n two_neg = set(permutations((-1, -1, 1, 1)))\n three_neg = set(permutations((-1, -1, -1, 1)))\n four_neg = {(-1, -1, -1, -1)}\n\n for sign in one_neg | two_neg | three_neg | four_neg:\n # Input vector\n vec = np.array([1, 2, 3, 4], dtype=np.float64)\n vec *= np.array(sign, dtype=np.float64)\n\n # Get a BinaryTree\n tree = 
BinaryTree(vec)\n\n # Get a quantum register\n qreg = QuantumRegister(2)\n circ = QuantumCircuit(qreg)\n\n # Get the state preparation circuit\n tree.preparation_circuit(circ, qreg)\n\n # Swap qubits to compare with natural ordering of vector\n circ.swap(qreg[0], qreg[1])\n\n # Make sure the final state is the same as the input vector\n state = np.real(self.final_state(circ))\n self.assertTrue(np.allclose(state, vec / np.linalg.norm(vec, ord=2)))", "title": "" }, { "docid": "ca4a04ec5ee1b1835fbd5d84b592643e", "score": "0.5168186", "text": "def test_selfinnerproduct(self):\n qkclass = QuantumKernel(feature_map=self.feature_map)\n qc = qkclass.construct_circuit(self.x)\n self._check_circuit(qc, check_measurements=True, check_inverse=True)", "title": "" }, { "docid": "b2b7496d98f840eaf3a7edbd1f98224e", "score": "0.51566935", "text": "def test_two_qubit_synthesis_to_directional_cx_from_coupling_map_natural_none(self):\n # TODO: should make check more explicit e.g. explicitly set gate\n # direction in test instead of using specific fake backend\n backend = FakeVigo()\n conf = backend.configuration()\n qr = QuantumRegister(2)\n coupling_map = CouplingMap([[0, 1], [1, 2], [1, 3], [3, 4]])\n triv_layout_pass = TrivialLayout(coupling_map)\n qc = QuantumCircuit(qr)\n qc.unitary(random_unitary(4, seed=12), [0, 1])\n unisynth_pass = UnitarySynthesis(\n basis_gates=conf.basis_gates,\n coupling_map=coupling_map,\n backend_props=backend.properties(),\n pulse_optimize=True,\n natural_direction=False,\n )\n pm = PassManager([triv_layout_pass, unisynth_pass])\n qc_out = pm.run(qc)\n\n unisynth_pass_nat = UnitarySynthesis(\n basis_gates=conf.basis_gates,\n coupling_map=coupling_map,\n backend_props=backend.properties(),\n pulse_optimize=True,\n natural_direction=None,\n )\n\n pm_nat = PassManager([triv_layout_pass, unisynth_pass_nat])\n qc_out_nat = pm_nat.run(qc)\n # the decomposer defaults to the [1, 0] direction but the coupling\n # map specifies a [0, 1] direction. 
Check that this is respected.\n self.assertTrue(\n all(((qr[1], qr[0]) == instr.qubits for instr in qc_out.get_instructions(\"cx\")))\n )\n self.assertTrue(\n all(((qr[0], qr[1]) == instr.qubits for instr in qc_out_nat.get_instructions(\"cx\")))\n )\n self.assertEqual(Operator(qc), Operator(qc_out))\n self.assertEqual(Operator(qc), Operator(qc_out_nat))", "title": "" }, { "docid": "a8048578c69d1a5bc510dc432637ea54", "score": "0.5130123", "text": "def __idiv__(self, other):\n \n pass", "title": "" }, { "docid": "104b28686f433117f1f96742c210000f", "score": "0.510977", "text": "def test_two_qubit_synthesis_not_pulse_optimal(self):\n backend = FakeVigo()\n conf = backend.configuration()\n qr = QuantumRegister(2)\n coupling_map = CouplingMap([[0, 1], [1, 2], [1, 3], [3, 4]])\n triv_layout_pass = TrivialLayout(coupling_map)\n qc = QuantumCircuit(qr)\n qc.unitary(random_unitary(4, seed=12), [0, 1])\n unisynth_pass = UnitarySynthesis(\n basis_gates=conf.basis_gates,\n coupling_map=coupling_map,\n backend_props=backend.properties(),\n pulse_optimize=False,\n natural_direction=True,\n )\n pm = PassManager([triv_layout_pass, unisynth_pass])\n qc_out = pm.run(qc)\n if isinstance(qc_out, QuantumCircuit):\n num_ops = qc_out.count_ops()\n else:\n num_ops = qc_out[0].count_ops()\n self.assertIn(\"sx\", num_ops)\n self.assertGreaterEqual(num_ops[\"sx\"], 16)", "title": "" }, { "docid": "d3018a171de0bc3f002ec9147df2a9b1", "score": "0.5099306", "text": "def test_single_qubit_identity_with_target(self):\n qc = QuantumCircuit(1)\n qc.unitary([[1.0, 0.0], [0.0, 1.0]], 0)\n dag = circuit_to_dag(qc)\n unitary_synth_pass = UnitarySynthesis(target=FakeBelemV2().target)\n result_dag = unitary_synth_pass.run(dag)\n result_qc = dag_to_circuit(result_dag)\n self.assertEqual(result_qc, QuantumCircuit(1))", "title": "" }, { "docid": "b0a6d091d722790efe91922ec25b6b58", "score": "0.50964344", "text": "def test_two_qubit_natural_direction_true_gate_length_raises(self):\n # this assumes iswawp pulse optimal decomposition doesn't exist\n backend = FakeVigo()\n conf = backend.configuration()\n for _, nduv in backend.properties()._gates[\"cx\"].items():\n nduv[\"gate_length\"] = (4e-7, nduv[\"gate_length\"][1])\n nduv[\"gate_error\"] = (7e-3, nduv[\"gate_error\"][1])\n qr = QuantumRegister(2)\n coupling_map = CouplingMap([[0, 1], [1, 0], [1, 2], [1, 3], [3, 4]])\n triv_layout_pass = TrivialLayout(coupling_map)\n qc = QuantumCircuit(qr)\n qc.unitary(random_unitary(4, seed=12), [0, 1])\n unisynth_pass = UnitarySynthesis(\n basis_gates=conf.basis_gates,\n backend_props=backend.properties(),\n pulse_optimize=True,\n natural_direction=True,\n )\n pm = PassManager([triv_layout_pass, unisynth_pass])\n with self.assertRaises(TranspilerError):\n pm.run(qc)", "title": "" }, { "docid": "d74dbf5029e96032b3b4865101f86628", "score": "0.50770205", "text": "def add_singleQ_clifford(index, gate_seq, pad_with_I=True):\n length_before = len(gate_seq)\n # Paulis\n if index == 0:\n gate_seq.append(Gate.I)\n elif index == 1:\n gate_seq.append(Gate.Xp)\n elif index == 2:\n gate_seq.append(Gate.Yp)\n elif index == 3:\n gate_seq.append(Gate.Xp)\n gate_seq.append(Gate.Yp)\n\n # 2pi/3 rotations\n elif index == 4:\n gate_seq.append(Gate.Y2p)\n gate_seq.append(Gate.X2p)\n elif index == 5:\n gate_seq.append(Gate.Y2m)\n gate_seq.append(Gate.X2p)\n elif index == 6:\n gate_seq.append(Gate.Y2p)\n gate_seq.append(Gate.X2m)\n elif index == 7:\n gate_seq.append(Gate.Y2m)\n gate_seq.append(Gate.X2m)\n elif index == 8:\n gate_seq.append(Gate.X2p)\n 
gate_seq.append(Gate.Y2p)\n elif index == 9:\n gate_seq.append(Gate.X2m)\n gate_seq.append(Gate.Y2p)\n elif index == 10:\n gate_seq.append(Gate.X2p)\n gate_seq.append(Gate.Y2m)\n elif index == 11:\n gate_seq.append(Gate.X2m)\n gate_seq.append(Gate.Y2m)\n\n # pi/2 rotations\n elif index == 12:\n gate_seq.append(Gate.X2p)\n elif index == 13:\n gate_seq.append(Gate.X2m)\n elif index == 14:\n gate_seq.append(Gate.Y2p)\n elif index == 15:\n gate_seq.append(Gate.Y2m)\n elif index == 16:\n gate_seq.append(Gate.X2p)\n gate_seq.append(Gate.Y2p)\n gate_seq.append(Gate.X2m)\n elif index == 17:\n gate_seq.append(Gate.X2p)\n gate_seq.append(Gate.Y2m)\n gate_seq.append(Gate.X2m)\n\n # Hadamard-Like\n elif index == 18:\n gate_seq.append(Gate.Y2p)\n gate_seq.append(Gate.Xp)\n elif index == 19:\n gate_seq.append(Gate.Y2m)\n gate_seq.append(Gate.Xp)\n elif index == 20:\n gate_seq.append(Gate.X2p)\n gate_seq.append(Gate.Yp)\n elif index == 21:\n gate_seq.append(Gate.X2m)\n gate_seq.append(Gate.Yp)\n elif index == 22:\n gate_seq.append(Gate.X2p)\n gate_seq.append(Gate.Y2p)\n gate_seq.append(Gate.X2p)\n elif index == 23:\n gate_seq.append(Gate.X2m)\n gate_seq.append(Gate.Y2p)\n gate_seq.append(Gate.X2m)\n else:\n raise ValueError(\n 'index is out of range. it should be smaller than 24 and greater'\n ' or equal to 0: ', str(index))\n\n length_after = len(gate_seq)\n if pad_with_I:\n # Force the clifford to have a length of 3 gates\n for i in range(3-(length_after-length_before)):\n gate_seq.append(Gate.I)", "title": "" }, { "docid": "2a32e5dca26c60d46d1a81730d653799", "score": "0.50734425", "text": "def two_qubit_AllXY(q0, q1, RO_target='all',\n sequence_type='sequential',\n replace_q1_pulses_X180=False,\n double_points=False):\n\n pulse_combinations = [['I', 'I'], ['X180', 'X180'], ['Y180', 'Y180'],\n ['X180', 'Y180'], ['Y180', 'X180'],\n ['X90', 'I'], ['Y90', 'I'], ['X90', 'Y90'],\n ['Y90', 'X90'], ['X90', 'Y180'], ['Y90', 'X180'],\n ['X180', 'Y90'], ['Y180', 'X90'], ['X90', 'X180'],\n ['X180', 'X90'], ['Y90', 'Y180'], ['Y180', 'Y90'],\n ['X180', 'I'], ['Y180', 'I'], ['X90', 'X90'],\n ['Y90', 'Y90']]\n\n pulse_combinations_tiled = pulse_combinations + pulse_combinations\n if double_points:\n pulse_combinations = [val for val in pulse_combinations\n for _ in (0, 1)]\n\n if replace_q1_pulses_X180:\n pulse_combinations_q1 = ['X180' for val in pulse_combinations]\n\n pulse_combinations_q0 = pulse_combinations\n pulse_combinations_q1 = pulse_combinations_tiled\n\n\n filename = join(base_qasm_path, 'two_qubit_AllXY.qasm')\n qasm_file = mopen(filename, mode='w')\n qasm_file.writelines('qubit {} \\nqubit {} \\n'.format(q0, q1))\n\n for pulse_comb_q0, pulse_comb_q1 in zip(pulse_combinations_q0,\n pulse_combinations_q1):\n qasm_file.writelines('\\ninit_all\\n')\n if sequence_type == 'interleaved':\n qasm_file.writelines('{} {}\\n'.format(pulse_comb_q0[0], q0) +\n '{} {}\\n'.format(pulse_comb_q1[0], q1) +\n '{} {}\\n'.format(pulse_comb_q0[1], q0) +\n '{} {}\\n'.format(pulse_comb_q1[1], q1))\n elif sequence_type == 'sandwiched':\n qasm_file.writelines('{} {}\\n'.format(pulse_comb_q1[0], q1) +\n '{} {}\\n'.format(pulse_comb_q0[0], q0) +\n '{} {}\\n'.format(pulse_comb_q0[1], q0) +\n '{} {}\\n'.format(pulse_comb_q1[1], q1))\n elif sequence_type == 'sequential':\n qasm_file.writelines('{} {}\\n'.format(pulse_comb_q0[0], q0) +\n '{} {}\\n'.format(pulse_comb_q0[1], q0) +\n '{} {}\\n'.format(pulse_comb_q1[0], q1) +\n '{} {}\\n'.format(pulse_comb_q1[1], q1))\n elif sequence_type == 'simultaneous':\n qasm_file.writelines('{} {} 
|'.format(pulse_comb_q0[0], q0) +\n '{} {}\\n'.format(pulse_comb_q1[0], q1) +\n '{} {} |'.format(pulse_comb_q0[1], q0) +\n '{} {}\\n'.format(pulse_comb_q1[1], q1))\n else:\n raise ValueError(\"sequence_type {} \".format(sequence_type) +\n \"['interleaved', 'simultaneous', \" +\n \"'sequential', 'sandwiched']\")\n qasm_file.writelines('RO {} \\n'.format(RO_target))\n\n qasm_file.close()\n return qasm_file", "title": "" }, { "docid": "f76973307c3be04639b3e076ff3c812f", "score": "0.5068147", "text": "def test_different_number_of_virtual_and_physical_qubits(self):\n\n # q_0: ──■────■───────\n # ┌─┴─┐ │\n # q_1: ┤ X ├──┼────■──\n # └───┘ │ ┌─┴─┐\n # q_2: ──■────┼──┤ X ├\n # ┌─┴─┐┌─┴─┐└───┘\n # q_3: ┤ X ├┤ X ├─────\n # └───┘└───┘\n circuit = QuantumCircuit(4)\n circuit.cx(0, 1)\n circuit.cx(2, 3)\n circuit.cx(0, 3)\n circuit.cx(1, 2)\n\n coupling = CouplingMap.from_line(5)\n with self.assertRaises(TranspilerError):\n with self.assertWarnsRegex(DeprecationWarning, r\"^The class.*is deprecated\"):\n BIPMapping(coupling)(circuit)", "title": "" }, { "docid": "0873a1f806dc3a21013b982b12221759", "score": "0.5045172", "text": "def create_circuit(self, angles):\n\n p = Program()\n ro = p.declare(\"ro\", memory_type='BIT', memory_size=4)\n\n # name the individual qubits for more clarity\n # ancilla_qubit = [0]\n # index_qubit = [1]\n # data_qubit = [2]\n # class_qubit = [3]\n\n #######################################\n #START of the state preparation routine\n\n # put the ancilla and the index qubits into uniform superposition\n p += H(0)\n p += H(1)\n\n def u3(program, theta, qubit):\n program += RZ(3*np.pi, qubit)\n program += RX(np.pi/2, qubit)\n program += RZ(theta+np.pi, qubit)\n program += RX(np.pi/2, qubit)\n program += RZ(0, qubit)\n return program\n\n # loading the test vector (which we wish to classify)\n p += CNOT(0, 2)\n p = u3(p, -angles[0], 2)\n p += CNOT(0, 2)\n p = u3(p, angles[0], 2)\n\n # flipping the ancilla qubit > this moves the input vector to the |0> state of the ancilla\n p += X(0)\n\n # loading the first training vector\n # [0,1] -> class 0\n # we can load this with a straightforward Toffoli\n\n p += CCNOT(0, 1, 2)\n\n # flip the index qubit > moves the first training vector to the |0> state of the index qubit\n p += X(1)\n\n # loading the second training vector\n # [0.78861, 0.61489] -> class 1\n\n p += CCNOT(0, 1, 2)\n\n p += CNOT(1, 2)\n p = u3(p, angles[1], 2)\n p += CNOT(1, 2)\n p = u3(p, -angles[1], 2)\n\n p += CCNOT(0, 1, 2)\n\n p += CNOT(1, 2)\n p = u3(p, -angles[1], 2)\n p += CNOT(1, 2)\n p = u3(p, angles[1], 2)\n\n # END of state preparation routine\n ####################################################\n\n # at this point we would usually swap the data and class qubit\n # however, we can be lazy and let the Qiskit compiler take care of it\n\n # flip the class label for training vector #2\n\n p += CNOT(1, 3)\n\n #############################################\n # START of the mini distance-based classifier\n\n # interfere the input vector with the training vectors\n p += H(0)\n\n # Measure all qubits and record the results in the classical registers\n p += MEASURE(0, ro[0])\n p += MEASURE(1, ro[1])\n p += MEASURE(2, ro[2])\n p += MEASURE(3, ro[3])\n\n # END of the mini distance-based classifier\n #############################################\n\n return p", "title": "" }, { "docid": "c041f6c136be3f8619ab245e4d7e1a95", "score": "0.5038822", "text": "def test_no_state_modification_circuit(self) -> None:\n\n for nbqbit in range(min_nbqbit, max_nbqbit):\n prog = Program()\n 
qbits = prog.qalloc(nbqbit)\n\n random_angles = [rd.random() * 2 * np.pi for _ in range(3 * nbqbit)]\n\n for i in range(len(qbits)):\n prog.apply(RX(random_angles[3 * i]), qbits[i])\n prog.apply(RX(random_angles[3 * i + 1]), qbits[i])\n prog.apply(RX(random_angles[3 * i + 2]), qbits[i])\n\n prog.apply(QFT(nbqbit), qbits)\n prog.apply(QFT(nbqbit).dag(), qbits)\n\n for i in range(len(qbits)):\n prog.apply(RX(random_angles[3 * i]).dag(), qbits[i])\n prog.apply(RX(random_angles[3 * i + 1]).dag(), qbits[i])\n prog.apply(RX(random_angles[3 * i + 2]).dag(), qbits[i])\n\n circuit = prog.to_circ(inline=True)\n\n for topology in generate_custom_topologies(nbqbit):\n qpu = Sabre() | (QuameleonPlugin(topology=topology) | PyLinalg())\n result = qpu.submit(circuit.to_job())\n assert result.raw_data[0].state.int == 0", "title": "" }, { "docid": "6de8bb1d61aef05ae792bdfc32b7f014", "score": "0.50318724", "text": "def test_two_qubit_no_parameters(self, device, init_state, op, mat, tol, skip_if):\n n_wires = 2\n dev = device(n_wires)\n skip_if(dev, {\"returns_probs\": False})\n if not dev.supports_operation(op(wires=range(n_wires)).name):\n pytest.skip(\"op not supported\")\n\n rnd_state = init_state(n_wires)\n\n @qml.qnode(dev)\n def circuit():\n qml.StatePrep(rnd_state, wires=range(n_wires))\n op(wires=range(n_wires))\n return qml.probs(wires=range(n_wires))\n\n res = circuit()\n\n expected = np.abs(mat @ rnd_state) ** 2\n assert np.allclose(res, expected, atol=tol(dev.shots))", "title": "" }, { "docid": "223db33425e0a7293ab9a606bd7ba8fa", "score": "0.5009359", "text": "def eq02():", "title": "" }, { "docid": "ac8059fd9fae84aac4558f9154f3333d", "score": "0.49888164", "text": "def __generate_plus8_eq_plus3_minus5_plus10() -> Problem:\n a = 10 * random.choice(__digits_no49) + random.choice([5, 6])\n return __extend_no_carry_borrow(a, 8)", "title": "" }, { "docid": "578472535a549079a5d73875e75ed590", "score": "0.49776545", "text": "def test_already_executable_circuit(self) -> None:\n\n for nbqbit in range(min_nbqbit, max_nbqbit):\n prog = Program()\n qbits = prog.qalloc(nbqbit)\n\n for i in range(len(qbits) - 1):\n prog.apply(H, qbits[i])\n prog.apply(Z, qbits[i])\n prog.apply(X.ctrl(), qbits[i + 1], qbits[i])\n\n circuit = prog.to_circ(inline=True)\n\n sabre = Sabre()\n batch = Batch(jobs=[circuit.to_job()])\n hardware_specs = HardwareSpecs()\n batch_result = sabre.compile(batch, hardware_specs)\n computed_circuit = batch_result.jobs[0].circuit\n\n check_circuits_equality(circuit, computed_circuit)", "title": "" }, { "docid": "c59f9aad0510ab9efa72ec42542a91e8", "score": "0.49725848", "text": "def check_circuits_equality(circuit_1: Circuit, circuit_2: Circuit) -> None:\n\n # We check the equality of circuits properties one by one excepted the qregister which can not be compared\n assert circuit_1.ops == circuit_2.ops\n assert circuit_1.gateDic == circuit_2.gateDic\n assert circuit_1.var_dic == circuit_2.var_dic", "title": "" }, { "docid": "f0cffc616854b95819f55122d8c04236", "score": "0.497025", "text": "def circuit_hash(self):\n raise NotImplementedError", "title": "" }, { "docid": "d2f1dcfc2950875b8c817bbb65814a77", "score": "0.49614215", "text": "def get_equal_sign_orthants(s, circuits_information_M1, circuits_information_M2):\n\n equal_sign_vectors = []\n\n #TODO: create them dynamically\n orthants = list(itertools.product([-1,0,1],repeat=s))\n\n solution_orthants = []\n #Steps 2,3,4\n for orthant in orthants:\n print(orthant)\n conformal_circuits_M1 = 
circuits_information_M1.get_conformal_circuits(orthant)\n\n #union of the circuits conformal to the orthant\n U_M1 = Utils.union(conformal_circuits_M1)\n\n is_zero_or_invalid = True\n if U_M1 != None:\n for elt in U_M1:\n if elt != 0:\n is_zero_or_invalid = False\n\n\n #print(U_M1)\n\n if U_M1 != None and not is_zero_or_invalid and Utils.has_equal_sign(orthant, U_M1):\n #Si el ortante tiene soporte igual a la union de los circuitos de M1, sigo con el paso 3 del algoritmo.\n conformal_circuits_M2 = circuits_information_M2.get_conformal_circuits(orthant)\n U_M2 = Utils.union(conformal_circuits_M2)\n if U_M2 != None and Utils.has_equal_sign(orthant, U_M2):\n #equal_sign_vectors.append([U_M1, U_M2])\n solution_orthants.append(orthant)\n #equal_sign_vectors.append([U_M1, U_M2])\n\n #Stop searching in this orthant\n continue\n\n print(\"Two vectors with the same sign, corresponding to the orthant %s, are %s, from M1, and %s, from M2.\" % (orthant, U_M1, U_M2))\n else:\n continue\n #Isn't useful\n return solution_orthants", "title": "" }, { "docid": "68da894061da9ee419a8bc830a541780", "score": "0.49587256", "text": "def test_prep_circuit_one_qubit2(self):\n # Two element vector\n vec = [0.0, 1.0]\n\n # Make a BinaryTree\n tree = BinaryTree(vec)\n\n # Get a circuit and register to prepare the vector in\n qreg = QuantumRegister(1)\n circ = QuantumCircuit(qreg)\n\n # Get the state preparation circuit\n tree.preparation_circuit(circ, qreg)\n\n # Get the final state\n state = list(np.real(self.final_state(circ)))\n\n self.assertTrue(np.array_equal(state, vec))", "title": "" }, { "docid": "c28fdd7894d5d61451db75696d49b6ba", "score": "0.49433023", "text": "def test_two_qubit_natural_direction_true_duration_fallback(self):\n # this assumes iswawp pulse optimal decomposition doesn't exist\n backend = FakeVigo()\n conf = backend.configuration()\n # conf.basis_gates = [gate if gate != \"cx\" else \"iswap\" for gate in conf.basis_gates]\n qr = QuantumRegister(2)\n coupling_map = CouplingMap([[0, 1], [1, 0], [1, 2], [1, 3], [3, 4]])\n triv_layout_pass = TrivialLayout(coupling_map)\n qc = QuantumCircuit(qr)\n qc.unitary(random_unitary(4, seed=12), [0, 1])\n unisynth_pass = UnitarySynthesis(\n basis_gates=conf.basis_gates,\n coupling_map=coupling_map,\n backend_props=backend.properties(),\n pulse_optimize=True,\n natural_direction=True,\n )\n pm = PassManager([triv_layout_pass, unisynth_pass])\n qc_out = pm.run(qc)\n self.assertTrue(\n all(((qr[0], qr[1]) == instr.qubits for instr in qc_out.get_instructions(\"cx\")))\n )", "title": "" }, { "docid": "53e42f4985e2a98d85da2c542120ecd8", "score": "0.49384433", "text": "def _prepare_circuits(base_circuit, observables):\n circuits = list()\n\n if isinstance(observables, ComposedOp):\n observables = SummedOp([observables])\n for obs in observables:\n circuit = base_circuit.copy()\n circuit.append(obs[1], qargs=list(range(base_circuit.num_qubits)))\n circuits.append(circuit)\n return circuits, observables", "title": "" }, { "docid": "74e440f942ce99b7439e9ca7bd0fbab2", "score": "0.493633", "text": "def U_h(self, circuit, l, n_i, m, n_phiReg, w_phiReg, n_aReg, w_aReg, n_bReg, w_bReg, wReg, eReg, pReg, hReg, w_hReg,\n P_phi,\n P_a, P_b):\n for k in range(n_i + m):\n # for k in range(1):\n print(\"k: \", k)\n countsList = self.generateParticleCounts(n_i, m, k) # reduce the available number of particles\n\n for counts in countsList:\n n_phi, n_a, n_b = counts[0], counts[1], counts[2]\n # controlled R-y from |0> to |k> on all qubits with all possible angles depending on 
n_phi, n_a, n_b, and flavor\n # for flavor in ['phi']:\n # if n_phi == 3 and n_a == 0 and n_b ==0:\n # print(\"counts: \", counts)\n # print(\"after if : \", n_phi, n_a, n_b)\n\n for flavor in ['phi', 'a', 'b']:\n angle = self.U_hAngle(flavor, n_phi, n_a, n_b, P_phi, P_a, P_b)\n\n #phiControl, aControl, and bControl are the corresponding work registers, and since we call\n # numberControl we also add x gates on the respective number registers (aka n_phi, n_a, n_b)\n phiControl = self.numberControl(circuit, l, n_phi, n_phiReg, w_phiReg)\n # print(\"qiskit phiControl: \", phiControl)\n aControl = self.numberControl(circuit, l, n_a, n_aReg, w_aReg)\n # print(\"qiskit aControl: \", aControl)\n bControl = self.numberControl(circuit, l, n_b, n_bReg, w_bReg)\n # print(\"qiskit bControl: \", bControl)\n\n circuit.ccx(phiControl, aControl, wReg[0])\n circuit.ccx(bControl, wReg[0], wReg[1])\n\n self.flavorControl(circuit, flavor, pReg, wReg, wReg, (k * self._p_len), 2,\n 4) # wReg[4] is work qubit but is reset to 0\n circuit.ccx(wReg[1], wReg[2], wReg[3])\n circuit.ccx(eReg[0], wReg[3], wReg[4])\n\n self.twoLevelControlledRy(circuit, l, angle, k + 1, wReg[4], hReg, w_hReg)\n\n circuit.ccx(eReg[0], wReg[3], wReg[4]) # next steps undo work qubits\n circuit.ccx(wReg[1], wReg[2], wReg[3])\n self.flavorControl(circuit, flavor, pReg, wReg, wReg, (k * self._p_len), 2,\n 4) # wReg[4] is work qubit but is reset to 0\n circuit.ccx(bControl, wReg[0], wReg[1])\n circuit.ccx(phiControl, aControl, wReg[0])\n self.numberControlT(circuit, l, n_b, n_bReg, w_bReg)\n self.numberControlT(circuit, l, n_a, n_aReg, w_aReg)\n self.numberControlT(circuit, l, n_phi, n_phiReg, w_phiReg)\n\n # subtract from the counts register depending on which flavor particle emitted\n for flavor, countReg, workReg in zip(['phi', 'a', 'b'], [n_phiReg, n_aReg, n_bReg],\n [w_phiReg, w_aReg, w_bReg]):\n self.flavorControl(circuit, flavor, pReg, wReg, wReg, (k * self._p_len), 0,\n 1) # wReg[4] is work qubit but is reset to 0\n self.minus1(circuit, l, countReg, workReg, wReg[0], wReg[1], 0)\n self.flavorControl(circuit, flavor, pReg, wReg, wReg, (k * self._p_len), 0,\n 1) # wReg[4] is work qubit but is reset to 0\n\n # apply x on eReg if hReg[m] = 0, apply another x so we essentially control on not 0 instead of 0\n isZeroControl = self.numberControl(circuit, l, 0, hReg, w_hReg)\n circuit.cx(isZeroControl, eReg[0])\n circuit.x(eReg[0])\n self.numberControlT(circuit, l, 0, hReg, w_hReg)", "title": "" }, { "docid": "58c8dfeeec411da2cdf5d9680abc0d2f", "score": "0.49305674", "text": "def __generate_plus2_eq_minus8_plus10() -> Problem:\n a = 10 * random.choice(__digits_no49) + random.choice([8, 9])\n return __extend_no_carry_borrow(a, 2)", "title": "" }, { "docid": "665951a7772536c0aeda8f5e49492388", "score": "0.49253783", "text": "def two_component_identical_lj():\n T = 1\n sig = np.array([1, 1])\n eps = np.array([1, 1])\n eps_01 = geometric(eps[0], eps[1])\n sig_01 = arithmetic(sig[0], sig[1])\n rhos = np.array([0.005, 0.005]) / sig**3\n\n lj = oz.System(kT=T)\n\n r = lj.r\n lj.set_interaction(0, 0, oz.lennard_jones(r, eps[0], sig[0]))\n lj.set_interaction(1, 1, oz.lennard_jones(r, eps[1], sig[1]))\n lj.set_interaction(0, 1, oz.lennard_jones(r, eps_01, sig_01))\n\n lj.solve(rhos=rhos, closure_name='hnc')\n return lj", "title": "" }, { "docid": "f7705933edd534e3bfc411eeea98bf01", "score": "0.49243945", "text": "def generateBinaryID(self):\n bid = cantorPairing(self.id1, self.id2)\n self.addNewMember('bid',bid)", "title": "" }, { "docid": 
"f7705933edd534e3bfc411eeea98bf01", "score": "0.49243945", "text": "def generateBinaryID(self):\n bid = cantorPairing(self.id1, self.id2)\n self.addNewMember('bid',bid)", "title": "" }, { "docid": "f7705933edd534e3bfc411eeea98bf01", "score": "0.49243945", "text": "def generateBinaryID(self):\n bid = cantorPairing(self.id1, self.id2)\n self.addNewMember('bid',bid)", "title": "" }, { "docid": "f7705933edd534e3bfc411eeea98bf01", "score": "0.49243945", "text": "def generateBinaryID(self):\n bid = cantorPairing(self.id1, self.id2)\n self.addNewMember('bid',bid)", "title": "" }, { "docid": "3fb6c4bce4383bc59792bac257adfb6c", "score": "0.4908937", "text": "def random_circuit_encoding(self, n_ops, random_state):\n return random_state.rand(3 * n_ops)", "title": "" }, { "docid": "99b7c4e237167fac45b64bf4b3bfca06", "score": "0.4902847", "text": "def __generate_plus6_eq_minus4_plus10() -> Problem:\n a = 10 * random.choice(__digits_no49) + random.choice([4, 9])\n return __extend_no_carry_borrow(a, 6)", "title": "" }, { "docid": "d3fb5ff6cbd7848d23116b0a021b63c0", "score": "0.4900984", "text": "def main():\n # Set the number of symbols (i.e. the generation size in RLNC terminology)\n # and the size of a symbol in bytes\n symbols = 8\n symbol_size = 1\n\n # Create encoder/decoder factory used to build actual encoders/decoders\n encoder_factory = kodo.PerpetualEncoderFactoryBinary(symbols, symbol_size)\n encoder = encoder_factory.build()\n\n decoder_factory = kodo.PerpetualDecoderFactoryBinary(symbols, symbol_size)\n decoder = decoder_factory.build()\n\n # The perpetual encoder supports three operation modes;\n #\n # 1) Random pivot mode (default)\n # The pivot element is drawn at random for each coding symbol\n #\n # example generated vectors (for width = 2)\n #\n # 0 0 1 X X 0\n # X X 0 0 0 1\n # X 0 0 0 1 X\n # X X 0 0 0 1\n # 0 1 X X 0 0\n # 0 1 X X 0 0\n #\n # 1 X X 0 0 0\n # 0 0 0 1 X X\n # .\n # .\n #\n # 2) Pseudo systematic\n # Pivot elements are generated with indices 0,1,2, ... , n,\n # after which the generator reverts to the default random pivot\n #\n # example generated vectors (for width = 2)\n #\n # 1 X X 0 0 0\n # 0 1 X X 0 0\n # 0 0 1 X X 0\n # 0 0 0 1 X X\n # X 0 0 0 1 X\n # X X 0 0 0 1\n #\n # (additional vectors generated using the random mode)\n #\n # 3) Pre-charging\n # For the first \"width\" symbols, the pivot index is 0. After that,\n # the pseudo-systematic mode is used. Finally, pivots are drawn at\n # random resulting in the indices 0 (width times), 1,2, ... , n\n #\n # example generated vectors (for width = 2)\n #\n # 1 X X 0 0 0\n # 1 X X 0 0 0\n # 0 1 X X 0 0\n # 0 0 1 X X 0\n # 0 0 0 1 X X\n #\n # X 0 0 0 1 X\n # X X 0 0 0 1\n #\n # (additional vectors generated using the random mode)\n #\n # The operation mode is set in the following. 
Note that if both\n # pre-charging and pseudo-systematic is enabled, pre-charging takes\n # precedence.\n\n # Enable the pseudo-systematic operation mode - faster\n #encoder.set_pseudo_systematic(True)\n\n # Enable the pre-charing operation mode - even faster\n encoder.set_pre_charging(True)\n\n print(\"Pseudo-systematic is {}\\nPre-charging is {}\".format(\n \"on\" if encoder.pseudo_systematic() else \"off\",\n \"on\" if encoder.pre_charging() else \"off\"))\n\n # The width of the perpetual code can be set either as a number of symbols\n # using set_width(), or as a ratio of the generation size using\n # set_width_ratio().\n #\n # The default width is set to 10% of the generation size.\n print(\"The width ratio defaults to: {} (therefore the calculated width is \"\n \"{})\".format(encoder.width_ratio(), encoder.width()))\n encoder.set_width(6)\n\n print(\"The width was set to: {} (therefore the calculated width ratio is \"\n \"{})\".format(encoder.width(), encoder.width_ratio()))\n\n encoder.set_width_ratio(0.2)\n print(\"The width ratio was set to: {} (therefore the calculated width is \"\n \"{})\".format(encoder.width_ratio(), encoder.width()))\n\n # Create some data to encode. In this case we make a buffer\n # with the same size as the encoder's block size (the max.\n # amount a single encoder can encode)\n # Just for fun - fill the input data with random data\n data_in = os.urandom(encoder.block_size())\n\n # Assign the data buffer to the encoder so that we can\n # produce encoded symbols\n encoder.set_const_symbols(data_in)\n\n while not decoder.is_complete():\n # Encode a packet into the payload buffer\n payload = encoder.write_payload()\n print(\"Payload: {}\".format(binascii.hexlify(payload)))\n if random.choice([True, False]):\n print(\"Packet dropped on channel\")\n continue\n\n # Pass that packet to the decoder\n decoder.read_payload(payload)\n\n # The decoder is complete, now copy the symbols from the decoder\n data_out = decoder.copy_from_symbols()\n\n # Check we properly decoded the data\n if data_out == data_in:\n print(\"Data decoded correctly\")\n else:\n print(\"Unexpected failure to decode please file a bug report :)\")\n sys.exit(1)", "title": "" }, { "docid": "430ccd293f554d4fe3e0ac656887d732", "score": "0.4898766", "text": "def eq01():", "title": "" }, { "docid": "18dbe39d5bbb767b290f97b0eba34331", "score": "0.48945558", "text": "def __generate_plus8_eq_minus2_plus10() -> Problem:\n a = 10 * random.choice(__digits_no49) + random.choice([2, 3, 4, 7, 8, 9])\n return __extend_no_carry_borrow(a, 8)", "title": "" }, { "docid": "b334d51b06c52c97ede3a6c06e14d3b3", "score": "0.48936254", "text": "def same_quantum(self, a, b):\r\n return a.same_quantum(b)", "title": "" }, { "docid": "a32c63c844577dfc22ff3f24ad558b82", "score": "0.48922", "text": "def apply_controlled_two_qubit_gate(self, mat, q_id1, q_id2, q_id3):\n # Move the qubits to the correct position\n self.swap_qubits(q_id1, self.qubits[0])\n self.swap_qubits(q_id2, self.qubits[1])\n self.swap_qubits(q_id3, self.qubits[2])\n\n first_mat = np.block([[np.eye(4), np.zeros((4, 4))], [np.zeros((4, 4)), mat]])\n total_mat = np.kron(first_mat, np.eye(2 ** (len(self.qubits) - 3)))\n\n self.qubit = np.dot(total_mat, self.qubit)", "title": "" }, { "docid": "8aab37b94cef18d2d048bc6a65bae5d4", "score": "0.48921895", "text": "def test_all_to_all_topology(self) -> None:\n\n for nbqbit in range(min_nbqbit, max_nbqbit):\n circuit = generate_random_circuit(nbqbit)\n\n sabre = Sabre()\n batch = Batch(jobs=[circuit.to_job()])\n 
hardware_specs = HardwareSpecs()\n batch_result = sabre.compile(batch, hardware_specs)\n computed_circuit = batch_result.jobs[0].circuit\n\n check_circuits_equality(circuit, computed_circuit)", "title": "" }, { "docid": "bccb6b5cfa6ea6e4d9d2bac633828298", "score": "0.4887278", "text": "def test_init_with_qnodes(self):\n dev = qml.device(\"default.qubit\", wires=1)\n\n def circuit(x):\n qml.RX(x, wires=0)\n return qml.expval(qml.PauliZ(0))\n\n qnodes = [qml.QNode(circuit, dev) for i in range(4)]\n qc = qml.QNodeCollection(qnodes)\n\n assert qc.qnodes == qnodes\n assert len(qc) == 4", "title": "" }, { "docid": "1b9c30bf90e7dbe965016996a8f5d3b5", "score": "0.48850244", "text": "def build_circuit(routers):", "title": "" }, { "docid": "68abb31619154078720ebba820638406", "score": "0.48816547", "text": "def test_identical_costs():\n S = np.transpose(np.array([[1.0, 1], [1, 1], [1, 1], [1, 1]]))\n M = np.zeros(np.shape(S))\n Q = np.full(np.shape(S), 1.0)\n solver = RandomizedSolver(\n [0, 0, 0, 0], [1, 1, 1, 1], [2, 2], encoder(-S, M, Q)\n )\n for _ in range(1000):\n check_test_solution(solver, T=1)", "title": "" }, { "docid": "136298ebfc12522e165fade0ebbd16b9", "score": "0.48801643", "text": "def input_combos(combo_len):\n return itertools.product((False, True), repeat=combo_len)", "title": "" }, { "docid": "c551c5474c8984a337aeb282b8801724", "score": "0.48780307", "text": "def generate_sequences_identical(draw, elements=[lists, integers]):\n seq_x = draw(lists(integers(min_value=1, max_value=1), min_size=3, max_size=10_000))\n seq_y = draw(\n lists(\n integers(min_value=2, max_value=2), min_size=len(seq_x), max_size=len(seq_x)\n )\n )\n\n return seq_x, seq_y", "title": "" }, { "docid": "7f0eff5fc392a0a003d38b1d02dab236", "score": "0.4875794", "text": "def test_prepare_negative_amplitudes_two_qubits(self):\n # Input vector\n vec = [-0.4, 0.4, -0.8, 0.2]\n\n # Get a BinaryTree\n tree = BinaryTree(vec)\n\n # Get a Quantum Register\n regA = QuantumRegister(1)\n regB = QuantumRegister(1)\n circ = QuantumCircuit(regA, regB)\n\n # Get the state preparation circuit\n tree.preparation_circuit(circ, regA, regB)\n\n # Swap the qubits to compare to the natural ordering of the vector\n circ.swap(regA[0], regB[0])\n\n # Make sure the final state of the circuit is the same as the input vector\n state = np.real(self.final_state(circ))\n self.assertTrue(np.allclose(state, vec))", "title": "" }, { "docid": "78ff8e29838f27b69782aaeaa09a83ba", "score": "0.48712075", "text": "def circuit_new(circuit):", "title": "" }, { "docid": "c075df7b500ca16653c24dd186c60d5d", "score": "0.486678", "text": "def __init__(self,\n ctrl: QubitType,\n targets: Tuple[QubitType],\n params: List[float] = None,\n circuit: QuantumCircuit = None):\n\n def ccz(circuit, ctrl1, ctrl2, target):\n # Verified\n circuit.comment(\"CCZ\")\n from qiskit.extensions.standard.h import HGate\n circuit._attach(HGate(target, circuit).inverse())\n circuit.ccx(ctrl1, ctrl2, target)\n circuit._attach(HGate(target, circuit).inverse())\n\n def crzz(circuit, theta, ctrl, target):\n # Verified\n circuit.comment(\"c-RZZ\")\n circuit.cu1(theta, ctrl, target)\n circuit.cx(ctrl, target)\n circuit.cu1(theta, ctrl, target)\n circuit.cx(ctrl, target)\n\n def crzz_inv(circuit, theta, ctrl, target):\n # Verified\n circuit.comment(\"c-RZZ^{-1}\")\n circuit.cx(ctrl, target)\n circuit.cu1(-theta, ctrl, target)\n circuit.cx(ctrl, target)\n circuit.cu1(-theta, ctrl, target)\n\n def crx(circuit, theta, ctrl, target):\n # Verified\n circuit.comment(\"CRX\")\n # Apply the 
supposed c-RX operation.\n circuit.cu3(theta, pi/2, 3*pi/2, ctrl, target)\n # For the moment, QISKit adds a phase to the U-gate, so we\n # need to correct this phase with a controlled Rzz.\n crzz(circuit, pi, ctrl, target)\n\n def csqrtx_inv(circuit, ctrl, target):\n # Verified\n circuit.comment(\"c-sqrt(X)^{-1}\")\n circuit.h(target)\n circuit.t(target)\n circuit.cx(ctrl, target)\n circuit.t(ctrl)\n circuit.tdg(target)\n circuit.h(target)\n circuit.cz(ctrl, target)\n circuit.cx(ctrl, target)\n\n\n def csqrtx(circuit, ctrl, target):\n # Verified\n circuit.comment(\"c-sqrt(X)\")\n circuit.cx(ctrl, target)\n circuit.cz(ctrl, target)\n circuit.h(target)\n circuit.t(target)\n circuit.tdg(ctrl)\n circuit.cx(ctrl, target)\n circuit.tdg(target)\n circuit.h(target)\n\n if params is None:\n # Default parameters for a simple Hamiltonian (no powers)\n params = [0.19634953, 0.37900987, 0.9817477, 1.87900984, 0.58904862]\n\n used_qubits = [ctrl, targets[0], targets[1]]\n\n super().__init__(self.__class__.__name__, # name\n [], # parameters\n used_qubits, # qubits\n circuit) # circuit\n\n self.comment(\"[HS] Start.\")\n ccz(self, ctrl, targets[0], targets[1])\n crx(self, params[0], ctrl, targets[1])\n csqrtx_inv(self, ctrl, targets[1])\n crzz(self, params[1], ctrl, targets[1])\n crx(self, params[2], ctrl, targets[0])\n crzz(self, params[3], ctrl, targets[0])\n self.ccx(ctrl, targets[0], targets[1])\n crx(self, params[4], ctrl, targets[0])\n self.ccx(ctrl, targets[0], targets[1])\n ccz(self, ctrl, targets[0], targets[1])\n self.comment(\"[HS] End.\")", "title": "" }, { "docid": "621d19adf53f7dbe19aae66fc3bdb4c2", "score": "0.48654687", "text": "def apply_controlled_gate(self, mat, q_id1, q_id2):\n first_mat = 1\n second_mat = 1\n nr1 = self.qubits.index(q_id1)\n nr2 = self.qubits.index(q_id2)\n\n min_nr = min(nr1, nr2)\n max_nr = max(nr1, nr2)\n\n total_amount = len(self.qubits)\n before = min_nr\n after = total_amount - max_nr - 1\n mid = total_amount - before - after - 2\n\n if before > 0:\n first_mat = np.eye(2 ** before)\n second_mat = np.eye(2 ** before)\n\n # Apply first part of Matrix\n if min_nr == nr1:\n first_mat = np.kron(first_mat, np.eye(2))\n second_mat = np.kron(second_mat, mat)\n else:\n first_mat = np.kron(first_mat, np.array([[1, 0], [0, 0]]))\n second_mat = np.kron(second_mat, np.array([[0, 0], [0, 1]]))\n\n if mid > 0:\n first_mat = np.kron(first_mat, np.eye(2 ** mid))\n second_mat = np.kron(second_mat, np.eye(2 ** mid))\n\n # Apply second part of Matrix\n if min_nr == nr1:\n first_mat = np.kron(first_mat, np.array([[1, 0], [0, 0]]))\n second_mat = np.kron(second_mat, np.array([[0, 0], [0, 1]]))\n else:\n first_mat = np.kron(first_mat, np.eye(2))\n second_mat = np.kron(second_mat, mat)\n\n if after > 0:\n first_mat = np.kron(first_mat, np.eye(2 ** after))\n second_mat = np.kron(second_mat, np.eye(2 ** after))\n\n apply_mat = first_mat + second_mat\n self.qubit = np.dot(apply_mat, self.qubit)", "title": "" }, { "docid": "0227504e3c701e5ebf8a78b9699befe6", "score": "0.48646224", "text": "def test_providing_no_observable_and_no_wires_shot_vector(self):\n num_wires = 2\n\n shots1 = 1\n shots2 = 10\n shots3 = 1000\n dev = qml.device(\"default.qubit\", wires=num_wires, shots=[shots1, shots2, shots3])\n\n @qml.qnode(dev)\n def circuit():\n qml.Hadamard(wires=0)\n return qml.sample()\n\n res = circuit()\n\n assert isinstance(res, tuple)\n\n expected_shapes = [(num_wires,), (num_wires, shots2), (num_wires, shots3)]\n assert len(res) == len(expected_shapes)\n assert (r.shape == exp_shape for r, 
exp_shape in zip(res, expected_shapes))", "title": "" }, { "docid": "1a621244a18e07d64996d9dee1d54f87", "score": "0.4860904", "text": "def test_append_qubit_observables(self):\n with AnnotatedQueue() as q:\n # wire repetition is deliberate, Queue contains no checks/logic\n # for circuits\n ops = [\n qml.Hadamard(wires=0),\n qml.PauliX(wires=1),\n qml.PauliY(wires=1),\n qml.Hermitian(np.ones([2, 2]), wires=7),\n ]\n assert q.queue == ops", "title": "" }, { "docid": "bdf6bf8147f1294b0977baf912a1d03a", "score": "0.48548517", "text": "def cl_encode(lengths):\n dic = dict(lengths)\n items = [dic.get(i, 0) for i in range(max(dic) + 1)]\n pos = 0\n while pos < len(items):\n if items[pos] == 0:\n # count repetitions of 0\n i = pos + 1\n while i < len(items) and items[i] == 0:\n i += 1\n if i - pos < 3:\n for i in range(pos, i):\n yield items[i]\n pos = i + 1\n else:\n repeat = i - pos\n if repeat < 11:\n yield (17, repeat - 3)\n else:\n yield (18, repeat - 11)\n pos = i\n else:\n item = items[pos]\n yield item\n i = pos + 1\n while i < len(items) and items[i] == item:\n i += 1\n repeat = i - pos - 1 # number of repetitions after 1st occurrence\n if repeat < 3:\n for i in range(repeat):\n yield item\n else:\n nb = repeat - 3\n while nb > 3:\n yield (16, 3)\n nb -= 3\n yield (16, nb)\n pos += repeat + 1", "title": "" }, { "docid": "bbe3c8a15a90dd7e35d98abdd29d83a4", "score": "0.48457897", "text": "def test_two_qubit_synthesis_to_directional_cx_from_gate_errors(self):\n # TODO: should make check more explicit e.g. explicitly set gate\n # direction in test instead of using specific fake backend\n backend = FakeVigo()\n conf = backend.configuration()\n qr = QuantumRegister(2)\n coupling_map = CouplingMap(conf.coupling_map)\n triv_layout_pass = TrivialLayout(coupling_map)\n qc = QuantumCircuit(qr)\n qc.unitary(random_unitary(4, seed=12), [0, 1])\n unisynth_pass = UnitarySynthesis(\n basis_gates=conf.basis_gates,\n coupling_map=None,\n backend_props=backend.properties(),\n pulse_optimize=True,\n natural_direction=False,\n )\n pm = PassManager([triv_layout_pass, unisynth_pass])\n qc_out = pm.run(qc)\n\n unisynth_pass_nat = UnitarySynthesis(\n basis_gates=conf.basis_gates,\n coupling_map=None,\n backend_props=backend.properties(),\n pulse_optimize=True,\n natural_direction=True,\n )\n\n pm_nat = PassManager([triv_layout_pass, unisynth_pass_nat])\n qc_out_nat = pm_nat.run(qc)\n self.assertEqual(Operator(qc), Operator(qc_out))\n self.assertEqual(Operator(qc), Operator(qc_out_nat))", "title": "" }, { "docid": "e3470d7b7b341f73c8b07e1843c49491", "score": "0.48433205", "text": "def test_truncate_connected_qubits(self):\n backend = self.backend()\n circuit = QuantumCircuit(20, 1)\n circuit.h(5)\n circuit.cx(5, 6)\n circuit.cx(6, 2),\n circuit.cx(2, 3)\n circuit.measure(3, 0)\n result = backend.run(circuit, shots=1).result()\n metadata = result.results[0].metadata\n self.assertEqual(metadata[\"num_qubits\"], 4)\n self.assertEqual(metadata[\"active_input_qubits\"], [2, 3, 5, 6])", "title": "" }, { "docid": "6bcac3b0aa656749da6df98bb703e0fc", "score": "0.48364782", "text": "def __generate_plus6_eq_plus1_minus5_plus10() -> Problem:\n a = 10 * random.choice(__digits_no49) + random.choice([5, 6, 7, 8])\n return __extend_no_carry_borrow(a, 6)", "title": "" }, { "docid": "2cc7120287205b8f69dc6dde95821b44", "score": "0.48315218", "text": "def generate_cross_traffic(mode, rate, q, p, n):\r\n # Constants\r\n capacity = 10e9/8 # capacity in bytes;\r\n #max_variance = 8e4 # maximum variance in bytes^2, \r\n max_variance = 
8e4 # maximum variance in bytes^2, \r\n mean_packet_size = 500 # mean packet size in bytes\r\n \r\n # Compute u and s^2\r\n m = mean_packet_size\r\n v = q * max_variance\r\n u = math.log(m**2/math.sqrt(v + m**2))\r\n s = math.sqrt(math.log(1 + v/m**2))\r\n\r\n # initialize packet size vector\r\n packet_size = mean_packet_size * ones(n)\r\n\r\n # generate variable packet length\r\n if mode == 1 or mode == 3:\r\n # generate n lognormal values\r\n for i in range(1,n):\r\n p_size = 0\r\n while (p_size < 72 or p_size > 1526):\r\n p_size = round(random.lognormal(u, s))\r\n packet_size[i-1] = p_size\r\n \r\n # Mean packet size after discarding packets that are too small or too big\r\n realized_mean_packet_size = mean(packet_size)\r\n \r\n # Compute average interpacket gap in bytes\r\n if mode == 2: # mode 2 is the only mode where packet length is fixed\r\n mean_ipd = round(mean_packet_size * capacity / rate)\r\n mean_ipg = mean_ipd - mean_packet_size\r\n else:\r\n mean_ipd = round(realized_mean_packet_size * capacity / rate)\r\n mean_ipg = mean_ipd - realized_mean_packet_size \r\n \r\n # initialize idle vector\r\n idle = mean_ipg * ones(n) \r\n\r\n # generate variable interpacket gap\r\n if mode == 2 or mode == 3:\r\n k = 0 # keep track of number of packets generated\r\n while k <= n:\r\n if k == 0: # first idle\r\n chain_length = 1 \r\n else:\r\n chain_length = random.geometric(p)\r\n\r\n # set zero interpacket gap for\r\n for j in range(1, chain_length):\r\n if k+j-1 < n:\r\n idle[k+j-1] = 0\r\n\r\n # set sufficient interpacket gap between the chain and the next packet to achieve the desired data rate\r\n mean_ipg_to_set = mean_ipg * chain_length \r\n\r\n if k+chain_length-1 < n:\r\n idle[k+chain_length-1] = mean_ipg_to_set # we might change this to include some randomness later\r\n\r\n k = k + chain_length\r\n\r\n \r\n # Total time in bytes\r\n stream_length = sum(idle) + sum(packet_size)\r\n\r\n # Generated data rate\r\n generated_rate = capacity * sum(packet_size)/(sum(idle) + sum(packet_size))\r\n\r\n return (idle, packet_size, stream_length, generated_rate)", "title": "" }, { "docid": "fdaa75b597fc5eae5b261b60f64addcd", "score": "0.48175004", "text": "def bell(Qubit1, Qubit2):\n h=Hadamard(Q0) if Qubit1==0 else Hadamard(Q1) \n x=np.concatenate(np.outer(h, Q1 if Qubit2==1 else Q0))\n return CNOT(x)", "title": "" }, { "docid": "716675537743fc0ce2d99d3e95895875", "score": "0.48159602", "text": "def cross_rp_ind(self, i1, i2):\n half = int(random.randrange(1, self.GENS - 1))\n\n gen1 = i1['gens'][:half]\n gen1.extend(i2['gens'][half:])\n\n gen2 = i2['gens'][:half]\n gen2.extend(i1['gens'][half:])\n\n return Individual(gen1), Individual(gen2)", "title": "" }, { "docid": "52aeb83bca15e0fb25b51cad17a603b8", "score": "0.48128745", "text": "def test_prep_circuit_three_qubits(self):\n # Input vector\n vec = np.array([1, 2, 3, 4, 5, 6, 7, 8], dtype=np.float64)\n\n # Make a tree from the vector\n tree = BinaryTree(vec)\n\n # Get the state preparation circuit\n areg = QuantumRegister(2)\n breg = QuantumRegister(1)\n circ = QuantumCircuit(areg, breg)\n tree.preparation_circuit(circ, areg, breg)\n\n # Add a swaps to make the ordering of the qubits match the input vector\n # Note: This is because the last bit is the most significant in qiskit, not the first.\n circ.swap(areg[0], breg[0])\n\n # Check that the circuit produces the correct state\n state = list(np.real(self.final_state(circ)))\n self.assertTrue(np.allclose(state, vec / np.linalg.norm(vec, ord=2)))", "title": "" }, { "docid": 
"8e349786d6fa485d05d8d545923436bb", "score": "0.48094317", "text": "def test_extend_multiple_interface_qnodes(self):\n qc = qml.QNodeCollection()\n\n def circuit(x):\n qml.RX(x, wires=0)\n return qml.expval(qml.PauliZ(0))\n\n dev = qml.device(\"default.qubit\", wires=1)\n qnodes = [\n qml.QNode(circuit, dev, interface=\"autograd\"),\n qml.QNode(circuit, dev, interface=None),\n ]\n\n with pytest.raises(ValueError, match=\"do not all use the same interface\"):\n qc.extend(qnodes)", "title": "" }, { "docid": "e5b70cd6537ac800ecc721e07e7faf99", "score": "0.48064592", "text": "def __generate_plus4_eq_minus6_plus10() -> Problem:\n a = 10 * random.choice(__digits_no49) + random.choice([6, 7, 8, 9])\n return __extend_no_carry_borrow(a, 4)", "title": "" }, { "docid": "472a7e7971e16a10c9cae623e2517039", "score": "0.48055854", "text": "def test_single_switch_offset(self):\n\n for k in range(10):\n sk = str(k)\n for i in range(9):\n si = str(i)\n for j in range(i+1, 10):\n sj = str(j)\n self.assertNotEqual(encode(sk+si+sj), encode(sk+sj+si))", "title": "" }, { "docid": "b76edc641106a9e6b940fdfb02b5237f", "score": "0.48009458", "text": "def test_output(self):\n '''Every sequence has 100 bases.'''\n input_seqs = \\\n ['TTACATACCATACAGTGCGCTAGCGGGTGACAGATATAATGCAGATCCAT'\n 'ACAGACCAGATGGCAGACATGTGTTGCAGSCTGCAAGTGCAACGCGGTGA',\n 'GCAGAGTGCCGCAATGACGTGCGCCAAAGCGGTGACAGGGTGACAGTGAA'\n 'CCAAGTGACAAGTGAACAGGTGCCAGAGTGACCGAGTGACCAGTGGACCA',\n 'CAGAGTGCCGCAATGACGTGCGCCAAAGCGGACAAAGCACCATGACAAGT'\n 'ACACAGGTGACAGTGACAAGACAGAGGTGACACAGAGAAAGtGGGTGTGA',\n 'ATCGATTAAGCTATAACAGATAACATAGACATTGCGCCCATAATAGATAA'\n 'CTGACACCTGACCAGTGCCAGATGACCAGTGCCAGATGGACGACAGTAGC',\n 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'\n 'CTGACACCTGACCAGTGCCAGATGACCAGTGCCAGATGGACGACAGTAGC',\n 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'\n 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA']\n sample_indexes = [\n {'1' : 2}, {'1' : 2, '2' : 3}, {'1' : 4, '3' : 5},\n {'1' : 5}, {'1' : 5}, {'1' : 5}\n ]\n reversed_complement_sample_indexes = [\n {'1' : 7}, {'1' : 2, '2' : 2, '3' : 3},\n {'1' : 8, '2' : 1, '3' : 6}, {'1' : 1},\n {'1' : 1}, {'1' : 1}\n ]\n cap_sizes = []\n cap_size = 25\n while cap_size <= 50:\n cap_sizes.append(cap_size)\n cap_size = int(cap_size*2)\n if cap_size == cap_sizes[-1]:\n cap_size += 1\n if cap_size != 50:\n # Always have a start or end read of length max_readlet_size\n cap_sizes.append(50)\n with open(self.output_file, 'w') as output_stream:\n for i in xrange(6):\n print_readletized_output(\n input_seqs[i], sample_indexes[i],\n reversed_complement_sample_indexes[i],\n '0:' + str(i), cap_sizes,\n output_stream=output_stream,\n min_readlet_size=25, readlet_interval=5,\n max_readlet_size=50, no_polyA=True\n )\n collected_readlets = []\n with open(self.output_file) as processed_stream:\n for readlet in processed_stream:\n collected_readlets.append(readlet.rstrip().split('\\t')[1:])\n '''Each read from input_reads spans 100 bases, and from the\n arguments passed to go() above, noncapping readlets should span 50.\n The capping fraction should arrange for one readlet spanning 25\n bases on either end of a given read. There should be 13 readlets in\n total. 
Spot-check some readlets after checking for read info.'''\n read_info = [info.split('\\x1e') for _, info\n in collected_readlets]\n read_info = [info[3:4]\n + [info[-i].split('\\x1f')\n for i in xrange(4, 0, -1)]\n for info in read_info if len(info) > 3]\n sample_indexes, reversed_complement_sample_indexes \\\n = [{} for i in xrange(6)], [{} for i in xrange(6)]\n # All-A read should have been skipped completely\n self.assertEquals(len(read_info), 5)\n for i in xrange(5):\n for j in xrange(len(read_info[i][1])):\n sample_indexes[i][read_info[i][1][j]] \\\n = int(read_info[i][3][j])\n reversed_complement_sample_indexes[i][read_info[i][2][j]] \\\n = int(read_info[i][4][j])\n read_info[i] = read_info[i][:-2]\n read_info[i][1] = sample_indexes[i]\n read_info[i][2] = reversed_complement_sample_indexes[i]\n self.assertTrue([\n 'TTACATACCATACAGTGCGCTAGCGGGTGACAGATATAATGCAGATCCAT'\n 'ACAGACCAGATGGCAGACATGTGTTGCAGSCTGCAAGTGCAACGCGGTGA',\n sample_indexes[0],\n reversed_complement_sample_indexes[0]\n ] in read_info)\n self.assertTrue([\n 'GCAGAGTGCCGCAATGACGTGCGCCAAAGCGGTGACAGGGTGACAGTGAA'\n 'CCAAGTGACAAGTGAACAGGTGCCAGAGTGACCGAGTGACCAGTGGACCA',\n sample_indexes[1],\n reversed_complement_sample_indexes[1]\n ] in read_info)\n self.assertTrue([\n 'CAGAGTGCCGCAATGACGTGCGCCAAAGCGGACAAAGCACCATGACAAGT'\n 'ACACAGGTGACAGTGACAAGACAGAGGTGACACAGAGAAAGtGGGTGTGA',\n sample_indexes[2],\n reversed_complement_sample_indexes[2]\n ] in read_info)\n self.assertTrue([\n 'ATCGATTAAGCTATAACAGATAACATAGACATTGCGCCCATAATAGATAA'\n 'CTGACACCTGACCAGTGCCAGATGACCAGTGCCAGATGGACGACAGTAGC',\n sample_indexes[3],\n reversed_complement_sample_indexes[3]\n ] in read_info)\n self.assertTrue([\n 'ATCGATTAAGCTATAACAGATAACATAGACATTGCGCCCATAATAGATAA'\n 'CTGACACCTGACCAGTGCCAGATGACCAGTGCCAGATGGACGACAGTAGC',\n sample_indexes[3],\n reversed_complement_sample_indexes[3]\n ] in read_info)\n self.assertTrue([\n 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'\n 'CTGACACCTGACCAGTGCCAGATGACCAGTGCCAGATGGACGACAGTAGC',\n sample_indexes[3],\n reversed_complement_sample_indexes[3]\n ] in read_info)\n # Capping readlets\n self.assertTrue([ 'ATCGATTAAGCTATAACAGATAACA'\n 'TAGACATTGCGCCCATAATAGATAA',\n '0:3+\\x1e0\\x1e50'\n ] in collected_readlets or\n [ 'ATCGATTAAGCTATAACAGATAACA'\n 'TAGACATTGCGCCCATAATAGATAA',\n '0:3+\\x1e0\\x1e50\\x1e'\n 'ATCGATTAAGCTATAACAGATAACA'\n 'TAGACATTGCGCCCATAATAGATAA'\n 'CTGACACCTGACCAGTGCCAGATGA'\n 'CCAGTGCCAGATGGACGACAGTAGC'\n '\\x1e1\\x1e1\\x1e1\\x1e1'\n ] in collected_readlets\n )\n self.assertTrue([ 'CCAGTGCCAGATGGACGACAGTAGC',\n '0:3+\\x1e75\\x1e0'\n ] in collected_readlets or\n [ 'CCAGTGCCAGATGGACGACAGTAGC',\n '0:3+\\x1e75\\x1e0\\x1e'\n 'ATCGATTAAGCTATAACAGATAACA'\n 'TAGACATTGCGCCCATAATAGATAA'\n 'CTGACACCTGACCAGTGCCAGATGA'\n 'CCAGTGCCAGATGGACGACAGTAGC'\n '\\x1e1\\x1e1\\x1e1\\x1e1'\n ] in collected_readlets\n )\n # Noncapping readlets\n self.assertTrue([ 'GTCAG'\n 'TTATCTATTATGGGCGCAATGTCTA'\n 'TGTTATCTGTTATAGCTTAA',\n '0:3-\\x1e5\\x1e45'\n ] in collected_readlets or\n [ 'GTCAG'\n 'TTATCTATTATGGGCGCAATGTCTA'\n 'TGTTATCTGTTATAGCTTAA',\n '0:3-\\x1e5\\x1e45\\x1e'\n 'ATCGATTAAGCTATAACAGATAACA'\n 'TAGACATTGCGCCCATAATAGATAA'\n 'CTGACACCTGACCAGTGCCAGATGA'\n 'CCAGTGCCAGATGGACGACAGTAGC'\n '\\x1e1\\x1e1\\x1e1\\x1e1'\n ] in collected_readlets\n )\n self.assertTrue([ 'TAATAGATAA'\n 'CTGACACCTGACCAGTGCCAGATGA'\n 'CCAGTGCCAGATGGA',\n '0:3+\\x1e40\\x1e10'\n ] in collected_readlets or\n [ 'TAATAGATAA'\n 'CTGACACCTGACCAGTGCCAGATGA'\n 'CCAGTGCCAGATGGA',\n '0:3+\\x1e40\\x1e10\\x1e'\n 'ATCGATTAAGCTATAACAGATAACA'\n 
'TAGACATTGCGCCCATAATAGATAA'\n 'CTGACACCTGACCAGTGCCAGATGA'\n 'CCAGTGCCAGATGGACGACAGTAGC'\n '\\x1e1\\x1e1\\x1e1\\x1e1'\n ] in collected_readlets\n )\n # Ensure no polyA readlets\n self.assertEquals([readlet for readlet in collected_readlets\n if set(readlet[0]) == _polyA], [])", "title": "" }, { "docid": "281af1ea2200aae5e6bfebf99911911c", "score": "0.47987187", "text": "def test_associativity_exact():\n\n N = 5\n g = Grid_1D.periodic(N)\n\n a0 = g.rand(0)\n b0 = g.rand(0)\n c1 = g.rand(0).D\n\n eq1 = a0 ^ (b0 ^ c1)\n eq2 = (a0 ^ b0) ^ c1\n\n assert not eq1 == eq2", "title": "" }, { "docid": "de8f989edf5a1d8cafe5cbb022cad5dd", "score": "0.47964856", "text": "def test_qv_natural(self):\n qv64 = QuantumVolume(5, seed=15)\n\n def construct_passmanager(basis_gates, coupling_map, synthesis_fidelity, pulse_optimize):\n seed = 2\n _map = [SabreLayout(coupling_map, max_iterations=2, seed=seed)]\n _unroll3q = Unroll3qOrMore()\n _swap_check = CheckMap(coupling_map)\n _swap = [\n BarrierBeforeFinalMeasurements(),\n SabreSwap(coupling_map, heuristic=\"lookahead\", seed=seed),\n ]\n _optimize = [\n Collect2qBlocks(),\n ConsolidateBlocks(basis_gates=basis_gates),\n UnitarySynthesis(\n basis_gates,\n synthesis_fidelity,\n coupling_map,\n pulse_optimize=pulse_optimize,\n natural_direction=True,\n ),\n Optimize1qGates(basis_gates),\n ]\n\n pm = PassManager()\n pm.append(_map) # map to hardware by inserting swaps\n pm.append(_unroll3q)\n pm.append(_swap_check)\n pm.append(_swap)\n pm.append(_optimize)\n return pm\n\n coupling_map = CouplingMap([[0, 1], [1, 2], [3, 2], [3, 4], [5, 4]])\n basis_gates = [\"rz\", \"sx\", \"cx\"]\n\n pm1 = construct_passmanager(\n basis_gates=basis_gates,\n coupling_map=coupling_map,\n synthesis_fidelity=0.99,\n pulse_optimize=True,\n )\n pm2 = construct_passmanager(\n basis_gates=basis_gates,\n coupling_map=coupling_map,\n synthesis_fidelity=0.99,\n pulse_optimize=False,\n )\n\n qv64_1 = pm1.run(qv64.decompose())\n qv64_2 = pm2.run(qv64.decompose())\n edges = [list(edge) for edge in coupling_map.get_edges()]\n self.assertTrue(\n all(\n [qv64_1.qubits.index(qubit) for qubit in instr.qubits] in edges\n for instr in qv64_1.get_instructions(\"cx\")\n )\n )\n self.assertEqual(Operator(qv64_1), Operator(qv64_2))", "title": "" }, { "docid": "3f3fc7e6872534e7e7f3b584c2830f6c", "score": "0.47952512", "text": "def test_bc_inout():\n k = 10 # number of workers\n t = 1000\n shape = {\n \"num-phases\": 1,\n \"num-workers\": k,\n \"activity-infix\": \"Buffer\",\n \"activity-prefix\": \"Serialization\",\n \"activity-suffix\": \"Deserialization\",\n \"phase-duration\": t,\n }\n data = generate(\"bc_inout\", shape, 1.0)\n\n # the prefixes and suffixes must have the same bc\n for w in range(1, k):\n assert data[epoch(0)][('Serialization', w)] == data[epoch(0)][('Deserialization', w)]", "title": "" }, { "docid": "e0346df113493bb8957e4faac3221ab6", "score": "0.47899675", "text": "def w(self, length):\n q = Qubits(length)\n l = [Integer(0)]*q.size\n for ix in xrange(length):\n number = Integer(2)**Integer(ix)\n l[number] = (Integer(1)/length)**(half)\n q.v = Matrix(l)\n return q", "title": "" }, { "docid": "bb1a1a244003abf99a0d0582ba962bb0", "score": "0.47890088", "text": "def test_gate_more_two_qubits(self) -> None:\n with pytest.raises(PluginException):\n circuit = generate_qft_circuit(max_nbqbit, inline=False)\n qpu = Sabre() | (QuameleonPlugin(topology=Topology(type=TopologyType.LNN)) | PyLinalg())\n qpu.submit(circuit.to_job())", "title": "" }, { "docid": "3b49304b52f1048b1d788902ee0b9ad3", 
"score": "0.4785169", "text": "def add_twoQ_clifford(index, gate_seq_1, gate_seq_2):\n if (index < 0):\n raise ValueError(\n 'index is out of range. it should be smaller than 11520 and '\n 'greater or equal to 0: ', str(index))\n elif (index < 576):\n add_singleQ_based_twoQ_clifford(index, gate_seq_1, gate_seq_2)\n elif (index < 5184 + 576):\n add_CNOT_like_twoQ_clifford(index, gate_seq_1, gate_seq_2)\n elif (index < 5184 + 5184 + 576):\n add_iSWAP_like_twoQ_clifford(index, gate_seq_1, gate_seq_2)\n elif (index < 576 + 5184 + 5184 + 576):\n add_SWAP_like_twoQ_clifford(index, gate_seq_1, gate_seq_2)\n else:\n raise ValueError(\n 'index is out of range. it should be smaller than 11520 and '\n 'greater or equal to 0: ', str(index))\n\n pass", "title": "" }, { "docid": "90c82c2f77a2d77c7389b50a8da111f1", "score": "0.47811636", "text": "def generate_cuboids():\n yield (1, 1, 1)\n c = 2\n while True:\n for a in range(1, c+1):\n for b in range(a, c+1):\n yield (a, b, c)\n c += 1", "title": "" } ]
6f7832c72923e4e0dee8f9a49ea9635e
Removes a flag from a video.
[ { "docid": "f517e7999eef48df8f719e8d1e866d98", "score": "0.63815784", "text": "def allow_video(self, video_id):\r\n videos = dict([[video.video_id, video] for video in self._video_library.get_all_videos()])\r\n \r\n if video_id in videos:\r\n video = videos[video_id]\r\n if video.flagged == True:\r\n video.allow()\r\n print (f\"Successfully removed flag from video: {video.title}\")\r\n else:\r\n print(\"Cannot remove flag from video: Video is not flagged\")\r\n else:\r\n print(\"Cannot remove flag from video: Video does not exist\")", "title": "" } ]
[ { "docid": "c42b7812b127cfa046024ccae96f9efc", "score": "0.7078656", "text": "def allow_video(self, video_id):\n video_to_unflag = self._video_library.get_video(video_id)\n\n if video_to_unflag == None:\n print(\"Cannot remove flag from video: Video does not exist\")\n return\n elif video_to_unflag.video_id not in self._flagged:\n print(\"Cannot remove flag from video: Video is not flagged\")\n return\n else:\n modified_flagged = {}\n for y in self._flagged:\n if video_to_unflag.video_id == y:\n continue\n else:\n modified_flagged[y] = self._flagged[y]\n print(\"Successfully removed flag from video:\", video_to_unflag.title)\n self._flagged = modified_flagged", "title": "" }, { "docid": "7f5e20b25a9434a8194240d211bf2f61", "score": "0.70257044", "text": "def allow_video(self, video_id: str):\n video_to_unflag = self._video_library.get_video(video_id)\n if video_to_unflag is None:\n print(\"Cannot remove flag from video: Video does not exist\")\n elif video_to_unflag.flag_reason == \"\":\n print(\"Cannot remove flag from video: Video is not flagged\")\n else:\n video_to_unflag.flag_reason = \"\"\n print(f\"Successfully removed flag from video: {video_to_unflag.title}\")", "title": "" }, { "docid": "8c53200d230958ba89b3b55d903c195f", "score": "0.7007885", "text": "def allow_video(self, video_id):\n video = self._video_library.get_video(video_id)\n if video is None:\n print(\"Cannot remove flag from video: Video does not exist\")\n return\n\n if not video.flagged[0]:\n print(\"Cannot remove flag from video: Video is not flagged\")\n return\n\n video.flagged = [False, \"\"]\n print(\"Successfully removed flag from video: {0}\".format(video.title))", "title": "" }, { "docid": "3b30678967af06d44161f66fae49977e", "score": "0.68557155", "text": "def remove_flag(self, flag):\n if self.get_flags():\n self.set_flags(''.join(set(self.get_flags()) - set(flag)))", "title": "" }, { "docid": "7d95c7425948ff80ec686ca3a9045ea8", "score": "0.6775681", "text": "def remove_flag(self, flag):\n if 'Status' in self or 'X-Status' in self:\n self.set_flags(''.join(set(self.get_flags()) - set(flag)))", "title": "" }, { "docid": "88e3dfa712edd295cb4c1b1cfc348f90", "score": "0.63627285", "text": "def delete_video(self):\n self.multipler_request('video/%s/delete' % self.file_id)", "title": "" }, { "docid": "a1afceb52542302b5da36d6867e3d8dd", "score": "0.6319069", "text": "def flag_video(self, video_id, flag_reason=\"\"):\n print(\"flag_video needs implementation\")", "title": "" }, { "docid": "a1afceb52542302b5da36d6867e3d8dd", "score": "0.6319069", "text": "def flag_video(self, video_id, flag_reason=\"\"):\n print(\"flag_video needs implementation\")", "title": "" }, { "docid": "a1afceb52542302b5da36d6867e3d8dd", "score": "0.6319069", "text": "def flag_video(self, video_id, flag_reason=\"\"):\n print(\"flag_video needs implementation\")", "title": "" }, { "docid": "a1afceb52542302b5da36d6867e3d8dd", "score": "0.6319069", "text": "def flag_video(self, video_id, flag_reason=\"\"):\n print(\"flag_video needs implementation\")", "title": "" }, { "docid": "a1afceb52542302b5da36d6867e3d8dd", "score": "0.6319069", "text": "def flag_video(self, video_id, flag_reason=\"\"):\n print(\"flag_video needs implementation\")", "title": "" }, { "docid": "7a35289ed27f1dc0c354979f70205800", "score": "0.62957114", "text": "def flag_video(self, video_id, flag_reason=\"\"):\r\n print(\"flag_video needs implementation\")", "title": "" }, { "docid": "332e73ed770503daba8c2c4407673de6", "score": "0.61014146", "text": "def flag_video(self, 
video_id, flag_reason=\"\"):\n video = self._video_library.get_video(video_id)\n if video is None:\n print(\"Cannot flag video: Video does not exist\")\n return\n\n if video.flagged[0]:\n print(\"Cannot flag video: Video is already flagged\")\n return\n\n flag_reason = \"Not supplied\" if flag_reason == \"\" else flag_reason\n video.flagged = [True, flag_reason]\n if self._play_vid_tag == video_id or self._paused_vid_tag == video_id:\n # Manually make the paused video played, so that we\n # can stop it.\n self._play_vid_tag = video_id\n self.stop_video()\n print(\"Successfully flagged video: {0} (reason: {1})\".format(\n video.title, flag_reason))", "title": "" }, { "docid": "31cdd1dc8b86a9ad34dec31fcf6b20ae", "score": "0.5965828", "text": "def remove():", "title": "" }, { "docid": "4274121ee096f0a2fe3744d35b194e86", "score": "0.5933706", "text": "def flag_video(self, video_id: str, flag_reason=\"\"):\n video_to_flag = self._video_library.get_video(video_id)\n if video_to_flag is None:\n print(\"Cannot flag video: Video does not exist\")\n elif video_to_flag.flag_reason != \"\":\n print(\"Cannot flag video: Video is already flagged\")\n else:\n if self._currently_playing_video is not None and video_id == self._currently_playing_video.video_id:\n self.stop_video()\n video_to_flag.flag_reason = flag_reason if flag_reason != \"\" else \"Not supplied\"\n print(f\"Successfully flagged video: {video_to_flag.title} (reason: {video_to_flag.flag_reason})\")", "title": "" }, { "docid": "835a2cc7b5d697c843c7d38cf6ea6bb9", "score": "0.5906251", "text": "def stop_video(self):\r\n if len(self.play) > 0:\r\n a = self.play.pop()\r\n print(\"Stopping video: \"+a)\r\n self.co = 0\r\n else:\r\n print(\"Cannot stop video: No video is currently playing\")\r\n # print(\"stop_video needs implementation\")\r", "title": "" }, { "docid": "67d0a1b4e03505c8e5a88bb5f579bb37", "score": "0.58743685", "text": "def flag_video(self, video_id, flag_reason=\"\"):\n video_to_flag = self._video_library.get_video(video_id)\n if video_to_flag == None:\n print(\"Cannot flag video: Video does not exist\")\n elif video_id in self._flagged.keys():\n print(\"Cannot flag video: Video is already flagged\")\n else:\n if flag_reason == \"\":\n flag_reason = \"Not supplied\"\n if self._currently_playing == video_to_flag.video_id or self._paused == video_to_flag.video_id:\n self.stop_video()\n print(\"Successfully flagged video:\", video_to_flag.title, \"(reason:\", flag_reason + \")\")\n self._flagged[video_id] = flag_reason", "title": "" }, { "docid": "a36c16e302c12c6e034103b502b04a6f", "score": "0.5871676", "text": "def delete_flag(self):\n self.__status = Flag.NotFlagged\n self.__start = None\n self.__due_date = None\n self.__completed = None\n self._track_changes()", "title": "" }, { "docid": "701229fc7cda664c06e7d184a645c8cf", "score": "0.5860052", "text": "def do_delete(input_m3u, index):\n delete_video(input_m3u, index)", "title": "" }, { "docid": "70b23b3a7d78ec0d09c41e9e8cec50e0", "score": "0.5826584", "text": "def flag_video(self, video_id, flag_reason=\"Not supplied\"):\r\n videos = dict([[video.video_id, video] for video in self._video_library.get_all_videos()])\r\n \r\n if video_id in videos:\r\n video = videos[video_id]\r\n if video.flagged == False:\r\n if video == self._current_video:\r\n self.stop_video()\r\n video.flag(flag_reason)\r\n print (f\"Successfully flagged video: {video.title} (reason: {video.flagged_reason})\")\r\n else:\r\n print(\"Cannot flag video: Video is already flagged\")\r\n else:\r\n print(\"Cannot 
flag video: Video does not exist\")", "title": "" }, { "docid": "9ffdde65d040ff32988a9de9aacea7a6", "score": "0.5796712", "text": "def unset_flag(FLAG):\r\n global F\r\n F = F & ~FLAG", "title": "" }, { "docid": "ecd1df11dec06a6cfb2eba03a1c8999b", "score": "0.57002217", "text": "def remove(value):", "title": "" }, { "docid": "fd41fb2baf073adf6e37aee29907caf2", "score": "0.56939673", "text": "def _clear_flag(cpu: 'Cpu', flag: StatusFlag) -> None:\n if flag == StatusFlag.carry:\n cpu.status.carry = False\n elif flag == StatusFlag.decimal:\n cpu.status.decimal = False\n elif flag == StatusFlag.interrupt_disable:\n cpu.status.interrupt_disable = False\n elif flag == StatusFlag.overflow:\n cpu.status.overflow = False\n else:\n raise NotImplementedError(f'{flag} mode is not supported')", "title": "" }, { "docid": "47f5c93d3bccb51602d7a3b87cdba8df", "score": "0.5656351", "text": "def stop_video(self):\n if len(self.playing) == 0:\n print(\"Cannot stop video: No video is currently playing\")\n elif len(self.playing) > 0:\n videoToStop = self.playing.pop()\n print(f\"Stopping video: {videoToStop.title}\")", "title": "" }, { "docid": "a4f5e0cd533bcef0b91b3e1072dfea90", "score": "0.5641556", "text": "def video_wall_off(self):\n self.send_command(0x84, 0x0)", "title": "" }, { "docid": "e373c51b92a1f5fe26f622c13925224f", "score": "0.5591142", "text": "def remove_bit(self, index: int) -> None:\r\n\t\tself.bitfield &= ~self.to_bitfield(index)", "title": "" }, { "docid": "e123b773adf7cf210da4ae884e83ee47", "score": "0.55888903", "text": "def RemoveOneFlag(flag_name, flag_values=FLAGS):\n if flag_name in flag_values.FlagDict():\n flag_values.__delattr__(flag_name)", "title": "" }, { "docid": "2ef8fd6dd4698fedbed4a7b62428f7d7", "score": "0.55867434", "text": "def stop_video(self):\r\n\r\n if self._current_video == None or self._video_stat == 'STP' or self._current_video.flagged == True:\r\n print(\"Cannot stop video: No video is currently playing\")\r\n\r\n else:\r\n self._stopped_video = self._current_video\r\n self._video_stat = 'STP'\r\n print(f\"Stopping video: {self._current_video.title}\")", "title": "" }, { "docid": "ea332c9773860ccaebd3e627204a634b", "score": "0.555717", "text": "def remove_flags(self, messages, flags):\n return self._store('-FLAGS', messages, flags)", "title": "" }, { "docid": "39e54f0d445d7abb41bcbcd300f240b1", "score": "0.5547637", "text": "def remove(self, building):\n entry = self.entries[building]\n entry[-1] = SkylineTracker.REMOVED\n pass", "title": "" }, { "docid": "68e22021f8f53be44017f62ac41e610d", "score": "0.5529644", "text": "def unflag(self, msg_id):\n return objects_module.messages.unflag(self.khoros_object, msg_id)", "title": "" }, { "docid": "b1fbc6afcfc69abbfd167cba10b61f22", "score": "0.55286944", "text": "def delete_flag_file(self, context, flag_file):\n try:\n os.remove(flag_file)\n except OSError:\n LOG.error(\"Failed to delete %s flag.\"\n % flag_file)\n pass", "title": "" }, { "docid": "8f408f789e22ccc03bd59068f1e2d4fa", "score": "0.5513272", "text": "def delete_position(seq, frame):\n seq.keyframe_delete(data_path='[\"launch\"]', frame=frame)", "title": "" }, { "docid": "d82587f265bc72e6fc12a6bd8078dfbd", "score": "0.5442011", "text": "def clear_flags(self):\n for flag in self.flags:\n self.flags[flag] = 0", "title": "" }, { "docid": "d88f1cd16887b1df2c10ee147a5e3a54", "score": "0.5391082", "text": "def remove_movie (self, title, year):\n title=re.sub(r'[?|$|!|:|#]',r'',title)\n movie_meta = '%s (%d)' % (title, year)\n folder = 
re.sub(r'[?|$|!|:|#]',r'',self.db[self.movies_label][movie_meta]['alt_title'])\n del self.db[self.movies_label][movie_meta]\n self._update_local_db(filename=self.db_filepath, db=self.db)\n dirname = os.path.join(self.movie_path, folder)\n filename = os.path.join(self.movie_path, folder, movie_meta + '.strm')\n if xbmcvfs.exists(dirname):\n xbmcvfs.delete(filename)\n xbmcvfs.rmdir(dirname)\n return True\n return False", "title": "" }, { "docid": "9349933f10e6bfcb34e9660c6bd31337", "score": "0.5389633", "text": "def stop_video(self):\n\n if self._play_vid_tag is None:\n print(\"Cannot stop video: No video is currently playing\")\n return\n\n self._paused_vid_tag = None\n print(\"Stopping video: {0}\".format(\n self._video_library.get_video(self._play_vid_tag).title))\n self._play_vid_tag = None", "title": "" }, { "docid": "806d081aa8271322ed7c72f358972dc0", "score": "0.5350334", "text": "def clean_flags(container, flags):\n for flag in container:\n if flag in flags:\n flags.remove(flag)", "title": "" }, { "docid": "b2b9ce8387ecaf46e9fe1a304f0528e9", "score": "0.53394014", "text": "def remove_from_playlist(self, playlist_name, video_id):\r\n print(\"remove_from_playlist needs implementation\")", "title": "" }, { "docid": "1fa1495e795a858eca92c65192681d4d", "score": "0.5337086", "text": "def deleteFlag(self, flagId):\n cursor = self.__getCursor()\n cursor.execute(\"\"\"\n DELETE FROM `karma_comments` SET `Text` = %s WHERE `Id` = %s\n \"\"\", (flagId, ))\n cursor.close()\n self.__connection.commit()", "title": "" }, { "docid": "6101a7dd3506e3cbda2389d12618b50d", "score": "0.5325414", "text": "def clear_flag(flag, width, flag_bits, xmin, xmax, ymin, ymax, logpath=None):\n process(['/cluster/GAMMA_SOFTWARE-20161207/ISP/bin/clear_flag', flag, width, flag_bits, xmin, xmax, ymin, ymax], logpath=logpath)", "title": "" }, { "docid": "fe4c1cfc29f813d2b3a731fbe73dab78", "score": "0.531339", "text": "def video_stop(self):\n self.widgetCanvas.config(width=0, height=0)\n self.list_player.pause()\n self.video_status = 'stopped'", "title": "" }, { "docid": "082fdba9fbbfd408c5da1219a4a49906", "score": "0.5301001", "text": "def stop_video(self):\n if self.playing==\"\" and self.pause==\"\":\n print(\"Cannot stop video: No video is currently playing\")\n else:\n if self.pause!=\"\":\n stop=self._video_library._videos[self.pause]._title\n print(\"Stopping video: {}\".format(stop))\n self.playing=\"\"\n self.pause=\"\"\n else:\n stop=self._video_library._videos[self.playing]._title\n print(\"Stopping video: {}\".format(stop))\n self.playing=\"\"\n self.pause=\"\"\n\n # print(\"stop_video needs implementation\")", "title": "" }, { "docid": "67af941989ba61b541b64af70d7e2d77", "score": "0.5289763", "text": "def editflag(x,y,flags):\n count = flags.count(x)\n if y == 0 and count > 0:\n flags.remove(x)\n elif y == 0 and count == 0:\n print('The value', x ,'does not exist in flags.')\n elif y == 1 and count == 0:\n flags = flags.append(x)\n elif y == 1 and count > 0:\n print('The value', x ,'already exists in flags')\n else:\n print('editflag command was used incorrectly.')\n return(flags)", "title": "" }, { "docid": "31a6622870def1a48d84ee5452868796", "score": "0.52867866", "text": "def remove(self, vPath):\r\n os.remove(self.ospath(vPath) )\r\n #notify(AssetRemoved(self, vpath))\r", "title": "" }, { "docid": "4c71b981c9bd675f244b408ddf227a37", "score": "0.5284856", "text": "def remove(path: AnyStr, verbose: Optional[int] = 0) -> None:", "title": "" }, { "docid": "131a4f36d74250dca9285b222e12e374", "score": 
"0.5278465", "text": "def removeTexture():\n\ttry:\n\t\tdel logic.skyTexture\n\t\tdel logic.skyTextureAnimated\n\texcept:\n\t\tpass", "title": "" }, { "docid": "7235d2893473e0c320be1990c62b1ebc", "score": "0.5274217", "text": "def remove_vector(self, vec_id):\n pass", "title": "" }, { "docid": "850cc2723fd71c567a453a74d973c5c6", "score": "0.52727956", "text": "def api_remove_favorite(self):\n raise NotImplementedError", "title": "" }, { "docid": "850cc2723fd71c567a453a74d973c5c6", "score": "0.52727956", "text": "def api_remove_favorite(self):\n raise NotImplementedError", "title": "" }, { "docid": "a300dbfffb8c560c3b51d88f7e3e7b87", "score": "0.52612627", "text": "def delete_files(self):\n\n small_video_path = self.small_video()\n try:\n os.unlink(self.videofile.path)\n if small_video_path:\n os.unlink(small_video_path)\n except:\n pass", "title": "" }, { "docid": "daca0bb76ca2a3577020a0e661dbb172", "score": "0.5259757", "text": "def stop_video(self):\n if self._currently_playing_video is None:\n print(\"Cannot stop video: No video is currently playing\")\n return \n print(f\"Stopping video: {self._currently_playing_video.title}\")\n self._currently_playing_video = None\n self._current_video_status = None", "title": "" }, { "docid": "04a77ddc9b40f3661d8c83de954bc7d8", "score": "0.5249311", "text": "def remove(self, obj):", "title": "" }, { "docid": "514ede108b6fbf8783c3964f899a4c31", "score": "0.5245793", "text": "def remove_video_before_db(examples: List[Dict]) -> List[Dict]:\n for eg in examples:\n if \"video\" in eg:\n del eg[\"video\"]\n if \"video\" in eg:\n del eg[\"options\"]\n\n return examples", "title": "" }, { "docid": "638994eed7c2dbcc2b77bda9421a561c", "score": "0.5237131", "text": "def remove(self, oid):", "title": "" }, { "docid": "638994eed7c2dbcc2b77bda9421a561c", "score": "0.5237131", "text": "def remove(self, oid):", "title": "" }, { "docid": "7bcc742fecd460a2c3dbd6faad0d9ca3", "score": "0.52347565", "text": "def remove_mode(self, token):\n self._switcher.remove_mode(token)", "title": "" }, { "docid": "2e60792e5ed8e1c588ad9500d1d76d93", "score": "0.5188642", "text": "def remove_from_playlist(self, playlist_name, video_id):\n print(\"remove_from_playlist needs implementation\")", "title": "" }, { "docid": "2e60792e5ed8e1c588ad9500d1d76d93", "score": "0.5188642", "text": "def remove_from_playlist(self, playlist_name, video_id):\n print(\"remove_from_playlist needs implementation\")", "title": "" }, { "docid": "a71a36d0051f4f211d0dd41dcc421c29", "score": "0.5184038", "text": "def _remove(self, pattern):", "title": "" }, { "docid": "2baa27afd4e8e2495a64d5886f88ab72", "score": "0.51829296", "text": "def remove(name, send_events=True, moving=None, loading=False):", "title": "" }, { "docid": "69b6db305b448f9b4f0a9b5c5d9d7761", "score": "0.5178258", "text": "def remove_clip(self, clip, add_pending=False):\n\n self.callbacks.on_clip_stop(clip)\n if clip.name not in self.players:\n # clip was already removed/stopped ?\n return\n player = self.players[clip.name]\n player.stop()\n del self.players[clip.name]\n self.clips = [ c for c in self.clips if c.name != clip.name ]\n\n if not add_pending and len(self.clips) == 0 and self.stop_if_empty:\n self.stop()\n raise AllClipsDone()", "title": "" }, { "docid": "90d2c89484c4acfc49340a9a76226fc1", "score": "0.51759607", "text": "def _remove(self, _):\n\t\tself._delete_torrent()\n\t\tself.parent.remove_widget(self)\n\t\tos.remove(self.uri)\n\t\tos.remove(self.ids.img.source)", "title": "" }, { "docid": "4efcdf5e7ceeb4cd9ed2734b56e4ffe6", 
"score": "0.5175291", "text": "def remove_member(self, participant, recorder):\n self.set_take_for(participant, ZERO, recorder)", "title": "" }, { "docid": "1f74d5d41ef9a0dd1043f3eacf640daf", "score": "0.5173915", "text": "def _delWindowStyleFlag(self, flag, advanced=False):\n\t\tmechanics = self._getWindowStyleMechanics(advanced)\n\t\tif self._constructed():\n\t\t\tmechanics[2](mechanics[1]() & (~flag))\n\t\telse:\n\t\t\tself._preInitProperties[mechanics[0]] = self._preInitProperties[mechanics[0]] & (~flag)", "title": "" }, { "docid": "30e7c536f1d69bd3098eca4e95fcce60", "score": "0.51729625", "text": "async def remove(self, ctx, *, word: lower):\n await self.bot.db.update_guild_config(ctx.guild.id, {'$pull': {'detections.filters': word}})\n await ctx.send(self.bot.accept)", "title": "" }, { "docid": "aea192d8826cacb3a7e2bddf566ae4d7", "score": "0.5162678", "text": "def remove(self, e):\n # write code here\n if e in self.vals:\n self.vals[e] -= 1", "title": "" }, { "docid": "3f4873d476789e36a82cca464428dd3f", "score": "0.5160594", "text": "def remove(self, path):", "title": "" }, { "docid": "8c7f134d8629a3f4956ee324e434360c", "score": "0.51525944", "text": "def remove(self, e):\n try:\n self.vals[e] -= 1\n except:\n self.vals[e] = 0", "title": "" }, { "docid": "0455af9c4fed4ecb628992d19b6686cb", "score": "0.5140673", "text": "def remove(self, e):\r\n # write code here\r\n try:\r\n self.vals[e] -= 1\r\n except:\r\n pass", "title": "" }, { "docid": "88862f8dc3b9a188916a072b1b190498", "score": "0.5140368", "text": "def remove(self, e):\r\n # write code here\r\n try:\r\n self.vals[e]-=1 \r\n except:\r\n self.vals[e]=0", "title": "" }, { "docid": "02c82f2864189e51627989d9714fbd9f", "score": "0.51352584", "text": "def delete_event(widget=None, *data):\n global SILENT\n camera.release();\n fps_counter.quit();\n if not SILENT:\n sound.play(\"close\")\n time.sleep(3)\n sound.quit()\n return False", "title": "" }, { "docid": "351cc22455b3c26d1257873739dab34b", "score": "0.51309896", "text": "def stop_video(self):\n if (self.currently_playing != \"none\") :\n print(f\"Stopping video: {self.currently_playing}\")\n self.currently_playing = \"none\"\n self.playing = False\n return\n print(\"Cannot stop video: No video is currently playing\")", "title": "" }, { "docid": "bbb43f83074b2502231ab99ff3c8e315", "score": "0.5114603", "text": "def remove(self):\n ...", "title": "" }, { "docid": "0a51b21ba72c75f1c9eef98baf23087f", "score": "0.5103221", "text": "def clearFlags(self, key: str, flags: int) -> None:\n path = self._path + key\n self._api.setEntryFlags(path, self._api.getEntryFlags(path) & ~flags)", "title": "" }, { "docid": "c7a3e14f97a580e6d7ca3ea24d9a52e1", "score": "0.51011646", "text": "def remove_one(self):\n ...", "title": "" }, { "docid": "8eaf63dd2a2ed907fed0b06412256081", "score": "0.5098991", "text": "def removeFrame(self, frame):\n if self._frames.__contains__(frame):\n self._frames.remove(frame)\n self._updateGrid()\n self.update()", "title": "" }, { "docid": "1e9e35f773d49e14645c98a185e6ea9b", "score": "0.50926566", "text": "def stop_video(self):\n if self._currently_playing != None:\n video_playing = self._video_library.get_video(self._currently_playing)\n print(\"Stopping video:\", video_playing.title)\n self._currently_playing = None\n elif self._paused != None:\n video_paused = self._video_library.get_video(self._paused)\n print(\"Stopping video:\", video_paused.title)\n self._paused = None\n else:\n print(\"Cannot stop video: No video is currently playing\")", "title": "" }, { 
"docid": "bcfdbdae0d83c3a9eddbb0c0667ecbf5", "score": "0.50840265", "text": "def rem(self, coisa):\r\n self.gui.rem(coisa)", "title": "" }, { "docid": "221e1c6016726fbfdf7fdf98460c44b7", "score": "0.50806963", "text": "def stop_video(self, address: int = None) -> Response:\r\n address = self._get_address(address)\r\n data = {\"arg\": None}\r\n cmd_url = (\r\n \"http://\"\r\n + self.info[\"url\"]\r\n + \":8181/api/luminaire/\"\r\n + address\r\n + \"/command/PLAY_VIDEO_FILE\"\r\n )\r\n response = requests.post(\r\n cmd_url, json=data, cookies=self.info[\"cookiejar\"], verify=False\r\n )\r\n response = self._check_response_for_error(response)\r\n print(f\"Stopped video file from playing at address: {address}\")\r\n return response", "title": "" }, { "docid": "7a68fd46b69e788df13bc28e87e20821", "score": "0.50803006", "text": "def unmark(self, pos):\n byte = ~(1 << self._bitpos(pos))\n byte = cast(byte, types.int8)\n self.buf[self._bytepos(pos)] &= byte", "title": "" }, { "docid": "f8b69114c4ad8e152fb6c1b50c94bb09", "score": "0.5074984", "text": "def remove_state(self, state):\n state = state.format(relation_name=self.relation_name)\n value = _get_flag_value(state)\n if not value:\n return\n if self.key in value['conversations']:\n value['conversations'].remove(self.key)\n if value['conversations']:\n set_flag(state, value)\n else:\n clear_flag(state)", "title": "" }, { "docid": "41b3cde542bc36b65913d954321ebe22", "score": "0.5070644", "text": "async def _remove(self, version: int) -> None:\n py_typecheck.check_type(version, (int, np.integer))\n\n path = self._get_path_for_version(version)\n if await file_utils.exists(path):\n await file_utils.rmtree(path)\n logging.info('Program state removed: %s', path)", "title": "" }, { "docid": "44a170cb7cb08024b448005abbe9bac9", "score": "0.5070521", "text": "def remove(self, count = -1):\n if self.is_readonly:\n BufferBase._logger.debug('buffer is readonly')\n return\n pass", "title": "" }, { "docid": "29d2ce7c49b9ede17bdbc97a4386507b", "score": "0.5058651", "text": "def remove(self):\n glDeleteBuffers(1, [self.vbo_array])\n\n if self.vdata_indices is not None:\n glDeleteBuffers(1, [self.vbo_element_array])\n\n glDeleteVertexArrays(1, [self.vao])\n self.dirty = True\n print(\"Item {}: removing myself.\".format(self.label))", "title": "" }, { "docid": "93240a1923ff99ed298841d46eb9ccb0", "score": "0.50540644", "text": "def remove_option(self, parameter):\n\n if parameter in self.options.keys():\n # Find index of parameter and remove\n index = self.options[parameter].index\n self.options.pop(parameter)\n self.lines.pop(index)\n\n # Adjust indices of following in list\n for option in self.lines[index:]:\n option.index -= 1", "title": "" }, { "docid": "5c5a2f2d4143cf7437774a621e23f47e", "score": "0.50523704", "text": "def release(self):\n if self._video is not None:\n del self._video", "title": "" }, { "docid": "4eb0f22e3ef9b3f59df14a2bcd44a418", "score": "0.503461", "text": "def remove_face(face_id):\n vrs_client = create_vrs_client()\n collection_id = get_collection_id(vrs_client)\n vrs_client.remove_face(collection_id, face_id)\n save_dir = './results/{0}/{1}.jpg'.format(collection_id, face_id)\n if os.path.exists(save_dir):\n os.remove(save_dir)", "title": "" }, { "docid": "56376de9f8df1b9ed2150d6e72a55314", "score": "0.50332105", "text": "def remove_overlay(overlay_id):\n if overlay_id != -1:\n camera.remove_overlay(overlay_id)", "title": "" }, { "docid": "ab9c55301a732ae67ba43340668a71a2", "score": "0.5031563", "text": "def remove(path):\n pass", 
"title": "" }, { "docid": "d99f248a1b81248f1823cbd30895b48f", "score": "0.5026668", "text": "def delete_frame(self, index):\n frame = self.frames[index]\n del self.frames[index]\n frame.is_attached = False\n self.renumber_from_index(index)", "title": "" }, { "docid": "b959e758c14d0c0f50100bd922d3f85b", "score": "0.5026593", "text": "def remove(self, path):\n pass", "title": "" }, { "docid": "5af4c1685255d4699e35997134b224c7", "score": "0.50100666", "text": "def remove(self, x, y):\n\n if self.map[y][x] == 'I':\n self.map[y][x] = self.starting_map[y][x]\n self.industrial -= 1\n\n elif self.map[y][x] == 'C':\n self.map[y][x] = self.starting_map[y][x]\n self.commercial -= 1\n\n elif self.map[y][x] == 'R':\n self.map[y][x] = self.starting_map[y][x]\n self.residential -= 1\n\n else:\n print(\"You can't remove %s\" % self.map[y][x])", "title": "" }, { "docid": "f059b62537333ebed461aa71e0fca8d8", "score": "0.500491", "text": "def delExifTag(self, tag, record = None):\n self.__getExif__().removeTag(tag, record)", "title": "" }, { "docid": "9be240cd908f276e996c1869c7acb484", "score": "0.5000761", "text": "def remove_activity(self, seq):\n return self.plan.remove_activity(seq)", "title": "" }, { "docid": "5ee9b03d224d33aeb1b8f0f2bd0f9102", "score": "0.49978122", "text": "def remove(self):\n del self.tracker\n self.init_bounding_box = None\n # del self", "title": "" }, { "docid": "7221093869e2f1eaa0dfe498769e2582", "score": "0.49967402", "text": "def apply_transform(\n self,\n video_path: str,\n output_path: str,\n metadata: Optional[List[Dict[str, Any]]] = None,\n ) -> str:\n return F.remove_audio(video_path, output_path, metadata=metadata)", "title": "" }, { "docid": "f41f75db85e768d43dc142931e66a560", "score": "0.49944264", "text": "def deleteAvatar(self, av):\n if av in self.paintedAvatars:\n del self.paintedAvatars[av]\n self.scoreDirty = True", "title": "" }, { "docid": "e7450c288fe46fc9d5334861719d6647", "score": "0.4993601", "text": "def remove(self, key):\n try:\n self.slots[key] = -1\n except IndexError:\n pass", "title": "" }, { "docid": "5de8153ee7e3cf1e04372944d889bcf0", "score": "0.49884316", "text": "def remove(self, value):\n self.prune()\n index = self.items.index(value)\n del(self.items[index])\n del(self.times[index])", "title": "" }, { "docid": "ccd1ac2f1e2f08fb7534168d599a43a1", "score": "0.49862194", "text": "def removeProjectBrowserFlags(key, prefClass=Prefs):\n prefClass.settings.remove(\"Project/BrowserFlags/\" + key)", "title": "" } ]
41be8483af4a03a53aafa33f33ad536c
pc_work_time(pll_freqdet_cf_sptr self) -> float
[ { "docid": "64d794bfd74730c9df565151b581a2c3", "score": "0.7803105", "text": "def pc_work_time(self):\n return _analog_swig.pll_freqdet_cf_sptr_pc_work_time(self)", "title": "" } ]
[ { "docid": "d7ad19f22cde099f35faba2f23a68439", "score": "0.7412064", "text": "def pc_work_time(self):\n return _analog_swig.frequency_modulator_fc_sptr_pc_work_time(self)", "title": "" }, { "docid": "29328812c152e88a0a6f211c963781ad", "score": "0.74057776", "text": "def pc_work_time_var(self):\n return _analog_swig.pll_freqdet_cf_sptr_pc_work_time_var(self)", "title": "" }, { "docid": "f9b37921b58c8140454b426c5e7777f7", "score": "0.7399984", "text": "def pc_work_time(self):\n return _analog_swig.fmdet_cf_sptr_pc_work_time(self)", "title": "" }, { "docid": "9e76fcdb593903055363eba18a15e0ee", "score": "0.73106974", "text": "def pc_work_time(self):\n return _analog_swig.ctcss_squelch_ff_sptr_pc_work_time(self)", "title": "" }, { "docid": "7c829268ba67e84f20b970a5af9d9c5d", "score": "0.729066", "text": "def pc_work_time(self):\n return _analog_swig.pwr_squelch_ff_sptr_pc_work_time(self)", "title": "" }, { "docid": "29dc53ff637fe09e76e35f681cd9d3c1", "score": "0.7243308", "text": "def pc_work_time(self):\n return _analog_swig.rail_ff_sptr_pc_work_time(self)", "title": "" }, { "docid": "497714b6e48d179ef68aa7bb53bee1cb", "score": "0.7242102", "text": "def pc_work_time_total(self):\n return _analog_swig.pll_freqdet_cf_sptr_pc_work_time_total(self)", "title": "" }, { "docid": "f6d5467685b180c73f227e361c257afd", "score": "0.7228759", "text": "def pc_work_time(self):\n return _analog_swig.agc2_ff_sptr_pc_work_time(self)", "title": "" }, { "docid": "c3b519a5f139ea60d2be39ffada16965", "score": "0.72140354", "text": "def pc_work_time(self):\n return _analog_swig.agc_ff_sptr_pc_work_time(self)", "title": "" }, { "docid": "b529ab921fcc7c95789ad6c7b7f32e48", "score": "0.7192511", "text": "def pc_work_time(self) -> \"float\":\n return _analog_swig.vectornoise_source_sptr_pc_work_time(self)", "title": "" }, { "docid": "c1beab8206bbe4563cb1826cea91773f", "score": "0.71527445", "text": "def pc_work_time(self):\n return _analog_swig.quadrature_demod_cf_sptr_pc_work_time(self)", "title": "" }, { "docid": "26c87b4c6ee3ba70a11fee41d13a4388", "score": "0.712065", "text": "def pc_work_time(self):\n return _analog_swig.phase_modulator_fc_sptr_pc_work_time(self)", "title": "" }, { "docid": "47a4234288e4e87949a43332794677d1", "score": "0.7102779", "text": "def pc_work_time_avg(self):\n return _analog_swig.pll_freqdet_cf_sptr_pc_work_time_avg(self)", "title": "" }, { "docid": "7242364749410f4ec10d64bb62c71064", "score": "0.7065058", "text": "def pc_work_time(self):\n return _analog_swig.probe_avg_mag_sqrd_f_sptr_pc_work_time(self)", "title": "" }, { "docid": "6d15cab3609d7469e90807b32cdabda9", "score": "0.70258147", "text": "def pc_work_time(self):\n return _analog_swig.probe_avg_mag_sqrd_cf_sptr_pc_work_time(self)", "title": "" }, { "docid": "56b1e9152db8fe2f8e6de07cddc0c12c", "score": "0.69960356", "text": "def pc_work_time(self):\n return _analog_swig.fastnoise_source_f_sptr_pc_work_time(self)", "title": "" }, { "docid": "59bad19d30eb3cbdf761887561e09a81", "score": "0.69819754", "text": "def pc_work_time_var(self):\n return _analog_swig.frequency_modulator_fc_sptr_pc_work_time_var(self)", "title": "" }, { "docid": "82f60ab09734a40f385806c54c27b7df", "score": "0.6972227", "text": "def pc_work_time_var(self):\n return _analog_swig.fmdet_cf_sptr_pc_work_time_var(self)", "title": "" }, { "docid": "9d15d8367cef12b0b917d6c01e7425ef", "score": "0.6971883", "text": "def pc_work_time_total(self):\n return _analog_swig.frequency_modulator_fc_sptr_pc_work_time_total(self)", "title": "" }, { "docid": 
"775de39e9e33f8205f667074dbe067cc", "score": "0.69609547", "text": "def pc_work_time(self):\n return _analog_swig.pll_carriertracking_cc_sptr_pc_work_time(self)", "title": "" }, { "docid": "27d0427a4fa2c96a8044294e0800ce81", "score": "0.6958828", "text": "def pc_work_time_var(self) -> \"float\":\n return _analog_swig.vectornoise_source_sptr_pc_work_time_var(self)", "title": "" }, { "docid": "1cf1a9f00a3a81686c5e22cd98094b27", "score": "0.695432", "text": "def pc_work_time(self):\n return _analog_swig.noise_source_f_sptr_pc_work_time(self)", "title": "" }, { "docid": "e5be29c4e6dc5d8a1b91bf024de973e3", "score": "0.69370335", "text": "def pc_work_time(self):\n return _analog_swig.sig_source_f_sptr_pc_work_time(self)", "title": "" }, { "docid": "21c4fb0c87ed34460775e14e7ac83456", "score": "0.69241405", "text": "def pc_work_time_var(self):\n return _analog_swig.pwr_squelch_ff_sptr_pc_work_time_var(self)", "title": "" }, { "docid": "5e4894eaa2d400316847f88d0cca1cde", "score": "0.692132", "text": "def pc_work_time_total(self):\n return _analog_swig.ctcss_squelch_ff_sptr_pc_work_time_total(self)", "title": "" }, { "docid": "d0706ccf75c6846618b4657bb2b7ad64", "score": "0.69201225", "text": "def pc_work_time_total(self):\n return _analog_swig.fmdet_cf_sptr_pc_work_time_total(self)", "title": "" }, { "docid": "6a6de00c5d7b4e55eb3f3fad861a5b9b", "score": "0.6905098", "text": "def pc_work_time(self):\n return _analog_swig.dpll_bb_sptr_pc_work_time(self)", "title": "" }, { "docid": "f05fe3613fc05b88df935d5abdeb9b37", "score": "0.6885262", "text": "def pc_work_time(self):\n return _analog_swig.probe_avg_mag_sqrd_c_sptr_pc_work_time(self)", "title": "" }, { "docid": "c154ba1e2a9f6f2594c94d2506743c55", "score": "0.6867522", "text": "def pc_work_time_total(self):\n return _analog_swig.pwr_squelch_ff_sptr_pc_work_time_total(self)", "title": "" }, { "docid": "83944ba44dca3a908e56f405062612de", "score": "0.6850926", "text": "def pc_work_time_var(self):\n return _analog_swig.ctcss_squelch_ff_sptr_pc_work_time_var(self)", "title": "" }, { "docid": "384e51e5a41a867ee0dd7b972bafaf6f", "score": "0.6850233", "text": "def pc_work_time(self):\n return _analog_swig.pll_refout_cc_sptr_pc_work_time(self)", "title": "" }, { "docid": "15e6b0e50cdce5391927bb2ac9332d66", "score": "0.68024087", "text": "def pc_work_time(self):\n return _analog_swig.cpfsk_bc_sptr_pc_work_time(self)", "title": "" }, { "docid": "5c3cd7711f83f24078113f6b5fe13908", "score": "0.6800897", "text": "def pc_work_time(self):\n return _analog_swig.simple_squelch_cc_sptr_pc_work_time(self)", "title": "" }, { "docid": "fdd680a8b724bfa3179d4d9b1e106d6a", "score": "0.6784502", "text": "def pc_work_time_avg(self):\n return _analog_swig.fmdet_cf_sptr_pc_work_time_avg(self)", "title": "" }, { "docid": "9f127993b6f9f20a72c21f34cc019438", "score": "0.6783983", "text": "def pc_work_time(self):\n return _analog_swig.fastnoise_source_c_sptr_pc_work_time(self)", "title": "" }, { "docid": "0cca3c7d153aca6c6f681a9582b43d5e", "score": "0.6775182", "text": "def pc_work_time_var(self):\n return _analog_swig.rail_ff_sptr_pc_work_time_var(self)", "title": "" }, { "docid": "5e4e048b322853bcd1f0ab9f3249930f", "score": "0.6772286", "text": "def pc_work_time_var(self):\n return _analog_swig.probe_avg_mag_sqrd_f_sptr_pc_work_time_var(self)", "title": "" }, { "docid": "9a4c9c65e3cb24cb7117e07f7075662b", "score": "0.6764401", "text": "def pc_work_time_total(self) -> \"float\":\n return _analog_swig.vectornoise_source_sptr_pc_work_time_total(self)", "title": "" }, { "docid": 
"a44c21313054bfe50a77d06ade326d31", "score": "0.6761048", "text": "def pc_work_time_var(self):\n return _analog_swig.probe_avg_mag_sqrd_cf_sptr_pc_work_time_var(self)", "title": "" }, { "docid": "e0a5e7462aab557fd3f7923597932d12", "score": "0.6760417", "text": "def pc_work_time_var(self):\n return _analog_swig.agc2_ff_sptr_pc_work_time_var(self)", "title": "" }, { "docid": "b783309b4d42006a5cc7d50c133f0e22", "score": "0.67549384", "text": "def pc_work_time(self):\n return _analog_swig.agc3_cc_sptr_pc_work_time(self)", "title": "" }, { "docid": "23034cea953da077e014699dff089aae", "score": "0.67503524", "text": "def pc_work_time_total(self):\n return _analog_swig.agc2_ff_sptr_pc_work_time_total(self)", "title": "" }, { "docid": "0410024186b499b536a9172007c75d5c", "score": "0.67487293", "text": "def pc_work_time_avg(self):\n return _analog_swig.ctcss_squelch_ff_sptr_pc_work_time_avg(self)", "title": "" }, { "docid": "8bf79e569edbf0dd9b557e7c8a33a246", "score": "0.6741014", "text": "def pc_work_time_var(self):\n return _analog_swig.quadrature_demod_cf_sptr_pc_work_time_var(self)", "title": "" }, { "docid": "60bcf3c95be785f820c60141999fddd0", "score": "0.67409813", "text": "def pc_work_time(self):\n return _analog_swig.fastnoise_source_s_sptr_pc_work_time(self)", "title": "" }, { "docid": "7e611d616af7ae1c7bc7e5ce52d484fa", "score": "0.6740544", "text": "def pc_work_time_total(self):\n return _analog_swig.rail_ff_sptr_pc_work_time_total(self)", "title": "" }, { "docid": "f9f9f4f87d1accbe042aa8dec59a2e98", "score": "0.67403096", "text": "def pc_work_time_total(self):\n return _analog_swig.quadrature_demod_cf_sptr_pc_work_time_total(self)", "title": "" }, { "docid": "486b0c4acf93a15bc7cb7e69b2fcf22d", "score": "0.67372745", "text": "def pc_work_time(self):\n return _analog_swig.fastnoise_source_i_sptr_pc_work_time(self)", "title": "" }, { "docid": "c96c822c331a93841cd90bcbe3f5a333", "score": "0.67229986", "text": "def pc_work_time(self):\n return _analog_swig.feedforward_agc_cc_sptr_pc_work_time(self)", "title": "" }, { "docid": "366dff9347f24991c0b436ad944189c1", "score": "0.67082274", "text": "def pc_work_time_var(self):\n return _analog_swig.agc_ff_sptr_pc_work_time_var(self)", "title": "" }, { "docid": "cf2ccc8f3de042ba5667e920ff7936ab", "score": "0.6698095", "text": "def pc_work_time_total(self):\n return _analog_swig.phase_modulator_fc_sptr_pc_work_time_total(self)", "title": "" }, { "docid": "18b4059d40455f07fc417276b6defa08", "score": "0.66918594", "text": "def pc_work_time_total(self):\n return _analog_swig.fastnoise_source_f_sptr_pc_work_time_total(self)", "title": "" }, { "docid": "4d7f31421ab25d09c635b72c76635f17", "score": "0.668798", "text": "def pc_work_time(self):\n return _analog_swig.pwr_squelch_cc_sptr_pc_work_time(self)", "title": "" }, { "docid": "ddd75f870bd76343d77ad02d9cd76e27", "score": "0.66821015", "text": "def pc_work_time(self):\n return _analog_swig.noise_source_c_sptr_pc_work_time(self)", "title": "" }, { "docid": "66e556410f58d09c28fd695921eaace9", "score": "0.6679886", "text": "def pc_work_time(self):\n return _analog_swig.agc2_cc_sptr_pc_work_time(self)", "title": "" }, { "docid": "83d73508157af1adb30c623c0289a723", "score": "0.6669301", "text": "def pc_work_time(self):\n return _analog_swig.agc_cc_sptr_pc_work_time(self)", "title": "" }, { "docid": "b899723be65a650eefc7bc6bd85c4b45", "score": "0.666848", "text": "def pc_work_time_var(self):\n return _analog_swig.fastnoise_source_f_sptr_pc_work_time_var(self)", "title": "" }, { "docid": 
"557071461d579c9de62b2a41a8352712", "score": "0.6664019", "text": "def pc_work_time(self):\n return _ofdm_allocator_swig.per_measure_decimator_sptr_pc_work_time(self)", "title": "" }, { "docid": "1c0f3c139b2cafc886212a694b9d53d5", "score": "0.66571707", "text": "def pc_work_time_total(self):\n return _analog_swig.agc_ff_sptr_pc_work_time_total(self)", "title": "" }, { "docid": "708ffc5e28f7af3c30eb2fc4c594d209", "score": "0.66526145", "text": "def pc_work_time_var(self):\n return _analog_swig.phase_modulator_fc_sptr_pc_work_time_var(self)", "title": "" }, { "docid": "d5ea3aa594f1a06918328a08c55123a3", "score": "0.6639918", "text": "def pc_work_time(self):\n return _analog_swig.noise_source_i_sptr_pc_work_time(self)", "title": "" }, { "docid": "1a069ca8dca55398572ec81cda5147ec", "score": "0.6639638", "text": "def pc_work_time(self):\n return _analog_swig.noise_source_s_sptr_pc_work_time(self)", "title": "" }, { "docid": "f11708c7dc76e064b71884449b5c070e", "score": "0.6636667", "text": "def pc_work_time(self):\n return _rs_cpp_swig.rs_decoder_custom_sptr_pc_work_time(self)", "title": "" }, { "docid": "f06c33ffa3f6d3b32019f4b3caf50fdb", "score": "0.6635314", "text": "def pc_work_time_var(self):\n return _analog_swig.probe_avg_mag_sqrd_c_sptr_pc_work_time_var(self)", "title": "" }, { "docid": "5603adf34d37a2724e7ecb14a89d2a36", "score": "0.6634663", "text": "def pc_work_time_avg(self):\n return _analog_swig.pwr_squelch_ff_sptr_pc_work_time_avg(self)", "title": "" }, { "docid": "45c235115a1e2b12b0de26422650982a", "score": "0.6625815", "text": "def pc_work_time_avg(self):\n return _analog_swig.frequency_modulator_fc_sptr_pc_work_time_avg(self)", "title": "" }, { "docid": "64c76041132b1924d5ae84e60dca6af4", "score": "0.6625564", "text": "def pc_work_time_total(self):\n return _analog_swig.noise_source_f_sptr_pc_work_time_total(self)", "title": "" }, { "docid": "54cf675f70b8879ba112517eb488e9e1", "score": "0.6623171", "text": "def pc_work_time_var(self):\n return _analog_swig.pll_carriertracking_cc_sptr_pc_work_time_var(self)", "title": "" }, { "docid": "d6e1cf475a45b338753f9fdd265d7507", "score": "0.6611798", "text": "def pc_work_time(self):\n return _analog_swig.sig_source_c_sptr_pc_work_time(self)", "title": "" }, { "docid": "bef8b11e74afd9c1130200e6c810b7a7", "score": "0.66093135", "text": "def pc_work_time_total(self):\n return _analog_swig.sig_source_f_sptr_pc_work_time_total(self)", "title": "" }, { "docid": "122379aec80674a467358062cb65154b", "score": "0.6607422", "text": "def pc_work_time_avg(self):\n return _analog_swig.quadrature_demod_cf_sptr_pc_work_time_avg(self)", "title": "" }, { "docid": "d5695b0a0b0a996778855439a6789310", "score": "0.6601821", "text": "def pc_work_time(self):\n return _analog_swig.sig_source_i_sptr_pc_work_time(self)", "title": "" }, { "docid": "74a5b8e0fd6f1a007e2aeba871c8d809", "score": "0.6599279", "text": "def pc_work_time_total(self):\n return _analog_swig.probe_avg_mag_sqrd_f_sptr_pc_work_time_total(self)", "title": "" }, { "docid": "c17a691396ac6172e7a6b3dcb7f7d3dd", "score": "0.6598899", "text": "def pc_work_time_avg(self):\n return _analog_swig.pll_carriertracking_cc_sptr_pc_work_time_avg(self)", "title": "" }, { "docid": "1dd541b0b9f17970550339c8796f74fb", "score": "0.6586623", "text": "def pc_work_time_total(self):\n return _analog_swig.probe_avg_mag_sqrd_cf_sptr_pc_work_time_total(self)", "title": "" }, { "docid": "9c559163bcd09f46ea446fcf80c5c36e", "score": "0.6586281", "text": "def pc_work_time_avg(self):\n return 
_analog_swig.phase_modulator_fc_sptr_pc_work_time_avg(self)", "title": "" }, { "docid": "013eaef6116ec5b097abc8b4a81561f2", "score": "0.6581525", "text": "def pc_work_time_total(self):\n return _analog_swig.pll_carriertracking_cc_sptr_pc_work_time_total(self)", "title": "" }, { "docid": "fedb7178dc05b51facc3b862166b57e3", "score": "0.65559775", "text": "def time(self) -> float:", "title": "" }, { "docid": "fedb7178dc05b51facc3b862166b57e3", "score": "0.65559775", "text": "def time(self) -> float:", "title": "" }, { "docid": "373129970d00922f898269645f95a6da", "score": "0.6542838", "text": "def pc_work_time(self):\n return _analog_swig.sig_source_s_sptr_pc_work_time(self)", "title": "" }, { "docid": "87b310b4a7d9add31be0af3521402e4b", "score": "0.65424454", "text": "def pc_work_time_var(self):\n return _analog_swig.noise_source_f_sptr_pc_work_time_var(self)", "title": "" }, { "docid": "6187f33bcc317aa983ae5d955f4071e9", "score": "0.6531379", "text": "def pc_work_time(self):\n return _ofdm_allocator_swig.frame_equalizer_alix_sptr_pc_work_time(self)", "title": "" }, { "docid": "78cec66ee1ee681e13be3d828d9d599a", "score": "0.65247273", "text": "def pc_work_time_avg(self) -> \"float\":\n return _analog_swig.vectornoise_source_sptr_pc_work_time_avg(self)", "title": "" }, { "docid": "0e6cbeaa164fc89d1d9c790117b679d0", "score": "0.6523604", "text": "def pc_work_time_avg(self):\n return _analog_swig.rail_ff_sptr_pc_work_time_avg(self)", "title": "" }, { "docid": "6894f53a5d683cece254a5a337e1d11c", "score": "0.6518545", "text": "def pc_work_time_avg(self):\n return _analog_swig.sig_source_f_sptr_pc_work_time_avg(self)", "title": "" }, { "docid": "ef0ecb57b4be4eb2819a1e68a183230c", "score": "0.65044546", "text": "def pc_work_time_total(self):\n return _analog_swig.fastnoise_source_c_sptr_pc_work_time_total(self)", "title": "" }, { "docid": "60612a2e9e43506a06743c5c00ba8788", "score": "0.6504314", "text": "def pc_work_time_var(self):\n return _analog_swig.fastnoise_source_c_sptr_pc_work_time_var(self)", "title": "" }, { "docid": "63d018c859431da2b03ccb84f83687d1", "score": "0.649921", "text": "def pc_work_time_var(self):\n return _analog_swig.sig_source_f_sptr_pc_work_time_var(self)", "title": "" }, { "docid": "3f96d963e877447c823d34f72dfe238f", "score": "0.6494056", "text": "def pc_work_time_total(self):\n return _analog_swig.simple_squelch_cc_sptr_pc_work_time_total(self)", "title": "" }, { "docid": "7c19e7fdc375fbc4e8328b8b00189aad", "score": "0.64792967", "text": "def pc_work_time_var(self):\n return _analog_swig.dpll_bb_sptr_pc_work_time_var(self)", "title": "" }, { "docid": "131955466936a7cedc6e4fc5efa3861e", "score": "0.64788073", "text": "def pc_work_time_total(self):\n return _analog_swig.probe_avg_mag_sqrd_c_sptr_pc_work_time_total(self)", "title": "" }, { "docid": "cbe999222f6220811138b1310bbf9c2c", "score": "0.64755124", "text": "def pc_work_time_avg(self):\n return _analog_swig.agc2_ff_sptr_pc_work_time_avg(self)", "title": "" }, { "docid": "4a31abe3b337e4b27fe3fd1fd4660466", "score": "0.6474265", "text": "def pc_work_time_avg(self):\n return _analog_swig.fastnoise_source_f_sptr_pc_work_time_avg(self)", "title": "" }, { "docid": "d94c1143f317fe2e3e2bd20b7aed30e1", "score": "0.64702106", "text": "def pc_work_time_var(self):\n return _analog_swig.fastnoise_source_i_sptr_pc_work_time_var(self)", "title": "" }, { "docid": "9d2096b04f962f53bdba93eed729547a", "score": "0.64698154", "text": "def pc_work_time_var(self):\n return 
_ofdm_allocator_swig.per_measure_decimator_sptr_pc_work_time_var(self)", "title": "" }, { "docid": "da2f47aaf8144aef672245d48524eb2a", "score": "0.6468819", "text": "def pc_work_time_total(self):\n return _analog_swig.fastnoise_source_i_sptr_pc_work_time_total(self)", "title": "" }, { "docid": "1517c1927d1315d064fa7fc71bad6f58", "score": "0.6467133", "text": "def pc_work_time_var(self):\n return _analog_swig.fastnoise_source_s_sptr_pc_work_time_var(self)", "title": "" }, { "docid": "e7540a28656cb1380f5d97c13d54f9ad", "score": "0.64589554", "text": "def pc_work_time_total(self):\n return _analog_swig.fastnoise_source_s_sptr_pc_work_time_total(self)", "title": "" }, { "docid": "25aac2b4e2cc3e2a8db50a9a20a25df1", "score": "0.6457247", "text": "def pc_work_time_avg(self):\n return _analog_swig.pll_refout_cc_sptr_pc_work_time_avg(self)", "title": "" }, { "docid": "96bf7521b39fa0fd4f0734fe798892c2", "score": "0.6446931", "text": "def pc_work_time_total(self):\n return _rs_cpp_swig.rs_decoder_custom_sptr_pc_work_time_total(self)", "title": "" } ]
38a4e4a6142363076e0e5fd4ce2c8300
Set the end time.
[ { "docid": "2d79efedc2e0fe4bf7e27917603fc06a", "score": "0.8860281", "text": "def set_end_time(self, t):\n self.end = spss_time(t)", "title": "" } ]
[ { "docid": "ea848c0b6f5e014782c1d4d2645a38c7", "score": "0.89383185", "text": "def end_time(self, end_time):\n\n self._end_time = end_time", "title": "" }, { "docid": "ea848c0b6f5e014782c1d4d2645a38c7", "score": "0.89383185", "text": "def end_time(self, end_time):\n\n self._end_time = end_time", "title": "" }, { "docid": "ea848c0b6f5e014782c1d4d2645a38c7", "score": "0.89383185", "text": "def end_time(self, end_time):\n\n self._end_time = end_time", "title": "" }, { "docid": "ea848c0b6f5e014782c1d4d2645a38c7", "score": "0.89383185", "text": "def end_time(self, end_time):\n\n self._end_time = end_time", "title": "" }, { "docid": "ea848c0b6f5e014782c1d4d2645a38c7", "score": "0.89383185", "text": "def end_time(self, end_time):\n\n self._end_time = end_time", "title": "" }, { "docid": "ea848c0b6f5e014782c1d4d2645a38c7", "score": "0.89383185", "text": "def end_time(self, end_time):\n\n self._end_time = end_time", "title": "" }, { "docid": "ea848c0b6f5e014782c1d4d2645a38c7", "score": "0.89383185", "text": "def end_time(self, end_time):\n\n self._end_time = end_time", "title": "" }, { "docid": "ea848c0b6f5e014782c1d4d2645a38c7", "score": "0.89383185", "text": "def end_time(self, end_time):\n\n self._end_time = end_time", "title": "" }, { "docid": "ea848c0b6f5e014782c1d4d2645a38c7", "score": "0.89383185", "text": "def end_time(self, end_time):\n\n self._end_time = end_time", "title": "" }, { "docid": "ea848c0b6f5e014782c1d4d2645a38c7", "score": "0.89383185", "text": "def end_time(self, end_time):\n\n self._end_time = end_time", "title": "" }, { "docid": "082f56d38adb43fd76bd251087334145", "score": "0.87735015", "text": "def endtime(self, endtime):\n\n self._endtime = endtime", "title": "" }, { "docid": "321461abd895ef45407ef40c4272cf65", "score": "0.87264866", "text": "def end_time(self, end_time: str):\n\n self._end_time = end_time", "title": "" }, { "docid": "07c9bf091dacf7b3e112c1ca4ce7d66c", "score": "0.8701888", "text": "def set_end_time(self, end_time):\n self.end_time.append(end_time)", "title": "" }, { "docid": "6faf2e3de33a910332c808bd491bd8b5", "score": "0.8698118", "text": "def set_end_time(self, val):\n with self._condition:\n self._end_time = val\n self._invoke_upd_callbacks()", "title": "" }, { "docid": "3beb087196c52aac77520219f3ed34b8", "score": "0.8685146", "text": "def time_end(self, time_end):\n \n self._time_end = time_end", "title": "" }, { "docid": "d8fa930514632d840c4cac5b87229b3c", "score": "0.85402113", "text": "def time_end(self):\n fin=tm.time()\n self.time(t=fin)\n self.end = fin", "title": "" }, { "docid": "3722bd4b6a680acc1fbf3e97de89c117", "score": "0.841898", "text": "def set_end_time(self, end_time):\n self.__check_time(end_time)\n if isinstance(end_time, str):\n end_time = self.__iso_time_to_timestamp(end_time)\n self.__end_time = end_time\n return self", "title": "" }, { "docid": "f63fe2ebdd1e926a22d8e4eb4f6f706b", "score": "0.8335043", "text": "def setEndTime(self, endTime):\n\n pass", "title": "" }, { "docid": "c3b95c0f5504a13df64628d37d60d3f8", "score": "0.8327098", "text": "def _set_end_time(self, future):\n self._end_time = datetime.datetime.now()", "title": "" }, { "docid": "c20fe153f9fef87ea21207cbcddd46e0", "score": "0.8195801", "text": "def ending_time(self, ending_time):\n\n self._ending_time = ending_time", "title": "" }, { "docid": "b14b17c573bee04d85554f4a6b6e0df4", "score": "0.81608075", "text": "def setend(self, v):\n\t\tself.end = self._parse_time(v)", "title": "" }, { "docid": "66bc6beab6da13cb37428d566546d920", "score": "0.8094813", "text": 
"def end(self, end_time: typing.Optional[int] = None) -> None:", "title": "" }, { "docid": "c4026154e139529f2117bfcffe6b42f2", "score": "0.8059397", "text": "def set_end_time(self):\n\t\tself.end_time = datetime.today()\n\t\treturn self.end_time", "title": "" }, { "docid": "9d618e2c8dca1ef12822ee0b95225bc7", "score": "0.8009447", "text": "def end(self):\n self.end_time = datetime.now()", "title": "" }, { "docid": "13779c2dd47c01e56594606458280ce0", "score": "0.7864838", "text": "def end_time_stamp(self, end_time_stamp):\n\n self._end_time_stamp = end_time_stamp", "title": "" }, { "docid": "49865918938e8b8e50a0bbdd96217f98", "score": "0.78506166", "text": "def set_end_datetime(self, end_datetime=None):\n self.end_datetime = TimePoint(end_datetime)", "title": "" }, { "docid": "f4e9fe6ea42cfd7521d9fab398a8df39", "score": "0.78215367", "text": "def set_starttime(self, endtime):", "title": "" }, { "docid": "f4e4f10462943b759ff47b51d2274587", "score": "0.7795095", "text": "def end_time(self, end_time, root=False):\n if root:\n root_header = self._root.name\n root_header.end_time = end_time\n else:\n header = self.current_header()\n header.end_time = end_time", "title": "" }, { "docid": "0fb7db3c7ead74286b4e3e7422d22482", "score": "0.7661106", "text": "def changeEnd(self, newTime):\r\n if isinstance(newTime, datetime.datetime):\r\n self.endTime = newTime\r\n else:\r\n while not isinstance(newTime, datetime.datetime):\r\n newTime = datetime.datetime(input(\"Please enter a valid endtime \"))\r\n self.endTime = newTime", "title": "" }, { "docid": "7b3db05e75e4bbbaa810fa5195d4f0eb", "score": "0.75743765", "text": "def end_timestamp(self, end_timestamp):\n\n self._end_timestamp = end_timestamp", "title": "" }, { "docid": "7b3db05e75e4bbbaa810fa5195d4f0eb", "score": "0.75743765", "text": "def end_timestamp(self, end_timestamp):\n\n self._end_timestamp = end_timestamp", "title": "" }, { "docid": "7b3db05e75e4bbbaa810fa5195d4f0eb", "score": "0.75743765", "text": "def end_timestamp(self, end_timestamp):\n\n self._end_timestamp = end_timestamp", "title": "" }, { "docid": "c59256faed3fbceac277cdfd71f7434b", "score": "0.75393707", "text": "def end_ms(self, end_ms: int):\n\n self._end_ms = end_ms", "title": "" }, { "docid": "718bfbf08ff73edd4b00c12dd3f1eb9e", "score": "0.75385743", "text": "def end(self):\n self.update()\n self.__end_time = datetime.datetime.now()", "title": "" }, { "docid": "0ca1c9594a9bd77fbb7a9a3e1c0410df", "score": "0.75230235", "text": "def end_ms(self, end_ms):\n\n self._end_ms = end_ms", "title": "" }, { "docid": "50cccd0e2a5d9552b20889121a067452", "score": "0.73678607", "text": "def change_time(self, start, end):\n self.start = start\n self.end = end", "title": "" }, { "docid": "165e808e35293de335e0cccc10b35ed4", "score": "0.736143", "text": "def build_set_end_time(self, build_set_end_time):\n self._build_set_end_time = build_set_end_time", "title": "" }, { "docid": "dd74f5edda3a3f5a7b33ebcbca33288d", "score": "0.73559564", "text": "def setfinalTime(self,finalTime):\n self.finalTime = finalTime", "title": "" }, { "docid": "606336c0f082dc6461316bda6593e2ca", "score": "0.7300512", "text": "def end_time(self) -> time:\n return self._endtime", "title": "" }, { "docid": "f4819b062b6583cb409d54bb7e356b8f", "score": "0.7291166", "text": "def SetEndDateTime(self, year, month, day, hour):\n self.endDateTime = datetime(year = year, month = month, day = day, hour = hour)", "title": "" }, { "docid": "b70ad941448d12f570992b1b7e26d9a3", "score": "0.72413206", "text": "def end_time(self):\n return 
self.__end_time", "title": "" }, { "docid": "149187edc542d07c5d734bb6345a69d1", "score": "0.7227893", "text": "def set_new_end_time(self, new_end_second):\n # Find index of the new end time\n if new_end_second > self.end_time:\n raise ValueError(\"New end second {} is out of bounds for \"\n \"hypnogram of length {} seconds\".format(\n new_end_second, self.end_time\n ))\n init_ind = np.where(new_end_second > self.inits)[0][-1]\n self.inits = self.inits[:init_ind+1]\n self.stages = self.stages[:init_ind+1]\n self.durations = self.durations[:init_ind+1]\n\n # Update last duration\n old_end = self.inits[-1] + self.durations[-1]\n self.durations[-1] -= old_end - new_end_second", "title": "" }, { "docid": "66590508719b7a80ef83c6b97c9e0985", "score": "0.71978176", "text": "def get_end_time(self):\n return self.__end_time", "title": "" }, { "docid": "66590508719b7a80ef83c6b97c9e0985", "score": "0.71978176", "text": "def get_end_time(self):\n return self.__end_time", "title": "" }, { "docid": "93319a9da47203f1a1bdc14e26928003", "score": "0.7182533", "text": "def end(self, time):\n\n pass", "title": "" }, { "docid": "139710a4b23583a61cb2dbd778e1bb81", "score": "0.7172467", "text": "def get_end_time(self):\n\t\treturn self.end_time", "title": "" }, { "docid": "b8039a1b0fe97e6e6802129d36204422", "score": "0.71616536", "text": "def get_end_time(self):\n return self.end_time", "title": "" }, { "docid": "9e848d64499a009c7846c64c3426d08d", "score": "0.7160628", "text": "def end_time(self) -> datetime:\n return self._end_time", "title": "" }, { "docid": "9e848d64499a009c7846c64c3426d08d", "score": "0.7160628", "text": "def end_time(self) -> datetime:\n return self._end_time", "title": "" }, { "docid": "97a0839c1978a72889dba5666f4f25cf", "score": "0.7153701", "text": "def end_hour(self, end_hour):\n\n self._end_hour = end_hour", "title": "" }, { "docid": "5a30767ba7767d195c882fbf7d056c7d", "score": "0.7148667", "text": "def get_endtime(self):\n return self.endtime", "title": "" }, { "docid": "6b39d2b64c3e448542660726ca5cdbb5", "score": "0.71470857", "text": "def set_run_time(self, start_time, end_time):\n self.__run_time = self.__timediff(start_time, end_time)", "title": "" }, { "docid": "087f7537d4d0ed121203751ba8b0a47a", "score": "0.71307623", "text": "def get_end_time(self):\n return self.end", "title": "" }, { "docid": "bf804e4548e7ffd434c2876478bb3440", "score": "0.71289474", "text": "def end_time(self):\n return self._end_time", "title": "" }, { "docid": "bf804e4548e7ffd434c2876478bb3440", "score": "0.71289474", "text": "def end_time(self):\n return self._end_time", "title": "" }, { "docid": "bf804e4548e7ffd434c2876478bb3440", "score": "0.71289474", "text": "def end_time(self):\n return self._end_time", "title": "" }, { "docid": "bf804e4548e7ffd434c2876478bb3440", "score": "0.71289474", "text": "def end_time(self):\n return self._end_time", "title": "" }, { "docid": "bf804e4548e7ffd434c2876478bb3440", "score": "0.71289474", "text": "def end_time(self):\n return self._end_time", "title": "" }, { "docid": "bf804e4548e7ffd434c2876478bb3440", "score": "0.71289474", "text": "def end_time(self):\n return self._end_time", "title": "" }, { "docid": "c11e468f1cab6a3b7b63094825f1f919", "score": "0.7127488", "text": "def update_end(self, new_end):\n self.end = new_end", "title": "" }, { "docid": "8d57d4e49c1e5d1bfe847cdb8efc66a9", "score": "0.7117663", "text": "def end_time(self):\n return self._end.strftime('%H:%M:%S')", "title": "" }, { "docid": "732523a1494e4f67ae6421ab42a23f0e", "score": "0.710389", "text": 
"def game_end_date_time(self, game_end_date_time):\n\n self._game_end_date_time = game_end_date_time", "title": "" }, { "docid": "60fbcd518dc8c4309e413ff91e7a1684", "score": "0.7075628", "text": "def end(self, end):\n\n self._end = end", "title": "" }, { "docid": "60fbcd518dc8c4309e413ff91e7a1684", "score": "0.7075628", "text": "def end(self, end):\n\n self._end = end", "title": "" }, { "docid": "60fbcd518dc8c4309e413ff91e7a1684", "score": "0.7075628", "text": "def end(self, end):\n\n self._end = end", "title": "" }, { "docid": "69ea3c3c0d0bf8b15e7d918a788ea2f1", "score": "0.7073205", "text": "def endtime(self):\n return self.end", "title": "" }, { "docid": "e8212cf2b93f60aaa6719125d9b33d13", "score": "0.70285743", "text": "def get_end_time(self):\n raise NotImplementedError", "title": "" }, { "docid": "827532f4fb5a79d04bfea50e9d14594a", "score": "0.70154494", "text": "def end_time(self) -> str:\n return self._end_time", "title": "" }, { "docid": "fa0655d95456fe3202154ff4be8e9006", "score": "0.70113933", "text": "def set_interval_end(self, end):\n start = self.interval[0]\n self.interval = [start, end]", "title": "" }, { "docid": "c6fb3c8c9c6628faa5431253da071272", "score": "0.7004442", "text": "def enddate(self, enddate: datetime):\n\n self._enddate = enddate", "title": "" }, { "docid": "3c66166c4096784f3a9697f800926d77", "score": "0.69960314", "text": "def get_end_time(self) -> str:\n return self.end_time", "title": "" }, { "docid": "719aa33c19b3c7c854b70b7ca765ebc8", "score": "0.69868577", "text": "def end(self) -> None:\n self.ended_at = datetime.now(timezone.utc)", "title": "" }, { "docid": "87b51530949f73e5340ac368edc31795", "score": "0.69811934", "text": "def end_value(self, end_value):\n\n self._end_value = end_value", "title": "" }, { "docid": "0bee36f598daf2e625110e3fc1bbf0d4", "score": "0.69797057", "text": "def _update_end(self, end, tenant_id):\n self.usage_end_dt[tenant_id] = end", "title": "" }, { "docid": "738b72e283818d780e53a22e3731b42b", "score": "0.69554305", "text": "def setTime(self, time):\n\n pass", "title": "" }, { "docid": "727b7c2c5966d5e776a8563efc32a603", "score": "0.69545716", "text": "def end_range_duration(self, end_range_duration):\n\n self._end_range_duration = end_range_duration", "title": "" }, { "docid": "f963c1d541ebfcda2f9f675664fb54da", "score": "0.6937458", "text": "def endtime(self):\n\n if hasattr(self, \"_endtime\"):\n return self._endtime\n else:\n return None", "title": "" }, { "docid": "e5364d468dc0066b92bcb6d44eb90606", "score": "0.69240916", "text": "def last_end(self, ts):\n if not isinstance(ts, datetime.datetime):\n raise TypeError(\"Expected datetime.datetime, got {}\".format(ts))\n self._last_end = ts\n self.agent_mgr.status_set(\"last_end\", str(self.last_end))\n self.info(\"Last end: {}\".format(ts))", "title": "" }, { "docid": "46718deecc92bc93459104e1a4bb3f3d", "score": "0.69224596", "text": "def end_time(self) -> str:\n return pulumi.get(self, \"end_time\")", "title": "" }, { "docid": "46718deecc92bc93459104e1a4bb3f3d", "score": "0.69224596", "text": "def end_time(self) -> str:\n return pulumi.get(self, \"end_time\")", "title": "" }, { "docid": "46718deecc92bc93459104e1a4bb3f3d", "score": "0.69224596", "text": "def end_time(self) -> str:\n return pulumi.get(self, \"end_time\")", "title": "" }, { "docid": "46718deecc92bc93459104e1a4bb3f3d", "score": "0.69224596", "text": "def end_time(self) -> str:\n return pulumi.get(self, \"end_time\")", "title": "" }, { "docid": "46718deecc92bc93459104e1a4bb3f3d", "score": "0.69224596", "text": 
"def end_time(self) -> str:\n return pulumi.get(self, \"end_time\")", "title": "" }, { "docid": "46718deecc92bc93459104e1a4bb3f3d", "score": "0.69224596", "text": "def end_time(self) -> str:\n return pulumi.get(self, \"end_time\")", "title": "" }, { "docid": "46718deecc92bc93459104e1a4bb3f3d", "score": "0.69224596", "text": "def end_time(self) -> str:\n return pulumi.get(self, \"end_time\")", "title": "" }, { "docid": "46718deecc92bc93459104e1a4bb3f3d", "score": "0.69224596", "text": "def end_time(self) -> str:\n return pulumi.get(self, \"end_time\")", "title": "" }, { "docid": "e6cc7c8c5a1f118170f389d45fb256cc", "score": "0.69179606", "text": "def ended_date_time(self, ended_date_time):\n\n self._ended_date_time = ended_date_time", "title": "" }, { "docid": "73843bfcfdd713c81d9605461bb7327c", "score": "0.6907088", "text": "def set_timestamp(self, start, end):\n self.__start_time = start\n self.__end_time = end", "title": "" }, { "docid": "dcdf8776df9f5622cf078d65d470d72a", "score": "0.69038534", "text": "def stop_timer(self):\n self.end = time.time()\n self.date_end = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M\")\n #increment so can start and stop repeatedly\n self.time += self.end - self.start", "title": "" }, { "docid": "cd83ea70de9ebc772837b76977dcc2e9", "score": "0.6903614", "text": "def get_end(self):\r\n return datetime.datetime.combine(self.end_date, self.end_time)", "title": "" }, { "docid": "428356b74a8fb87e016281fab0615eee", "score": "0.6902841", "text": "def end_date(self, end_date):\n\n self._end_date = end_date", "title": "" }, { "docid": "428356b74a8fb87e016281fab0615eee", "score": "0.6902841", "text": "def end_date(self, end_date):\n\n self._end_date = end_date", "title": "" }, { "docid": "428356b74a8fb87e016281fab0615eee", "score": "0.6902841", "text": "def end_date(self, end_date):\n\n self._end_date = end_date", "title": "" }, { "docid": "428356b74a8fb87e016281fab0615eee", "score": "0.6902841", "text": "def end_date(self, end_date):\n\n self._end_date = end_date", "title": "" }, { "docid": "428356b74a8fb87e016281fab0615eee", "score": "0.6902841", "text": "def end_date(self, end_date):\n\n self._end_date = end_date", "title": "" }, { "docid": "428356b74a8fb87e016281fab0615eee", "score": "0.6902841", "text": "def end_date(self, end_date):\n\n self._end_date = end_date", "title": "" }, { "docid": "428356b74a8fb87e016281fab0615eee", "score": "0.6902841", "text": "def end_date(self, end_date):\n\n self._end_date = end_date", "title": "" }, { "docid": "428356b74a8fb87e016281fab0615eee", "score": "0.6902841", "text": "def end_date(self, end_date):\n\n self._end_date = end_date", "title": "" }, { "docid": "428356b74a8fb87e016281fab0615eee", "score": "0.6902841", "text": "def end_date(self, end_date):\n\n self._end_date = end_date", "title": "" }, { "docid": "428356b74a8fb87e016281fab0615eee", "score": "0.6902841", "text": "def end_date(self, end_date):\n\n self._end_date = end_date", "title": "" } ]
db9f171c1c3e83d889bd8b0d52a853ae
Main function, starts the program, sets up the paths and submits the bid number to process
[ { "docid": "9f9cdb5c08ad1252520c46caea6628bc", "score": "0.59576374", "text": "def main(bid_number, config, logger, test=False,\n test_bids=5, timeout=30, validations=False):\n pd.options.mode.chained_assignment = None\n\n # Load paths from config\n home = os.environ[config[\"PATHS\"][\"HOME\"]]\n\n # Load paths from config\n paths_dict = createPaths(home, config)\n\n # Write initial error flag\n set_error_flag(bid_number, paths_dict['error_log_path'], str(1), logger)\n\n # to do bulk scoring: check tp_bid sample and get all unique bids\n bid_numbers = pd.DataFrame()\n if test:\n try:\n tp20_bid = pd.read_csv(home + '/data/tp_bid.csv', dtype=str)\n bid_numbers = tp20_bid['NVP_BID_NR'].unique()\n\n print \"Bid numbers found: \"\n #print bid_numbers\n except RuntimeError as e:\n print_error_message(e, \"Error 3.0: General producer error due to test run\",\n logger, False)\n sys.exit(1)\n\n master = pd.DataFrame()\n master_result = pd.DataFrame()\n sql = ''\n\n if test:\n bids_to_score = test_bids\n if bids_to_score == -1:\n bids_to_score = len(bid_numbers)\n else:\n bids_to_score = 1\n\n for i in range(0, bids_to_score):\n if test:\n bid_number = str(bid_numbers[i])\n\n if test:\n logger.info(\"Processing bid # \" + str(i + 1) + \" of \" + str(len(bid_numbers)))\n print \"Processing bid # \" + str(i + 1) + \" of \" + str(len(bid_numbers))\n\n logger.info(\"Processing bid: \" + bid_number)\n\n # Get data\n try:\n response, tp20_bid_shpr, tp20_ceiling_svc, tncvcel, tp_accessorial = \\\n get_data(home, bid_number, config, test, logger)\n except Exception, e:\n if test:\n continue\n else:\n # print_error_message(e, \"Error 2.2a: Data transformation issues: \", logger)\n sys.exit(1)\n\n try:\n # test for CWT threshold if CWT exists,\n # helps prevent CWT over threshold from going to consumer\n cwt = response[response.Product_Mode.isin(['AIR_CWT', 'GND_CWT'])]\n if not cwt.empty:\n # test CWT threshold\n cwt_filename = paths_dict['model_path'] + config[\"MODELS\"][\"CWT\"] + \".p\"\n with open(cwt_filename, \"rb\") as pickle_file:\n air_bt_threshold, air_density_threshold, air_size_threshold, air_cohort_map, \\\n air_incentive_map, gnd_bt_threshold, gnd_density_threshold, \\\n gnd_size_threshold, \\\n gnd_cohort_map, gnd_incentive_map = pickle.load(pickle_file)\n\n # air cwt check\n air_cwt = cwt[cwt.Product == 'Air_CWT']\n if not air_cwt.empty:\n air_max = air_size_threshold['MAX VALUE'].max()\n air_cwt_value = cwt[cwt.Product == 'Air_CWT']['Bid_List_Rev_Wkly'].max()\n\n if air_cwt_value > air_max:\n raise RuntimeError(\"Error 2.2a: Data transformation issues: \"\n \"CWT threshold reached\")\n\n # gnd cwt check\n gnd_cwt = cwt[cwt.Product == 'Gnd_CWT']\n if not gnd_cwt.empty:\n gnd_max = gnd_size_threshold['MAX VALUE'].max()\n gnd_cwt_value = cwt[cwt.Product == 'Gnd_CWT']['Bid_List_Rev_Wkly'].max()\n\n if gnd_cwt_value > gnd_max:\n raise RuntimeError(\"Error 2.2a: Data transformation issues: \"\n \"CWT threshold reached\")\n\n # Enqueue the data\n master = master.append(response)\n\n result, result_file, p2c_file = enqueue(response, tp20_bid_shpr, timeout, bid_number,\n paths_dict['c2p_path'],\n paths_dict['p2c_path'], tncvcel,\n paths_dict['log_path'])\n master_result = master_result.append(result)\n\n except RuntimeError as e:\n print_error_message(e, \"\", logger, False)\n\n if test:\n continue\n else:\n sys.exit(1)\n except (IOError, OSError) as e:\n print_error_message(e, \"Error 3.2a: Model cannot be loaded: \" +\n config[\"MODELS\"][\"CWT\"], logger)\n\n try:\n # store data\n if 
validations:\n test = True\n sql_result = put_data(home, bid_number, config, test, result,\n tp20_ceiling_svc, tp_accessorial, logger)\n if test:\n sql = sql + sql_result\n # True\n except Exception, e:\n print_error_message(e, \"\", logger, False)\n logger.warning(\"Bid \" + bid_number + \" scoring failed.\")\n else:\n cleanup(paths_dict['c2p_path'] + result_file)\n logger.info(\"Bid \" + bid_number + \" successfully scored.\")\n # Once done with everything write success flag\n set_error_flag(bid_number, paths_dict['error_log_path'], str(0), logger)\n\n # Output master dataset immediately\n #file_name = paths_dict['log_path'] + bid_number + '-' + p2c_file + '-master.csv'\n #master.to_csv(file_name)\n logger.debug('******************Exiting function main_Producer')\n if test:\n master_result.to_csv(\"results.csv\")\n sql_file = open(\"sql_results.txt\", \"w\")\n sql_file.write(sql)\n sql_file.close()", "title": "" } ]
[ { "docid": "5eb65670ae87521a2fc47c3a73258747", "score": "0.67149067", "text": "def main(argv):\n\n\tlogger.setLevel(logging.INFO)\n\thandleOptions(argv)\n\t# Load the config file\n\tconfigfile = 'config.json'\n\twith open(configfile, 'r') as f:\n\t\tconfig = json.load(f)\n\n\tcreateOutputFolder()\n\n\tlogger.info(\"Start the grabbing process\")\n\tgrabber = Grabber(config, linkparser)\n\tgrabber.execute()\n\n\tlogger.info(\"Start creating the web package\")\n\twebPackageCreator = WebPackageCreator(config)\n\twebPackageCreator.create()\n\n\tlogger.info(\"Start creating the dns package\")\n\tdnsPackageCreator = DNSCreater(config)\n\tdnsPackageCreator.create()", "title": "" }, { "docid": "a66fef5e5ea4e26f9c772525accbf2cb", "score": "0.6053314", "text": "def main(self,argv):\n cli.setup_logging(\"/tmp/setup.out\")\n os = cli.get_os_type()\n if (os not in [\"mac\"]):\n cli.fatal(\"Untested OS: \" + os)\n cli.log.info(\"Starting Program\")\n cli.log.info(\"ARGS:\" + str(argv))\n self.cmd(\"ls / | head -5 # test ability to run shell commands\") \n self.install_grakn_in_osx() # Start Grakn \n #self.clear_grakn_db() # Erase the data (Stops services)\n #self.install_grakn_in_osx() # Start Grakn \n \n self.test_grakn_in_osx()", "title": "" }, { "docid": "6caabe4e6793f4e23f18df52f721679e", "score": "0.6003799", "text": "def main():\n parser = argparse.ArgumentParser(description='boj-tool: a CLI tool for BOJ')\n parser.add_argument('-v', '--verbose', help='set log level to INFO',\n action='store_true')\n parser.add_argument('-d', '--debug', help='set log level to DEBUG',\n action='store_true')\n subparsers = parser.add_subparsers(dest='subparser')\n login_parser = subparsers.add_parser('login')\n submit_parser = subparsers.add_parser('submit')\n submit_parser.add_argument('number', type=int, help='the problem number')\n submit_parser.add_argument('filename', help='filename to submit')\n stat_parser = subparsers.add_parser('stats')\n stat_parser.add_argument('-u', '--user', type=str,\n help='the user to show stats')\n version_parser = subparsers.add_parser('version')\n args = parser.parse_args()\n\n initialize()\n if args.verbose:\n logger.setLevel(logging.INFO)\n elif args.debug:\n logger.setLevel(logging.DEBUG)\n\n if args.subparser == 'login':\n login()\n elif args.subparser == 'submit':\n submit(args.number, args.filename)\n print_result(args.number)\n elif args.subparser == 'stats':\n stats(args.user)\n elif args.subparser == 'version':\n version()", "title": "" }, { "docid": "5c3edb446efd4212324d179ce9a483d8", "score": "0.5855898", "text": "def process():\n\tparser = argparse.ArgumentParser(description=description)\n\tparser.add_argument(\"config\", help=\"file name of the JSON configuration file\")\n\tparser.add_argument(\"-b\", \"--book\", action='store_true', help=\"Create an EPUB3 package\")\n\tparser.add_argument(\"-f\", \"--folder\", action='store_true', help=\"Create a folder with the book content\")\n\n\targs = parser.parse_args()\n\tfrom rp2book import start\n\t# start(args.config, package=args.book, folder=args.folder, temporary=args.tempfile,\n\t# \t logger= _create_logger(\"log\") if args.logging else None)\n\tstart(args.config, package=args.book, folder=args.folder)", "title": "" }, { "docid": "a2d6f1ffae26dc2828c5286dcc8ee18f", "score": "0.5838961", "text": "def main():\n path = \"./Bag_flight/\"\n files = [f for f in os.listdir(path) if f[-4:] == '.bag']\n if not files:\n print('No bag files found!')\n return None\n for f in files:\n print(\"\\nreading bag: \" + str(f))\n with 
rosbag.Bag(path + f) as bag:\n bag_df_dict = get_bag_data_pandas(bag)\n data_df = processing(bag_df_dict=bag_df_dict, id=f[:-4])", "title": "" }, { "docid": "c50c97257373e9ea439903040fb92e38", "score": "0.58337957", "text": "def main():\n\n # anyway output thr banner information\n banner() \n\n # set paths of project \n paths.ROOT_PATH = os.getcwd() \n setPaths()\n \n # received command >> cmdLineOptions\n cmdLineOptions.update(conf.webConfig)\n \n # loader script,target,working way(threads? gevent?),output_file from cmdLineOptions\n # and send it to conf\n initOptions(cmdLineOptions)\n\n # run!\n run()", "title": "" }, { "docid": "37ff9a8373d4935f4d428fdc43dcf0b0", "score": "0.5825765", "text": "def main():\n artist = args.artist or getInput(\"Who wrote the song?\")\n album = args.album or getInput(\"What album is the song on?\")\n track = args.track or getInput(\"What is the track name?\")\n trackNum = args.num or getInput(\"What number is the song on the album?\")\n fmt = args.fmt or getInput(\"What format is the song in?\")\n itunes = args.itunes or getInput(\"Where is your iTunes library located? (default: '/Users/{}/Music/iTunes/iTunes Media/')\".format(getpass.getuser()))\n\n if itunes.strip() == \"\":\n itunes = \"/Users/{}/Music/iTunes/iTunes Media\".format(getpass.getuser())\n\n trackNum = trackNum.zfill(2)\n\n print(itunes)\n print(artist)\n print(album)\n print(track)\n print(trackNum)\n print(genPath(artist, album, track, trackNum, fmt, itunes))\n path = genPath(artist, album, track, trackNum, fmt, itunes)\n out = d.trackSeek(path, artist, album, track, trackNum, fmt)\n for song in out:\n addToiTunes(itunes, song)", "title": "" }, { "docid": "66efb48a9e722cca9e4ed621db950062", "score": "0.5799956", "text": "def main():\n with logbook.NestedSetup(_get_log_handlers()).applicationbound():\n logger.info('Py-expander started!')\n try:\n # Set subliminal cache first.\n if config.SHOULD_FIND_SUBTITLES:\n logger.debug('Setting subtitles cache...')\n configure_subtitles_cache()\n # Parse input arguments.\n if len(sys.argv) == 3:\n directory = sys.argv[1]\n filename = sys.argv[2]\n if directory == config.DEFAULT_PATH:\n torrent_path = os.path.join(directory, filename)\n logger.info('Input is a file: {}'.format(torrent_path))\n else:\n torrent_path = directory\n logger.info('Input is a dir: {}'.format(torrent_path))\n expand_torrent(torrent_path)\n elif len(sys.argv) == 2:\n expand_torrent(sys.argv[1])\n else:\n expand_torrent_from_transmission()\n except:\n logger.exception('Critical exception occurred!')\n raise", "title": "" }, { "docid": "d233dc0534600ec6d411de9283e46e50", "score": "0.5783133", "text": "def main():\n with open(argv[1]) as file:\n #dic = parse_file(file)\n #get_info(dic)\n path = '/Users/gustavotamasco/PycharmProjects/tcc'\n files = get_file(path)\n new_files =[]\n for f in files:\n nf = \"'{0}'\".format(f)\n new_files.append(nf)\n run_blast(new_files)", "title": "" }, { "docid": "cf2a0ead0d7798c1f777d90c6c4f12b5", "score": "0.5773897", "text": "def main():\n try:\n number_accounts, ip, user_pattern, user_pwd_pattern, node_pattern, node_pwd_pattern = parse_args()\n except ValueError:\n pass\n generate_and_save(number_accounts, ip, user_pattern, user_pwd_pattern, node_pattern, node_pwd_pattern)", "title": "" }, { "docid": "cf3352c5ac59b1128c898ab17f9110ad", "score": "0.5770266", "text": "def main():\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"bank_code\",\n help=\"3 letter code representing the bank that the data comes from\"\n )\n 
parser.add_argument(\n \"input_file\",\n help=\"PDF file containing bank transation data\"\n )\n parser.add_argument(\n \"output_file\",\n help=\"Name of CSV file that output will be saved to\"\n )\n program_args = parser.parse_args()\n bank_code = program_args.bank_code\n input_file = program_args.input_file\n output_file = program_args.output_file\n pdf2ynab(bank_code, input_file, output_file)", "title": "" }, { "docid": "e44d640db816f6db16fb7f0cf29beb68", "score": "0.57688487", "text": "def main():\n # Detect BBP installation\n bbp_install = InstallCfg.getInstance()\n\n prog_base = os.path.basename(sys.argv[0])\n usage = \"usage: %s [options]\" % (prog_base)\n parser = optparse.OptionParser(usage)\n parser.add_option(\"-c\", \"--codebase\", type=\"string\", action=\"store\",\n dest=\"codebase\",\n help=\"Codebase for the simulation: %s\" %\n (CODEBASES))\n parser.add_option(\"-e\", \"--event\", type=\"string\", action=\"store\",\n dest=\"event\",\n help=\"Validation event (should be configured in BBP)\")\n parser.add_option(\"-d\", \"--dir\", type=\"string\", action=\"store\",\n dest=\"simdir\",\n help=\"Simulation directory\")\n parser.add_option(\"--skip-rupgen\", action=\"store_true\", dest=\"skiprupgen\",\n help=\"Skip the rupture generator, run only 1 simulation\")\n parser.add_option(\"--hypo-rand\", action=\"store_true\", dest=\"hyporand\",\n help=\"Enables hypocenter randomization\")\n parser.add_option(\"--no-hypo-rand\", action=\"store_false\", dest=\"hyporand\",\n help=\"Disables hypocenter randomization\")\n parser.add_option(\"-n\", \"--num-simulations\", type=\"int\", action=\"store\",\n dest=\"numsim\", help=\"Number of simulations to run\")\n parser.add_option(\"--email\", type=\"string\", action=\"store\",\n dest=\"email\", help=\"Email for job notifications\")\n (options, args) = parser.parse_args()\n\n # Validate codebase to use\n codebase = options.codebase\n if codebase is None:\n print \"Please specify a codebase!\"\n sys.exit(1)\n codebase = codebase.lower()\n if codebase not in CODEBASES:\n print \"Codebase needs to be one of: %s\" % (CODEBASES)\n\n # Check for event\n event = options.event\n if event is None:\n print \"Please provide a validation event!\"\n sys.exit(1)\n event_names = validation_cfg.VE_EVENTS.get_all_names()\n events = [v_event.lower() for v_event in event_names]\n if event.lower() not in events:\n print (\"Event %s does not appear to be properly configured on BBP\" %\n (event))\n print (\"Available options are: %s\" % (event_names))\n print \"Please provide another event or check your BBP installation.\"\n sys.exit(1)\n val_obj = validation_cfg.VE_EVENTS.get_event_by_print_name(event)\n\n # Check if we want to run the rupture generator\n skip_rupgen = options.skiprupgen\n\n # Check for hypocenter randomization\n if options.hyporand is None:\n print \"Please specify --hypo-rand or --no-hypo-rand!\"\n sys.exit(1)\n\n if options.hyporand:\n hypo_rand = True\n else:\n hypo_rand = False\n\n if not skip_rupgen:\n # Get source file\n try:\n source_file = val_obj.get_input(codebase, \"source\").strip()\n except KeyError:\n print (\"Unable to get source file for event %s, codebase %s!\" %\n (event, codebase))\n sys.exit(1)\n if not source_file:\n print (\"Source file for event %s, codebase %s not specified!\" %\n (event, codebase))\n sys.exit(1)\n else:\n # No need to get the source file, we start from the srf\n source_file = None\n try:\n srf_file = val_obj.get_input(codebase, \"srf\").strip()\n except KeyError:\n print (\"Event %s does not have a srf 
file for codebase %s!\" %\n (event, codebase))\n sys.exit(1)\n if not srf_file:\n print (\"Event %s does not have a srf file for codebase %s!\" %\n (event, codebase))\n sys.exit(1)\n # Force number of simulations to 1\n options.numsim = 1\n\n # Check for the simulation directory\n simdir = options.simdir\n if simdir is None:\n print \"Please provide a simulation directory!\"\n sys.exit(1)\n simdir = os.path.abspath(simdir)\n if os.path.exists(simdir):\n print \"Simulation directory exists: %s\" % (simdir)\n opt = raw_input(\"Do you want to delete its contents (y/n)? \")\n if opt.lower() != \"y\":\n print \"Please provide another simulation directory!\"\n sys.exit(1)\n opt = raw_input(\"ARE YOU SURE (y/n)? \")\n if opt.lower() != \"y\":\n print \"Please provide another simulation directory!\"\n sys.exit(1)\n # Delete existing directory (we already asked the user twice!!!)\n shutil.rmtree(simdir)\n\n # Pick up number of simulations to run\n numsim = options.numsim\n if numsim < 1 or numsim > MAX_SIMULATIONS:\n print (\"Number of simulations should be between 1 and %d\" %\n (MAX_SIMULATIONS))\n sys.exit(1)\n\n # Check for e-mail address\n email = options.email\n if email is None:\n print \"Please provide an e-mail address for job notifications\"\n sys.exit(1)\n\n # Make sure user has configured the setup_bbp_epicenter_env.sh script\n setup_bbp_env = os.path.join(bbp_install.A_INSTALL_ROOT,\n \"utils/batch/setup_bbp_epicenter_env.sh\")\n if not os.path.exists(setup_bbp_env):\n print (\"Cannot find setup_bbp_epicenter_env.sh script!\")\n print (\"Expected at: %s\" % (setup_bbp_env))\n sys.exit(1)\n # Create simulation directories\n prefix = \"%s-%s\" % (event.lower(), codebase.lower())\n # Make sure we remove spaces from prefix (e.g. for the \"Loma Prieta\" event)\n prefix = prefix.replace(\" \", '')\n os.makedirs(simdir)\n indir = os.path.join(simdir, \"Sims\", \"indata\")\n outdir = os.path.join(simdir, \"Sims\", \"outdata\")\n tmpdir = os.path.join(simdir, \"Sims\", \"tmpdata\")\n logsdir = os.path.join(simdir, \"Sims\", \"logs\")\n xmldir = os.path.join(simdir, \"Xml\")\n srcdir = os.path.join(simdir, \"Src\")\n for mdir in [indir, outdir, tmpdir, logsdir, xmldir, srcdir]:\n os.makedirs(mdir)\n # Generate source files if needed\n if source_file is not None:\n generate_src_files(numsim, source_file, srcdir, prefix, hypo_rand)\n # Generate xml files\n generate_xml(bbp_install, numsim, srcdir, xmldir,\n logsdir, event, codebase, prefix,\n skip_rupgen)\n # Write pbs file\n write_pbs(bbp_install, numsim, simdir, xmldir, email, prefix)", "title": "" }, { "docid": "26cd6c2af325c94684ef9d3886f9c709", "score": "0.5756447", "text": "def launch_barrier ():\n os.chdir(\"barrier\")\n subprocess.call(\"barrier.exe\")\n os.chdir(\"..\")\n return 0", "title": "" }, { "docid": "178409e8cf07166f24c9f38f913a6d8b", "score": "0.5754545", "text": "def main():\n args = get_args()\n bottlenum = args.num_bottles \n \n bottledown = bottlenum - 1\n if bottlenum < 1:\n die('N ({}) must be a positive integer'.format(bottlenum))\n while bottlenum >= 1:\n if bottlenum == 1:\n print('{} bottle of beer on the wall,'.format(bottlenum))\n print('{} bottle of beer,'.format(bottlenum)) \n else:\n print('{} bottles of beer on the wall,'.format(bottlenum))\n print('{} bottles of beer,'.format(bottlenum))\n print('Take one down, pass it around,')\n if bottledown == 1:\n print('{} bottle of beer on the wall!'.format(bottledown))\n print('') \n else:\n if bottledown == 0: \n print('{} bottles of beer on the 
wall!'.format(bottledown))\n else:\n print('{} bottles of beer on the wall!'.format(bottledown))\n print('') \n bottlenum -= 1\n bottledown -= 1", "title": "" }, { "docid": "4740ffd7ba2c8e0cb2ce0717abb89438", "score": "0.5730241", "text": "def run(self):\n self.parse_args()\n\n if self.options.file_root:\n # check if the argument is pointing to a file on disk\n file_root = os.path.abspath(self.options.file_root)\n self.config[\"file_roots\"] = {\"base\": _expand_glob_path([file_root])}\n\n if self.options.pillar_root:\n # check if the argument is pointing to a file on disk\n pillar_root = os.path.abspath(self.options.pillar_root)\n self.config[\"pillar_roots\"] = {\"base\": _expand_glob_path([pillar_root])}\n\n if self.options.states_dir:\n # check if the argument is pointing to a file on disk\n states_dir = os.path.abspath(self.options.states_dir)\n self.config[\"states_dirs\"] = [states_dir]\n\n if self.options.local:\n self.config[\"file_client\"] = \"local\"\n if self.options.master:\n self.config[\"master\"] = self.options.master\n\n caller = salt.cli.caller.Caller.factory(self.config)\n\n if self.options.doc:\n caller.print_docs()\n self.exit(salt.defaults.exitcodes.EX_OK)\n\n if self.options.grains_run:\n caller.print_grains()\n self.exit(salt.defaults.exitcodes.EX_OK)\n\n caller.run()", "title": "" }, { "docid": "c92c3899081029f7caaf5a4037eb5e03", "score": "0.57288873", "text": "def main(args):\n\n # Set up processed data directories\n # Trailing slash not expected by rest of program\n args[\"root\"] = args[\"root\"].rstrip(\"/\")\n processed_data_dir = args[\"root\"] + \"/processed_data\"\n if not os.path.exists(processed_data_dir):\n os.mkdir(processed_data_dir)\n\n # Read processor requests\n for processor in args[\"processors\"]:\n print(\"################################\\n\")\n print(\"Working on processor %s\" % processor[\"name\"])\n save_processor_info(processed_data_dir, processor)\n processor_handler = processor_handlers[processor[\"name\"]]\n processor_handler(args[\"root\"], args[\"name\"], processor[\"args\"])\n print(\"\")", "title": "" }, { "docid": "9a1ae0b7c5547a53d55923f42bfb1ae6", "score": "0.5685069", "text": "def main(): \n args = get_args()\n config = get_namelist(args)\n infill(args, config)\n print '\\nFinished\\n'", "title": "" }, { "docid": "194599c7d1746708ab091609c186477b", "score": "0.5684453", "text": "def main():\n\n # Parsing command line arguments -->\n cron_mode = False\n debug_mode = False\n develop_mode = False\n\n cmd_options, args = getopt.gnu_getopt(\n sys.argv[1:], \"\", [\n \"cron\",\n \"debug\",\n \"develop-mode\",\n ])\n\n for option, value in cmd_options[:]:\n if option == \"--cron\":\n cron_mode = True\n elif option == \"--debug\":\n debug_mode = True\n elif option == \"--develop-mode\":\n develop_mode = True\n else:\n raise Error(\"Logical error.\")\n # Parsing command line arguments <--\n\n pcli.log.setup(\n debug_mode = debug_mode | develop_mode,\n level = logging.ERROR if cron_mode else None)\n\n global _DEVELOP_MODE\n _DEVELOP_MODE = develop_mode\n\n pid_file = os.path.join(tempfile.gettempdir(), \"rutracker-get-new-torrents\")\n\n try:\n pid_file_fd = psys.daemon.acquire_pidfile(pid_file)\n except psys.daemon.PidFileLockedError as e:\n LOG.error(\"Exiting: the PID file is locked by another process.\")\n else:\n try:\n get_new_torrents()\n except TemporaryError as e:\n LOG.warning(e)\n finally:\n psys.daemon.release_pidfile(pid_file, pid_file_fd)", "title": "" }, { "docid": "5b45c4529f669ea5e737629e555ae217", "score": "0.56844145", 
"text": "def main():\n input_lines = []\n if len(sys.argv) >= 1:\n input_lines = sys.argv[1:]\n if len(input_lines) == 0:\n print(\"\"\"Usage: - python bf.py \"<YOUR BRAINFUCK PROGRAM>\" to run a program\n - python --help to see a detailed help\"\"\")\n elif input_lines[0] == '--help' or input_lines[0] == '-h':\n help(main)\n return\n else:\n run(input_lines)\n return", "title": "" }, { "docid": "62a23c40484f51bc45ca9a2ecd817b7e", "score": "0.56780726", "text": "def main():\n print('Importing scrapers')\n scrapers = scraper.import_scrapers()\n print('Caculating available episodes')\n avail_episode = database.fetch_available_episodes()\n if len(avail_episode) == 0:\n print('No episode is available at present')\n return\n print('These episodes are available:')\n for ep in avail_episode:\n print('{} of {}'.format(ep['ep'], ep['name']))\n print('Download starts:')\n for ep in avail_episode:\n print('Ep.{} of {} is processing'.format(ep['ep'], ep['name']))\n for __scraper in scrapers:\n try:\n url = __scraper.get_download_url(**ep)\n path = downloader.download(url=url,\n save_path=configure.TORRENT_SAVE_PATH,\n **ep)\n if configure.ENABLE_AUTO_DOWNLOAD:\n if not utorrent.is_token_initialized():\n print('Refreshing token')\n utorrent.refresh_token()\n print('Importing torrent into utorrent.')\n utorrent.add_torrent(path, ep['folder'])\n print('Import completed successfully.')\n database.set_downloaded_episode(ep['name'], ep['ep'])\n break\n except FileNotFoundError:\n print('Scraper cannot find the file')\n # If last scraper is used\n if __scraper is scrapers[-1]:\n print('File cannot be found in all scraper. Try next time.')", "title": "" }, { "docid": "1dfafbf218933613930fae0c1b52e0da", "score": "0.5674581", "text": "def main():\n if not exists('config.json'):\n shutil.copy('default_config.json', 'config.json')\n print(\"Created 'config.json'. 
Please configure this file first.\")\n return\n\n # Load configuration of the project.\n Config.load()\n\n config = Config.get_config()\n step = config['step']\n\n if step == 'PI':\n print(\"Personality Insights analysis started..\")\n process = PersonalityInsights(config)\n elif step == 'LIWC':\n print(\"LIWC analysis started..\")\n process = LIWC(config)\n elif step == 'Preprocess':\n print(\"Preprocessing started..\")\n process = Preprocessing(config)\n elif step == 'QuickLIWC':\n print(\"Started LIWC process with pre-loaded LIWC files..\")\n process = LIWCQuick(config)\n else:\n print(\"Unknown step specified in configuration.\\nProgram terminated.\")\n return\n process.execute()", "title": "" }, { "docid": "4615310e119d88a4053084e7ead92eaf", "score": "0.5674094", "text": "def main():\n logging.basicConfig(level=logging.INFO, format='%(levelname)-8s: %(message)s')\n parser = argparse.ArgumentParser(description=__doc__.strip())\n\n parser.add_argument('portfolio',\n help=('A CSV file which contains the tickers of assets and '\n 'number of units'))\n parser.add_argument('--dbdir', default=database.DEFAULT_DIR,\n help=\"Database directory to write all the downloaded files.\")\n parser.add_argument('-i', '--ignore-missing-issuer', action='store_true',\n help=\"Ignore positions where the issuer implementation is missing\")\n parser.add_argument('-o', '--ignore-options', action='store_true',\n help=(\"Ignore options positions \"\n \"(only works with Beancount export file)\"))\n\n parser.add_argument('--visible', action='store_true',\n help=\"Run with a visible browser window (not headless).\")\n parser.add_argument('-b', '--driver-exec', action='store',\n default=\"/usr/local/bin/chromedriver\",\n help=\"Path to chromedriver executable.\")\n args = parser.parse_args()\n db = database.Database(args.dbdir)\n\n # Load up the list of assets from the exported Beancount file.\n assets = beansupport.read_portfolio(args.portfolio, args.ignore_options)\n\n # Fetch baskets for each of those.\n driver = None\n for row in sorted(assets):\n if not row.issuer and args.ignore_missing_issuer:\n logging.warning(\"Ignoring missing issuer for {}\".format(row.ticker))\n continue\n try:\n driver, _ = fetch_holdings(row.ticker, row.issuer, driver, db,\n args.ignore_missing_issuer, args)\n except Exception:\n traceback.print_exc()\n continue\n if driver:\n driver.close()", "title": "" }, { "docid": "235a326da6160ec7324f72f027fe0409", "score": "0.5662144", "text": "def mainProgram(self):", "title": "" }, { "docid": "40c55bcfa6634cb4b41550de3b6516ad", "score": "0.56590545", "text": "def main(argv):\n # parse command line\n for p in sys.argv[1:]:\n pair = p.split('=')\n if (2 != len(pair)):\n print 'bad parameter: %s' % p\n break\n else:\n parameters[pair[0]] = pair[1]\n else:\n # set up logging\n bname = os.path.splitext( os.path.basename(__file__) )[0]\n log_config_file = os.path.join('/home/pims/dev/programs/python/pims/files', bname + '_log.conf')\n logging.config.fileConfig(log_config_file)\n log = logging.getLogger('inputs')\n if params_ok(log):\n try:\n log = logging.getLogger('process')\n backfill_ossbtmf_roadmaps(parameters['date_range'], parameters['batch_dir'], log)\n except Exception, e:\n # Log error\n log.process.error( e.message )\n return -1\n # Message with time when done\n log.debug('Done %s.\\n' % datetime.datetime.now() + '-'*99)\n return 0\n print_usage()", "title": "" }, { "docid": "87cf60d0cd2f13d4b71927be5ba1083f", "score": "0.5648502", "text": "def main(self):\n try:\n 
self.path_runs.set_execution_directory()\n\n self.sequence = create_local_fasta_file(\n self.path_runs.get_path_execution(),\n self.opts.fromFasta,\n self.opts.inputFasta,\n self.opts.toolname,\n self.framework)\n\n copy_necessary_files(\n self.path_runs.get_path_execute(),\n self.path_runs.get_path_execution(),\n self.framework.get_framework())\n\n self.framework.set_command(\n self.path_runs.get_path_execution(),\n 'aemt-pop-up2')\n\n size = int(self.opts.sizePopulation) / 15\n\n cl = [\n self.framework.get_command(),\n str(0),\n self.opts.sizePopulation,\n str(size),\n str(size),\n str(size),\n str(size),\n str(size),\n str(size),\n str(size),\n str(size),\n str(size),\n str(size),\n str(size),\n str(size),\n str(size),\n str(size),\n str(size),\n str(size),\n str(0),\n str(0),\n self.opts.VanderWaalsWeight,\n self.opts.ChargeWeight,\n str(0),\n str(0),\n str(0),\n str(0),\n str(0),\n self.opts.SolvWeight,\n self.opts.HbondWeight,\n os.path.join(self.path_runs.get_path_execution(), \"fasta.txt\"),\n os.path.join(self.path_runs.get_path_execution(), \"result.txt\"),\n os.path.join(self.path_runs.get_path_execution(), \"pop_meamt.txt\"),\n os.path.join(self.path_runs.get_path_execution(), \"protein.pdb\"),\n os.path.join(self.path_runs.get_path_execution(), \"saida1.txt\"),\n os.path.join(self.path_runs.get_path_execution(), \"angles.txt\"),\n str(0),\n os.path.join(self.path_runs.get_path_execution(), \"meat.txt\"),\n '&']\n\n retProcess = subprocess.Popen(cl, 0, None, None, None, False)\n retProcess.wait()\n\n path_output, file_output = os.path.split(self.opts.output)\n\n result, html = get_result_files(\n self.path_runs.get_path_execution(), self.opts.toolname)\n\n send_output_results(path_output, file_output, result)\n\n except Exception, e:\n show_error_message(str(e))", "title": "" }, { "docid": "93c5b87b0e85d0b61b6f4155ebeb07d5", "score": "0.56413895", "text": "def main():\r\n print(\"Program started...\")", "title": "" }, { "docid": "9fa8c9fbb1ee16b09824d86355e708ee", "score": "0.56383514", "text": "def main():\n # Check parameters\n parameters = args_check(sys.argv[1:])\n print(\"\\nExtracting PB sequences from trajectory.\\n\")\n # Extraction of PB sequences with pbxplore\n table_seq = mk_table_seq(parameters[1], parameters[2])\n print(\"\\nComputing frequences of PB (This may take a few minutes)\\n\")\n # Computation of frequencies\n table_pos_freq = mk_table_pos_freq(table_seq)\n table_dbpos_freq = mk_table_dbpos_freq(table_seq)\n print(\"Computing Mutual Information (This may take a few minutes)\\n\")\n # Computation of Mutual Information\n table_mi = mk_mi_table(table_pos_freq, table_dbpos_freq)\n mi_heatmap(table_mi, parameters[0])\n # Write results in output directory\n table_seq.to_csv(parameters[0]+\"PB_seq_table.csv\")\n table_pos_freq.to_csv(parameters[0]+\"PB_pos_freq_table.csv\")\n table_dbpos_freq.to_csv(parameters[0]+\"PB_dbpos_freq_table.csv\")\n table_mi.to_csv(parameters[0]+\"MI_table.csv\")\n print(\"Done!\\n\")\n sys.exit()", "title": "" }, { "docid": "0387ba897ed2be3b307b469f53cb07ea", "score": "0.56347436", "text": "def main():\n # Parse script parameters\n parser = setup_arg_parser()\n args, _ = parser.parse_known_args()\n\n # Select a queue to add\n selected_queue = select_queue(args)\n\n # Install the selected printer\n if selected_queue:\n install_printer(selected_queue)", "title": "" }, { "docid": "485707c004fecd46b86fd5d4f8a2ecd2", "score": "0.5624613", "text": "def main():\n ''' Parsing arguments from command line , there are two arguments\n a) 
directory : must be a directory like example c:\\\\\n b) interval : the interval must be the number of seconds\n\n example:\n python client.py --directory=\"C:\\Users\\UserName\\OpenDrop\" --interval=2\n '''\n (options, arguments) = getParse()\n\n if not validateArguments(options.directory, options.interval):\n raise ClientException(\"Invalid Arguments\")\n\n ''' Creating multiprocess elements '''\n mutex = multiprocessing.Lock()\n queue = multiprocessing.Queue()\n OpenDrop = Client(cwd=options.directory, mutex=mutex)\n interval = 1\n ''' moving all solutions to multiple processes '''\n\n message = \"\"\" ================ Welcome to OpenDrop Client ================= \\n \\\n Monitoring Folder: {0} :\"\"\".format(OpenDrop.working_directory)\n\n ''' Preparing all processes '''\n scan = multiprocessing.Process(target=OpenDrop.run, args=(queue, options.interval,))\n saver = multiprocessing.Process(target=OpenDrop.saveData, args=(queue,))\n scan.start()\n saver.start()\n \n print message\n while True:\n raw_input(\"Press enter \")\n with mutex:\n out = raw_input(\"Press q for quit : \")\n if out == 'q':\n scan.terminate()\n saver.terminate()\n return", "title": "" }, { "docid": "997afa9348ae9bcf5a419bb07122f911", "score": "0.5624492", "text": "def main(self):\n\n log.divider(\"setting up\", char=\"=\")\n\n self.require_server_connection()\n\n self.finish_parameters()\n\n if self.args.certify_files:\n self.certify_local_files()\n\n if self.args.logout:\n return self.logout()\n\n self.login()\n\n if self.args.wipe_existing_files:\n self.wipe_files()\n\n self.jpoll_key = self.jpoll_open_channel()\n\n if self.args.submission_kind == \"batch\":\n submit_future = self.batch_submit_references()\n elif self.args.submission_kind == \"certify\":\n submit_future = self.certify_files()\n elif self.args.submission_kind == \"references\":\n submit_future = self.submit_references()\n elif self.args.submission_kind == \"mappings\":\n submit_future = self.submit_mappings()\n\n if self.args.monitor_processing:\n monitor_future = self.monitor()\n\n if self.args.wait_for_completion:\n self.submission_complete(submit_future)\n\n if self.args.monitor_processing:\n monitor = self.monitor_complete(monitor_future)\n if monitor.exit_status == 0:\n self._ready_url = monitor.result\n self._results_id = self._ready_url.split('/')[-1]\n\n log.standard_status()\n\n self._error_count = log.errors()\n self._warning_count = log.warnings()\n\n if self.auto_confirm is True:\n if self._error_count == 0:\n self._confirmation = self.confirm()\n else:\n self.cancel()\n\n self.get_file_map()\n\n return log.errors()", "title": "" }, { "docid": "dda81ec024f31537cf8b4c9ded449a3e", "score": "0.5622697", "text": "def main():\n\n exit_status = ExitStatus.OK\n try:\n args = parser.parse_args()\n #print(args)\n if args.save:\n save_args(args.save)\n\n if not os.path.exists(args.directory):\n os.makedirs(args.directory)\n\n names = name_generator(\n args.format, type=args.payload.short, rand=args.rand_method.name,\n idx_format='{{0:0{}d}}'.format(len(str(args.number))))\n\n for i in range(args.number):\n fn = os.path.join(args.directory, next(names))\n with open(fn, 'wb+') as f:\n payload = args.payload.generate(args.rand_method, *args.chunk_size)\n for p in payload:\n f.write(p)\n\n except IOError as (errno, strerror):\n print(\"I/O error({0}): {1}\".format(errno, strerror))\n exit_status = ExitStatus.ERROR\n except OSError as (errno, strerror):\n print(\"OS error({0}): {1}\".format(errno, strerror))\n exit_status = ExitStatus.ERROR\n 
#except:\n # print \"Unexpected error:\", sys.exc_info()\n # exit_status = ExitStatus.ERROR\n\n return exit_status", "title": "" }, { "docid": "0e57a5230df7191a7b8071341c7786fd", "score": "0.56152314", "text": "def main(self):\n parser = self.setup_parser()\n args = parser.parse_args()\n self.parse(args)\n sys.exit(self.run(args))", "title": "" }, { "docid": "8dcadd0ddf3d2fbea99b29ceca83fa9c", "score": "0.5609144", "text": "def main():\n print(\"starting boids...\")\n\n parser = argparse.ArgumentParser(\n description=\"Implementing Craig Reynolds Bpid...\")\n parser.add_argument('--num_boids', dest='n', required=False)\n args = parser.parse_args()\n\n # Set the initial number of boids\n n = 100\n if args.n:\n n = int(args.n)\n\n # Create boids\n boids = Boids(n)\n\n # Set up plot\n fig = plt.figure()\n ax = plt.axes(xlim=(0, width), ylim=(0, height))\n\n # size and shape of the markers for body\n pts, = ax.plot([], [], markersize=10, c='k', marker='o',\n ls='None') # Create empty 2D Line object\n # size and shape of the markers for beak\n beak, = ax.plot([], [], markersize=4, c='r', marker='o',\n ls='None') # Create empty 2D Line object\n anim = animation.FuncAnimation(\n fig, tick, fargs=(pts, beak, boids), interval=50)\n\n # Add a 'button_press' event handler\n cid = fig.canvas.mpl_connect(\n 'button_press_event', boids.button_press) # mouse button is pressed\n\n plt.show()", "title": "" }, { "docid": "c8f3ba8e57c28b08321b6211fdfc48a4", "score": "0.5593138", "text": "def main():\n\n print('Welcome to the self-checkout system of Wake-mart.')\n\n num_items, total_cost = scanPrices()\n\n total_cost = discount(num_items, total_cost)\n\n num_items, total_cost = promotion(num_items, total_cost)\n\n makePayment(total_cost)", "title": "" }, { "docid": "b03dd98ffaa0f65c5b02397cd192b6d4", "score": "0.5572765", "text": "def main():\n logger.info(\"Started script.\")\n pw = PriceWatch()\n\n if pw.config[\"general\"][\"send_test_notification\"] == \"1\":\n logger.info(\"Sending test notification and exiting.\")\n notify.test_notification()\n sys.exit()\n elif (\n pw.config[\"general\"][\"wishlist_url\"]\n == \"https://www.amazon.co.uk/hz/wishlist/ls/S0M3C0D3\"\n ):\n config_file_path = Path(Path(__file__).parent, \"config.json\").resolve()\n logger.error(f\"You need to fill in the config file:\\n{config_file_path}\")\n sys.exit()\n\n page = pw.request_page()\n # In parsing the first page, pagination will be followed and requested/parsed.\n pw.parse_wishlist(page)\n new_cheaper_items = pw.compare_prices()\n if new_cheaper_items:\n notify.send_notification(wishlist_item_list=new_cheaper_items)\n\n pw.json_man.save_wishlist_json(pw.wishlist)\n logger.info(\"Finished.\")", "title": "" }, { "docid": "b428fffb548aeea009928beeb8cc0f65", "score": "0.55690086", "text": "def main():\n print('Welcome to the Brain Games!')\n name = cli.welcome_user()\n print('What number is missing in the progression?')\n check = arifm.game()\n if check:\n print('Congratulations, {}!'.format(name))\n else:\n print(\"Let's try again, {}!\".format(name))", "title": "" }, { "docid": "4ac8d3430daaa37d2cc1a3d161f21f40", "score": "0.55647093", "text": "def main():\n\n\tparser = OptionParser()\n\tparser.add_option(\"-p\", dest=\"pdbfile\", help=\"pdbfile\")\n\tparser.add_option(\"-P\", dest=\"pdblist\", help=\"pdblist\")\n\tparser.add_option(\"-s\", dest=\"statefile\", help=\"statefile\")\n\tparser.set_description(main.__doc__)\n\t(options, args) = parser.parse_args()\n\n\tpdbfiles = []\n\tif options.pdblist:\n\t\ttry:\n\t\t\tPDBLIST = 
open(options.pdblist)\n\t\texcept:\n\t\t\tprint \"unable to open pdblist\",options.pdblist\n\t\t\tsys.exit()\n\n\t\tfor line in PDBLIST.readlines():\n\t\t\tline = string.strip(line)\n\t\t\tpdbfiles.append(line)\t\n\n\telif options.pdbfile:\n\t\tpdbfiles.append(options.pdbfile)\n\telse:\n\t\tparser.print_help()\n\t\tsys.exit()\n\n\tif not options.statefile:\n\t\tparser.print_help()\n\n\n\tgridlig = grid()\n\tgridbb = grid()\n\n\ttry:\n\t\tSTATEFILE = open(options.statefile)\n\texcept:\n\t\tprint \"unable to open statefile\"\n\t\tsys.exit()\n\n\tfor line in STATEFILE.readlines():\n\t\tline = string.strip(line)\n\t\twords = string.split(line)\n\t\t\n\t\tif \"gridlig\" in line:\n\t\t\tgridlig.read(words[1])\n\n\t\tif \"gridbb\" in line:\n\t\t\tgridbb.read(words[1])\n\t\t\t\n\t\t\n\tprotein = Molecule()\n\tfor pdbfile in pdbfiles:\n\t\tprotein.clear()\n\t\tprotein.readPDB(pdbfile)\n\n\t\tfailed = False\n\t\tfor chain in protein.chain:\n\t\t\tfor residue in chain.residue:\n\t\t\t\tfor atom in residue.atom:\n\t\t\t\t\t\n\t\t\t\t\tif atom.kind == \"HETATM\":\n\t\t\t\t\t\tif atom.name[0] == \"V\":\n\t\t\t\t\t\t\tcontinue\n\n\t\t\t\t\t\tx = atom.coord.x\n\t\t\t\t\t\ty = atom.coord.y\n\t\t\t\t\t\tz = atom.coord.z\n\t\t\t\t\t\tocc = atom.occupancy\n\t\t\n\t\t\t\t\t\tif occ > 0.0:\n\t\t\t\t\t\t\tif not gridlig.isInGrid(x, y, z):\n\t\t\t\t\t\t\t\tprint pdbfile,atom.name,atom.file_id,\"out of gridlig boundaries\"\n\t\t\t\t\t\t\t\tfailed = True\n\t\t\t\t\t\t\t\tcontinue\n\n\t\t\t\t\t\t\tzn = gridlig.getZone(x,y,z)\t\n\t\t\t\t\t\t\tif not gridlig.zone[zn[0]][zn[1]][zn[2]]:\n\t\t\t\t\t\t\t\tprint pdbfile,atom.name,atom.file_id,\"not occupied in gridlig\"\n\t\t\t\t\t\t\t\tfailed = True\n\t\t\t\t\t\t\t\tcontinue\n\n\t\t\t\t\t\tif not gridbb.isInGrid(x,y,z):\n\t\t\t\t\t\t\tcontinue\n\n\t\t\t\t\t\tzn = gridbb.getZone(x,y,z)\n\t\t\t\t\t\tif gridbb.zone[zn[0]][zn[1]][zn[2]]:\n\t\t\t\t\t\t\tprint pdbfile,atom.name,atom.file_id,\"clashes with backbone\"\n\t\t\t\t\t\t\tfailed = True\n\t\t\t\t\t\t\tcontinue\n\n\t\tif not failed:\n\t\t\tprint pdbfile,\".... 
passes\"", "title": "" }, { "docid": "42a648141e2419a95833a8034d78c490", "score": "0.55433804", "text": "def main() -> NoReturn:\n version = bobber_version()\n args = parse_args(version)\n execute_command(args, version)", "title": "" }, { "docid": "f0a37186d057d3b988d1825e86e3f5e0", "score": "0.5538347", "text": "def main():\n try:\n logging.info('Initializing')\n\n #Get environmental values from transmission\n #Path should be $TR_TORRENT_DIR/$TR_TORRENT_NAME\n\n TORRENT_DIR, TORRENT_NAME = config.get_environmental_variables_from_transmission()\n\n extract_all(TORRENT_DIR)\n _choose_handler(TORRENT_DIR, TORRENT_NAME)\n _cleanup_temp(TORRENT_DIR)\n logging.info('Done!')\n except:\n logging.exception(\"Critical exception occurred: \")\n raise", "title": "" }, { "docid": "79a6ce4166f71ba4a23231df630e7cd2", "score": "0.5537185", "text": "def main():\n application = BoaApp(0)\n application.MainLoop()", "title": "" }, { "docid": "321fbf81842237360c4e1794541c3a6b", "score": "0.5532704", "text": "def main():\n _setup_logging()\n song_winners.workflow()\n spotify_songs.workflow()\n votes.workflow()", "title": "" }, { "docid": "3ee74120d247a5df731794fa3dec5cc3", "score": "0.55205685", "text": "def run(self):\n my_bulb = BulbBlinker( my_id = self.id,\n bpm = self.bpm, \n host = self.host,\n adjustment = self.adjustment,\n bulb_objects_list = self.bulb_objects_list, \n above_neighbor = (self.id + 1) % 13, \n below_neighbor = (self.id - 1) % 13, \n turned_on_list = self.turned_on_list)\n my_bulb.daemon = True \n my_bulb.start()\n self.check_ordering()", "title": "" }, { "docid": "2e2c9b9f73f8fdca22f4d25a4b07dc60", "score": "0.5520292", "text": "def main():\n\n\tusage = \"usage: %prog <project_id> <file>\"\n\tdescription = \"%prog imports dirb files into Lair\"\n\tparser = OptionParser(usage=usage, description=description,\n\t\t\t\t\t\t\tversion=\"%prog 0.0.1\")\n\n\t(options, args) = parser.parse_args()\n\tif len(args) < 2:\n\t\tprint parser.get_usage()\n\t\tsys.exit(1)\n\n\tproject_id, result_resource = args\n\tproject = dirb.parse(project_id, result_resource)\n\n\t# Connect to the database\n\tdb = api.db_connect()\n\tapi.save(project, db, dirb.TOOL)\n\tsys.exit(0)", "title": "" }, { "docid": "4314c1408e37cf60dcd240257ff3a682", "score": "0.5519621", "text": "def main():\n args = get_args()\n setup_logs(args, args.out_prefix)\n\n\n # Parse Brenda Ligands file\n logging.info(\"Starting to parse ligands\")\n brenda_ligands_file = f\"{args.out_prefix}_brenda_ligands.json\"\n brenda_ligands = parse_ligands_file(brenda_ligands_file,\n args.brenda_ligands, args.load_prev,\n args.debug)\n logging.info(\"Done parsing ligands\")\n\n # Parse brenda ec classes and enzymes\n ec_stats_file = f\"{args.out_prefix}_brenda_ec_stats.json\"\n enzymes_data_file = f\"{args.out_prefix}_brenda_enzymes_data.json\"\n\n # Parse brenda flat file\n # Ec_stats contains enzyme class wide parameters\n # enzymes_data contains actual enzymes\n logging.info(\"Starting to parse flat file\")\n ec_stats, enzymes_data = parse_flat_file(ec_stats_file, \n enzymes_data_file, \n args.brenda_flat_file,\n args.load_prev,\n args.out_prefix,\n args.debug)\n logging.info(\"Done parsing flat file\")\n\n # Get the list of compounds from enzymes data \n logging.info(\"Starting to extract compound list\")\n compounds_list_file = f\"{args.out_prefix}_compound_list.json\"\n compound_list = get_compound_list(enzymes_data, compounds_list_file, \n args.load_prev)\n logging.info(\"Done extracting compound list\")\n\n logging.info(\"Starting to extract 
inchi/chebi\")\n chebi_inchi_set_file = f\"{args.out_prefix}_chebi_inchi_set.json\"\n chebi_inchi_set = extract_unique_inchi_chebi(chebi_inchi_set_file, \n brenda_ligands,\n args.load_prev)\n logging.info(\"Done extracting inchi/chebi\")\n\n logging.info(\"Starting to map chebi to smiles\")\n # Map brenda ligand chebi to smiles\n mapped_chebi_file = f\"{args.out_prefix}_chebi_to_smiles.json\"\n unmapped_chebi_file = f\"{args.out_prefix}_chebi_unmapped.json\"\n mapped_chebi, unmapped_chebi = map_chebi_to_smiles(chebi_inchi_set, \n mapped_chebi_file,\n unmapped_chebi_file, \n args.load_prev,\n args.out_prefix,\n args.use_cirpy,\n args.cirpy_log)\n logging.info(\"Done mapping chebi to smiles\")\n\n logging.info(\"Starting to map inchi to smiles\")\n # Map brenda ligand inchi to smiles\n mapped_inchi_file = f\"{args.out_prefix}_inchi_to_smiles.json\"\n unmapped_inchi_file = f\"{args.out_prefix}_inchi_unmapped.json\"\n mapped_inchi, unmapped_inchi = map_inchi_to_smiles(chebi_inchi_set,\n mapped_inchi_file,\n unmapped_inchi_file,\n args.load_prev,\n args.out_prefix,\n args.use_cirpy,\n args.cirpy_log)\n logging.info(\"Done mapping inchi to smiles\")\n\n logging.info(\"Starting to resolve all compounds to smiles\")\n # Now resolve all compounds\n mapped_comps_file = f\"{args.out_prefix}_compounds_to_smiles.json\"\n unmapped_comps_file = f\"{args.out_prefix}_compounds_unmapped.json\"\n\n # TODO: Fix this so that it doesn't return strings and lists, but only one\n # of the two types \n mapped_compounds, unmapped_compounds = map_compounds_to_smiles(compound_list, brenda_ligands, \n mapped_inchi, mapped_chebi,\n mapped_comps_file, unmapped_comps_file,\n args.load_prev, args.out_prefix,\n args.use_cirpy, args.cirpy_log, \n args.opsin_loc) \n logging.info(\"Done resolving all compounds to smiles\")\n\n\n # Standardize all smiles mappings!\n # Load from file if it exists\n if not args.no_standardize:\n logging.info(\"Starting standardizer\")\n mapped_standardized_file = f\"{args.out_prefix}_compounds_to_standardized_smiles.json\"\n mapped_compounds = standardize_smiles(mapped_standardized_file, mapped_compounds,\n args.standardizer_log, args.load_prev, \n args.multiprocess_num)\n logging.info(\"Done with standardizer\")\n\n\n logging.info(\"Beginning to export all files\")\n # Reverse the mapping for reference later \n smiles_to_names_file = f\"{args.out_prefix}_smiles_to_comp_names.json\"\n smiles_to_names = reverse_mapping(mapped_compounds, \n outfile = smiles_to_names_file)\n\n # Add gene sequences and compound smiles back to the dataframe.\n enzymes_data, rxn_set, compound_set = add_compounds(\n enzymes_data, mapped_compounds)\n rxn_set, compound_set = pd.DataFrame(rxn_set), pd.DataFrame(compound_set)\n\n # Ouput finished mapping to file\n enzymes_mapped_data_file = f\"{args.out_prefix}_enzymes_data_complete_mapped.json\"\n rxn_final_tsv = f\"{args.out_prefix}_rxn_final.tsv\"\n compounds_final_tsv = f\"{args.out_prefix}_rxn_compounds.tsv\"\n utils.dump_json(enzymes_data,\n enzymes_mapped_data_file,\n pretty_print=False)\n\n compound_set.to_csv(compounds_final_tsv, sep=\"\\t\")\n rxn_set.to_csv(rxn_final_tsv, sep=\"\\t\")\n\n # Output statistics about all the data collected\n summary_file = f\"{args.out_prefix}_stats.json\"\n stats_summary = {\n \"mapped_compound_smiles\": len(mapped_compounds),\n \"unmapped_compound_smiles\": len(unmapped_compounds),\n \"mapped_inchis\": len(mapped_inchi),\n \"unmapped_inchis\": len(unmapped_inchi),\n \"mapped_chebi\": len(mapped_chebi),\n \"unmapped_chebi\": 
len(unmapped_chebi),\n }\n\n stats_summary[\"Num ec classes\"] = len(ec_stats)\n\n # Update with other statistics and print this to a file\n stats_summary.update(\n parse_brenda_stats.get_rxn_compound_stats(rxn_set, compound_set)\n )\n utils.dump_json({k: str(v) for k, v in stats_summary.items()},\n summary_file)", "title": "" }, { "docid": "161425c07e5d026e7b708d9a378b2a0a", "score": "0.5518024", "text": "def test_launch_bs(run_cli_process_launch_command, fixture_code, generate_remote_data):\n from aiida_kkr.cmdline.launch import launch_bs\n from aiida.engine.processes.calcjobs.tasks import PreSubmitException\n code = fixture_code('kkr.kkr').store()\n params = kkrparams(params_type='kkr')\n params.set_multiple_values(\n MIN=-10,\n MAX=5,\n NPT2=12,\n RCLUSTZ=2.3,\n TEMPR=50,\n )\n\n param_node = Dict(dict=params.get_dict()).store()\n path = os.path.abspath(os.path.join(THISFILE_PATH, '../../files/bd_dump_bs/parent_kkr'))\n remote = generate_remote_data(code.computer, path).store()\n options = ['--kkr', code.uuid, '--parameters', param_node.uuid, '--parent-folder', remote.uuid]\n run_cli_process_launch_command(launch_bs, options=options, raises=PreSubmitException)", "title": "" }, { "docid": "9e72155a4c14785f4d6f7a5c7a836693", "score": "0.5516675", "text": "def main():\n args = get_cmdline_arguments()\n bord_config = configreader.ConfigReader(args.config)\n count = create_html(bord_config['content_dir'], bord_config['output_dir'])\n print ('Created', count, 'HTML files')\n if (args.server):\n print ('Serving at:', str(args.server))\n server.start_server(bord_config['output_dir'], args.server)", "title": "" }, { "docid": "f0a8b95fc16c26f3db613753c0b17b2d", "score": "0.5509966", "text": "def main():\n options = Options()\n app = Trader(options)\n x = 0\n while x < 1:\n # app.call_ticker()\n test = app.balances()\n # app()\n # app.trade_history()\n # app.buy(amount=1, rate=.001, coin='BTC_ETH')\n # app.sell(amount=8, rate=.00000130, coin='BTC_DOGE')\n # order = app.buy()\n\n # if app.cancel(order):\n # print(\"Order {}.. !! canceled\".format(order))\n # time.sleep(.05)\n #\n # history = app.quote_history()\n print(test)\n x += 1\n\n if app.main():\n sys.exit('Alphagriffin.com | 2017')\n return True", "title": "" }, { "docid": "9849beb2becdf68e2bf5a55ba6c8e058", "score": "0.5497161", "text": "def main():\n\n BASIC.run(PROGRAM)", "title": "" }, { "docid": "0bd1ee4e3a3c53c0b332a70d92705c3a", "score": "0.549565", "text": "def run_program():\n\n if not aig.are_parameters_valid():\n raise Exception(\"You must provide the name of input file as first argument!\")\n\n graph = aig.parse_input(sys.argv[1])\n path = read_path()\n\n print(compute_path_cost(path, graph))", "title": "" }, { "docid": "918c5dd264aa819f502eccd9910855ed", "score": "0.549286", "text": "def main():\n epilog = \"DT179G Assignment 3 v\" + __version__\n parser = argparse.ArgumentParser(description=__desc__, epilog=epilog, add_help=True)\n parser.add_argument('nth', metavar='nth', type=int, nargs='?', default=30,\n help=\"nth Fibonacci sequence to find.\")\n\n global LOGGER # ignore warnings raised from linters, such as PyLint!\n LOGGER = create_logger()\n\n args = parser.parse_args()\n nth_value = args.nth # nth value to sequence. 
Will fallback on default value!\n\n fib_details = { # store measurement information in a dictionary\n 'fib iteration': fibonacci_iterative(nth_value),\n 'fib recursion': fibonacci_recursive(nth_value),\n 'fib memory': fibonacci_memory(nth_value)\n }\n\n print_statistics(fib_details, nth_value) # print information in console\n\n write_to_file(fib_details) # write data files", "title": "" }, { "docid": "0a84589821cca136350f458b3fcfd575", "score": "0.54918534", "text": "def main():\n\targs = get_args()\n\tbottles = args.num_bottles\n\n\tfor bottles in range(bottles,0,-1): # -1 decrease by 1 each time \n\t\tif bottles > 2:\n\t\t\tprint('{} bottles of beer on the wall,'.format(bottles))\n\t\t\tprint('{} bottles of beer,'.format(bottles))\n\t\t\tprint('Take one down, pass it around,')\n\t\t\tprint('{} bottles of beer on the wall!'.format(bottles-1)+'\\n')\n\t\tif bottles ==2:\n\t\t\tprint('{} bottles of beer on the wall,'.format(bottles))\n\t\t\tprint('{} bottles of beer,'.format(bottles))\n\t\t\tprint('Take one down, pass it around,')\n\t\t\tprint('{} bottle of beer on the wall!'.format(bottles-1)+'\\n') # proper grammer for 1 bottle\n\t\tif bottles == 1:\n\t\t\tprint('{} bottle of beer on the wall,'.format(bottles)) # proper grammer for 1 bottle\n\t\t\tprint('{} bottle of beer,'.format(bottles)) # proper grammer for 1 bottle\n\t\t\tprint('Take one down, pass it around,')\n\t\t\tprint('{} bottles of beer on the wall!'.format(bottles-1)) # 0 bottles is proper grammer\n\t\t\tbreak", "title": "" }, { "docid": "483978573f788bb33960325a7b7f12f3", "score": "0.5483947", "text": "def main():\n args = parse_arguments()\n if args.sub == 'byname':\n request_result(punk_requests.get_beer_by_name(args.name), args.file)\n elif args.sub == 'byid':\n request_result(punk_requests.get_beer_by_id(args.id), args.file)\n elif args.sub == 'byinterval':\n request_result(punk_requests.get_beer_brewed_in(args.from_date, args.until_date), args.file)\n elif args.sub == 'byfood':\n request_result(punk_requests.get_beer_by_food(args.food_pairs), args.file)", "title": "" }, { "docid": "ea842d0c4094cc8f0b450f6c16ebefc3", "score": "0.547629", "text": "def main(argv):\n keystore, signing_key, verify_key_hex, networker, blockchain_thread, gui_thread, dns = init(*parse_args(argv))\n\n # User Interaction\n while True:\n command = None\n if gui_thread.is_alive():\n if not gui_receive_queue.empty():\n command = gui_receive_queue.get(block=True)\n print('Command from gui: {}'.format(command))\n else:\n continue\n\n if not command:\n print('Action: ')\n command = get_command()\n\n if command == 'help':\n help_str = (\"\"\" Available commands:\n help: prints commands\n transaction <to> <amount> : Create transaction\n mine: mine a new block\n balance [<name>]: Print balance (name optional)\n dump: print blockchain\n peers: print peers\n key <filename> : Save current key to <filename>\n import <key> <name> : Imports a public key associated with \\\n<name> from file <file> to the keystore\n deletekey <name> : Deletes key associated with <name> from\\\nkeystore\n export <filename> : Exports one own public key to file\\\n<filename>\n save: Save blockchain to bc_file.txt\n exit: exits program\n \"\"\")\n print(help_str)\n if dns:\n dns_str = (\"\"\" DNS-only commands:\n register <domain> <ip> : Registers an available domain to an ip (costs 20 coins)\n update <domain> <ip> : Updates an existing already owned domain with a new ip (costs 20 coins)\n transfer <to> <domain> : Transfers an owned domain to another user (costs 1 coin)\n auction 
<domain> : Offers an owned domain for auction (costs 1 coin)\n bid <amount> <domain> : Places a bid of <amount> on the auctioned domain.\n resolve <domain> : Resolves the domain name and prints the ip (if the domain does not exist, prints '')\n \"\"\")\n print(dns_str)\n gui_send_queue.put(help_str)\n elif command == 'exit':\n receive_queue.put(('exit', '', 'local'))\n send_queue.put(None)\n keystore.save()\n blockchain_thread.join()\n networker.join()\n if gui_thread.is_alive():\n gui_thread.join()\n sys.exit()\n elif command == 'mine':\n receive_queue.put(('mine', verify_key_hex, 'local'))\n elif re.fullmatch(r'transaction \\w+ \\d+', command):\n t = command.split(' ')\n # Create new Transaction, sender = hex(public_key),\n # signature = signed hash of the transaction\n if int(t[2]) <= 0:\n print('Transactions must contain a amount greater than zero!')\n continue\n recipient = keystore.resolve_name(t[1])\n if recipient == 'Error':\n continue\n timestamp = time.time()\n # fee equals 5% of the transaction amount - at least 1\n fee = int(math.ceil(int(t[2]) * 0.05))\n transaction = create_transaction(dns, verify_key_hex, str(recipient),\n int(t[2]), fee, timestamp, DNS_Data('', '', ''), signing_key)\n\n receive_queue.put(('new_transaction',\n transaction,\n 'local'\n ))\n elif command == 'dump':\n # gui_send_queue.put(vars(my_blockchain))\n # pprint(vars(my_blockchain))\n receive_queue.put(('dump', '', 'local'))\n elif command == 'peers':\n networker_command_queue.put('print_peers')\n elif re.fullmatch(r'key \\w+', command):\n try:\n t = command.split(' ')\n save_key(signing_key, t[1])\n print('Key saved successfully')\n except Exception as e:\n print('Could not save key')\n print(e)\n elif re.fullmatch(r'import \\w+ \\w+', command):\n try:\n t = command.split(' ')\n if keystore.resolve_name(t[2]):\n keystore.update_key(t[2], t[1])\n else:\n print('importing public key')\n keystore.add_key(t[2], t[1])\n except Exception as e:\n print('Could not import key')\n print(e)\n elif re.fullmatch(r'deletekey \\w+', command):\n try:\n t = command.split(' ')\n if keystore.resolve_name(t[1]):\n keystore.update_key(t[1], '')\n else:\n print(\n f'Could not delete {t[1]} from keystore.',\n ' Was it spelt right?')\n except Exception as e:\n print('Could not delete key')\n print(e)\n elif re.fullmatch(r'export \\w+', command):\n try:\n print('Exporting public key')\n t = command.split(' ')\n save_key(verify_key_hex, t[1])\n except Exception as e:\n print('Could not export public key')\n print(e)\n elif command == 'balance':\n receive_queue.put(('print_balance',\n (verify_key_hex, time.time()),\n 'local'))\n elif re.fullmatch(r'balance \\w+', command):\n t = command.split(' ')\n account = keystore.resolve_name(t[1])\n if account != 'Error':\n receive_queue.put(('print_balance',\n (account, time.time()),\n 'local'\n ))\n elif re.fullmatch(r'resolve \\w+\\.\\w+', command):\n if not dns:\n print('Command not supported!')\n continue\n t = command.split(' ')\n receive_queue.put(('dns_lookup',\n t[1],\n 'local'\n ))\n elif re.fullmatch(r'(register|update) \\w+\\.\\w+ \\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}', command):\n if not dns:\n print('Command not supported!')\n continue\n t = command.split(' ')\n if not validate_ip(t[2]):\n print('Not a valid ip')\n continue\n recipient = '0'\n timestamp = time.time()\n # fee for domain registration equals 20\n fee = 20\n typ = t[0][0]\n data = DNS_Data(typ, t[1], t[2])\n transaction = create_transaction(dns, verify_key_hex, str(recipient), 0,\n fee, timestamp, data, 
signing_key)\n\n receive_queue.put(('new_transaction',\n transaction,\n 'local'\n ))\n elif re.fullmatch(r'transfer \\w+ \\w+\\.\\w+', command):\n if not dns:\n print('Command not supported!')\n continue\n t = command.split(' ')\n account = keystore.resolve_name(t[1])\n if account != 'Error':\n recipient = account\n timestamp = time.time()\n # fee for domain transfer equals 1\n fee = 1\n typ = 't'\n data = DNS_Data(typ, t[2], '')\n transaction = create_transaction(dns, verify_key_hex, str(recipient), 0,\n fee, timestamp, data, signing_key)\n receive_queue.put(('new_transaction',\n transaction,\n 'local'\n ))\n elif re.fullmatch(r'auction \\w+\\.\\w+', command):\n if not dns:\n print('Command not supported!')\n continue\n t = command.split(' ')\n recipient = '0'\n timestamp = time.time()\n # fee for auction equals 1\n fee = 1\n typ = 't'\n data = DNS_Data(typ, t[1], '')\n transaction = create_transaction(dns, verify_key_hex, str(recipient), 0,\n fee, timestamp, data, signing_key)\n\n receive_queue.put(('new_transaction',\n transaction,\n 'local'\n ))\n elif re.fullmatch(r'bid \\d+ \\w+\\.\\w+', command):\n if not dns:\n print('Command not supported!')\n continue\n t = command.split(' ')\n recipient = '0'\n timestamp = time.time()\n # fee for bid equals 1\n fee = 1\n typ = 'b'\n amount = int(t[1])\n data = DNS_Data(typ, t[2], '')\n transaction = create_transaction(dns, verify_key_hex, str(recipient), amount,\n fee, timestamp, data, signing_key)\n receive_queue.put(('new_transaction',\n transaction,\n 'local'\n ))\n elif command == 'save':\n receive_queue.put(('save',\n '',\n 'local'\n ))\n elif command == 'gui':\n # if dns:\n # print('gui not yet supported for DNS Chain')\n # continue\n print(\"open gui\")\n gui_thread.start()\n gui_send_queue.put(('signing_key', signing_key, 'local'))\n\n else:\n print('Command not found!')", "title": "" }, { "docid": "102a5a04bf02a2b75f0d5da52ae8e173", "score": "0.5474822", "text": "def main(self, argv):", "title": "" }, { "docid": "8fb92df960cc977514bfbcbc6c0c30fc", "score": "0.5474742", "text": "def main():\n engine.run_game(settings.DESCR_PRIME, prime.ask_question)", "title": "" }, { "docid": "c8122dc64784738c878decc2c605752f", "score": "0.546062", "text": "def main():\n # create log file first\n create_logs()\n # log start info\n logging.info('Starting new enrollment flow now...')\n logging.info('Waiting for user space to fully load...')\n # waiting for user space\n wait_for_userspace()\n # check if the jss is available\n # commented out, only used for troubleshooting purposes, i.e. remote offices fail to enroll\n #check_jss_connection()\n # set the computer name\n logging.info('setting the computer name...')\n set_computername()\n # ensure dependencies and jamf binary is present\n logging.info('Installing initial dependencies for DEP Notify flow...')\n install_dependencies()\n # install dependency list for before we continue...\n run_jamf_policy(DEPENDENCY_LIST)\n # start DEP Notify flow\n write_to_dnlog('Command: Image: %s' % DEPSCREEN)\n # please modify this code to match the verbiage you want to display\n write_to_dnlog('Command: MainTitle: Welcome to myOrg')\n write_to_dnlog('Command: MainText: Please wait while we setup and configure your Mac at MyOrg '\n 'This should only take a few minutes and will require a restart of your Mac. 
'\n 'If you need assistance please contact IT at [email protected]')\n write_to_dnlog('Status: Preparing your system...')\n start_dep_notify()\n # install base software/config\n run_jamf_policy(POLICY_LIST)\n # apply security compliance and reboot last\n run_jamf_policy(SECURITY_LIST)\n software_updates()\n write_to_dnlog('Status: Enrollment is complete, exiting...')\n write_to_dnlog('Command: Quit')", "title": "" }, { "docid": "54314d4beffe9402f299840136debdc4", "score": "0.54560435", "text": "def main():\n pickle = str(sys.argv[3])\n print \"Getting network.... \"\n net = cPickle.load(open(pickle, 'rb'))\n size = get_network_size(net)\n print \"Network size = %s\" % size\n image_path = str(sys.argv[1])\n gt = str(sys.argv[2])\n classifier = CraterClassifier(net)\n print \"Running the pyramid...\"\n pyramid = Pyramid(image_path, 10, size, gt, classifier)\n drawing_data = pyramid.runPyramid()\n cPickle.dump(pyramid.hitlist,open(DRAWING_DATA_OUT, 'wb'))", "title": "" }, { "docid": "b13890bcbc6086e23d2b901a69ed2d1c", "score": "0.5447834", "text": "def main():\n print('Welcome to the Brain Games!')", "title": "" }, { "docid": "ebcc79a5c9d535d9e0bba7b1a0f6d75f", "score": "0.5444025", "text": "def main():\n\n # Get the current working dir. path;\n current_dir = str(pathlib.Path().absolute())\n\n # Get current time\n now = datetime.now()\n current_time = now.strftime(\"%d-%m-%Y\") + '_' + now.strftime(\"%H_%M_%S\")\n\n # ---------------------------- Set LOG environment ----------------------------\n # set up logging to file - see previous section for more details\n logger_path = f'{current_dir}/LOGs/{current_time}.log'\n logging.basicConfig(level=logging.DEBUG,\n format='%(asctime)s %(name)-20s: %(levelname)-8s %(message)s',\n filename=logger_path,\n filemode='w')\n # define a Handler which writes INFO messages or higher to the sys.stderr\n console = logging.StreamHandler()\n console.setLevel(logging.INFO)\n # set a format which is simpler for console use\n formatter = logging.Formatter('[%(name)-20s]: [%(levelname)s] %(message)s')\n # tell the handler to use this format\n console.setFormatter(formatter)\n # add the handler to the root logger\n logging.getLogger('').addHandler(console)\n\n # Now, we can log to the root logger, or any other logger. First the root...\n logging.info(':: The Process started.')\n\n # Now, define a couple of other loggers which might represent areas in your\n # application:\n\n distance = logging.getLogger('main.distance-file')\n # ---------------------------- End LOG environment ----------------------------\n\n # reads the config file, if it exists;\n config_file_path = current_dir + \"\\\\general_config.txt\"\n # if config file exists continue, else create a new one;\n if os.path.isfile(config_file_path):\n # Get the available configuration file;\n with open(config_file_path) as file:\n config = json.load(file)\n logging.info(\":: Read configuration file complete.\\n\")\n pass\n else:\n logging.info(\":: File not found! 
Creating a new configuration...\")\n logging.info(\n \":: Please check the Readme file for information regarding the settings configuration standards.\\n\")\n config = config_gen(current_dir, config_file_path)\n\n # Get the configuration settings;\n (proteins_path, is_convex_relax, multi_start_phase, mdjeep_source, protein_black_list,\n global_debug_value) = config.values()\n\n # get the names and directories for all the available proteins; dict(protein_name: its_dir);\n proteins = dict(zip(os.listdir(config[\"proteins_path\"]), glob.glob(f\"{proteins_path}\\\\*\"))) # dict\n\n # ---------------------------- Now starts the tests ----------------------------\n # Create the Node test directory - Node - run date in current directory\n logging.info(':: Creating the test directory...')\n # First we will save the proteins names and paths to be read by the SDP Matlab script;\n # And also, create the test paths for each one, saving it on a dictionary\n protein_tests = {}\n\n proteins_filepath_dir = current_dir + f\"\\\\Matlab\\\\proteins.txt\"\n\n with open(proteins_filepath_dir, 'w+') as file:\n for protein, path in proteins.items():\n # We will not load the proteins on the blacklist;\n if protein in protein_black_list:\n continue\n else:\n directory = f'\\\\Tests\\\\{current_time}_{protein}'\n test_path = current_dir + directory\n # Create the directory\n os.mkdir(test_path)\n # Saves the directory\n protein_tests[f\"{protein}\"] = test_path\n logging.info(f\":: Directory '{directory}' created.\")\n\n # Writes this information to Matlab usage\n # Scheme: Node -- node_path -- test_path\n file.write(f\"{protein},{path},{test_path}\\n\")\n\n # Create the distance files for every available proteins\n for node in protein_tests.keys():\n # distance file generator -- generates the distance file, if it doesn't exists, and returns it's directory:\n try:\n # TODO: Insert the overwrite=distance_overwrite option\n gen_distance_file(node, proteins[f\"{node}\"])\n distance.info(\":: Process completed successfully, waiting for data to be read...\\n\")\n\n except OSError as err:\n distance.warning(f\":: Distance file generator found an error with node: {node} \\n\"\n f\":: {err}.\")\n distance.warning(\":: The process was interrupted!\")\n continue\n\n # ------------ Matlab (YALMIP): SDP Program\n\n # SDP launch and start phase:\n logging.info(':: Start [SDP] program phase.')\n # prepare a structure file for he matlab bash script (SDP Execution);\n launch_sdp(current_dir)\n logging.info(\"SPG environment configuration set.\\n\")\n\n # ------------ Python Refinement: SPG Program\n num_nodes = len(proteins.keys())\n nd_counter = 1 # node counter\n for node in protein_tests.keys():\n logging.info(f\":: #{nd_counter} Node: {node} of {num_nodes}\")\n # --- Open PDB File\n node_path = proteins[f'{node}']\n pdb_path = node_path + f'\\\\{node}.txt'\n dist_path = node_path + \"\\\\dist.txt\"\n test_path = protein_tests[f'{node}']\n pdb = open_pdb_file(pdb_path)\n logging.debug(':: Environmental properties successful loaded.')\n distancias, u, v, lb, ub, prop_dist = env_set(dist_path)\n\n # Now begins the refinement process of each protein\n # SPG launch and start phase\n logging.info(\":: Start [SPG] program phase.\")\n ops = (prop_dist, is_convex_relax, multi_start_phase, global_debug_value)\n launch_spg(node, pdb, test_path, distancias, lb, ub, u, v, ops)\n # -----\n nd_counter += 1\n\n # ------------ General statistics: static\n launch_statistics(f'{current_dir}/Statistics/{current_time}.txt', protein_tests)\n # 
End", "title": "" }, { "docid": "0bdf51636417aa862634dcdbfb677537", "score": "0.5436598", "text": "def run():\n main()", "title": "" }, { "docid": "7020080d53520238d2265b1adc42c1dd", "score": "0.5423083", "text": "def main():\n\n # FIXME need to verify parameters or otherwise validate input\n\n # parse command line arguments\n args = bbargparse.parse_inputs()\n\n # if args not ok, then squawk and return exit code of -1\n if not args_ok(args):\n return -1\n\n # show desired game day results\n show_results(args)\n\n # return exit code zero for success\n return 0", "title": "" }, { "docid": "b18ab5eb6ed5ca0310ac031f5d75e6c1", "score": "0.54151267", "text": "def main():\n parser = argparse.ArgumentParser(description=\"Wrapper for the GROMACS grompp module.\",\n formatter_class=lambda prog: argparse.RawTextHelpFormatter(prog, width=99999))\n parser.add_argument('-c', '--config', required=False, help=\"This file can be a YAML file, JSON file or JSON string\")\n\n # Specific args of each building block\n required_args = parser.add_argument_group('required arguments')\n required_args.add_argument('--input_gro_path', required=True)\n required_args.add_argument('--input_top_zip_path', required=True)\n required_args.add_argument('--output_tpr_path', required=True)\n parser.add_argument('--input_cpt_path', required=False)\n parser.add_argument('--input_ndx_path', required=False)\n parser.add_argument('--input_mdp_path', required=False)\n\n args = parser.parse_args()\n config = args.config if args.config else None\n properties = settings.ConfReader(config=config).get_prop_dic()\n\n # Specific call of each building block\n grompp(input_gro_path=args.input_gro_path, input_top_zip_path=args.input_top_zip_path,\n output_tpr_path=args.output_tpr_path, input_cpt_path=args.input_cpt_path,\n input_ndx_path=args.input_ndx_path, input_mdp_path=args.input_mdp_path,\n properties=properties)", "title": "" }, { "docid": "6b6b2570d3a95189963ebaf6039d457c", "score": "0.54100776", "text": "def main():\n here = os.path.abspath(os.path.dirname(__file__))\n app_file = os.path.join(here, \"app.py\")\n os.environ[\"FLASK_APP\"] = app_file\n os.environ[\"FLASK_ENV\"] = \"development\"\n command = \"python -m flask run\"\n command = command.replace(\"python\", sys.executable)\n print(\" --- Interactive Bayesian Optimisation ---\")\n print(\" * Please, open your browser at the address shown below\")\n subprocess.run(command.split(\" \"))", "title": "" }, { "docid": "0a7914578b1dd53c90d9bf0c9f594c94", "score": "0.5403009", "text": "def _main():\n # connect_port support in Stem\n assert get_distribution('stem').version > '1.4.0', \\\n 'Stem module version must be greater than 1.4.0.'\n parser = ArgumentParser(description=\"Generate Tor paths.\")\n parser.add_argument(\"--port\", type=int, default=9051,\n help=\"tor control port.\")\n parser.set_defaults(network_protection=True)\n args = parser.parse_args()\n\n controller = connect_port(port=args.port)\n if not controller:\n sys.stderr.write(\"ERROR: Couldn't connect to tor.\\n\")\n sys.exit(1)\n if not controller.is_authenticated():\n controller.authenticate()\n findpath(controller)\n controller.close()", "title": "" }, { "docid": "c18d0960bab3423d92ad4b6fbc20a6ce", "score": "0.53985465", "text": "def main():\n\n # Get command line arguments\n jid = sys.argv[1]\n name_temp = sys.argv[2]\n if len(sys.argv) == 4: # 'no_of_processes' is an optional command line argument that defaults to 16 if not given\n no_of_processes = int(sys.argv[3])\n sam = SamModelCaller(jid, name_temp, 
no_of_processes)\n elif len(sys.argv) == 5:\n no_of_processes = int(sys.argv[3])\n write_output = bool(sys.argv[4])\n sam = SamModelCaller(jid, name_temp, no_of_processes, write_output)\n else:\n sam = SamModelCaller(jid, name_temp)\n\n\n\n\n sam.sam_multiprocessing()", "title": "" }, { "docid": "7a1d931a1d790b4b4437757cb31e0d90", "score": "0.5383743", "text": "def main():\n properties = read_properties_from_file('Sales.csv')\n settings = read_settings_from_file('Settings.csv')\n print(stripes)\n print(\n \"\\nWelcome to Kimberlynet - Sweden's largest residential site\\n\\nWe will help you find a property, please\"\n \" answer a few questions before we start!\\n\")\n print(stripes)\n interest = control_float('Please enter current interest (%): ')\n deposit = control_int('Please enter your deposit: ')\n deduction = control_float('Please enter current deduction (%): ')\n check = False\n while not check:\n menu(settings)\n choice = menu_choice()\n execute(choice, properties, settings, interest, deposit, deduction)\n if choice == 9:\n check = True", "title": "" }, { "docid": "612611e765809c8a7e313ce8cc612787", "score": "0.53809285", "text": "def main():\n parser = bonobo.get_argument_parser()\n with bonobo.parse_args(parser) as options:\n bonobo.run(get_graph(**options), services=get_services(**options))", "title": "" }, { "docid": "dad4e95a08cc99857681179a5b7b9f57", "score": "0.53760874", "text": "def main(argv):\r\n groceryfile = ''\r\n emailfile = ''\r\n try:\r\n opts, _ = getopt.getopt(argv,\"hgl:el:\",[\"glfile=\",\"elfile=\"])\r\n except getopt.GetoptError:\r\n print ('split_the_bill.py --gl <grocerylist.txt> --el <emaillist.txt>')\r\n print ('Please provide input files or -h for help')\r\n sys.exit(2)\r\n for opt, arg in opts:\r\n if opt == '-h':\r\n print ('split_the_bill.py --gl <grocerylist.txt> --el <emaillist.txt>')\r\n sys.exit()\r\n elif opt in (\"-gl\", \"--glfile\"):\r\n groceryfile = arg\r\n elif opt in (\"-el\", \"--elfile\"):\r\n emailfile = arg\r\n \r\n try:\r\n mf = open(groceryfile, \"r\", encoding='utf-8')\r\n mf.seek(0)\r\n grocery_list = mf.read()\r\n if grocery_list == '':\r\n grocery_list = \"{}\"\r\n grocery_list_json = json.loads(grocery_list)\r\n except:\r\n raise FileNotFoundError('Grocery list not found!')\r\n finally:\r\n mf.close()\r\n\r\n try:\r\n ef = open(emailfile, \"r\", encoding='utf-8')\r\n ef.seek(0)\r\n email_list = ef.read().splitlines()\r\n if email_list == []:\r\n email_list = [\"Total\"]\r\n except:\r\n raise FileNotFoundError('Email list not found!')\r\n finally:\r\n ef.close()\r\n print(json.dumps(map_price_and_email(email_list,grocery_list_json)))", "title": "" }, { "docid": "5f433db1079924698c632e74339ed771", "score": "0.5361722", "text": "def main():\r\n print(\"DASHNextGen_Service_Report.py is Starting\")\r\n login_into_dash(\"./DASHLoginInfo.json\")\r\n navigate_to_reports_and_click_excel()\r\n time.sleep(5)\r\n grab_downloaded_report()\r\n csv_to_database(\"./DASHLoginInfo.json\")\r\n file_cleanup()\r\n print(\"We have uploaded to the database.\")\r\n logout_session()", "title": "" }, { "docid": "98a51693f0501415a9e1824aabb01d83", "score": "0.53591925", "text": "def main():\n tidy_uploads()\n do_publications()\n do_things_grid()\n \n copy_directory('things/grid-backgrounds/')\n copy_directory('things/phd/')", "title": "" }, { "docid": "ceda46838c16325aa24614e88a1efe47", "score": "0.53558826", "text": "def main():\n\n parser = argparse.ArgumentParser(description=\"Cherry picking automation.\")\n\n # Arg information\n parser.add_argument(\n 
\"--filename\", help=\"path to whl file we are copying\", required=True)\n parser.add_argument(\n \"--new_py_ver\", help=\"two digit py version eg. 27 or 33\", required=True)\n\n args = parser.parse_args()\n\n # Argument checking\n args.filename = os.path.abspath(args.filename)\n check_existence(args.filename)\n regex_groups = re.search(TF_NIGHTLY_REGEX, args.filename)\n directory = regex_groups.group(1)\n package = regex_groups.group(2)\n version = regex_groups.group(3)\n origin_tag = regex_groups.group(4)\n old_py_ver = re.search(r\"(cp\\d\\d)\", origin_tag).group(1)\n\n # Create new tags\n new_tag = origin_tag.replace(old_py_ver, \"cp\" + args.new_py_ver)\n\n # Copy the binary with the info we have\n copy_binary(directory, origin_tag, new_tag, version, package)", "title": "" }, { "docid": "6fb1c589ddefa00eb547dc4546b2da58", "score": "0.5355522", "text": "def main():\n _main()", "title": "" }, { "docid": "9e10366c86d668faee5952b61a636e61", "score": "0.5355324", "text": "def main():\n context = {}\n urls = {}\n urls[\"delivery\"] = BASE_URL + TIME_SERIES_DELIVERY\n urls[\"vaccination\"] = BASE_URL + VACCINATION_STATE\n context[\"cwd\"] = str(Path(sys.argv[0]).parent)\n\n # load last state\n sources = load_object(context)\n if sources is None:\n sources = Sources(urls[\"delivery\"], urls[\"vaccination\"])\n\n sources.download_sources()\n prepare_data(context, urls, sources.get_data())\n plot(context, sources)\n\n # store current state\n store_object(context, sources)", "title": "" }, { "docid": "b8f4e37a763460bd6ebeae3d7c60aa40", "score": "0.535298", "text": "def main():\n\n\t################################\n\t# change this\n\tlog_dir = \"./beta_simulator_mac/data2/driving_log.csv\"\n\n\tmy_model = Model()\n\tmy_model.read_csv_data(log_dir)\n\tmy_model.build_and_train_model()", "title": "" }, { "docid": "1674ebfbc119d61410be73cb43f6f664", "score": "0.535035", "text": "def main():\n\n # Build data\n jobs = squeue(user='lmh1', # user id - change to your own\n name=['iprPy_1', 'iprPy_4']) # job names - change to your own\n logs = parse_runner_logs()\n rundirs, runners = check_run_directories()\n\n # Merge data\n logjobs = jobs.merge(logs, how='outer', on='jobid')\n logjobs.loc[(logjobs.status=='active') & (pd.isna(logjobs.user)), 'status'] = 'crashed'\n runlogjobs = logjobs.merge(runners, how='outer', on='pid')\n\n # Loop over all run directories\n keys = ['jobid', 'pid', 'status', 'time', 'calcid', 'tmpdir']\n for i in rundirs.index:\n rundir = rundirs.loc[i]\n\n # List number of prepared calculations and number of runners\n print(rundir.run_directory, rundir.numcalcs, rundir.numrunners)\n dirjobs = runlogjobs[runlogjobs.run_directory == rundir.run_directory]\n\n # Print data for runners\n if len(dirjobs) > 0:\n print(dirjobs[keys])\n print()\n\n # List jobs with no associated bid files (usually finished)\n print('Unknown/finished jobs')\n nodir = runlogjobs[pd.isna(runlogjobs.run_directory)][keys]\n with pd.option_context('display.max_rows', None,):\n print(nodir)\n\n # Delete run logs for successfully finished jobs\n for jobid in nodir[nodir.status=='finished'].jobid.values:\n Path(f'runner_{jobid}.txt').unlink()", "title": "" }, { "docid": "b91ecb34edfcca45b91023e878276de2", "score": "0.5344869", "text": "def main():\n\n # e.g. 
url = 'cohorts/ryan_test_2/bytes_added'\n args = parseargs()\n\n # Initialize a requester object\n logging.debug(__name__ + ' :: Attempting to create cookie jar, '\n 'logging in ..')\n umapi_req = UMAPIClient(config.UMAPI_USER,\n config.UMAPI_PASS)\n\n if args.wait < config.MIN_WAIT_REQ:\n logging.error(__name__ + ' :: Wait time to small. '\n 'Must be at least {0}'.format(\n config.MIN_WAIT_REQ))\n return\n\n # Call the request handler\n #\n # 1. URLs provided from a csv\n # 2. URL from CL\n #\n\n proc_args = [umapi_req,\n args.uri,\n args.timeout,\n args.wait,\n args.save_file,\n args.timestamp,\n args.print_res]\n\n if args.infile:\n with open(config.IN_DIR + args.infile) as f:\n urls = f.read().strip().split(args.separator)\n for url in urls:\n url = unicode(url).strip()\n logging.debug(__name__ + ' :: Processing: {0}.'.format(url))\n proc_args[1] = url\n p = Process(target=request_handler, args=proc_args)\n p.start()\n p.join()\n else:\n url = unicode(args.uri).strip()\n logging.debug(__name__ + ' :: Processing: {0}.'.format(url))\n proc_args[1] = url\n p = Process(target=request_handler, args=proc_args)\n p.start()\n p.join()\n\n logging.debug(__name__ + ' :: Shutting down umapi client.')", "title": "" }, { "docid": "e38e160bd8991fbce437f03b70494a61", "score": "0.5342661", "text": "def main():\n cmd_opts = __process_args()\n database = \"/root/Desktop/vchauhan/Project/db_class/Station.db\"\n \n # create a database connection\n conn = create_connection(database)\n with conn:\n if (cmd_opts.ID):\n print(\"Querying for Trains with Station_ID: %s\" % (cmd_opts.ID))\n select_task_by_id(conn,cmd_opts.ID)\n else:\n print(\"Query all available trains in the station\")\n select_all_tasks(conn)", "title": "" }, { "docid": "9f249ad939f81630924dfc73921e45db", "score": "0.53349763", "text": "def main():\n setup_server_window()\n load_queue_data_from_pickle_files()\n setup_server_socket()\n server_window.mainloop()", "title": "" }, { "docid": "5a83a2fb0dca8d5e0ca5b87c319671e2", "score": "0.53339314", "text": "def run(self):\n logger=self.log()\n runhere=self.workdir\n if os.path.exists(runhere):\n logger.warning('%s: directory exists; will delete'%(runhere,))\n assert(not os.path.samefile(self.getdir('WORKhwrf'),runhere))\n shutil.rmtree(runhere)\n atime=self.__wrf.simstart()\n with NamedDir(runhere,keep=self.keeprun,logger=logger,\n keep_on_error=True) as rundir:\n try:\n logger.info('%s running in directory %s'%(\n self.taskname,realcwd()))\n self.location=runhere\n self.initial_prerun()\n self.link_fix()\n self.link_all_inputs()\n self.make_namelist()\n self.final_prerun()\n self.run_exe()\n self.postrun()\n self.deliver_products()\n except Exception as e:\n logger.critical('%s failed: %s'%(self.taskname,str(e)),\n exc_info=True)\n raise\n self.postmsg('%s: completed'%(self.taskname,))", "title": "" }, { "docid": "560dafb446a00cb4f6b0f6812c61b19a", "score": "0.53298175", "text": "def main(self, *args):\n\n Launcher.main(self)\n s = SimServer(self.build_opt().copy())\n s.start()", "title": "" }, { "docid": "1a893af2a985302a71a771cb73b96e47", "score": "0.53260726", "text": "def main():\n\n # Allow user to input parameters on command line.\n userInput = argparse.ArgumentParser(description=\\\n '%s version %s. Requires a .bed file with first four columns in the '\n 'format chromosome <tab> start <tab> stop <tab> sequence such as the '\n '.bed files produced by outputClean. 
Returns a .bed file that is '\n 'identical to the input file except that the probe sequences have been '\n 'replaced with their reverse complements.' % (scriptName, Version))\n requiredNamed = userInput.add_argument_group('required arguments')\n requiredNamed.add_argument('-f', '--file', action='store', required=True,\n help='The .bed file containing the probe '\n 'sequences to take the reverse complements '\n 'of')\n userInput.add_argument('-o', '--output', action='store', default=None,\n type=str, help='Specify the name prefix of the '\n 'output file')\n\n # Import user-specified command line values\n args = userInput.parse_args()\n inputFile = args.file\n outNameVal = args.output\n\n createRCs(inputFile, outNameVal)", "title": "" }, { "docid": "4a1441aff7b42146a2704f22a5ad15d9", "score": "0.5325459", "text": "def main():\n # get options\n cli = bill_cli.CommandLine()\n cli.get_options()\n\n # init config\n config = cfg.Config(cli.option)\n\n\n # init AWS_Access instance\n aws_raw_bill = AWS_Raw_Bill(config=config, commandline=cli)\n aws_raw_bill.proc_detail_tags_bills()\n\n\n #print \"\\n\"\n aws_raw_bill.cli.msg(\"You got it ! Cheers!\")", "title": "" }, { "docid": "5af56283008886c67ed6a41874433143", "score": "0.53180116", "text": "def main():\n # The block of code below attempts to open the file with the bike share\n # data. If an IOError (input/output error) occurs, it prints an error\n # message and exits the program, since the rest of the program needs\n # to be able to read the file. If you get an error, check that you\n # haven't changed the name of the file and make sure if appears in the\n # same directory as bike.py.\n try:\n file = open(\"2015MayBikeShareData.csv\")\n except IOError:\n print(\"ERROR! 2015MayBikeShareData.csv doesn't appear in the current directory.\")\n exit(1)\n \n \n # The code below provides an example of looping through the data and\n # counting the number of rides that were taken in this period.\n numRides = 0\n totalDuration = 0\n numSmithJeff = 0\n startCount = 0\n numEnter = 0\n \n for line in file:\n numRides = numRides + 1\n \n # Divide this line of the file into the above fields; we'll only care about\n # a few of them so we'll ignore the others.\n fields = line.split(\",\")\n # We haven't seen this syntax before, but you should be able to guess what it\n # does. 
Try experimenting with it in the interpreter to confirm your guess.\n # From here on out in the code, you can ignore the fields variable - everything\n # you need has been stored in the variables on the left hand side.\n duration, startStation, endStation = int(fields[0]), fields[2], fields[4]\n \n # You'll add additional code within this for loop to complete the assignment.\n # There is one iteration of the loop for each line of the file.\n \n #sum all the durations in each ride\n totalDuration = totalDuration + duration\n \n #finding total number of rides that start at Smithsonian / Jefferson Dr & 12th St SW\n if startStation == 'Smithsonian / Jefferson Dr & 12th St SW':\n numSmithJeff = numSmithJeff + 1\n \n #finding total number of bikes that arrive at Smithsonian / Jefferson station\n if endStation == \"Smithsonian / Jefferson Dr & 12th St SW\":\n numEnter = numEnter + 1\n \n #find average duration of each ride and convert to seconds\n averageDuration = (totalDuration / numRides) / 1000 \n \n #sum the number of bikes that have entered and left the station over the course of the data\n totalChange = numEnter - numSmithJeff\n \n # Files should always be closed when you are done with them.\n file.close()\n \n # Add additional print statements here\n print(\"There were\", numRides, \"total rides.\")\n print(\"The average duration of each ride is\", averageDuration, \"seconds\")\n \n #gives average duration of each ride in minutes and prints the result in a statement\n averageMinutes(averageDuration)\n \n print(\"The number of rides that started at Smithsonian / Jefferson Dr & 12th St SW is\", numSmithJeff)\n \n if totalChange > 0:\n print(\"The number of bikes at Smithsonian / Jefferson station over the course of the data had increased by\", totalChange, \"units\")\n elif totalChange < 0:\n print(\"The number of bikes at Smithsonian / Jefferson station over the course of the data has decreased by\", totalChange, \"units\")\n elif totalChange == 0:\n print(\"The number of bikes at Smithsonian / Jefferson station over the course of the data has not changed\")", "title": "" }, { "docid": "5eb888e41c74d0981ab1fba610eeca06", "score": "0.5317356", "text": "def main():\n stats.set_time_start()\n\n if config.options.show_progress:\n stats.start_monitor()\n\n recorders = Recorder.launch(config.options.recorders)\n\n try:\n for filename in config.filenames:\n parser.parse(filename)\n\n Recorder.wait_empty()\n except KeyboardInterrupt:\n pass\n\n stats.set_time_stop()\n\n if config.options.show_progress:\n stats.stop_monitor()\n\n stats.print_summary()", "title": "" }, { "docid": "6d20928f9bf4aa07d61d880e4df4b507", "score": "0.5316588", "text": "def main():\n\n args = parser.parse_args()\n start_ms_bf_server(app_id=args.ms_id,\n app_secret=args.ms_secret)", "title": "" }, { "docid": "1e6e65f4306c215597e5b39fbd542e56", "score": "0.53132766", "text": "def main(args):\r\n\r\n return 0", "title": "" }, { "docid": "d458458b43c4d46a0aec51a4f472eb0a", "score": "0.5306254", "text": "def main():\n print ('info: this is the seedbank library, what did you smoke today?!?')", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.5305935", "text": "def main():", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.5305935", "text": "def main():", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.5305935", "text": "def main():", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.5305935", "text": "def main():", "title": "" }, { 
"docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.5305935", "text": "def main():", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.5305935", "text": "def main():", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.5305935", "text": "def main():", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.5305935", "text": "def main():", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.5305935", "text": "def main():", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.5305935", "text": "def main():", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.5305935", "text": "def main():", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.5305935", "text": "def main():", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.5305935", "text": "def main():", "title": "" } ]
22d35a6550c7797bc852d7e1e578c7a7
Test we get the expected triggers from a button.
[ { "docid": "f8100c7694ef9f6d37756857cd0f71c9", "score": "0.62324995", "text": "async def test_get_triggers(\n hass: HomeAssistant,\n device_reg: device_registry.DeviceRegistry,\n entity_reg: EntityRegistry,\n) -> None:\n config_entry = MockConfigEntry(domain=\"test\", data={})\n config_entry.add_to_hass(hass)\n device_entry = device_reg.async_get_or_create(\n config_entry_id=config_entry.entry_id,\n connections={(device_registry.CONNECTION_NETWORK_MAC, \"12:34:56:AB:CD:EF\")},\n )\n entity_reg.async_get_or_create(DOMAIN, \"test\", \"5678\", device_id=device_entry.id)\n expected_triggers = [\n {\n \"platform\": \"device\",\n \"domain\": DOMAIN,\n \"type\": \"pressed\",\n \"device_id\": device_entry.id,\n \"entity_id\": f\"{DOMAIN}.test_5678\",\n }\n ]\n triggers = await async_get_device_automations(\n hass, DeviceAutomationType.TRIGGER, device_entry.id\n )\n assert_lists_same(triggers, expected_triggers)", "title": "" } ]
[ { "docid": "6923c51cf107f5f2eea29735f0a6cc08", "score": "0.70568377", "text": "def test_get_triggers(self):\n pass", "title": "" }, { "docid": "74a38034de88a9c3953c027771e7e558", "score": "0.7040851", "text": "def getTriggerPressed(self) -> bool:\n ...", "title": "" }, { "docid": "e2c7ec4699a61a6dd5472c083ba9442e", "score": "0.68977433", "text": "def test_get_trigger(self):\n pass", "title": "" }, { "docid": "ad5dc9b6c69e828ccb99d4174418852c", "score": "0.66726124", "text": "def test_button_without_name(self):", "title": "" }, { "docid": "2ecece6161de64c8951cb67cce23bf1e", "score": "0.6605904", "text": "def test_repeated_button():", "title": "" }, { "docid": "eeab186daac25950932d509e865e5885", "score": "0.6526919", "text": "def test_add_trigger(self):\n pass", "title": "" }, { "docid": "120a477afbf6e23a229561e9ac692172", "score": "0.65125096", "text": "def test_gui_Button() -> None:\n pos = 200, 200\n size = 100, 100\n cpos_x = pos[0], pos[0] + int(size[0] / 2), pos[0] + size[0]\n cpos_y = pos[1], pos[1] + int(size[1] / 2), pos[1] + size[1]\n wsize = (ut.BSIZE[0] * ut.TILE, ut.BSIZE[1] * ut.TILE)\n\n img = images.BUTT_TMP_IMG\n img2 = images.SPLASH_IMG\n img3 = images.BACK_IMG\n\n tname = 'test_trigger'\n text = ('test', ut.GAME_FONT)\n triggers1 = [('test_trigger', 0, 2)]\n triggers2 = [('test_trig', 0, 4), ('test_trigger', 4, 3), ('t', 3, 4)]\n\n # Test 0: incorrect initial values\n test = gui.Button((-1, -1), (-1, -1), tname, img)\n assert 0 <= test.pos[0] < wsize[0]\n assert 0 <= test.pos[1] < wsize[1]\n assert test.size[0] > 0\n assert test.size[1] > 0\n assert test.text is None\n\n test = gui.Button(wsize, (0, 0), tname, img, None, text)\n assert 0 <= test.pos[0] < wsize[0]\n assert 0 <= test.pos[1] < wsize[1]\n assert test.size[0] > 0\n assert test.size[1] > 0\n assert test.text is not None\n\n test = gui.Button(pos, size, tname, img2)\n assert test.image.get_size()[0] == size[0]\n assert test.image.get_size()[1] == size[1]\n\n test = gui.Button(pos, size, tname, img3)\n assert test.image.get_size()[0] == size[0]\n assert test.image.get_size()[1] == size[1]\n\n # Test 1: proper work of init_pdown() method\n\n for ix in range(3):\n for iy in range(3):\n test = gui.Button(pos, size, tname, img)\n test.init_pdown((cpos_x[ix], cpos_y[iy]))\n assert test.is_pressed is True\n\n test = gui.Button(pos, size, tname, img, None, text)\n test.init_pdown((cpos_x[ix], cpos_y[iy]))\n assert test.is_pressed is True\n assert test.text.is_pressed is True\n\n for ix in range(3):\n for iy in range(3):\n if ix == iy and iy == 1:\n continue\n\n test = gui.Button(pos, size, tname, img)\n test.init_pdown((cpos_x[ix]+ix-1, cpos_y[iy]+iy-1))\n assert test.is_pressed is False\n\n test = gui.Button(pos, size, tname, img, None, text)\n test.init_pdown((cpos_x[ix]+ix-1, cpos_y[iy]+iy-1))\n assert test.is_pressed is False\n assert test.text.is_pressed is False\n\n # Test 2: proper work of init_pup() method\n test = gui.Button(pos, size, tname, img)\n test.init_pdown((cpos_x[1], cpos_y[1]))\n test.init_pup((cpos_x[1], cpos_y[1]))\n assert test.is_pressed is False\n\n test = gui.Button(pos, size, tname, img)\n test.init_pdown((cpos_x[1], cpos_y[1]))\n test.init_pup((cpos_x[1], cpos_y[1]), [])\n\n test = gui.Button(pos, size, tname, img)\n test.init_pdown((cpos_x[1], cpos_y[1]))\n test.init_pup((cpos_x[1], cpos_y[1]), triggers1)\n assert triggers1[0][1] == 1\n\n test = gui.Button(pos, size, tname, img)\n test.init_pdown((cpos_x[1], cpos_y[1]))\n test.init_pup((cpos_x[1], cpos_y[1]), triggers2)\n assert 
triggers2[1][1] == 2\n\n triggers1 = [('test_trigger', 0, 2)]\n triggers2 = [('test_trig', 0, 4), ('test_trigger', 4, 3), ('t', 3, 4)]\n\n test = gui.Button(pos, size, tname, img, None, text)\n test.init_pdown((cpos_x[1], cpos_y[1]))\n test.init_pup((cpos_x[1], cpos_y[1]))\n assert test.is_pressed is False\n assert test.text.is_pressed is False\n\n test = gui.Button(pos, size, tname, img, None, text)\n test.init_pdown((cpos_x[1], cpos_y[1]))\n test.init_pup((cpos_x[1], cpos_y[1]), [])\n\n test = gui.Button(pos, size, tname, img, None, text)\n test.init_pdown((cpos_x[1], cpos_y[1]))\n test.init_pup((cpos_x[1], cpos_y[1]), triggers1)\n assert triggers1[0][1] == 1\n\n test = gui.Button(pos, size, tname, img, None, text)\n test.init_pdown((cpos_x[1], cpos_y[1]))\n test.init_pup((cpos_x[1], cpos_y[1]), triggers2)\n assert triggers2[1][1] == 2", "title": "" }, { "docid": "f4efef40153a1f11e2ee16af6042fcdb", "score": "0.63937", "text": "def verify_button(self, locator, expected):\n button = self.state._get_typed_item_by_locator(Button, locator)\n self.state._verify_value(expected, button.Text)", "title": "" }, { "docid": "eb072dfb4e43f75a6f52a23a2926dc0a", "score": "0.61948365", "text": "def _findBtnElem(browser, triggers):\n elemTypes = [\"button\", \"input\", \"a\"]\n for t in elemTypes:\n log.debug(\"Looking for element by type {}.\", t)\n elems = browser.find_by_tag(t)\n for elem in elems:\n elemText = elem.value or elem.text\n if not elemText:\n continue\n # Make lowercase, remove spaces,tabs,newlines,non alphab chars.\n elemText = \"\".join(char for char in elemText if char.isalpha())\n elemText = \"\".join(elemText.lower().split())\n # Compare against list\n for trig in triggers:\n if trig in elemText:\n return elem\n # Did not find any element.\n log.debug(\"Did not find any element.\")\n return None", "title": "" }, { "docid": "f61c292e762c5deb3a01dd4afbe6fb5f", "score": "0.6126174", "text": "async def test_if_fires_on_state_change(hass, calls):\n hass.states.async_set(\"button.entity\", \"unknown\")\n\n assert await async_setup_component(\n hass,\n automation.DOMAIN,\n {\n automation.DOMAIN: [\n {\n \"trigger\": {\n \"platform\": \"device\",\n \"domain\": DOMAIN,\n \"device_id\": \"\",\n \"entity_id\": \"button.entity\",\n \"type\": \"pressed\",\n },\n \"action\": {\n \"service\": \"test.automation\",\n \"data\": {\n \"some\": (\n \"to - {{ trigger.platform}} - \"\n \"{{ trigger.entity_id}} - {{ trigger.from_state.state}} - \"\n \"{{ trigger.to_state.state}} - {{ trigger.for }} - \"\n \"{{ trigger.id}}\"\n )\n },\n },\n }\n ]\n },\n )\n\n # Test triggering device trigger with a to state\n hass.states.async_set(\"button.entity\", \"2021-01-01T23:59:59+00:00\")\n await hass.async_block_till_done()\n assert len(calls) == 1\n assert calls[0].data[\n \"some\"\n ] == \"to - device - {} - unknown - 2021-01-01T23:59:59+00:00 - None - 0\".format(\n \"button.entity\"\n )", "title": "" }, { "docid": "8d61cdf9a1942842c830c17e0f61af39", "score": "0.6063574", "text": "def test_submit_button():", "title": "" }, { "docid": "80edad7322659bd77437bea57210db23", "score": "0.60603464", "text": "def getButton(self, button: ButtonType) -> bool:\n ...", "title": "" }, { "docid": "80ba6d9b405b8bff2e022f72b081e548", "score": "0.6039221", "text": "def test_selected_buttons() -> None:\n term = Terminal()\n tui = TextUI(term)\n buttons = [Button(tui) for _ in range(3)]\n menu = Menu(tui, buttons=buttons)\n assert menu.selected is buttons[0]", "title": "" }, { "docid": "3ebaa7fcdb2e9faa2d7215777775c2ad", "score": 
"0.6024014", "text": "async def test_get_triggers(\n hass: HomeAssistant,\n device_registry: dr.DeviceRegistry,\n entity_registry: er.EntityRegistry,\n set_state,\n features_reg,\n features_state,\n expected_trigger_types,\n) -> None:\n config_entry = MockConfigEntry(domain=\"test\", data={})\n config_entry.add_to_hass(hass)\n device_entry = device_registry.async_get_or_create(\n config_entry_id=config_entry.entry_id,\n connections={(dr.CONNECTION_NETWORK_MAC, \"12:34:56:AB:CD:EF\")},\n )\n entity_entry = entity_registry.async_get_or_create(\n DOMAIN,\n \"test\",\n \"5678\",\n device_id=device_entry.id,\n supported_features=features_reg,\n )\n if set_state:\n hass.states.async_set(\n entity_entry.entity_id,\n \"attributes\",\n {\"supported_features\": features_state},\n )\n\n expected_triggers = []\n\n expected_triggers += [\n {\n \"platform\": \"device\",\n \"domain\": DOMAIN,\n \"type\": trigger,\n \"device_id\": device_entry.id,\n \"entity_id\": entity_entry.id,\n \"metadata\": {\"secondary\": False},\n }\n for trigger in expected_trigger_types\n ]\n triggers = await async_get_device_automations(\n hass, DeviceAutomationType.TRIGGER, device_entry.id\n )\n assert triggers == unordered(expected_triggers)", "title": "" }, { "docid": "b8bad0492bb969fe0f6c22fc57e3a788", "score": "0.6008939", "text": "def verify_action_button_status(self, **kwargs):\n for button,expected_value in kwargs.items():\n locator = eda_lex_locators[\"eda_settings\"][\"action_button\"].format(button)\n self.selenium.page_should_contain_element(locator)\n self.selenium.wait_until_element_is_visible(locator,\n error= f\"Element '{button}' button is not displayed for the user\")\n time.sleep(1)\n actual_value = self.selenium.get_webelement(locator).get_attribute(\"disabled\")\n expected_value = bool(expected_value == \"disabled\")\n if not str(expected_value).lower() == str(actual_value).lower() :\n raise Exception (f\"Element {button} button status is {actual_value} instead of {expected_value}\")", "title": "" }, { "docid": "11f6931ac33aab0f5601a9b0dd3f812e", "score": "0.5992467", "text": "def getTrigger(self) -> bool:\n ...", "title": "" }, { "docid": "6d8adc23251ea993fae0f5759fb596e7", "score": "0.5991863", "text": "async def test_button(hass: HomeAssistant) -> None:\n button = ButtonEntity()\n assert button.state is None\n\n button.hass = hass\n\n with pytest.raises(NotImplementedError):\n await button.async_press()\n\n button.press = MagicMock()\n await button.async_press()\n\n assert button.press.called", "title": "" }, { "docid": "bb128b720979d5de8ae31492ded29c45", "score": "0.59495145", "text": "def test_choose_submit_buttons(expected_post):\n text = \"\"\"\n <form method=\"post\" action=\"mock://form.com/post\">\n <button type=\"butTon\" name=\"sub1\" value=\"val1\">Val1</button>\n <button type=\"suBmit\" name=\"sub2\" value=\"val2\">Val2</button>\n <button type=\"reset\" name=\"sub3\" value=\"val3\">Val3</button>\n <button name=\"sub4\" value=\"val4\">Val4</button>\n <input type=\"subMit\" name=\"sub5\" value=\"val5\">\n </form>\n \"\"\"\n browser, url = setup_mock_browser(expected_post=expected_post, text=text)\n browser.open(url)\n browser.select_form()\n res = browser.submit_selected(btnName=expected_post[0][0])\n assert res.status_code == 200 and res.text == 'Success!'", "title": "" }, { "docid": "53343de55ba0e8a9212d60be0343e9ee", "score": "0.5919839", "text": "def test_get_trigger_setting(self):\n pass", "title": "" }, { "docid": "44deacb5017eca4c48630dacbe788cc5", "score": "0.5918713", "text": "def 
test_triggered(self, h5reader):\n h5reader.open()\n for i, event in enumerate(h5reader):\n assert event.triggered == bool(i)\n h5reader.close()", "title": "" }, { "docid": "5228843791b9f0f3f3a92a92b1599682", "score": "0.5917064", "text": "def buttons(self):\n\t\traise HaventGottenToThatYetError, \"I ain't done writing this code yet.\"", "title": "" }, { "docid": "81a84e335ca25498d084f8fefa9c9ced", "score": "0.5897216", "text": "def _verify_wheel_actions(self, expected_actions):\n self.assertEqual(self._wheel_actions, expected_actions)", "title": "" }, { "docid": "d16851c2c0592200b2d75de755ebd5d3", "score": "0.58840644", "text": "async def test_button_simple(hass: HomeAssistant, knx: KNXTestKit) -> None:\n events = async_capture_events(hass, \"state_changed\")\n await knx.setup_integration(\n {\n ButtonSchema.PLATFORM: {\n CONF_NAME: \"test\",\n KNX_ADDRESS: \"1/2/3\",\n }\n }\n )\n assert len(hass.states.async_all()) == 1\n assert len(events) == 1\n events.pop()\n\n # press button\n await hass.services.async_call(\n \"button\", \"press\", {\"entity_id\": \"button.test\"}, blocking=True\n )\n await knx.assert_write(\"1/2/3\", True)\n assert len(events) == 1\n events.pop()\n\n # received telegrams on button GA are ignored by the entity\n old_state = hass.states.get(\"button.test\")\n async_fire_time_changed(hass, dt.utcnow() + timedelta(seconds=3))\n await knx.receive_write(\"1/2/3\", False)\n await knx.receive_write(\"1/2/3\", True)\n new_state = hass.states.get(\"button.test\")\n assert old_state == new_state\n assert len(events) == 0\n\n # button does not respond to read\n await knx.receive_read(\"1/2/3\")\n await knx.assert_telegram_count(0)", "title": "" }, { "docid": "4ce5d376cd2a764f1425cbb0bc6406be", "score": "0.5875481", "text": "def test_file_buttons(self):\n dialog = self.dialog\n button_sources = [(self.dialog.readme, 'README.txt', 'readme'),\n (self.dialog.idle_news, 'NEWS.txt', 'news'),\n (self.dialog.idle_credits, 'CREDITS.txt', 'credits')]\n\n for button, filename, name in button_sources:\n with self.subTest(name=name):\n button.invoke()\n fn = findfile(filename, subdir='idlelib')\n get = dialog._current_textview.viewframe.textframe.text.get\n with open(fn, encoding='utf-8') as f:\n self.assertEqual(f.readline().strip(), get('1.0', '1.end'))\n f.readline()\n self.assertEqual(f.readline().strip(), get('3.0', '3.end'))\n dialog._current_textview.destroy()", "title": "" }, { "docid": "f5fc6c730c70640147baf9d21fccbd19", "score": "0.5857787", "text": "def test_generate_button_press(self):\n params = {'state': 1, 'counter': 62552}\n result = self.node_obj.generate_button_press(params)\n expected = b'\\t\\x00\\x01\\x00\\x01X\\xf4\\x00\\x00'\n self.assertEqual(result, expected)", "title": "" }, { "docid": "a130514a94065085b1e078a92164424d", "score": "0.582386", "text": "def testGetActions(self):\r\n iq = self.Iq()\r\n iq['command']['node'] = 'foo'\r\n iq['command']['actions'] = ['prev', 'next']\r\n\r\n results = iq['command']['actions']\r\n expected = set(['prev', 'next'])\r\n self.assertEqual(results, expected,\r\n \"Incorrect next actions: %s\" % results)", "title": "" }, { "docid": "7f1886ae65de7d864c086a30feb71392", "score": "0.5813623", "text": "async def test_button_type(hass: HomeAssistant, knx: KNXTestKit) -> None:\n await knx.setup_integration(\n {\n ButtonSchema.PLATFORM: {\n CONF_NAME: \"test\",\n KNX_ADDRESS: \"1/2/3\",\n ButtonSchema.CONF_VALUE: 21.5,\n CONF_TYPE: \"2byte_float\",\n }\n }\n )\n # press button\n await hass.services.async_call(\n \"button\", \"press\", 
{\"entity_id\": \"button.test\"}, blocking=True\n )\n await knx.assert_write(\"1/2/3\", (0x0C, 0x33))", "title": "" }, { "docid": "4976e01c22d2f8a2ed0e9adb400cba1e", "score": "0.5800542", "text": "def check_actions(self, expected, elements):\r\n for idx, item in enumerate(expected):\r\n text, form_value = item\r\n e = elements.eq(idx)\r\n eq_(e.parent().text(), text)\r\n eq_(e.attr('name'), 'action')\r\n eq_(e.val(), form_value)", "title": "" }, { "docid": "7e94b9e010814b6063f549215074f2f8", "score": "0.5797616", "text": "def test_create_button(self):\n self.root, s, array = self.populate_gui()\n # Mocking \"Create button\" click. Idea - http://code.activestate.com/recipes/578978-using-tkinters-invoke-method-for-testing/\n s.frame_bottom5.children['but_create'].invoke()\n # Mocked data as an array\n\n # Compare fresh Tuna with mocked data\n z = Data.tunas[0].getTunaFeatures\n for x, y in zip (z, array):\n # Must be equal. If not - error\n self.assertEqual(x, y)\n if self.root:\n self.root.destroy()", "title": "" }, { "docid": "9b4e128bb7f65592d7eacc41e217ad69", "score": "0.57582724", "text": "def test_change_trigger_setting(self):\n pass", "title": "" }, { "docid": "ae8e20d8b26f0bd241217a403be66d45", "score": "0.5746524", "text": "def test_replace_triggers(self):\n pass", "title": "" }, { "docid": "7ecb71ad90f441a936a274233a1fd47b", "score": "0.5740988", "text": "def test_call_button(qtbot):\n\n @magicgui(call_button=\"my_button\", auto_call=True)\n def func(a: int, b: int = 3, c=7.1) -> None:\n assert a == 7\n\n magic_widget = func.Gui()\n\n assert hasattr(magic_widget, \"call_button\")\n assert isinstance(magic_widget.call_button, QtW.QPushButton)\n magic_widget.a = 7\n\n qtbot.mouseClick(magic_widget.call_button, Qt.LeftButton)", "title": "" }, { "docid": "7275ca9d46acff246dc95da9656d8599", "score": "0.5714357", "text": "def test_object_buttons(self):\n r = self.renderer(\n context=self.portal[\"news\"],\n assignment=actions.Assignment(\n ptitle=\"actions\", category=\"object_buttons\", show_icons=False\n ),\n )\n r.update()\n output = r.actionLinks()\n\n # Have our expected tabs ?\n expected = {\"Cut\", \"Copy\", \"Rename\", \"Delete\"}\n got = {str(link[\"title\"]) for link in output}\n self.assertTrue(expected.issubset(got))", "title": "" }, { "docid": "3c85d3bf3a40f27ce6c0834b4a67ee57", "score": "0.5689448", "text": "def test_action(\n widget,\n action_name,\n input_text,\n expected_result,\n selected_text=None,\n cursor_placement=None\n):\n app = QtWidgets.QApplication.instance()\n window = app.activeWindow()\n\n for action in widget.actions():\n if action.text() == action_name:\n break\n\n assert action_name == action.text()\n\n try:\n setup_test(\n widget,\n input_text,\n selected_text=selected_text,\n cursor_placement=cursor_placement\n )\n\n # widget.last_key_pressed may be necessary here for some actions... but should those actions really fall under this domain? 
or be placed on the editor?\n action.trigger()\n\n text = widget.toPlainText()\n assert text == expected_result\n except AssertionError:\n raise\n finally:\n window.activateWindow()", "title": "" }, { "docid": "2619c809c051daab8d80c51138eae921", "score": "0.56763", "text": "def test_get_behaviors(self):\n pass", "title": "" }, { "docid": "c9e11074eba811a72adc9368867b0034", "score": "0.56531715", "text": "def test_printer_buttons(self):\n dialog = self.dialog\n button_sources = [(dialog.py_license, license, 'license'),\n (dialog.py_copyright, copyright, 'copyright'),\n (dialog.py_credits, credits, 'credits')]\n\n for button, printer, name in button_sources:\n with self.subTest(name=name):\n printer._Printer__setup()\n button.invoke()\n get = dialog._current_textview.viewframe.textframe.text.get\n lines = printer._Printer__lines\n self.assertEqual(lines[0], get('1.0', '1.end'))\n self.assertEqual(lines[1], get('2.0', '2.end'))\n dialog._current_textview.destroy()", "title": "" }, { "docid": "3e73da42a24d87c5e69f4f34200766a8", "score": "0.56365985", "text": "def test_main_button(self):\n self.assertTrue(self.browser.is_text_present('SELECCIONAR IMAGEN'))\n self.assertTrue(self.browser.is_text_present('CLASIFICAR'))", "title": "" }, { "docid": "ef3a43fbd947df52e772a280c08df58e", "score": "0.56074643", "text": "def test_checkBookButton(self):\n\n homePage = HomePage(self.driver)\n self.explicit_wait(HomePage.FromBooking)\n homePage.bookButton().click()\n self.explicit_wait(HomePage.ContinueButton)\n assert homePage.continueButton().is_displayed(), \"Book Button didn't work correctly\"", "title": "" }, { "docid": "18f647709dee22339c7187aabc5d1091", "score": "0.5595025", "text": "def Button(self) -> _n_11_t_4:", "title": "" }, { "docid": "eba6ce0e9113a5145a53162d59a770a2", "score": "0.5585998", "text": "def test_button_switch_up() -> None:\n term = Terminal()\n tui = TextUI(term)\n buttons = [Button(tui) for _ in range(3)]\n menu = Menu(tui, buttons=buttons)\n assert menu.selected is buttons[0]\n menu.selection_up()\n assert menu.selected is buttons[2]\n menu.selection_up()\n assert menu.selected is buttons[1]\n menu.selection_up()\n assert menu.selected is buttons[0]", "title": "" }, { "docid": "856a534f6e16ea198909c707a8679242", "score": "0.55750066", "text": "def test_name_in_actions(self, trigger_actions_mock):\n name = ACTIONS.GET\n\n ds_config = CHAOS_CONFIG.datastore\n\n trigger_action(name, ds_config)\n\n trigger_actions_mock.assert_called_once_with(name, ds_config)", "title": "" }, { "docid": "498ca2b80db53489d47259a10bee0fb3", "score": "0.555756", "text": "def test_submit_button(self):\n button = get_button(self.get_form())\n self.assertEqual(button.get('type'), 'submit')", "title": "" }, { "docid": "ec9a150d48c80843e2b30313522b1b68", "score": "0.5556827", "text": "async def test_get_triggers(opp, device_reg, entity_reg):\n config_entry = MockConfigEntry(domain=\"test\", data={})\n config_entry.add_to_opp(opp)\n device_entry = device_reg.async_get_or_create(\n config_entry_id=config_entry.entry_id,\n connections={(device_registry.CONNECTION_NETWORK_MAC, \"12:34:56:AB:CD:EF\")},\n )\n entity_reg.async_get_or_create(DOMAIN, \"test\", \"5678\", device_id=device_entry.id)\n expected_triggers = [\n {\n \"platform\": \"device\",\n \"domain\": DOMAIN,\n \"type\": \"turned_off\",\n \"device_id\": device_entry.id,\n \"entity_id\": f\"{DOMAIN}.test_5678\",\n },\n {\n \"platform\": \"device\",\n \"domain\": DOMAIN,\n \"type\": \"turned_on\",\n \"device_id\": device_entry.id,\n \"entity_id\": 
f\"{DOMAIN}.test_5678\",\n },\n ]\n triggers = await async_get_device_automations(opp, \"trigger\", device_entry.id)\n assert_lists_same(triggers, expected_triggers)", "title": "" }, { "docid": "d19b31d39c617f88801539774d382d7b", "score": "0.5552215", "text": "def _verify_back_arm_actions(self, expected_actions):\n self.assertEqual(self._back_arm_actions, expected_actions)", "title": "" }, { "docid": "d479da65719d58980a14bd2d167d27a3", "score": "0.5544555", "text": "def test_actions_available(admin_client, article):\n url = reverse('admin:blog_article_changelist')\n changeview = admin_client.get(url)\n path = (\n './/table[@id=\"result_list\"]' '//thead//th//*[starts-with(text(), \"Actions\")]'\n )\n assert len(changeview.lxml.xpath(path)) == 1", "title": "" }, { "docid": "bb89546863d2b6cd5aad54ff3fb808b9", "score": "0.55428463", "text": "def test_actions(self):\n serialized = self._runner.serialized_model\n actions = serialized['Objects']['?'].get('_actions')\n self.assertIsInstance(actions, dict)\n action_names = [action['name'] for action in actions.values()]\n self.assertIn('testAction', action_names)\n self.assertNotIn('notAction', action_names)\n self.assertIn('testRootMethod', action_names)\n action_meta = None\n for action in actions.values():\n self.assertIsInstance(action.get('enabled'), bool)\n self.assertIsInstance(action.get('name'), str)\n self.assertThat(\n action['name'],\n matchers.StartsWith('test'))\n if action['name'] == 'testActionMeta':\n action_meta = action\n else:\n self.assertEqual(action['title'], action['name'])\n self.assertIsNotNone(action_meta)\n self.assertEqual(action_meta['title'], \"Title of the method\")\n self.assertEqual(action_meta['description'],\n \"Description of the method\")\n self.assertEqual(action_meta['helpText'], \"HelpText of the method\")", "title": "" }, { "docid": "f3ad0ff2f900dee6313aab175e3c6c26", "score": "0.55339754", "text": "def test_submit_button(self):\n button = get_button(self.form())\n self.assertEqual(button.get('type'), 'submit')", "title": "" }, { "docid": "f3ad0ff2f900dee6313aab175e3c6c26", "score": "0.55339754", "text": "def test_submit_button(self):\n button = get_button(self.form())\n self.assertEqual(button.get('type'), 'submit')", "title": "" }, { "docid": "f3ad0ff2f900dee6313aab175e3c6c26", "score": "0.55339754", "text": "def test_submit_button(self):\n button = get_button(self.form())\n self.assertEqual(button.get('type'), 'submit')", "title": "" }, { "docid": "25a7a5afa50302f5166053d2efb8f007", "score": "0.5505979", "text": "def events(self):\n # Button pressed (originally none)\n button = \"\"\n # Check event queue\n for event in pygame.event.get():\n # If exit button clicked, exit game\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n # If a click, see if it is over a virtual button\n elif event.type == pygame.MOUSEBUTTONUP:\n button = self.get_button(event.pos)\n # Return the virtual button clicked\n return button", "title": "" }, { "docid": "a936a25c82a9396e9d559b4a8ed7e3e0", "score": "0.5503027", "text": "def detect_button_collide(self):\n for button in self.clickables:\n clicked_button = button.check_collide()\n if clicked_button:\n print('clicked: ' + button.name)\n return button", "title": "" }, { "docid": "01915a994d2afe27718838c74b97ac22", "score": "0.54878205", "text": "async def test_button_raw(hass: HomeAssistant, knx: KNXTestKit) -> None:\n await knx.setup_integration(\n {\n ButtonSchema.PLATFORM: {\n CONF_NAME: \"test\",\n KNX_ADDRESS: \"1/2/3\",\n CONF_PAYLOAD: False,\n 
CONF_PAYLOAD_LENGTH: 0,\n }\n }\n )\n # press button\n await hass.services.async_call(\n \"button\", \"press\", {\"entity_id\": \"button.test\"}, blocking=True\n )\n await knx.assert_write(\"1/2/3\", False)", "title": "" }, { "docid": "102eec5e6e9c3d68a4aa2982d3a4088f", "score": "0.54876107", "text": "def _verify_front_drum_actions(self, expected_actions):\n self.assertEqual(self._front_drum_actions, expected_actions)", "title": "" }, { "docid": "f4fc6bb9d9f3134a834fa1dfb539e975", "score": "0.54778445", "text": "def test_set_actions_should_set_navigate(self):\n self.navigate_mock.side_effect = lambda x: self.assertTrue(self.driver_mock is x)\n self.form_action.set_actions(self.navigate_mock, self.action_mock, self.data_mock)\n self.form_action.navigate()", "title": "" }, { "docid": "f37adb87a612d886fc32172fad77e1bc", "score": "0.5472817", "text": "def _verify_back_drum_actions(self, expected_actions):\n self.assertEqual(self._back_drum_actions, expected_actions)", "title": "" }, { "docid": "fe8206e18839c5889e90fac72d1bdddf", "score": "0.546266", "text": "def test_simple(self):\n control = self.window\n mock = self.app._the_mock\n control.on_prev = mock.on_prev\n control.on_next = mock.on_next\n event = wx.CommandEvent()\n\n LOG.info(\"Dimensions changed\")\n control.on_dimensions(event)\n expect = [\n call.on_dimensions(event),\n call.App.new_dimensions(),\n call.App.refresh_contents(),\n call.CommandEvent.Skip(),\n ]\n\n LOG.info(\"Variant changed\")\n control.on_variant(event)\n expect += [\n call.on_variant(event),\n call.App.refresh_contents(),\n call.CommandEvent.Skip(),\n ]\n\n LOG.info(\"Option changed\")\n control.on_option(event)\n expect += [\n call.on_option(event),\n call.App.grab_contents(),\n call.CommandEvent.Skip(),\n ]\n\n LOG.info(\"Reload and prev\")\n control.on_reload_and_prev(event)\n expect += [\n call.on_reload_and_prev(event),\n call.App.book.reload(),\n call.on_prev(event),\n ]\n\n LOG.info(\"Changes applied\")\n control.on_apply_and_next(event)\n expect += [call.on_apply_and_next(event), call.on_next(event), call.App.apply()]\n\n mock.assert_has_calls(expect)", "title": "" }, { "docid": "9eeec4a183812051645bda0f0fe535ef", "score": "0.5457157", "text": "def is_button_pressed(button):\r\n return ui.is_pressed(button)", "title": "" }, { "docid": "c8433fa84b1c4dedde774ec7d78554bb", "score": "0.5456086", "text": "def test_button_text_subscribe(self):\n button = get_button(self.get_form())\n self.assertEqual(button.string, 'Subscribe')", "title": "" }, { "docid": "1d8a9f7f88cbd4fbcbbd7a761c98fe6b", "score": "0.5448595", "text": "def test_delete_trigger(self):\n pass", "title": "" }, { "docid": "424e43ebe5d05571d8994bd1b7dc79e2", "score": "0.54470354", "text": "def test_parse_button_press(self):\n result = self.node_obj.parse_button_press(b'\\t\\x00\\x00\\x00\\x02\\xbf\\xc3\\x00\\x00')\n expected = {'counter': 50111, 'button_state': 0}\n self.assertEqual(result, expected, \"State OFF, Counter 50111\")\n\n result = self.node_obj.parse_button_press(b'\\t\\x00\\x01\\x00\\x01\\x12\\xca\\x00\\x00')\n expected = {'counter': 51730, 'button_state': 1}\n self.assertEqual(result, expected, \"State ON, Counter 51730\")", "title": "" }, { "docid": "bab4fc1a0399dd4e271598d2d0db2d4b", "score": "0.5445742", "text": "def test_button_switch_down() -> None:\n term = Terminal()\n tui = TextUI(term)\n buttons = [Button(tui) for _ in range(3)]\n menu = Menu(tui, buttons=buttons)\n assert menu.selected is buttons[0]\n menu.selection_down()\n assert menu.selected is buttons[1]\n 
menu.selection_down()\n assert menu.selected is buttons[2]\n menu.selection_down()\n assert menu.selected is buttons[0]", "title": "" }, { "docid": "b8dcb63fb8ed3b1ddf842ba3dfeec589", "score": "0.5445198", "text": "def test_alert_triggering(self):\n\t\tself.called = 0\n\t\talert = self.alertservice.create(None, (self.fake_method, tuple()), (model.LoadBalancer.RESP_TIME, model.Alert.GE, 2))\n\t\tself.assertEqual(False, alert.triggered)\n\t\tself.alertservice.validate(alert, 1)\n\t\tself.assertEqual(0, self.called)\n\t\tself.alertservice.validate(alert, 2)\n\t\tself.assertEqual(True, alert.triggered)\n\t\tself.assertEqual(1, self.called)", "title": "" }, { "docid": "83623382d652a64f807b2a67d4e8e168", "score": "0.54440373", "text": "def _verify_routine_actions(self, expected_actions):\n self.assertEqual(self._routine_actions, expected_actions)", "title": "" }, { "docid": "842fa44fd78195a63ee437b44f24f471", "score": "0.54278517", "text": "def test_on_btn_norm(self, caw, multishell_edit, inputs, locks, expected):\n if multishell_edit:\n caw.rbs_multi_apply.index = 0\n for i, item in enumerate(caw.input_items):\n item.value = inputs[i]\n caw.checks[i].value = locks[i]\n\n caw.on_btn_norm(None)\n\n for i, item in enumerate(caw.input_items):\n assert item.value == expected[i]\n\n start_no = caw.irs_shell_range.value[0]\n end_no = caw.irs_shell_range.value[1]\n\n for i, v in enumerate(expected):\n line = caw.fig.data[2 + i].y[start_no - 1 : end_no]\n unique_v = set(line)\n assert len(unique_v) == 1\n unique_v = float(\"{:.3g}\".format(list(unique_v)[0]))\n assert unique_v == v\n else:\n for i, item in enumerate(caw.input_items):\n item.value = inputs[i]\n caw.checks[i].value = locks[i]\n\n caw.on_btn_norm(None)\n\n for i, item in enumerate(caw.input_items):\n assert item.value == expected[i]", "title": "" }, { "docid": "f5553e90dfdb6fa8d90ee71120a3c056", "score": "0.5427106", "text": "def tell_buttonbox(self, data):\n self.factory.tell_buttonbox(data)", "title": "" }, { "docid": "d3bf4f0212cd482aa9d6f5396b91babf", "score": "0.54119587", "text": "def detect_events(self):\n for button in self.clickables:\n button.check_collide()\n for event in pygame.event.get():\n self.detect_quit(event)\n mouse_xy = self.get_click_xy(event)\n if mouse_xy:\n button = self.detect_button_collide()\n return button", "title": "" }, { "docid": "9459ec8e6d259d6768d4bb83493c4dfa", "score": "0.5403557", "text": "def correct_button(self, e):\n if e.button == 2:\n return 3\n elif e.button == 3:\n return 2\n else:\n return e.button", "title": "" }, { "docid": "fcfdb876762d81dc8052f794029979c2", "score": "0.53995323", "text": "def test_set_button_label(button):\n set_button_property(button, \"red\", \"test_button\")\n assert button.get_label() == \"test_button\"", "title": "" }, { "docid": "cd6ea4c517bacc65ae462cf56b1878d0", "score": "0.5398053", "text": "def test_subcontrols_can_be_selected_by_value():", "title": "" }, { "docid": "8cac19b51c33b839efa978616c5cdf64", "score": "0.53964406", "text": "async def test_get_basic_value_notification_triggers(\n hass: HomeAssistant, client, ge_in_wall_dimmer_switch, integration\n) -> None:\n dev_reg = async_get_dev_reg(hass)\n device = dev_reg.async_get_device(\n identifiers={get_device_id(client.driver, ge_in_wall_dimmer_switch)}\n )\n assert device\n expected_trigger = {\n \"platform\": \"device\",\n \"domain\": DOMAIN,\n \"type\": \"event.value_notification.basic\",\n \"device_id\": device.id,\n \"command_class\": CommandClass.BASIC,\n \"property\": \"event\",\n \"property_key\": 
None,\n \"endpoint\": 0,\n \"subtype\": \"Endpoint 0\",\n \"metadata\": {},\n }\n triggers = await async_get_device_automations(\n hass, DeviceAutomationType.TRIGGER, device.id\n )\n assert expected_trigger in triggers", "title": "" }, { "docid": "914b57812637da3a4f2df5c779fac567", "score": "0.53924584", "text": "def test_set_actions_should_set_action(self):\n self.action_mock = lambda x: self.assertTrue(self.driver_mock is x)\n self.form_action.set_actions(self.navigate_mock, self.action_mock, self.action_mock)\n self.form_action.action()", "title": "" }, { "docid": "ede56193f87dab34009ce122f21edeb1", "score": "0.53825533", "text": "def test_actions_rendered(admin_client, article, action):\n url = reverse('admin:blog_article_changelist')\n changelist = admin_client.get(url)\n\n input_name = '_action__articleadmin__admin__{}__blog__article__{}'.format(\n action, article.pk\n )\n assert input_name in dict(changelist.form.fields)", "title": "" }, { "docid": "3c0bfd7aafa57c2a45342c87edcca13b", "score": "0.5360911", "text": "async def test_event(\n hass: HomeAssistant, mock_bridge_v2, v2_resources_test_data\n) -> None:\n await mock_bridge_v2.api.load_test_data(v2_resources_test_data)\n await setup_platform(hass, mock_bridge_v2, \"event\")\n # 7 entities should be created from test data\n assert len(hass.states.async_all()) == 7\n\n # pick one of the remote buttons\n state = hass.states.get(\"event.hue_dimmer_switch_with_4_controls_button_1\")\n assert state\n assert state.state == \"unknown\"\n assert state.name == \"Hue Dimmer switch with 4 controls Button 1\"\n # check event_types\n assert state.attributes[ATTR_EVENT_TYPES] == [\n \"initial_press\",\n \"repeat\",\n \"short_release\",\n \"long_press\",\n \"long_release\",\n ]\n # trigger firing 'initial_press' event from the device\n btn_event = {\n \"button\": {\"last_event\": \"initial_press\"},\n \"id\": \"f92aa267-1387-4f02-9950-210fb7ca1f5a\",\n \"metadata\": {\"control_id\": 1},\n \"type\": \"button\",\n }\n mock_bridge_v2.api.emit_event(\"update\", btn_event)\n await hass.async_block_till_done()\n state = hass.states.get(\"event.hue_dimmer_switch_with_4_controls_button_1\")\n assert state.attributes[ATTR_EVENT_TYPE] == \"initial_press\"\n # trigger firing 'long_release' event from the device\n btn_event = {\n \"button\": {\"last_event\": \"long_release\"},\n \"id\": \"f92aa267-1387-4f02-9950-210fb7ca1f5a\",\n \"metadata\": {\"control_id\": 1},\n \"type\": \"button\",\n }\n mock_bridge_v2.api.emit_event(\"update\", btn_event)\n await hass.async_block_till_done()\n state = hass.states.get(\"event.hue_dimmer_switch_with_4_controls_button_1\")\n assert state.attributes[ATTR_EVENT_TYPE] == \"long_release\"", "title": "" }, { "docid": "4f157ce76166496fe3fc5d229d1383d2", "score": "0.53346133", "text": "async def test_get_trigger_capabilities(\n hass: HomeAssistant,\n device_registry: dr.DeviceRegistry,\n entity_registry: er.EntityRegistry,\n enable_custom_integrations: None,\n) -> None:\n platform = getattr(hass.components, f\"test.{DOMAIN}\")\n platform.init()\n ent = platform.ENTITIES[0]\n assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: \"test\"}})\n await hass.async_block_till_done()\n\n config_entry = MockConfigEntry(domain=\"test\", data={})\n config_entry.add_to_hass(hass)\n device_entry = device_registry.async_get_or_create(\n config_entry_id=config_entry.entry_id,\n connections={(dr.CONNECTION_NETWORK_MAC, \"12:34:56:AB:CD:EF\")},\n )\n entity_registry.async_get_or_create(\n DOMAIN, \"test\", ent.unique_id, 
device_id=device_entry.id\n )\n\n triggers = await async_get_device_automations(\n hass, DeviceAutomationType.TRIGGER, device_entry.id\n )\n assert len(triggers) == 4\n for trigger in triggers:\n capabilities = await async_get_device_automation_capabilities(\n hass, DeviceAutomationType.TRIGGER, trigger\n )\n assert capabilities == {\n \"extra_fields\": [\n {\"name\": \"for\", \"optional\": True, \"type\": \"positive_time_period_dict\"}\n ]\n }", "title": "" }, { "docid": "4019e90fb79757be24d590e65569b020", "score": "0.53253406", "text": "def test_trig():\n pass", "title": "" }, { "docid": "d12199d1ef2a4ca526f7009c3665cd6a", "score": "0.5322279", "text": "def btn_pressed():\n for k in pyxel.__dict__.keys():\n if (\n k.startswith('KEY_')\n or k.startswith('GAMEPAD_')\n or k.startswith('MOUSE_')\n ) and pyxel.btn(getattr(pyxel, k)):\n return k", "title": "" }, { "docid": "93809d120757a332551d2cf4b8f41c9f", "score": "0.53117657", "text": "def _verify_front_arm_actions(self, expected_actions):\n self.assertEqual(self._front_arm_actions, expected_actions)", "title": "" }, { "docid": "1a7a5195b6914fef302c55c500ff131a", "score": "0.5308277", "text": "async def test_get_triggers_hidden_auxiliary(\n hass: HomeAssistant,\n device_registry: dr.DeviceRegistry,\n entity_registry: er.EntityRegistry,\n hidden_by,\n entity_category,\n) -> None:\n config_entry = MockConfigEntry(domain=\"test\", data={})\n config_entry.add_to_hass(hass)\n device_entry = device_registry.async_get_or_create(\n config_entry_id=config_entry.entry_id,\n connections={(dr.CONNECTION_NETWORK_MAC, \"12:34:56:AB:CD:EF\")},\n )\n entity_entry = entity_registry.async_get_or_create(\n DOMAIN,\n \"test\",\n \"5678\",\n device_id=device_entry.id,\n entity_category=entity_category,\n hidden_by=hidden_by,\n supported_features=CoverEntityFeature.OPEN,\n )\n expected_triggers = [\n {\n \"platform\": \"device\",\n \"domain\": DOMAIN,\n \"type\": trigger,\n \"device_id\": device_entry.id,\n \"entity_id\": entity_entry.id,\n \"metadata\": {\"secondary\": True},\n }\n for trigger in [\"opened\", \"closed\", \"opening\", \"closing\"]\n ]\n triggers = await async_get_device_automations(\n hass, DeviceAutomationType.TRIGGER, device_entry.id\n )\n assert triggers == unordered(expected_triggers)", "title": "" }, { "docid": "ef48bbaf22b7c3acdbded5c2f656b156", "score": "0.52773756", "text": "def click_target_buy_overrides_button(self):\n is_clicked = None\n try:\n self.logger.info('Start: click target buy overrides button')\n self._buy_page.click_target_buy_overrides_button()\n is_clicked = True\n except WebDriverException as exp:\n is_clicked = False\n self.logger.error(exp.msg)\n raise\n finally:\n self.logger.info('End: click target buy overrides button')\n return is_clicked", "title": "" }, { "docid": "d8770d1db5bd698c6a53cea172cff84e", "score": "0.5271562", "text": "def __init__(self, *triggers):\n\n\t\tif len(triggers) == 0:\n\t\t\traise ValueError(\"No Triggers have been passed in!\")\n\n\t\tfor trigger in triggers:\n\t\t\tif not callable(trigger):\n\t\t\t\traise ValueError(\"A trigger passed into this custom button is not a function!\")\n\n\t\tself.triggers = triggers\n\t\t#super.__init__()", "title": "" }, { "docid": "d0d36af2e939d8f222f070278ad0e04d", "score": "0.5271143", "text": "async def test_if_fires_on_event(hass: HomeAssistant, calls, setup_comp) -> None:\n assert await async_setup_component(\n hass,\n \"automation\",\n {\n \"automation\": {\n \"trigger\": {\n \"platform\": \"conversation\",\n \"command\": [\n \"Hey yo\",\n \"Ha ha 
ha\",\n ],\n },\n \"action\": {\n \"service\": \"test.automation\",\n \"data_template\": {\"data\": \"{{ trigger }}\"},\n },\n }\n },\n )\n\n await hass.services.async_call(\n \"conversation\",\n \"process\",\n {\n \"text\": \"Ha ha ha\",\n },\n blocking=True,\n )\n\n await hass.async_block_till_done()\n assert len(calls) == 1\n assert calls[0].data[\"data\"] == {\n \"alias\": None,\n \"id\": \"0\",\n \"idx\": \"0\",\n \"platform\": \"conversation\",\n \"sentence\": \"Ha ha ha\",\n \"slots\": {},\n \"details\": {},\n }", "title": "" }, { "docid": "4a4e5dc33e7e82339bb1edbe1ff0aa49", "score": "0.526724", "text": "def test_gui_MenuMode() -> None:\n img = images.SPLASH_IMG\n pos = (0, 0)\n wsize = (ut.BSIZE[0] * ut.TILE, ut.BSIZE[1] * ut.TILE)\n\n b_pos1 = (pos[0], pos[1])\n b_pos2 = (pos[0]+50, pos[1]+50)\n b_size = (100, 100)\n b_img = images.BUTT_TMP_IMG\n\n test_gui = [gui.Button(b_pos1, b_size, 'trigger_1', b_img),\n gui.Button(b_pos2, b_size, 'trigger_2', b_img)]\n trigs = [('trigger1', 0, 2), ('trigger2', 0, 2)]\n\n # Test 0: incorrect initial values\n test = gui.MenuMode(img, (-1, -1))\n assert 0 <= test.menu_pos[0] < wsize[0]\n assert 0 <= test.menu_pos[1] < wsize[1]\n assert test.focused is None\n assert test.gui is None\n assert test.triggers is None\n\n test = gui.MenuMode(img, wsize)\n assert 0 <= test.menu_pos[0] < wsize[0]\n assert 0 <= test.menu_pos[1] < wsize[1]\n assert test.focused is None\n assert test.gui is None\n assert test.triggers is None\n\n # Test 1: proper work of init() method\n # pygame.display.init()\n # test = gui.MenuMode(img, pos, test_gui, trigs)\n # test.init(dummy_screen)\n # assert pygame.mouse.get_visible() is False # only from pygame 2.0.0\n # pygame.mouse.set_visible(True)\n\n # Test 2: proper work of update_focus() method\n test = gui.MenuMode(img, pos, test_gui, trigs)\n test.update_focus((25, 25))\n assert test.focused == 0\n test.update_focus((50, 50))\n assert test.focused == 0\n test.update_focus((75, 75))\n assert test.focused == 0\n test.update_focus((125, 125))\n assert test.focused == 1\n test.update_focus((100, 100))\n assert test.focused == 1\n test.update_focus((75, 75))\n assert test.focused == 1\n test.update_focus((0, 150))\n assert test.focused is None\n test.update_focus((75, 75))\n assert test.focused == 0\n\n # Test 3: proper work of events() method\n\n # Test 4: proper work of leave() method\n # test = gui.MenuMode()\n # test.init()\n # test.leave()\n # assert pygame.mouse.get_visible() is True", "title": "" }, { "docid": "a96d14799612e8719d42f871b8ff660c", "score": "0.52647483", "text": "def test_controls(self):\n user_input = Input()\n print(\"Gimme anyol input:\")\n event = InputEvent.NONE\n while event == InputEvent.NONE:\n event = user_input.poll()\n\n print(\"Event:\", event)\n print(\"Got event\", event.description)\n self.assertIsNotNone(event)", "title": "" }, { "docid": "c9e80fd5434c7b63c499c64670d1aba7", "score": "0.5263074", "text": "def test_delete_button(self):\n self.root, s, array = self.populate_gui()\n # Mocking \"Create button\" click. Idea - http://code.activestate.com/recipes/578978-using-tkinters-invoke-method-for-testing/\n s.frame_bottom5.children['but_create'].invoke()\n # Mocked data as an array\n\n # Compare fresh Tuna with mocked data\n z = Data.tunas[0].getTunaFeatures\n for x, y in zip (z, array):\n # Must be equal. 
If not - error\n self.assertEqual(x, y)\n # Setting focus\n s.tree.focus(0)\n # \"Press\" Delete entry button\n s.frame_bottom5.children['but_delete'].invoke()\n # After deletion, tunas length should be 0\n self.assertEqual(len(Data.tunas), 0)\n if self.root:\n self.root.destroy()", "title": "" }, { "docid": "0b53113818c2a2282fec5ac20c13d51e", "score": "0.5257707", "text": "def _TriggerSetUp(self):\n raise NotImplementedError", "title": "" }, { "docid": "dd51c27620ec254720a0769b4086453a", "score": "0.52566755", "text": "def test_set_button_color(button):\n set_button_property(button, \"red\", \"\")\n assert button.get_color() == \"red\"", "title": "" }, { "docid": "1571a7dba78c1b2262d76b6b1cc00a6b", "score": "0.52473766", "text": "def test_search_button_enabled(self):\n # Get Search Button\n search_button = self.driver.find_element_by_class_name(\"btn-prim\")\n # Check Search Button is enabled\n self.assertTrue(search_button.is_enabled())", "title": "" }, { "docid": "d73831ad1c90cb2926a4b571ee0546b0", "score": "0.52439314", "text": "def test_get_event_matches_keys(self):\n pass", "title": "" }, { "docid": "96055e161e7ee0ec8cd1e4047d77d8b4", "score": "0.5232518", "text": "def test_radio_buttons_cannot_be_unselected():", "title": "" }, { "docid": "df79075394e4caf544b56f6fc0b4efee", "score": "0.5229247", "text": "def test_controls_without_value(self):", "title": "" }, { "docid": "b16835cd6167dd515c1b7e41a46ebb9f", "score": "0.52196145", "text": "def defineClickActions(self):\n pass", "title": "" }, { "docid": "860d3afc0bd46811d16dc131bfc70c71", "score": "0.5218555", "text": "async def test_get_scene_activation_value_notification_triggers(\n hass: HomeAssistant, client, hank_binary_switch, integration\n) -> None:\n dev_reg = async_get_dev_reg(hass)\n device = dev_reg.async_get_device(\n identifiers={get_device_id(client.driver, hank_binary_switch)}\n )\n assert device\n expected_trigger = {\n \"platform\": \"device\",\n \"domain\": DOMAIN,\n \"type\": \"event.value_notification.scene_activation\",\n \"device_id\": device.id,\n \"command_class\": CommandClass.SCENE_ACTIVATION.value,\n \"property\": \"sceneId\",\n \"property_key\": None,\n \"endpoint\": 0,\n \"subtype\": \"Endpoint 0\",\n \"metadata\": {},\n }\n triggers = await async_get_device_automations(\n hass, DeviceAutomationType.TRIGGER, device.id\n )\n assert expected_trigger in triggers", "title": "" }, { "docid": "1af0c659c644779ac876d4ad778d50a6", "score": "0.52004796", "text": "def assert_action_performed(_widget, _action):\n try:\n EventLog.performed_actions(_widget, _action)\n except AttributeError as e:\n pytest.fail(str(e))", "title": "" }, { "docid": "e2c2daea300e8acc5dfdd2f782bf35d1", "score": "0.5199976", "text": "def button_pressed(self, button: Button) -> bool:\n return button == self.input", "title": "" }, { "docid": "025672332dcf05ee9dab0b1e8ebb87e6", "score": "0.51872325", "text": "def test_controls_with_slightly_invalid_ids(self):", "title": "" }, { "docid": "538f18accff0d5a575fd5461ae52c1b1", "score": "0.51822996", "text": "def get_triggers():\n # TODO: filter out nonetypes?\n for trigger in triggers:\n for i in trigger:\n xbmc.log(msg='The value of %s is %s and a %s' % (i, trigger[i], type(trigger[i])),\n level=xbmc.LOGNOTICE)\n return triggers", "title": "" }, { "docid": "a5dcc04f5c620c30dd6304a57c0064f9", "score": "0.51805586", "text": "def on_click(self, button):\n pass", "title": "" }, { "docid": "97b8c5a9a5d459de423a216eb9294a1f", "score": "0.5180535", "text": "def execute_buttons(self):\n arms = 0\n 
button_states = self.get_buttons()\n for button_name in BUTTON_PORTS:\n port = BUTTON_PORTS[button_name]\n angle = -1\n if isinstance(port, list):\n angle = port[1]\n port = port[0]\n if port in button_states and button_states[port] is True:\n # button was just pressed\n if button_name in self.button_toggles:\n # needs toggled\n if button_name == \"GRAB\":\n pass\n # print(button_name, port == POV,\n # self.get_raw_buttons()[POV], angle)\n if not (port == POV and not self.get_raw_buttons()[\n POV] == angle):\n new_state = not self.button_toggles[button_name]\n self.button_toggles[button_name] = new_state\n self.set_rumble(new_state)\n # print(button_name, new_state)\n elif self.get_raw_buttons()[POV] == angle:\n # Button angle correct, check button name\n if button_name == \"INC SPEED\":\n self.max_enhancer += SPEED_ADJUST\n self.set_rumble(True, 1)\n elif button_name == \"DEC SPEED\":\n self.max_enhancer -= SPEED_ADJUST\n self.set_rumble(False, 1)\n elif button_name == \"RES SPEED\":\n self.max_enhancer = 0\n self.set_rumble(True, length=200)\n elif port in button_states:\n # BUTTON BEING PRESSED\n if button_name == \"ARM IN\":\n arms = max(-1, min(-(ARM_SPEED_IN), 1))\n self.button_toggles[\"GRAB\"] = False\n elif button_name == \"ARM OUT\":\n arms = max(-1, min(ARM_SPEED_OUT, 1))\n self.button_toggles[\"GRAB\"] = False\n # if arms == 0 and self.button_toggles[\"GRAB\"]:\n # self.robot_lift_2.set(-ARM_SPEED_IN)\n # else:\n # self.robot_lift_2.set(arms)\n\n self.check_rumble()", "title": "" }, { "docid": "f57bded989269779c0f1a519e5b2bac7", "score": "0.5177102", "text": "def inputButton(self, button):\n pass", "title": "" }, { "docid": "06c1235719c588f61b8b89ea5c86301e", "score": "0.51698107", "text": "def _get_triggers(\n self, link: 'Link', src_spec: 'SourceModelSpec'\n ) -> Tuple[List[str], List[str]]:\n return [], []", "title": "" } ]
9535fe6c46cb906bf64698332e3cd3aa
Blueprint for a simple model assigning stellar mass and quiescent/active designation to a subhalo catalog.
[ { "docid": "7013fc7bb776bc7cea36b4c0a616cfd0", "score": "0.0", "text": "def SmHmBinarySFR_blueprint(\n prim_haloprop_key = model_defaults.default_smhm_haloprop, \n smhm_model=smhm_components.Moster13SmHm, \n scatter_level = 0.2, \n redshift = sim_defaults.default_redshift, \n sfr_abcissa = [12, 15], sfr_ordinates = [0.25, 0.75], logparam=True, \n **kwargs):\n\n sfr_model = sfr_components.BinaryGalpropInterpolModel(\n galprop_key='quiescent', prim_haloprop_key=prim_haloprop_key, \n abcissa=sfr_abcissa, ordinates=sfr_ordinates, logparam=logparam)\n\n sm_model = smhm_components.Moster13SmHm(\n prim_haloprop_key=prim_haloprop_key, redshift=redshift, \n scatter_abcissa = [12], scatter_ordinates = [scatter_level])\n\n blueprint = {sm_model.galprop_key: sm_model, sfr_model.galprop_key: sfr_model, 'mock_factory': SubhaloMockFactory}\n\n return blueprint", "title": "" } ]
[ { "docid": "3193e2ab42a83b12b66740baeb711288", "score": "0.6471101", "text": "def Campbell15_blueprint(\n prim_haloprop_key = model_defaults.default_smhm_haloprop, \n sec_galprop_key = 'ssfr', sec_haloprop_key = 'vpeak', \n smhm_model=smhm_components.Moster13SmHm, \n scatter_level = 0.2, \n redshift = sim_defaults.default_redshift, **kwargs):\n\n stellar_mass_model = smhm_model(\n prim_haloprop_key=prim_haloprop_key, redshift=redshift, \n scatter_abcissa = [12], scatter_ordinates = [scatter_level])\n\n fake_mock = FakeMock(approximate_ngals = 1e5)\n input_galaxy_table = fake_mock.galaxy_table\n prim_galprop_bins = np.logspace(8, 12, num=15) \n\n ssfr_model = ConditionalAbunMatch(input_galaxy_table=input_galaxy_table, \n prim_galprop_key=stellar_mass_model.galprop_key, \n galprop_key=sec_galprop_key, \n sec_haloprop_key=sec_haloprop_key, \n prim_galprop_bins=prim_galprop_bins, \n **kwargs)\n blueprint = ({\n stellar_mass_model.galprop_key: stellar_mass_model, \n ssfr_model.galprop_key: ssfr_model, \n 'mock_factory': SubhaloMockFactory})\n\n return blueprint", "title": "" }, { "docid": "412e98b8820f7be92caa064dc48f9eab", "score": "0.61549777", "text": "def create_model():\n ###########################################################################\n # Flowsheet and Property Package #\n ###########################################################################\n m = pyo.ConcreteModel(name=\"Steam Cycle Model\")\n m.fs = FlowsheetBlock(default={\"dynamic\": False})\n m.fs.prop_water = iapws95.Iapws95ParameterBlock(\n default={\"phase_presentation\": iapws95.PhaseType.LG}\n )\n\n m.fs.prop_water2 = iapws95.Iapws95ParameterBlock()\n m.fs.therminol66 = ThermalOilParameterBlock()\n\n m.fs.charge_hx = HeatExchanger(\n default={\"delta_temperature_callback\": delta_temperature_underwood_callback,\n \"shell\": {\"property_package\": m.fs.prop_water2},\n \"tube\": {\"property_package\": m.fs.therminol66},\n \"flow_pattern\": HeatExchangerFlowPattern.countercurrent})\n\n m.fs.hp_splitter = HelmSplitter(default={\"dynamic\": False,\n \"property_package\": m.fs.prop_water2})\n m.fs.ip_splitter = HelmSplitter(default={\"dynamic\": False,\n \"property_package\": m.fs.prop_water2})\n\n m.fs.storage_cooler = Heater(default={\"dynamic\": False,\n \"property_package\": m.fs.prop_water2,\n \"has_pressure_change\": True})\n \n m.fs.hx_pump = WaterPump(default={\"property_package\": m.fs.prop_water2})\n\n # The enthalpy at the outlet of the cooler is required to be subcooled, that is,\n # below the ehntalpy of saturation. 
This condition was selected instead of using\n # temperatures, which cause certain difficulty in converging the model.\n # return (m.fs.storage_cooler.control_volume.properties_out[0].temperature <= \n # m.fs.storage_cooler.control_volume.properties_out[0].temperature_sat - 5)\n @m.fs.storage_cooler.Constraint(m.fs.time)\n def constraint_cooler_enth(b, t):\n return (m.fs.storage_cooler.control_volume.properties_out[0].enth_mol <= \n m.fs.storage_cooler.control_volume.properties_out[0].enth_mol_sat_phase['Liq'])\n \n ###########################################################################\n # Turbine declarations #\n ###########################################################################\n\n for i in range(9):\n\n turbine = HelmTurbineStage(\n default={\n \"property_package\": m.fs.prop_water2\n }\n )\n setattr(m.fs, \"turbine_\" + str(i+1), turbine)\n\n ###########################################################################\n # Boiler section declarations: #\n ###########################################################################\n # Boiler section is set up using two heater blocks, as following:\n # 1) For the main steam the heater block is named 'boiler'\n # 2) For the reheated steam the heater block is named 'reheater'\n m.fs.boiler = Heater(\n default={\n \"dynamic\": False,\n \"property_package\": m.fs.prop_water,\n \"has_pressure_change\": True\n }\n )\n m.fs.reheater = Heater(\n default={\n \"dynamic\": False,\n \"property_package\": m.fs.prop_water,\n \"has_pressure_change\": True\n }\n )\n\n # Outlet temperature of boiler is set to 866.15 K\n @m.fs.boiler.Constraint(m.fs.time)\n def boiler_temperature_constraint(b, t):\n return b.control_volume.properties_out[t].temperature == 866.15 # K\n\n # Outlet temperature of reheater is set to 866.15 K\n @m.fs.reheater.Constraint(m.fs.time)\n def reheater_temperature_constraint(b, t):\n return b.control_volume.properties_out[t].temperature == 866.15 # K\n\n ###########################################################################\n # Add Condenser Mixer, Condenser, and Condensate pump #\n ###########################################################################\n # condenser mix\n m.fs.condenser_mix = Mixer(\n default={\n \"momentum_mixing_type\": MomentumMixingType.none,\n \"inlet_list\": [\"main\", \"bfpt\", \"drain\", \"makeup\"],\n \"property_package\": m.fs.prop_water,\n }\n )\n\n # The inlet 'main' refers to the main steam coming from the turbine train\n # Inlet 'bfpt' refers to the steam coming from the bolier feed pump turbine\n # Inlet 'drain' refers to the condensed steam from the feed water heater 1\n # Inlet 'makeup' refers to the make up water\n # The outlet pressure of condenser mixer is equal to the minimum pressure\n # Since the turbine (#9) outlet (or, mixer inlet 'main') pressure\n # has the minimum pressure, the following constraint sets the outlet\n # pressure of the condenser mixer to the pressure of the inlet 'main'\n @m.fs.condenser_mix.Constraint(m.fs.time)\n def mixer_pressure_constraint(b, t):\n return b.main_state[t].pressure == b.mixed_state[t].pressure\n\n m.fs.condenser = CondenserHelm(default={\"shell\":{\"has_pressure_change\": False,\n \"property_package\": m.fs.prop_water2},\n \"tube\": {\"has_pressure_change\": False,\n \"property_package\": m.fs.prop_water2}})\n \n iscale.set_scaling_factor(m.fs.condenser.side_1.heat, 1e-9)\n iscale.set_scaling_factor(m.fs.condenser.side_2.heat, 1e-9)\n\n # condensate pump\n m.fs.cond_pump = WaterPump(\n default={\n \"property_package\": 
m.fs.prop_water2,\n }\n )\n ###########################################################################\n # Feedwater heater declaration #\n ###########################################################################\n # Feed water heaters (FWHs) are declared as 0D heat exchangers\n # Tube side is for feed water & Shell side is for steam condensing\n # Pressure drop on both sides are accounted for by setting the respective\n # outlet pressure based on the following assumptions:\n # (1) Feed water side: A constant 4% pressure drop is assumed\n # on the feedwater side for all FWHs. For this,\n # the outlet pressure is set to 0.96 times the inlet pressure,\n # on the feed water side for all FWHs\n # (2) Steam condensing side: Going from high pressure to\n # low pressure FWHs, the outlet pressure of\n # the condensed steam in assumed to be 10% more than that\n # of the pressure of steam extracted for the immediately\n # next lower pressure feedwater heater.\n # e.g. the outlet condensate pressure of FWH 'n'\n # = 1.1 * pressure of steam extracted for FWH 'n-1'\n # In case of FWH1 the FWH 'n-1' is used for Condenser,\n # and in case of FWH6, FWH 'n-1' is for Deaerator. Here,\n # the steam pressure for FWH 'n-1' is known because the\n # pressure ratios for turbines are fixed.\n # The condensing steam is assumed to leave the FWH as saturated liquid\n # Thus, each FWH is accompanied by 3 constraints, 2 for pressure drop\n # and 1 for the enthalpy.\n\n # Scaling factors for area and overall heat transfer coefficients for\n # FWHs have all been set appropriately (user may change these values,\n # if needed) if not set, the scaling factors = 1 (IDAES default)\n\n ###########################################################################\n # DEFINITION OF FEED WATER HEATERS MIXERS\n ###########################################################################\n FWH_Mixers_list = ['fwh1_mix', 'fwh2_mix', 'fwh3_mix', 'fwh6_mix']\n\n for i in FWH_Mixers_list:\n FWH_Mixer = Mixer(\n default={\n \"momentum_mixing_type\": MomentumMixingType.none,\n \"inlet_list\": [\"steam\", \"drain\"],\n \"property_package\": m.fs.prop_water,\n }\n )\n setattr(m.fs, i, FWH_Mixer)\n\n m.fs.fwh7_mix = Mixer(\n default={\n \"momentum_mixing_type\": MomentumMixingType.none,\n \"inlet_list\": [\"steam\", \"drain\", \"from_hx_pump\"],\n \"property_package\": m.fs.prop_water,\n }\n )\n \n m.fs.bfp_mix = Mixer(\n default={\n \"momentum_mixing_type\": MomentumMixingType.none,\n \"inlet_list\": [\"from_bfp\", \"from_hx_pump\"],\n \"property_package\": m.fs.prop_water,\n }\n )\n # @m.fs.hx_pump.Constraint(m.fs.time)\n # def hx_pump_pressure_out(b, t):\n # return (m.fs.hx_pump.control_volume.properties_out[0.0].pressure == \n # m.fs.fwh7_mix.steam_state[0].pressure*1.15)\n ###########################################################################\n # DEFINITION OF OUTLET PRESSURE OF FEED WATER HEATERS MIXERS\n ###########################################################################\n\n # The outlet pressure of an FWH mixer is equal to the minimum pressure\n # Since the pressure of mixer inlet 'steam' has the minimum pressure,\n # the following constraints set the outlet pressure of FWH mixers to be same\n # as the pressure of the inlet 'steam'\n\n def fwhmixer_pressure_constraint(b, t):\n return b.steam_state[t].pressure == b.mixed_state[t].pressure\n\n for i in FWH_Mixers_list:\n setattr(getattr(m.fs, i), \"mixer_pressure_constraint\", pyo.Constraint(m.fs.config.time, rule=fwhmixer_pressure_constraint))\n\n 
@m.fs.fwh7_mix.Constraint(m.fs.time)\n def fwh7mixer_pressure_constraint(b, t):\n return b.steam_state[t].pressure == b.mixed_state[t].pressure\n \n @m.fs.bfp_mix.Constraint(m.fs.time)\n def bfp_mix_pressure_constraint(b, t):\n return b.from_bfp_state[t].pressure == b.mixed_state[t].pressure\n ###########################################################################\n # DEFINITION OF FEED WATER HEATERS\n ###########################################################################\n FWH_list = ['fwh1', 'fwh2', 'fwh3', 'fwh4', 'fwh6', 'fwh7', 'fwh8']\n\n for i in FWH_list:\n FWH = HeatExchanger(\n default={\n \"delta_temperature_callback\": delta_temperature_underwood_callback,\n \"shell\": {\n \"property_package\": m.fs.prop_water,\n \"material_balance_type\": MaterialBalanceType.componentTotal,\n \"has_pressure_change\": True,\n },\n \"tube\": {\n \"property_package\": m.fs.prop_water,\n \"material_balance_type\": MaterialBalanceType.componentTotal,\n \"has_pressure_change\": True,\n },\n }\n )\n setattr(m.fs, i, FWH)\n\n ###########################################################################\n # SETTING SCALING FACTORS FOR AREA AND HEAT TRANSFER COEFFICIENT\n ###########################################################################\n\n for i in FWH_list:\n c = getattr(m.fs, i)\n iscale.set_scaling_factor(getattr(c, \"area\"), 1e-2)\n iscale.set_scaling_factor(getattr(c, \"overall_heat_transfer_coefficient\"), 1e-3)\n\n ###########################################################################\n # Setting the outlet enthalpy of condensate in an FWH to be same as saturated liquid\n ###########################################################################\n def fwh_vaporfrac_constraint(b, t):\n return (\n b.side_1.properties_out[t].enth_mol\n == b.side_1.properties_out[t].enth_mol_sat_phase['Liq'])\n\n for i in FWH_list:\n setattr(getattr(m.fs, i), i + \"_vaporfrac_constraint\", pyo.Constraint(m.fs.time, rule=fwh_vaporfrac_constraint))\n\n ###########################################################################\n # Setting a 4% pressure drop on the feedwater side (P_out = 0.96 * P_in)\n ###########################################################################\n\n def fwh_s2pdrop_constraint(b, t):\n return (\n b.side_2.properties_out[t].pressure\n == 0.96 * b.side_2.properties_in[t].pressure)\n\n for i in FWH_list:\n setattr(getattr(m.fs, i), i + \"_s2pdrop_constraint\", pyo.Constraint(m.fs.time, rule=fwh_s2pdrop_constraint))\n\n ###########################################################################\n # Setting the outlet pressure of condensate to be 10% more than that of\n # steam routed to condenser, as described in FWH description\n ###########################################################################\n # FWH1: 0.5 is the pressure ratio for turbine #9 (see set_inputs)\n # FWH2: 0.64^2 is the pressure ratio for turbine #8 (see set_inputs)\n # FWH3: 0.64^2 is the pressure ratio for turbine #7 (see set_inputs)\n # FWH4: 0.64^2 is the pressure ratio for turbine #6 (see set_inputs)\n # FWH6: 0.79^6 is the pressure ratio for turbine #4 (see set_inputs)\n # FWH7: 0.79^4 is the pressure ratio for turbine #3 (see set_inputs)\n # FWH8: 0.8^2 is the pressure ratio for turbine #2 (see set_inputs)\n \n pressure_ratio_list = { 'fwh1': 0.5,\n 'fwh2': 0.64**2,\n 'fwh3': 0.64**2,\n 'fwh4': 0.64**2,\n 'fwh6': 0.79**6,\n 'fwh7': 0.79**4,\n 'fwh8': 0.8**2}\n \n def fwh_s1pdrop_constraint(b, t):\n return (\n b.side_1.properties_out[t].pressure\n == 1.1 * b.turbine_pressure_ratio * 
b.side_1.properties_in[t].pressure)\n\n for i in FWH_list:\n b = getattr(m.fs, i)\n b.turbine_pressure_ratio = pyo.Param(initialize = pressure_ratio_list[i])\n setattr(b, i+\"_s1pdrop_constraint\", pyo.Constraint(m.fs.config.time, rule=fwh_s1pdrop_constraint))\n\n ###########################################################################\n # Add deaerator and boiler feed pump (BFP) #\n ###########################################################################\n m.fs.fwh5_da = Mixer(\n default={\n \"momentum_mixing_type\": MomentumMixingType.none,\n \"inlet_list\": [\"steam\", \"drain\", \"feedwater\"],\n \"property_package\": m.fs.prop_water,\n }\n )\n\n # The outlet pressure of deaerator is equal to the minimum pressure\n # Since the pressure of deaerator inlet 'feedwater' has\n # the minimum pressure, the following constraint sets the outlet pressure\n # of deaerator to be same as the pressure of the inlet 'feedwater'\n @m.fs.fwh5_da.Constraint(m.fs.time)\n def fwh5mixer_pressure_constraint(b, t):\n return b.feedwater_state[t].pressure == b.mixed_state[t].pressure\n\n m.fs.bfp = WaterPump(\n default={\n \"property_package\": m.fs.prop_water2,\n }\n )\n m.fs.bfpt = HelmTurbineStage(\n default={\n \"property_package\": m.fs.prop_water2,\n }\n )\n\n # The following constraint sets the outlet pressure of steam extracted\n # for boiler feed water turbine to be same as that of condenser\n @m.fs.Constraint(m.fs.time)\n def constraint_out_pressure(b, t):\n return (\n b.bfpt.control_volume.properties_out[t].pressure\n == b.condenser_mix.mixed_state[t].pressure\n )\n\n # The following constraint demands that the work done by the\n # boiler feed water pump is same as that of boiler feed water turbine\n # Essentially, this says that boiler feed water turbine produces just\n # enough power to meet the demand of boiler feed water pump\n @m.fs.Constraint(m.fs.time)\n def constraint_bfp_power(b, t):\n return (\n b.bfp.control_volume.work[t] + b.bfpt.control_volume.work[t]\n == 0\n )\n\n ###########################################################################\n # Turbine outlet splitter constraints #\n ###########################################################################\n # Equality constraints have been written as following to define\n # the split fractions within the turbine train\n\n splitter_list = ['t1_splitter', 't2_splitter', 't3_splitter', 't5_splitter', 't6_splitter', 't7_splitter', 't8_splitter']\n\n for i in splitter_list:\n\n Splitter = HelmSplitter(default={\"dynamic\": False,\n \"property_package\": m.fs.prop_water})\n setattr(m.fs, i, Splitter)\n \n m.fs.t4_splitter = HelmSplitter(default={\"dynamic\": False,\n \"property_package\": m.fs.prop_water,\n \"num_outlets\": 3})\n\n # The power plant with storage for a charge scenario is now ready\n # Declaraing a plant power out variable for easy analysis of various\n # design and operating scenarios\n m.fs.plant_power_out = pyo.Var(\n m.fs.time,\n domain=pyo.Reals,\n initialize=620,\n doc=\"Net Power MWe out from the power plant\"\n )\n\n # Constraint on Plant Power Output\n # Plant Power Out = Turbine Power - Power required for HX Pump\n @m.fs.Constraint(m.fs.time)\n def production_cons(b, t):\n return (\n (-1*(m.fs.turbine_1.work_mechanical[t]\n + m.fs.turbine_2.work_mechanical[t]\n + m.fs.turbine_3.work_mechanical[t]\n + m.fs.turbine_4.work_mechanical[t]\n + m.fs.turbine_5.work_mechanical[t]\n + m.fs.turbine_6.work_mechanical[t]\n + m.fs.turbine_7.work_mechanical[t]\n + m.fs.turbine_8.work_mechanical[t]\n + 
m.fs.turbine_9.work_mechanical[t])\n ) * 1e-6\n == m.fs.plant_power_out[t]\n )\n\n ###########################################################################\n # Create the stream Arcs and return the model #\n ###########################################################################\n _create_arcs(m)\n pyo.TransformationFactory(\"network.expand_arcs\").apply_to(m.fs)\n return m", "title": "" }, { "docid": "22c8c809f4bcbb7a6755fae6d9c80968", "score": "0.61260045", "text": "def main():\n stage = Usd.Stage.CreateInMemory()\n stage.GetRootLayer().documentation = (\n \"This is an example of setting a Model Prim kind\"\n )\n\n sphere1 = UsdGeom.Sphere.Define(stage, \"/SomeSphere\")\n Usd.ModelAPI(sphere1).SetKind(Kind.Tokens.component)\n sphere2 = UsdGeom.Sphere.Define(stage, \"/SomeSphere/SphereChild\")\n Usd.ModelAPI(sphere2).SetKind(Kind.Tokens.subcomponent)\n sphere3 = UsdGeom.Sphere.Define(stage, \"/SomeSphere/Foo\")\n Usd.ModelAPI(sphere3).SetKind(\"does_not_exist\")\n sphere3.GetPrim().SetMetadata(\n \"comment\",\n \"XXX: This kind is made up. But it could be real if we added to the KindRegistry\\n\"\n \"https://graphics.pixar.com/usd/docs/api/class_kind_registry.html\",\n )\n\n print(stage.GetRootLayer().ExportToString())", "title": "" }, { "docid": "f54114e099db8df2eeefbae3e2bfbb70", "score": "0.58137923", "text": "def __init__(self, catalog_version, catalog_path=None):\n if catalog_path is None:\n catalog_path = (f'/home/mike/research/'\n 'ac6_microburst_scale_sizes'\n '/data/coincident_microbursts_catalogues/'\n 'AC6_coincident_microbursts_sorted_'\n f'Brady_v{catalog_version}.txt')\n # Load catalog.\n self.microburst_catalog = pd.read_csv(catalog_path)\n self.model = IRBEM.MagFields(kext='OPQ77')\n # print(f'Number of microbursts {self.microburst_catalog.shape[0]}')\n return", "title": "" }, { "docid": "e8341d1942ab1d33ea5faba68460cb35", "score": "0.5692982", "text": "def __init__(self,description=\"My Model\\n Is here.\",\n geometry={\"wiresegs\":[] # A straight list of the wiresegments\n },\n excitation=None,\n freq=LinearFrequencyRange(),\n radiationPattern=None,\n computeCharges=True):\n attributesFromDict(locals())", "title": "" }, { "docid": "70d3b922eb2e50f0f3f650974e8bb455", "score": "0.5619619", "text": "def company_skeleton():\n\n E, P, B, D, F = EPBDF()\n entities = ['Paul', 'Roger', 'Quinn', 'Sally', 'Thomas',\n 'Case', 'Adapter', 'Laptop', 'Tablet', 'Smartphone',\n 'Accessories', 'Devices']\n\n entity_types = {'Paul': E, 'Roger': E, 'Quinn': E, 'Sally': E, 'Thomas': E,\n 'Case': P, 'Adapter': P, 'Laptop': P, 'Tablet': P, 'Smartphone': P,\n 'Accessories': B, 'Devices': B}\n skeleton = RelationalSkeleton(company_schema(), True)\n p, r, q, s, t, c, a, l, ta, sm, ac, d = ents = tuple([SkItem(e, entity_types[e]) for e in entities])\n skeleton.add_entities(*ents)\n for emp, prods in ((p, {c}), (q, {c, a, l}), (s, {l, ta}), (t, {sm, ta}), (r, {l})):\n for prod in prods:\n skeleton.add_relationship(SkItem(emp.name + '-' + prod.name, D), {emp, prod})\n for biz, prods in ((ac, {c, a}), (d, {l, ta, sm})):\n for prod in prods:\n skeleton.add_relationship(SkItem(biz.name + '-' + prod.name, F), {biz, prod})\n\n return skeleton", "title": "" }, { "docid": "628dae5ba9c89d6e4a015de048634080", "score": "0.5599956", "text": "def __init__(self, design):\n super(ols_model, self).__init__()\n self.initialize(design)", "title": "" }, { "docid": "6478bc8f50257f1ec8feb0d35d0f34f7", "score": "0.5526171", "text": "def __init__(self, model, colour, serial):\n self.model = model\n self.colour = 
colour\n self.serial = serial", "title": "" }, { "docid": "7f0c8476fbe7de141e200de50dd4ee7b", "score": "0.5524039", "text": "def setupModelInstance(self, geom, dssatexe):\n return super(Model, self).setupModelInstance(geom, \"DSSAT_Ex.exe\")", "title": "" }, { "docid": "45d6ebfdfcbac8c858c3c34cd87e0339", "score": "0.5518273", "text": "def apcupsd_model(self):\n self.writeCommand('apcupsd_model')\n return self", "title": "" }, { "docid": "c2ad82bc902d315973108e63404d969c", "score": "0.5479451", "text": "def pwr_core():\n model = openmc.model.Model()\n\n # Define materials.\n fuel = openmc.Material(1, name='UOX fuel')\n fuel.set_density('g/cm3', 10.062)\n fuel.add_nuclide('U234', 4.9476e-6)\n fuel.add_nuclide('U235', 4.8218e-4)\n fuel.add_nuclide('U238', 2.1504e-2)\n fuel.add_nuclide('Xe135', 1.0801e-8)\n fuel.add_nuclide('O16', 4.5737e-2)\n\n clad = openmc.Material(2, name='Zircaloy')\n clad.set_density('g/cm3', 5.77)\n clad.add_nuclide('Zr90', 0.5145)\n clad.add_nuclide('Zr91', 0.1122)\n clad.add_nuclide('Zr92', 0.1715)\n clad.add_nuclide('Zr94', 0.1738)\n clad.add_nuclide('Zr96', 0.0280)\n\n cold_water = openmc.Material(3, name='Cold borated water')\n cold_water.set_density('atom/b-cm', 0.07416)\n cold_water.add_nuclide('H1', 2.0)\n cold_water.add_nuclide('O16', 1.0)\n cold_water.add_nuclide('B10', 6.490e-4)\n cold_water.add_nuclide('B11', 2.689e-3)\n cold_water.add_s_alpha_beta('c_H_in_H2O')\n\n hot_water = openmc.Material(4, name='Hot borated water')\n hot_water.set_density('atom/b-cm', 0.06614)\n hot_water.add_nuclide('H1', 2.0)\n hot_water.add_nuclide('O16', 1.0)\n hot_water.add_nuclide('B10', 6.490e-4)\n hot_water.add_nuclide('B11', 2.689e-3)\n hot_water.add_s_alpha_beta('c_H_in_H2O')\n\n rpv_steel = openmc.Material(5, name='Reactor pressure vessel steel')\n rpv_steel.set_density('g/cm3', 7.9)\n rpv_steel.add_nuclide('Fe54', 0.05437098, 'wo')\n rpv_steel.add_nuclide('Fe56', 0.88500663, 'wo')\n rpv_steel.add_nuclide('Fe57', 0.0208008, 'wo')\n rpv_steel.add_nuclide('Fe58', 0.00282159, 'wo')\n rpv_steel.add_nuclide('Ni58', 0.0067198, 'wo')\n rpv_steel.add_nuclide('Ni60', 0.0026776, 'wo')\n rpv_steel.add_nuclide('Mn55', 0.01, 'wo')\n rpv_steel.add_nuclide('Cr52', 0.002092475, 'wo')\n rpv_steel.add_nuclide('C0', 0.0025, 'wo')\n rpv_steel.add_nuclide('Cu63', 0.0013696, 'wo')\n\n lower_rad_ref = openmc.Material(6, name='Lower radial reflector')\n lower_rad_ref.set_density('g/cm3', 4.32)\n lower_rad_ref.add_nuclide('H1', 0.0095661, 'wo')\n lower_rad_ref.add_nuclide('O16', 0.0759107, 'wo')\n lower_rad_ref.add_nuclide('B10', 3.08409e-5, 'wo')\n lower_rad_ref.add_nuclide('B11', 1.40499e-4, 'wo')\n lower_rad_ref.add_nuclide('Fe54', 0.035620772088, 'wo')\n lower_rad_ref.add_nuclide('Fe56', 0.579805982228, 'wo')\n lower_rad_ref.add_nuclide('Fe57', 0.01362750048, 'wo')\n lower_rad_ref.add_nuclide('Fe58', 0.001848545204, 'wo')\n lower_rad_ref.add_nuclide('Ni58', 0.055298376566, 'wo')\n lower_rad_ref.add_nuclide('Mn55', 0.0182870, 'wo')\n lower_rad_ref.add_nuclide('Cr52', 0.145407678031, 'wo')\n lower_rad_ref.add_s_alpha_beta('c_H_in_H2O')\n\n upper_rad_ref = openmc.Material(7, name='Upper radial reflector / Top plate region')\n upper_rad_ref.set_density('g/cm3', 4.28)\n upper_rad_ref.add_nuclide('H1', 0.0086117, 'wo')\n upper_rad_ref.add_nuclide('O16', 0.0683369, 'wo')\n upper_rad_ref.add_nuclide('B10', 2.77638e-5, 'wo')\n upper_rad_ref.add_nuclide('B11', 1.26481e-4, 'wo')\n upper_rad_ref.add_nuclide('Fe54', 0.035953677186, 'wo')\n upper_rad_ref.add_nuclide('Fe56', 0.585224740891, 'wo')\n 
upper_rad_ref.add_nuclide('Fe57', 0.01375486056, 'wo')\n upper_rad_ref.add_nuclide('Fe58', 0.001865821363, 'wo')\n upper_rad_ref.add_nuclide('Ni58', 0.055815129186, 'wo')\n upper_rad_ref.add_nuclide('Mn55', 0.0184579, 'wo')\n upper_rad_ref.add_nuclide('Cr52', 0.146766614995, 'wo')\n upper_rad_ref.add_s_alpha_beta('c_H_in_H2O')\n\n bot_plate = openmc.Material(8, name='Bottom plate region')\n bot_plate.set_density('g/cm3', 7.184)\n bot_plate.add_nuclide('H1', 0.0011505, 'wo')\n bot_plate.add_nuclide('O16', 0.0091296, 'wo')\n bot_plate.add_nuclide('B10', 3.70915e-6, 'wo')\n bot_plate.add_nuclide('B11', 1.68974e-5, 'wo')\n bot_plate.add_nuclide('Fe54', 0.03855611055, 'wo')\n bot_plate.add_nuclide('Fe56', 0.627585036425, 'wo')\n bot_plate.add_nuclide('Fe57', 0.014750478, 'wo')\n bot_plate.add_nuclide('Fe58', 0.002000875025, 'wo')\n bot_plate.add_nuclide('Ni58', 0.059855207342, 'wo')\n bot_plate.add_nuclide('Mn55', 0.0197940, 'wo')\n bot_plate.add_nuclide('Cr52', 0.157390026871, 'wo')\n bot_plate.add_s_alpha_beta('c_H_in_H2O')\n\n bot_nozzle = openmc.Material(9, name='Bottom nozzle region')\n bot_nozzle.set_density('g/cm3', 2.53)\n bot_nozzle.add_nuclide('H1', 0.0245014, 'wo')\n bot_nozzle.add_nuclide('O16', 0.1944274, 'wo')\n bot_nozzle.add_nuclide('B10', 7.89917e-5, 'wo')\n bot_nozzle.add_nuclide('B11', 3.59854e-4, 'wo')\n bot_nozzle.add_nuclide('Fe54', 0.030411411144, 'wo')\n bot_nozzle.add_nuclide('Fe56', 0.495012237964, 'wo')\n bot_nozzle.add_nuclide('Fe57', 0.01163454624, 'wo')\n bot_nozzle.add_nuclide('Fe58', 0.001578204652, 'wo')\n bot_nozzle.add_nuclide('Ni58', 0.047211231662, 'wo')\n bot_nozzle.add_nuclide('Mn55', 0.0156126, 'wo')\n bot_nozzle.add_nuclide('Cr52', 0.124142524198, 'wo')\n bot_nozzle.add_s_alpha_beta('c_H_in_H2O')\n\n top_nozzle = openmc.Material(10, name='Top nozzle region')\n top_nozzle.set_density('g/cm3', 1.746)\n top_nozzle.add_nuclide('H1', 0.0358870, 'wo')\n top_nozzle.add_nuclide('O16', 0.2847761, 'wo')\n top_nozzle.add_nuclide('B10', 1.15699e-4, 'wo')\n top_nozzle.add_nuclide('B11', 5.27075e-4, 'wo')\n top_nozzle.add_nuclide('Fe54', 0.02644016154, 'wo')\n top_nozzle.add_nuclide('Fe56', 0.43037146399, 'wo')\n top_nozzle.add_nuclide('Fe57', 0.0101152584, 'wo')\n top_nozzle.add_nuclide('Fe58', 0.00137211607, 'wo')\n top_nozzle.add_nuclide('Ni58', 0.04104621835, 'wo')\n top_nozzle.add_nuclide('Mn55', 0.0135739, 'wo')\n top_nozzle.add_nuclide('Cr52', 0.107931450781, 'wo')\n top_nozzle.add_s_alpha_beta('c_H_in_H2O')\n\n top_fa = openmc.Material(11, name='Top of fuel assemblies')\n top_fa.set_density('g/cm3', 3.044)\n top_fa.add_nuclide('H1', 0.0162913, 'wo')\n top_fa.add_nuclide('O16', 0.1292776, 'wo')\n top_fa.add_nuclide('B10', 5.25228e-5, 'wo')\n top_fa.add_nuclide('B11', 2.39272e-4, 'wo')\n top_fa.add_nuclide('Zr90', 0.43313403903, 'wo')\n top_fa.add_nuclide('Zr91', 0.09549277374, 'wo')\n top_fa.add_nuclide('Zr92', 0.14759527104, 'wo')\n top_fa.add_nuclide('Zr94', 0.15280552077, 'wo')\n top_fa.add_nuclide('Zr96', 0.02511169542, 'wo')\n top_fa.add_s_alpha_beta('c_H_in_H2O')\n\n bot_fa = openmc.Material(12, name='Bottom of fuel assemblies')\n bot_fa.set_density('g/cm3', 1.762)\n bot_fa.add_nuclide('H1', 0.0292856, 'wo')\n bot_fa.add_nuclide('O16', 0.2323919, 'wo')\n bot_fa.add_nuclide('B10', 9.44159e-5, 'wo')\n bot_fa.add_nuclide('B11', 4.30120e-4, 'wo')\n bot_fa.add_nuclide('Zr90', 0.3741373658, 'wo')\n bot_fa.add_nuclide('Zr91', 0.0824858164, 'wo')\n bot_fa.add_nuclide('Zr92', 0.1274914944, 'wo')\n bot_fa.add_nuclide('Zr94', 0.1319920622, 'wo')\n 
bot_fa.add_nuclide('Zr96', 0.0216912612, 'wo')\n bot_fa.add_s_alpha_beta('c_H_in_H2O')\n\n # Define the materials file.\n model.materials = (fuel, clad, cold_water, hot_water, rpv_steel,\n lower_rad_ref, upper_rad_ref, bot_plate,\n bot_nozzle, top_nozzle, top_fa, bot_fa)\n\n # Define surfaces.\n s1 = openmc.ZCylinder(r=0.41, surface_id=1)\n s2 = openmc.ZCylinder(r=0.475, surface_id=2)\n s3 = openmc.ZCylinder(r=0.56, surface_id=3)\n s4 = openmc.ZCylinder(r=0.62, surface_id=4)\n s5 = openmc.ZCylinder(r=187.6, surface_id=5)\n s6 = openmc.ZCylinder(r=209.0, surface_id=6)\n s7 = openmc.ZCylinder(r=229.0, surface_id=7)\n s8 = openmc.ZCylinder(r=249.0, surface_id=8, boundary_type='vacuum')\n\n s31 = openmc.ZPlane(z0=-229.0, surface_id=31, boundary_type='vacuum')\n s32 = openmc.ZPlane(z0=-199.0, surface_id=32)\n s33 = openmc.ZPlane(z0=-193.0, surface_id=33)\n s34 = openmc.ZPlane(z0=-183.0, surface_id=34)\n s35 = openmc.ZPlane(z0=0.0, surface_id=35)\n s36 = openmc.ZPlane(z0=183.0, surface_id=36)\n s37 = openmc.ZPlane(z0=203.0, surface_id=37)\n s38 = openmc.ZPlane(z0=215.0, surface_id=38)\n s39 = openmc.ZPlane(z0=223.0, surface_id=39, boundary_type='vacuum')\n\n # Define pin cells.\n fuel_cold = openmc.Universe(name='Fuel pin, cladding, cold water',\n universe_id=1)\n c21 = openmc.Cell(cell_id=21, fill=fuel, region=-s1)\n c22 = openmc.Cell(cell_id=22, fill=clad, region=+s1 & -s2)\n c23 = openmc.Cell(cell_id=23, fill=cold_water, region=+s2)\n fuel_cold.add_cells((c21, c22, c23))\n\n tube_cold = openmc.Universe(name='Instrumentation guide tube, '\n 'cold water', universe_id=2)\n c24 = openmc.Cell(cell_id=24, fill=cold_water, region=-s3)\n c25 = openmc.Cell(cell_id=25, fill=clad, region=+s3 & -s4)\n c26 = openmc.Cell(cell_id=26, fill=cold_water, region=+s4)\n tube_cold.add_cells((c24, c25, c26))\n\n fuel_hot = openmc.Universe(name='Fuel pin, cladding, hot water',\n universe_id=3)\n c27 = openmc.Cell(cell_id=27, fill=fuel, region=-s1)\n c28 = openmc.Cell(cell_id=28, fill=clad, region=+s1 & -s2)\n c29 = openmc.Cell(cell_id=29, fill=hot_water, region=+s2)\n fuel_hot.add_cells((c27, c28, c29))\n\n tube_hot = openmc.Universe(name='Instrumentation guide tube, hot water',\n universe_id=4)\n c30 = openmc.Cell(cell_id=30, fill=hot_water, region=-s3)\n c31 = openmc.Cell(cell_id=31, fill=clad, region=+s3 & -s4)\n c32 = openmc.Cell(cell_id=32, fill=hot_water, region=+s4)\n tube_hot.add_cells((c30, c31, c32))\n\n # Set positions occupied by guide tubes\n tube_x = np.array([5, 8, 11, 3, 13, 2, 5, 8, 11, 14, 2, 5, 8, 11, 14,\n 2, 5, 8, 11, 14, 3, 13, 5, 8, 11])\n tube_y = np.array([2, 2, 2, 3, 3, 5, 5, 5, 5, 5, 8, 8, 8, 8, 8,\n 11, 11, 11, 11, 11, 13, 13, 14, 14, 14])\n\n # Define fuel lattices.\n l100 = openmc.RectLattice(name='Fuel assembly (lower half)', lattice_id=100)\n l100.lower_left = (-10.71, -10.71)\n l100.pitch = (1.26, 1.26)\n l100.universes = np.tile(fuel_cold, (17, 17))\n l100.universes[tube_x, tube_y] = tube_cold\n\n l101 = openmc.RectLattice(name='Fuel assembly (upper half)', lattice_id=101)\n l101.lower_left = (-10.71, -10.71)\n l101.pitch = (1.26, 1.26)\n l101.universes = np.tile(fuel_hot, (17, 17))\n l101.universes[tube_x, tube_y] = tube_hot\n\n # Define assemblies.\n fa_cw = openmc.Universe(name='Water assembly (cold)', universe_id=5)\n c50 = openmc.Cell(cell_id=50, fill=cold_water, region=+s34 & -s35)\n fa_cw.add_cell(c50)\n\n fa_hw = openmc.Universe(name='Water assembly (hot)', universe_id=7)\n c70 = openmc.Cell(cell_id=70, fill=hot_water, region=+s35 & -s36)\n fa_hw.add_cell(c70)\n\n fa_cold = 
openmc.Universe(name='Fuel assembly (cold)', universe_id=6)\n c60 = openmc.Cell(cell_id=60, fill=l100, region=+s34 & -s35)\n fa_cold.add_cell(c60)\n\n fa_hot = openmc.Universe(name='Fuel assembly (hot)', universe_id=8)\n c80 = openmc.Cell(cell_id=80, fill=l101, region=+s35 & -s36)\n fa_hot.add_cell(c80)\n\n # Define core lattices\n l200 = openmc.RectLattice(name='Core lattice (lower half)', lattice_id=200)\n l200.lower_left = (-224.91, -224.91)\n l200.pitch = (21.42, 21.42)\n l200.universes = [\n [fa_cw]*21,\n [fa_cw]*21,\n [fa_cw]*7 + [fa_cold]*7 + [fa_cw]*7,\n [fa_cw]*5 + [fa_cold]*11 + [fa_cw]*5,\n [fa_cw]*4 + [fa_cold]*13 + [fa_cw]*4,\n [fa_cw]*3 + [fa_cold]*15 + [fa_cw]*3,\n [fa_cw]*3 + [fa_cold]*15 + [fa_cw]*3,\n [fa_cw]*2 + [fa_cold]*17 + [fa_cw]*2,\n [fa_cw]*2 + [fa_cold]*17 + [fa_cw]*2,\n [fa_cw]*2 + [fa_cold]*17 + [fa_cw]*2,\n [fa_cw]*2 + [fa_cold]*17 + [fa_cw]*2,\n [fa_cw]*2 + [fa_cold]*17 + [fa_cw]*2,\n [fa_cw]*2 + [fa_cold]*17 + [fa_cw]*2,\n [fa_cw]*2 + [fa_cold]*17 + [fa_cw]*2,\n [fa_cw]*3 + [fa_cold]*15 + [fa_cw]*3,\n [fa_cw]*3 + [fa_cold]*15 + [fa_cw]*3,\n [fa_cw]*4 + [fa_cold]*13 + [fa_cw]*4,\n [fa_cw]*5 + [fa_cold]*11 + [fa_cw]*5,\n [fa_cw]*7 + [fa_cold]*7 + [fa_cw]*7,\n [fa_cw]*21,\n [fa_cw]*21]\n\n l201 = openmc.RectLattice(name='Core lattice (lower half)', lattice_id=201)\n l201.lower_left = (-224.91, -224.91)\n l201.pitch = (21.42, 21.42)\n l201.universes = [\n [fa_hw]*21,\n [fa_hw]*21,\n [fa_hw]*7 + [fa_hot]*7 + [fa_hw]*7,\n [fa_hw]*5 + [fa_hot]*11 + [fa_hw]*5,\n [fa_hw]*4 + [fa_hot]*13 + [fa_hw]*4,\n [fa_hw]*3 + [fa_hot]*15 + [fa_hw]*3,\n [fa_hw]*3 + [fa_hot]*15 + [fa_hw]*3,\n [fa_hw]*2 + [fa_hot]*17 + [fa_hw]*2,\n [fa_hw]*2 + [fa_hot]*17 + [fa_hw]*2,\n [fa_hw]*2 + [fa_hot]*17 + [fa_hw]*2,\n [fa_hw]*2 + [fa_hot]*17 + [fa_hw]*2,\n [fa_hw]*2 + [fa_hot]*17 + [fa_hw]*2,\n [fa_hw]*2 + [fa_hot]*17 + [fa_hw]*2,\n [fa_hw]*2 + [fa_hot]*17 + [fa_hw]*2,\n [fa_hw]*3 + [fa_hot]*15 + [fa_hw]*3,\n [fa_hw]*3 + [fa_hot]*15 + [fa_hw]*3,\n [fa_hw]*4 + [fa_hot]*13 + [fa_hw]*4,\n [fa_hw]*5 + [fa_hot]*11 + [fa_hw]*5,\n [fa_hw]*7 + [fa_hot]*7 + [fa_hw]*7,\n [fa_hw]*21,\n [fa_hw]*21]\n\n # Define root universe.\n root = openmc.Universe(universe_id=0, name='root universe')\n c1 = openmc.Cell(cell_id=1, fill=l200, region=-s6 & +s34 & -s35)\n c2 = openmc.Cell(cell_id=2, fill=l201, region=-s6 & +s35 & -s36)\n c3 = openmc.Cell(cell_id=3, fill=bot_plate, region=-s7 & +s31 & -s32)\n c4 = openmc.Cell(cell_id=4, fill=bot_nozzle, region=-s5 & +s32 & -s33)\n c5 = openmc.Cell(cell_id=5, fill=bot_fa, region=-s5 & +s33 & -s34)\n c6 = openmc.Cell(cell_id=6, fill=top_fa, region=-s5 & +s36 & -s37)\n c7 = openmc.Cell(cell_id=7, fill=top_nozzle, region=-s5 & +s37 & -s38)\n c8 = openmc.Cell(cell_id=8, fill=upper_rad_ref, region=-s7 & +s38 & -s39)\n c9 = openmc.Cell(cell_id=9, fill=bot_nozzle, region=+s6 & -s7 & +s32 & -s38)\n c10 = openmc.Cell(cell_id=10, fill=rpv_steel, region=+s7 & -s8 & +s31 & -s39)\n c11 = openmc.Cell(cell_id=11, fill=lower_rad_ref, region=+s5 & -s6 & +s32 & -s34)\n c12 = openmc.Cell(cell_id=12, fill=upper_rad_ref, region=+s5 & -s6 & +s36 & -s38)\n root.add_cells((c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12))\n\n # Assign root universe to geometry\n model.geometry.root_universe = root\n\n model.settings.batches = 10\n model.settings.inactive = 5\n model.settings.particles = 100\n model.settings.source = openmc.Source(space=openmc.stats.Box(\n [-160, -160, -183], [160, 160, 183]))\n\n plot = openmc.Plot()\n plot.origin = (125, 125, 0)\n plot.width = (250, 250)\n plot.pixels = (3000, 
3000)\n plot.color_by = 'material'\n model.plots.append(plot)\n\n return model", "title": "" }, { "docid": "e38c691b3c7b5d3db4ec3e58594b72d7", "score": "0.544043", "text": "def create_some_object(ii,y_co,mass):\n name = ''\n diff = (rand.uniform(0,1),rand.uniform(0,1),rand.uniform(0,1))\n spec = (rand.uniform(0,1),rand.uniform(0,1),rand.uniform(0,1))\n alp = rand.uniform(0,1)\n if ii % 5 == 0:\n x_co = 8.1\n name = \"obj_torus\"+str(ii)\n bpy.ops.mesh.primitive_torus_add(location = (x_co,y_co,2))\n selobj = bpy.context.active_object\n selobj.name = name\n selobj.game.mass = mass\n selobj.scale = (1.5,1.5,1.5)\n if ii % 5 == 1:\n x_co = 3.5\n name = \"obj_cube\"+str(ii)\n bpy.ops.mesh.primitive_cube_add(location = (x_co,y_co,2))\n selobj = bpy.context.active_object\n selobj.name = name\n selobj.game.mass = mass\n selobj.scale = (2,2,2)\n if ii % 5 == 2:\n x_co =-1\n name = \"obj_cone\"+str(ii)\n bpy.ops.mesh.primitive_cone_add(location = (x_co,y_co,2))\n selobj = bpy.context.active_object\n selobj.name = name\n selobj.game.mass = mass\n selobj.scale = (2,2,2)\n if ii % 5 == 3:\n x_co = -5.4\n name = \"obj_sphere\"+str(ii)\n bpy.ops.mesh.primitive_uv_sphere_add(location = (x_co,y_co,2))\n selobj = bpy.context.active_object\n selobj.name = name\n selobj.game.mass = mass\n selobj.scale = (2,2,2)\n if ii % 5 == 4:\n x_co = -9.0\n name = \"obj_monkey\"+str(ii)\n bpy.ops.mesh.primitive_monkey_add(location = (x_co,y_co,2))\n selobj = bpy.context.active_object\n selobj.name = name\n selobj.game.mass = mass\n selobj.scale = (1.2,1.2,1.2)\n tempmat = makeMaterial(name, diff, spec, alp)\n setMaterial(bpy.context.active_object,tempmat)", "title": "" }, { "docid": "cd1e72f4d86cc8e24bc38b1df1620380", "score": "0.5433962", "text": "def __init__(self, make, model, year):\n super().__init__(make, model, year)\n self.battery_size = 70", "title": "" }, { "docid": "96454f002910b607afedac5f1dc9f3dd", "score": "0.54304534", "text": "def forward_model(f_grid, atm_fields_compact, verbosity=0):\n ws = pyarts.workspace.Workspace(verbosity=0)\n ws.water_p_eq_agendaSet()\n ws.PlanetSet(option=\"Earth\")\n ws.verbositySetScreen(ws.verbosity, verbosity)\n\n # standard emission agenda\n ws.iy_main_agendaSet(option=\"Emission\")\n\n # cosmic background radiation\n ws.iy_space_agendaSet(option=\"CosmicBackground\")\n\n # standard surface agenda (i.e., make use of surface_rtprop_agenda)\n ws.iy_surface_agendaSet(option=\"UseSurfaceRtprop\")\n\n # sensor-only path\n ws.ppath_agendaSet(option=\"FollowSensorLosPath\")\n\n # no refraction\n ws.ppath_step_agendaSet(option=\"GeometricPath\")\n\n # Non reflecting surface\n ws.surface_rtprop_agendaSet(\n option=\"Specular_NoPol_ReflFix_SurfTFromt_surface\")\n\n # Number of Stokes components to be computed\n ws.IndexSet(ws.stokes_dim, 1)\n\n #########################################################################\n\n # Definition of absorption species\n ws.abs_speciesSet(species=[\n \"H2O, H2O-SelfContCKDMT400, H2O-ForeignContCKDMT400\",\n \"O2-TRE05\",\n \"N2, N2-CIAfunCKDMT252, N2-CIArotCKDMT252\",\n ])\n\n ws.abs_lines_per_speciesReadSpeciesSplitCatalog(basename=\"lines/\")\n\n # Load CKDMT400 model data\n ws.ReadXML(ws.predefined_model_data, \"model/mt_ckd_4.0/H2O.xml\")\n\n # ws.abs_lines_per_speciesLineShapeType(option=lineshape)\n ws.abs_lines_per_speciesCutoff(option=\"ByLine\", value=750e9)\n # ws.abs_lines_per_speciesNormalization(option=normalization)\n\n ws.VectorSetConstant(ws.surface_scalar_reflectivity, 1, 0.4)\n\n # Set the frequency grid\n ws.f_grid = f_grid\n\n # 
Throw away lines outside f_grid\n ws.abs_lines_per_speciesCompact()\n\n # No sensor properties\n ws.sensorOff()\n\n # We select here to use Planck brightness temperatures\n ws.StringSet(ws.iy_unit, \"PlanckBT\")\n\n #########################################################################\n\n # Atmosphere and surface\n ws.AtmosphereSet1D()\n ws.atm_fields_compact = atm_fields_compact\n ws.atm_fields_compactAddConstant(ws.atm_fields_compact, \"abs_species-N2\",\n 0.78, 0, [\"abs_species-H2O\"])\n ws.atm_fields_compactAddConstant(ws.atm_fields_compact, \"abs_species-O2\",\n 0.21, 0, [\"abs_species-H2O\"])\n ws.AtmFieldsAndParticleBulkPropFieldFromCompact()\n\n ws.Extract(ws.z_surface, ws.z_field, 0)\n ws.Extract(ws.t_surface, ws.t_field, 0)\n\n # Definition of sensor position and line of sight (LOS)\n ws.MatrixSet(ws.sensor_pos, np.array([[10e3]]))\n ws.MatrixSet(ws.sensor_los, np.array([[0]]))\n ws.sensorOff()\n\n # Jacobian calculation\n ws.jacobianInit()\n ws.jacobianAddAbsSpecies(\n g1=ws.p_grid,\n g2=ws.lat_grid,\n g3=ws.lon_grid,\n species=\"H2O, H2O-SelfContCKDMT400, H2O-ForeignContCKDMT400\",\n unit=\"vmr\",\n )\n ws.jacobianClose()\n\n # Clearsky = No scattering\n ws.cloudboxOff()\n\n # on-the-fly absorption\n ws.propmat_clearsky_agendaAuto()\n\n # Perform RT calculations\n ws.lbl_checkedCalc()\n ws.atmfields_checkedCalc()\n ws.atmgeom_checkedCalc()\n ws.cloudbox_checkedCalc()\n ws.sensor_checkedCalc()\n\n ws.yCalc()\n\n return ws.y.value[:].copy(), ws.jacobian.value[:].copy()", "title": "" }, { "docid": "cd2fa7e69f447c18eb36afe998fdb08d", "score": "0.5421357", "text": "def main():\n stage = Usd.Stage.CreateInMemory()\n stage.SetStartTimeCode(0)\n stage.SetEndTimeCode(2)\n\n prim = stage.DefinePrim(\"/Set\")\n non_template_set_name = \"non_template_clips\"\n model = Usd.ClipsAPI(prim)\n model.SetClipActive([(0.0, 0)], non_template_set_name)\n model.SetClipAssetPaths(\n [Sdf.AssetPath(\"./non_template_clip.usda\")], non_template_set_name\n )\n model.SetClipPrimPath(\"/NonTemplate\", non_template_set_name)\n\n template_set_name = \"template_clips\"\n model.SetClipTemplateAssetPath(\"./template_clip.##.usda\", template_set_name)\n model.SetClipTemplateEndTime(2, template_set_name)\n model.SetClipTemplateStartTime(0, template_set_name)\n model.SetClipTemplateStride(1, template_set_name)\n model.SetClipPrimPath(\"/Template\", template_set_name)\n\n prim.GetReferences().AddReference(assetPath=\"./set.usda\", primPath=\"/Set\")\n\n print(stage.GetRootLayer().ExportToString())", "title": "" }, { "docid": "afd40b6d51940a11430eaf5f579c6d95", "score": "0.5414305", "text": "def __new__(cls,*arg,**kwarg):\n \n cls.FREEPARAMETERS_STD = [\"a%d\"%(i+1) for i in range(len(cls.STANDARDIZATION))]\n cls.FREEPARAMETERS = [\"M0\"]+cls.FREEPARAMETERS_STD\n return super(ModelStandardization,cls).__new__(cls)", "title": "" }, { "docid": "b518f9f879242aeb4a2e49ce0dda73cf", "score": "0.54063696", "text": "def __init__(self, species, qty):\n super().__init__(species, qty, \"domestic\", 0.08)", "title": "" }, { "docid": "c829db1d0eb021c2af1a2733b81b68b4", "score": "0.5396692", "text": "def create_model( fullMoleculeList, moleculePartition, parameterMap, includeSoluteDielectricAsParameter ):\n\n # Define deterministic functions for hydration free energies.\n\n model = parameterMap['model']\n parameters = parameterMap['stochastic']\n radiusParameterMap = parameterMap['radiusParameterMap']\n gammaParameterMap = parameterMap['gammaParameterMap']\n\n for (molecule_index, molecule) in enumerate(fullMoleculeList):\n\n 
if( moleculePartition[molecule_index] == 1 ):\n \n molecule_name = molecule.GetTitle()\n variable_name = \"dg_gbvi_%08d\" % molecule_index\n \n # Determine which parameters are involved in this molecule to limit number of parents for caching.\n \n parents = dict()\n for atom in molecule.GetAtoms():\n atomtype = atom.GetStringData(\"gbvi_type\") # GBVI atomtype\n for parameter_name in ['gamma', 'radius']:\n stochastic_name = '%s_%s' % (atomtype,parameter_name)\n if( stochastic_name in parameters ):\n parents[stochastic_name] = parameters[stochastic_name]\n else:\n print \"create_model Warning: parameter=%s missing for %40s\" % (stochastic_name, molecule_name )\n \n if( includeSoluteDielectricAsParameter ):\n parents['soluteDielectric'] = parameters['soluteDielectric']\n \n print \"create_model %40s: %s\" % (molecule_name, parents.keys() )\n \n # Create deterministic variable for computed hydration free energy.\n \n #if( energyCalculations == 'Swig' ):\n function = hydration_energy_factory_swig(molecule_index, radiusParameterMap, gammaParameterMap)\n \n # if( energyCalculations == 'OpenMM' ):\n # function = hydration_energy_factory_OpenMM(molecule)\n #\n # if( energyCalculations == 'Test' ):\n # function = hydration_energy_factory_test( molecule, molecule_index, radiusParameterMap, gammaParameterMap)\n \n model[variable_name] = pymc.Deterministic(eval=function,\n name=variable_name,\n parents=parents,\n doc=molecule_name,\n trace=True,\n verbose=1,\n dtype=float,\n plot=False,\n cache_depth=2)\n \n # Define error model\n log_sigma_min = math.log(0.01) # kcal/mol\n log_sigma_max = math.log(10.0) # kcal/mol\n log_sigma_guess = math.log(1.0) # kcal/mol\n model['log_sigma'] = pymc.Uniform('log_sigma', lower=log_sigma_min, upper=log_sigma_max, value=log_sigma_guess)\n model['sigma'] = pymc.Lambda('sigma', lambda log_sigma=model['log_sigma'] : math.exp(log_sigma) ) \n model['tau'] = pymc.Lambda('tau', lambda sigma=model['sigma'] : sigma**(-2) )\n\n for (molecule_index, molecule) in enumerate(fullMoleculeList):\n\n if( moleculePartition[molecule_index] == 1 ):\n molecule_name = molecule.GetTitle()\n variable_name = \"dg_exp_%08d\" % molecule_index\n dg_exp = float(OEGetSDData(molecule, 'dG(exp)')) # observed hydration free energy in kcal/mol\n print \"Mol=%4d dG=%15.7e %s \" % (molecule_index, dg_exp, molecule_name )\n sys.stdout.flush()\n model[variable_name] = pymc.Normal(mu=model['dg_gbvi_%08d' % molecule_index], tau=model['tau'], value=dg_exp, observed=True) \n\n return", "title": "" }, { "docid": "7565df4cc08ccad217eeaec3123665dc", "score": "0.53920263", "text": "def create_snowmodel(self, hspfmodel, verbose = True, vverbose = False):\n\n filename = hspfmodel.filename + '_snow'\n\n submodel = HSPFModel()\n\n submodel.build_from_existing(hspfmodel, filename, directory = \n hspfmodel.filepath[:-1],\n verbose = vverbose)\n\n # simplify the landtypes to one developed which contains one implnd and \n # one perlnd (since they are all the same)\n\n for subbasin in submodel.subbasins:\n year = min(submodel.subbasins[subbasin].landuse.keys())\n submodel.subbasins[subbasin].landuse = {year: {'Developed': 100}}\n\n submodel.build()\n\n # get rid of the reaches\n\n submodel.rchreses = []\n\n # add in the modules\n\n submodel.add_temp()\n \n densities = [o.RDENPF \n for o in hspfmodel.perlnds + hspfmodel.implnds]\n depths = [o.packsnow / o.RDENPF \n for o in hspfmodel.perlnds + hspfmodel.implnds]\n\n depth = sum(depths) / len(depths)\n density = sum(densities) / len(densities)\n\n submodel.add_snow(depth = 
depth, density = density) \n \n # overwrite the time series dictionaries for the model\n\n for subbasin in hspfmodel.subbasins:\n if subbasin in submodel.subbasins:\n start, tstep, data = hspfmodel.precipitations['%d' % subbasin]\n submodel.add_timeseries('precipitation', '%d' % subbasin, \n start, data)\n submodel.assign_subbasin_timeseries('precipitation', subbasin, \n '%d' % subbasin)\n\n start, tstep, data = hspfmodel.temperatures[hspfmodel.description]\n\n submodel.add_timeseries('temperature', submodel.description, \n start, data)\n\n start, tstep, data = hspfmodel.dewpoints[hspfmodel.description]\n\n submodel.add_timeseries('dewpoint', submodel.description, start, data)\n\n submodel.assign_watershed_timeseries('temperature', \n submodel.description)\n submodel.assign_watershed_timeseries('dewpoint',\n submodel.description)\n\n self.snowmodel = submodel.filepath + submodel.filename\n\n with open(self.snowmodel, 'wb') as f: pickle.dump(submodel, f)", "title": "" }, { "docid": "4ea41ca331df38389e0231e0e4df605e", "score": "0.53883773", "text": "def __init__(self, name=None):\n\n # Always call parent class' constructor FIRST:\n CoupledDEVS.__init__(self, name)\n\n # Declare the coupled model's output ports:\n # Autonomous, so no output ports\n #self.OUT = self.addOutPort(name=\"OUT\")\n\n # Declare the coupled model's sub-models:\n\n # The Policeman generating interrupts \n self.policeman = self.addSubModel(Policeman(name=\"policeman\"))\n\n # The TrafficLight \n self.trafficLight = self.addSubModel(TrafficLight(name=\"trafficLight\"))\n\n # Only connect ...\n self.connectPorts(self.policeman.OUT, self.trafficLight.INTERRUPT)\n #self.connectPorts(self.trafficLight.OBSERVED, self.OUT)", "title": "" }, { "docid": "2a6e185ce95912ec70c2c39b0cfe58ff", "score": "0.5372091", "text": "def calibrate_submodel(self):\n \n model = self.model \n cell = self.knowledge_base.cell\n nucleus = model.compartments.get_one(id='n')\n mitochondrion = model.compartments.get_one(id='m')\n cytoplasm = model.compartments.get_one(id='c')\n\n beta = self.options.get('beta')\n\n Avogadro = self.model.parameters.get_or_create(\n id='Avogadro',\n type=None,\n value=scipy.constants.Avogadro,\n units=unit_registry.parse_units('molecule mol^-1')) \n\n rnas_kb = cell.species_types.get(__type=wc_kb.eukaryote.TranscriptSpeciesType)\n undetermined_model_kcat = []\n determined_kcat = []\n for rna_kb, reaction in zip(rnas_kb, self.submodel.reactions):\n\n init_species_counts = {}\n \n modifier_species = self._degradation_modifier[reaction.name] \n init_species_counts[modifier_species.gen_id()] = modifier_species.distribution_init_concentration.mean\n \n rna_kb_compartment_id = rna_kb.species[0].compartment.id\n if rna_kb_compartment_id == 'c':\n rna_compartment = cytoplasm\n degradation_compartment = cytoplasm\n else:\n rna_compartment = mitochondrion\n degradation_compartment = mitochondrion \n\n rna_reactant = model.species_types.get_one(id=rna_kb.id).species.get_one(compartment=rna_compartment)\n\n half_life = rna_kb.properties.get_one(property='half-life').get_value()\n mean_concentration = rna_reactant.distribution_init_concentration.mean\n\n average_rate = utils.calc_avg_deg_rate(mean_concentration, half_life)\n \n for species in reaction.get_reactants():\n\n init_species_counts[species.gen_id()] = species.distribution_init_concentration.mean\n\n if model.parameters.get(id='K_m_{}_{}'.format(reaction.id, species.species_type.id)):\n model_Km = model.parameters.get_one(\n id='K_m_{}_{}'.format(reaction.id, 
species.species_type.id))\n if species.distribution_init_concentration.mean:\n model_Km.value = beta * species.distribution_init_concentration.mean \\\n / Avogadro.value / species.compartment.init_volume.mean\n model_Km.comments = 'The value was assumed to be {} times the concentration of {} in {}'.format(\n beta, species.species_type.id, species.compartment.name)\n else:\n model_Km.value = 1e-05\n model_Km.comments = 'The value was assigned to 1e-05 because the concentration of ' +\\\n '{} in {} was zero'.format(species.species_type.id, species.compartment.name)\n\n model_kcat = model.parameters.get_one(id='k_cat_{}'.format(reaction.id))\n\n if average_rate: \n model_kcat.value = 1.\n eval_rate_law = reaction.rate_laws[0].expression._parsed_expression.eval({\n wc_lang.Species: init_species_counts,\n wc_lang.Compartment: {\n rna_compartment.id: rna_compartment.init_volume.mean * \\\n rna_compartment.init_density.value,\n degradation_compartment.id: degradation_compartment.init_volume.mean * \\\n degradation_compartment.init_density.value}\n })\n if eval_rate_law:\n model_kcat.value = average_rate / eval_rate_law\n determined_kcat.append(model_kcat.value)\n else:\n undetermined_model_kcat.append(model_kcat) \n else: \n undetermined_model_kcat.append(model_kcat)\n \n median_kcat = numpy.median(determined_kcat)\n for model_kcat in undetermined_model_kcat:\n model_kcat.value = median_kcat\n model_kcat.comments = 'Set to the median value because it could not be determined from data' \n\n print('RNA degradation submodel has been generated')", "title": "" }, { "docid": "6bbfcf5951a375728bdff97674fbc037", "score": "0.5360838", "text": "def pole_bending_modeling(model1,leftsupportx,rightsupportx,supporty,leftplatecenterx,rightplatecenterx,plateheighty,lengthx,heighty,stiffness):\r\n \r\n \r\n # add loading plate\r\n model1 = pole_contact_plate(model1,'rightloadplate',(rightplatecenterx,heighty,0),lengthx,1000,plateheighty,stiffness)\r\n model1 = pole_contact_plate(model1,'leftloadplate',(leftplatecenterx,heighty,0),lengthx,1000,plateheighty,stiffness)\r\n \r\n \r\n # add support\r\n model1 = add_support(model1,'leftsupport',leftsupportx,supporty,0,lengthx * 2,10000,default=[1,1,1,0,0,0]) # due to fact that only one side of plate will be selected\r\n model1 = add_support(model1,'rightsupport',rightsupportx,supporty,0,lengthx * 2,10000,default=[0,1,1,0,0,0]) # big width number to seelct all nodes\r\n \r\n \r\n model1 = add_material(model1)\r\n model1.table('loadtable',1,['time'],[[0,0],[1,1]])\r\n model1.load('leftrightload',{'xyz':[0,1,0,0,0,0],'DOF':6,'scalar':-1,'setnamelist':['leftloadplate_plate','rightloadplate_plate'],'tabletag':'loadtable'})\r\n \r\n\r\n model1.section('sec_1','shell_section',{'thickness':0.1875})\r\n\r\n \r\n model1.property('prop1','quad4',{'type':75,'thinkness':0.01})\r\n model1.property('prop_dent','quad4',{'type':75})\r\n \r\n \r\n model1.elemset_sub_setname('surface_elements','dentelems')\r\n \r\n model1.link_prop_conn('prop1',setnamelist=['surface_elements-dentelems'])\r\n model1.link_prop_conn('prop_dent',setnamelist=['dentelems','extension_elements_surface_leftend','extension_elements_surface_rightend'])\r\n \r\n model1.link_sec_prop('sec_1','prop1')\r\n model1.link_sec_prop('sec_1','prop_dent')\r\n \r\n # associate the material\r\n model1.link_mat_prop('pole_alum','prop1')\r\n model1.link_mat_prop('pole_alum_dent','prop_dent')\r\n \r\n 
model1.loadcase('loadcase1','static_arclength',{'boundarylist':['leftsupport','rightsupport','leftrightload'],'para':{'nstep':50}})\r\n \r\n #model1.job('job1','static_job',{'loadcaselist':['loadcase0','loadcase1'],'submit':True,'reqresultslist':['stress','total_strain','plastic_strain']})\r\n model1.job('job1','static_job',{'loadcaselist':['loadcase0','loadcase1'],'submit':False,'reqresultslist':['stress','total_strain','plastic_strain']})\r\n \r\n return model1", "title": "" }, { "docid": "931a784adbb0e80ab049882aa7d0a382", "score": "0.53536534", "text": "def scaffold():\n pass", "title": "" }, { "docid": "772924833d87c7975ec747b5eb019b0b", "score": "0.5339107", "text": "def model():\n return DBC14(dist_jb=10, mag=6, v_s30=600, depth_hyp=10, mechanism='SS')", "title": "" }, { "docid": "f5bb6a8b9a604f242b3c2505439154c4", "score": "0.53274566", "text": "def __init__(self):\n \n super(Sellar, self).__init__()\n \n #add the discipline components to the assembly\n self.add('dis1', SellarDiscipline1())\n self.add('dis2', SellarDiscipline2())\n \n #START OF MDAO Problem Definition\n #Global Des Vars\n self.add_parameter((\"dis1.z1\",\"dis2.z1\"),low=-10,high=10)\n self.add_parameter((\"dis1.z2\",\"dis2.z2\"),low=0,high=10)\n \n #Local Des Vars \n self.add_parameter(\"dis1.x1\",low=0,high=10)\n \n #Coupling Vars\n self.add_coupling_var(\"dis2.y1\",\"dis1.y1\")\n self.add_coupling_var(\"dis1.y2\",\"dis2.y2\")\n \n self.add_objective('(dis1.x1)**2 + dis1.z2 + dis1.y1 + math.exp(-dis2.y2)')\n self.add_constraint('3.16 < dis1.y1')\n self.add_constraint('dis2.y2 < 24.0')\n \n #END OF MDAO Problem Definition\n \n self.dis1.z1 = self.dis2.z1 = 5.0\n self.dis1.z2 = self.dis2.z2 = 2.0\n self.dis1.x1 = 1.0\n self.dis1.y2 = 3.0", "title": "" }, { "docid": "46f38cf86d36765820ac82e2cc6bce10", "score": "0.53215253", "text": "def assembly_model():\n\n model = openmc.model.Model()\n\n # Create fuel assembly Lattice\n pitch = 21.42\n assembly = openmc.RectLattice(name='Fuel Assembly')\n assembly.pitch = (pitch/17, pitch/17)\n assembly.lower_left = (-pitch/2, -pitch/2)\n\n # Create array indices for guide tube locations in lattice\n gt_pos = np.array([\n [2, 5], [2, 8], [2, 11],\n [3, 3], [3, 13],\n [5, 2], [5, 5], [5, 8], [5, 11], [5, 14],\n [8, 2], [8, 5], [8, 8], [8, 11], [8, 14],\n [11, 2], [11, 5], [11, 8], [11, 11], [11, 14],\n [13, 3], [13, 13],\n [14, 5], [14, 8], [14, 11]\n ])\n\n # Create 17x17 array of universes. First we create a 17x17 array all filled\n # with the fuel pin universe. Then, we replace the guide tube positions with\n # the guide tube pin universe (note the use of numpy fancy indexing to\n # achieve this).\n assembly.universes = np.full((17, 17), fuel_pin())\n assembly.universes[gt_pos[:, 0], gt_pos[:, 1]] = guide_tube_pin()\n\n # Create outer boundary of the geometry to surround the lattice\n outer_boundary = openmc.model.rectangular_prism(\n pitch, pitch, boundary_type='reflective')\n\n # Create a cell filled with the lattice\n main_cell = openmc.Cell(fill=assembly, region=outer_boundary)\n\n # Finally, create geometry by providing a list of cells that fill the root\n # universe\n model.geometry = openmc.Geometry([main_cell])\n\n model.settings.batches = 150\n model.settings.inactive = 50\n model.settings.particles = 1000\n model.settings.source = openmc.Source(space=openmc.stats.Box(\n (-pitch/2, -pitch/2, -1),\n (pitch/2, pitch/2, 1),\n only_fissionable=True\n ))\n\n # NOTE: We never actually created a Materials object. 
When you export/run\n # using the Model object, if no materials were assigned it will look through\n # the Geometry object and automatically export any materials that are\n # necessary to build the model.\n return model", "title": "" }, { "docid": "50e38c22bacf8f6aa559c93e282dc4a5", "score": "0.53204226", "text": "def _configure(self):\n SpatialDBObj._configure(self)\n self.label(\"SCEC CVM-H\")\n self.dataDir(self.inventory.dataDir)\n self.minVs(self.inventory.minVs.value)\n self.squash(self.inventory.squash, self.inventory.squashLimit.value)\n return", "title": "" }, { "docid": "ed02d4bfd211d2473c9fc1c40d0d8dfa", "score": "0.52838945", "text": "def __init__(self, make, model, year, battery=70):\n super().__init__(make, model, year)\n self.battery_size = battery", "title": "" }, { "docid": "82f144d6691eadf5238b16e0bebcbfe2", "score": "0.527416", "text": "def __init__(self, make, model, year):\n\n super().__init__(make, model, year) \n self.battery_size = 75 # Here, We add an extra attribute to our child class. ", "title": "" }, { "docid": "82f144d6691eadf5238b16e0bebcbfe2", "score": "0.527416", "text": "def __init__(self, make, model, year):\n\n super().__init__(make, model, year) \n self.battery_size = 75 # Here, We add an extra attribute to our child class. ", "title": "" }, { "docid": "a14b8f584528aa2d8692bd34af5d91c0", "score": "0.5255351", "text": "def sol553(design_parameters):\r\n prt('Design governing by this mantras (in oder of importance): \\n size, cost, stall margin, off.design', 'blue')\r\n pass", "title": "" }, { "docid": "29a00f4e7e1dfca61dafd9bc95836c98", "score": "0.52360165", "text": "def construct_model(self):\r\n\r\n # Initialise model object\r\n m = ConcreteModel()\r\n\r\n # Add component allowing dual variables to be imported\r\n m.dual = Suffix(direction=Suffix.IMPORT)\r\n\r\n # Define sets\r\n m = self.components.define_sets(m)\r\n\r\n # Define parameters common to unit commitment sub-problems and investment plan\r\n m = self.components.define_parameters(m)\r\n\r\n # Define parameters specific to unit commitment sub-problem\r\n m = self.define_parameters(m)\r\n\r\n # Define variables\r\n m = self.define_variables(m)\r\n\r\n # Define expressions\r\n m = self.define_expressions(m)\r\n\r\n # Define constraints\r\n m = self.define_constraints(m)\r\n\r\n # Define objective\r\n m = self.define_objective(m)\r\n\r\n return m", "title": "" }, { "docid": "799f4ce017f5053655d21a467eea6d34", "score": "0.5233242", "text": "def Schemata():", "title": "" }, { "docid": "6bca46ea6ca5bae506893a67f20e0a74", "score": "0.52153826", "text": "def main():\n\n # We get the user model choice.\n selected_model = model_selection()\n\n # We print equations to show the different variables to fill.\n print(selected_model[\"class\"].eq_to_string())\n\n # We create an object from the model chosen\n model_obj = selected_model[\"class\"]()\n\n # We write the output to the file\n model_obj.write_file()", "title": "" }, { "docid": "0d0b8b9a11a3a040e38d0968d79837dd", "score": "0.52152205", "text": "def model_defense():\n pass", "title": "" }, { "docid": "58f692f2614373b660e1a4cf0caea7f5", "score": "0.51889247", "text": "def __init__(self, size_float=0.0):\n Organ.__init__(self,'Heart',size_float)", "title": "" }, { "docid": "e4eb460f7f049c91bbb1badff8242642", "score": "0.51871455", "text": "def test_create_superModel(warnings = True, stdout_msgs = True):\n # Model path\n model_path = home_dir + 'work/models/Escherichia_coli/iJO1366/'\n\n model_organism = organism(id = 'Ecoli', name = 'Escherichia 
coli',domain = 'Bacteria', genus = 'Escherichia', species = 'coli', strain = 'MG1655')\n\n flux_bounds_dict = {'EX_glc(e)':[-100,1000], 'EX_o2(e)':[-200,1000]}\n flux_bounds_filename = model_path + 'iJO1366_minimal_glucose_aerobic.py'\n\n # Orignal iJo1266 model\n model = create_model(model_organism = model_organism, model_info = {'id':'iJO1366', 'file_format':'sbml', 'model_filename':model_path + 'iJO1366_updated.xml', 'biomassrxn_id':'Ec_biomass_iJO1366_core_53p95M'}, growthMedium_flux_bounds = {'flux_bounds_filename':flux_bounds_filename, 'flux_bounds_dict': flux_bounds_dict}, validate = True, stdout_msgs = True, warnings = True) \n\n model.organism.ModelSEED_type = 'bacteria_GramNegative'\n\n \"\"\"\n #rxn_id = 'GAPD'\n #rxn_id = 'ZN2abcpp'\n rxn_id = 'VALtex'\n get_cpds_ModelSEED_id(cpds_list = model.reactions_by_id[rxn_id].compounds)\n for cpd in model.reactions_by_id[rxn_id].compounds:\n print cpd.id,': ',cpd.ModelSEED_id,' stoic = ',model.reactions_by_id[rxn_id].stoichiometry[cpd]\n rxn_ModelSEED_id, ModelSEED_id_found_by = match_rxn_eqn(rxn = model.reactions_by_id[rxn_id]) \n print 'rxn_ModelSEED_id = {} , ModelSEED_id_found_by = {}\\n'.format(rxn_ModelSEED_id, ModelSEED_id_found_by)\n \"\"\"\n\n print '\\n----- Getting ModelSEED ids ----'\n get_cpds_ModelSEED_id(cpds_list = model.compounds)\n get_rxns_ModelSEED_id(rxns_list = model.reactions)\n sys.stdout.flush()\n\n \"\"\"\n #--- Text exporiting to pydict and importing back ----\n # Save the model into a python dictionary\n model.export(output_format = 'pydict', output_filename = 'iJO1366.py')\n\n # Re-import the pydict model\n model = create_model(model_info = {'id':'iJO1366', 'file_format':'pydict', 'model_filename':'iJO1366.py', 'biomassrxn_id':'Ec_biomass_iJO1366_core_53p95M'}, growthMedium_flux_bounds = {'flux_bounds_filename':flux_bounds_filename, 'flux_bounds_dict': flux_bounds_dict}, validate = True, stdout_msgs = True, warnings = True) \n \"\"\"\n\n print '\\n----- Creating super_model ----'\n sys.stdout.flush()\n\n super_model = create_superModel_from_ModelSEED(original_model = model, standard_to_model_compartID_map = {'c':'c','e':'e','p':'p'}, validate = True)\n print 'super_model statistics: # of compounds (total/original/external) = {}/{}/{} , # of reactions (total/original/external) = {}/{}/{}'.format(len(super_model.compounds), len([c for c in super_model.compounds if not c.external]), len([c for c in super_model.compounds if c.external]), len(super_model.reactions), len([r for r in super_model.reactions if not r.external]), len([r for r in super_model.reactions if r.external])) \n\n print '\\n----- fba with super_model ----'\n for rxn in [r for r in super_model.reactions if r.external]:\n rxn.flux_bounds = [0,0]\n for rxn in super_model.reactions:\n rxn.objective_coefficient = 0\n super_model.reactions_by_id['Ec_biomass_iJO1366_core_53p95M'].objective_coefficient = 1\n super_model.fba()\n\n print '\\n----- Exporting super_model to pydict ----'\n # Export to a pydict model\n super_model.export(output_format = 'pydict', output_filename = 'super_model_iJO1366.py')\n\n print '\\n----- Re-importing super_model from pydict ----'\n # Re-import the pydict model\n super_modelmodel = create_model(model_info = {'id':'super_model_iJO1366', 'file_format':'pydict', 'model_filename':'super_model_iJO1366.py', 'biomassrxn_id':'Ec_biomass_iJO1366_core_53p95M'}, growthMedium_flux_bounds = {'flux_bounds_filename':flux_bounds_filename, 'flux_bounds_dict': dict(flux_bounds_dict.items() + [(r.id,[0,0]) for r in super_model.reactions if 
r.external])}, validate = True, stdout_msgs = True, warnings = True)", "title": "" }, { "docid": "bb20c49d693b341d5afb650a0d189b3c", "score": "0.5186923", "text": "def __init__(self, number_of_cheeses, number_of_stools):\n self.number_of_cheeses = number_of_cheeses\n self.number_of_stools = number_of_stools\n self.model = TOAHModel(self.number_of_stools)\n self.model.fill_first_stool(self.number_of_cheeses)", "title": "" }, { "docid": "0027ae7e48bb44a362a776ed4977c263", "score": "0.51864123", "text": "def createSynthModel():\n # Create the synthetic model\n world = mt.createCircle(boundaryMarker=-1, nSegments=64)\n tri = mt.createPolygon([[-0.8, -0], [-0.5, -0.7], [0.7, 0.5]],\n isClosed=True, area=0.0015)\n c1 = mt.createCircle(radius=0.2, pos=[-0.2, 0.5], nSegments=32,\n area=0.0025, marker=3)\n c2 = mt.createCircle(radius=0.2, pos=[0.32, -0.3], nSegments=32,\n area=0.0025, marker=3)\n\n poly = mt.mergePLC([world, tri, c1, c2])\n\n poly.addRegionMarker([0.0, 0, 0], 1, area=0.0015)\n poly.addRegionMarker([-0.9, 0, 0], 2, area=0.0015)\n\n c = mt.createCircle(radius=0.99, nSegments=16, start=np.pi, end=np.pi*3)\n [poly.createNode(p.pos(), -99) for p in c.nodes()]\n mesh = pg.meshtools.createMesh(poly, q=34.4, smooth=[1, 10])\n mesh.scale(1.0/5.0)\n mesh.rotate([0., 0., 3.1415/3])\n mesh.rotate([0., 0., 3.1415])\n\n petro = pg.solver.parseArgToArray([[1, 0.9], [2, 0.6], [3, 0.3]],\n mesh.cellCount(), mesh)\n\n # Create the parametric mesh that only reflect the domain geometry\n world = mt.createCircle(boundaryMarker=-1, nSegments=32, area=0.0051)\n paraMesh = pg.meshtools.createMesh(world, q=34.0, smooth=[1, 10])\n paraMesh.scale(1.0/5.0)\n\n return mesh, paraMesh, petro", "title": "" }, { "docid": "f69f2c3de08a8fea2106af210c45100c", "score": "0.51764035", "text": "def __init__(self, model, shore_coef):\r\n\r\n self.model = model\r\n self._shore_coef = shore_coef\r\n self.gtab = model.gtab\r\n self.radial_order = model.radial_order\r\n self.zeta = model.zeta", "title": "" }, { "docid": "6a9ee048ebf306caf8af1fef06c56ad3", "score": "0.5174483", "text": "def __init__(self, model):\n\n self.model = model\n self.fix_none_bounds(10, 813)\n self.info = {\n \"obj\": \"{}_{}\".format([reac for reac in self.model.get_objective()][0], self.model.id),\n \"fobj\": FBA(self.model).fobj\n }", "title": "" }, { "docid": "9ed8dcdc305216bc0634bee71a575d05", "score": "0.5164404", "text": "def product_main(self):\n return", "title": "" }, { "docid": "931cadce3dc70d56bfa59dd4e8cc866b", "score": "0.5154811", "text": "def __init__(self, species, qty):\n\n #these set the initial attributes for species, quantity and shipped status\n self.species = species\n self.qty = qty\n self.shipped = False\n self.tax = 0", "title": "" }, { "docid": "b7f4264d43a26687940e9e7a49bb9da0", "score": "0.51524115", "text": "def __init__(self, name, fuel, fanciness):\n super().__init__(name, fuel)\n self.fanciness = fanciness\n self.price_per_km *= fanciness", "title": "" }, { "docid": "f599d45d4a45e7888ab38854d88fe0a7", "score": "0.51432717", "text": "def __init__(self, name, fuel, fanciness):\n super().__init__(name, fuel)\n self.fanciness = fanciness\n self.price_per_km *= self.fanciness\n self.flagfall = 4.50", "title": "" }, { "docid": "cf436a669586d189c271cb9e8b38316f", "score": "0.5142965", "text": "def __init__(self, make, model, year):\n # super() allows you to call methods from parent class\n # superclass is parent, subclass is child\n super().__init__(make, model, year)\n\n # can also define additional attributes\n 
self.battery_size = 75", "title": "" }, { "docid": "1542ec9e58a9ab3a9bdbf240d0a86296", "score": "0.5134287", "text": "def model(self):", "title": "" }, { "docid": "61e7feb2fb5a65a88b23ed6514ea3b98", "score": "0.5133308", "text": "def __init__(self, z, p, dz_soil, ctr, loc):\n #print(p)\n self.StomaModel = 'MEDLYN_FARQUHAR' # stomatal model\n \n self.Switch_pheno = ctr['pheno_cycle'] # include phenology\n self.Switch_lai = ctr['seasonal_LAI'] # seasonal LAI\n self.Switch_WaterStress = ctr['WaterStress'] # water stress affects stomata\n\n self.name = p['name']\n\n # phenology model\n if self.Switch_pheno:\n self.Pheno_Model = Photo_cycle(p['phenop']) # phenology model instance\n self.pheno_state = self.Pheno_Model.f # phenology state [0...1]\n else:\n self.pheno_state = 1.0\n\n # dynamic LAI model\n if self.Switch_lai:\n # seasonality of leaf area\n self.LAI_Model = LAI_cycle(p['laip'], loc) # LAI model instance\n self.relative_LAI = self.LAI_Model.f # LAI relative to annual maximum [0...1]\n else:\n self.relative_LAI = 1.0\n\n # physical structure\n self.LAImax = p['LAImax'] # maximum annual 1-sided LAI [m2m-2]\n self.LAI = self.LAImax * self.relative_LAI # current LAI\n self.lad_normed = p['lad'] # normalized leaf-area density [m-1]\n self.lad = self.LAI * self.lad_normed # current leaf-area density [m2m-3]\n\n # root properties\n self.Roots = RootUptake(p['rootp'], dz_soil, self.LAImax)\n\n self.dz = z[1] - z[0]\n\n # leaf gas-exchange parameters\n self.photop0 = p['photop'] # A-gs parameters at pheno_state = 1.0 (dict)\n self.photop = self.photop0.copy() # current A-gs parameters (dict)\n # leaf properties\n self.leafp = p['leafp'] # leaf properties (dict)", "title": "" }, { "docid": "46350117caed49e308e33e79f13ed55e", "score": "0.51327664", "text": "def _profile(self):\n flexible_data_set = cx.FlexibleDataSetType(\n Code=\"QBIC_SOMATIC_SV_V{:.1f}\".format(SOMATIC_SV_VERSION),\n Systemwide=\"false\",\n FlexibleDataSetType=\"MEASUREMENT\",\n Category=\"LABOR\")\n multi_lang_de = cx.MultilingualEntryType(Lang='de', Value='QBIC Somatic SV V{:.1f}'.format(SOMATIC_SV_VERSION))\n multi_lang_en = cx.MultilingualEntryType(Lang='en', Value='QBIC Somatic SV V{:.1f}'.format(SOMATIC_SV_VERSION))\n flexible_data_set.NameMultilingualEntries = [multi_lang_de, multi_lang_en] \n \n flexible_data_set.FlexibleValueComplexRefs = [\n cx.FlexibleValueRefType(FlexibleValueRef=field_type, Required=\"false\") for field_type in SV_FIELD_TYPES\n ]\n \n self._catalogue_data.append(flexible_data_set)\n\n cfr_template = cx.CrfTemplateType(Name='Somatic SV V{:.1f}'.format(SOMATIC_SV_VERSION),\n FlexibleDataSetRef='QBIC_SOMATIC_SV_V{:.1f}'.format(SOMATIC_SV_VERSION),\n TemplateType='LABORMETHOD', Version='{:.0f}'.format(SOMATIC_SV_VERSION-1), EntityStatus='ACTIVE', Global='false', MultipleUse='false', Active='false')\n\n cfr_template_section = cx.CrfTemplateSectionType(Name='Somatic SV V{:.1f}'.format(SOMATIC_SV_VERSION),\n Height=len(SV_FIELD_TYPES), Width='1', Position='-1')\n \n cfr_template_section.CrfTemplateField = [\n cx.CrfTemplateFieldType(LaborValue=value,\n LowerRow=ind,\n LowerColumn='0',\n UpperRow=ind,\n UpperColumn='0',\n Mandatory='false',\n VisibleCaption='true',\n FieldType='LABORVALUE') for ind, value in enumerate(SV_FIELD_TYPES)]\n\n cfr_template.CrfTemplateSection = [cfr_template_section]\n\n self._catalogue_data.append(cfr_template)", "title": "" }, { "docid": "cb82bc65e036a57c35f5f5156e807680", "score": "0.51289624", "text": "def __init__(self: 'ConsoleController',\n number_of_cheeses: int, 
number_of_stools: int):\n #get cheeses and stools\n self.number_of_cheeses = number_of_cheeses\n self.num_of_stools = number_of_stools\n\n #create a model using cheeses and stools\n self.model = TOAHModel(number_of_stools)\n self.model.fill_first_stool(number_of_cheeses)", "title": "" }, { "docid": "133482de99420624ce5fecf560610a41", "score": "0.5127023", "text": "def create_top_level_model(sbml_file):\n sbmlns = SBMLNamespaces(3, 1, \"comp\", 1)\n doc = SBMLDocument(sbmlns)\n doc.setPackageRequired(\"comp\", True)\n doc.setPackageRequired(\"fbc\", False)\n\n mdoc = doc.getPlugin(\"comp\")\n\n # create listOfExternalModelDefinitions\n emd_bounds = comp.create_ExternalModelDefinition(mdoc, \"toy_ode_bounds\", sbml_file=ode_bounds_file)\n emd_fba = comp.create_ExternalModelDefinition(mdoc, \"toy_fba\", sbml_file=fba_file)\n emd_update = comp.create_ExternalModelDefinition(mdoc, \"toy_ode_update\", sbml_file=ode_update_file)\n emd_model = comp.create_ExternalModelDefinition(mdoc, \"toy_ode_model\", sbml_file=ode_model_file)\n\n # create models and submodels\n model = doc.createModel()\n model.setId(\"toy_top_level\")\n model.setName(\"Top level model\")\n model_factory.add_generic_info(model)\n mplugin = model.getPlugin(\"comp\")\n model.setSBOTerm(comp.SBO_CONTINOUS_FRAMEWORK)\n\n # add submodel which references the external model definition\n comp.add_submodel_from_emd(mplugin, submodel_sid=\"bounds\", emd=emd_bounds)\n comp.add_submodel_from_emd(mplugin, submodel_sid=\"fba\", emd=emd_fba)\n comp.add_submodel_from_emd(mplugin, submodel_sid=\"update\", emd=emd_update)\n comp.add_submodel_from_emd(mplugin, submodel_sid=\"model\", emd=emd_model)\n\n # --- compartments ---\n create_compartments(model, [\n {A_ID: \"extern\", A_NAME: \"external compartment\", A_VALUE: 1.0, A_SPATIAL_DIMENSION: 3, A_UNIT: model_factory.UNIT_VOLUME},\n {A_ID: 'cell', A_NAME: 'cell', A_VALUE: 1.0, A_SPATIAL_DIMENSION: 3, A_UNIT: model_factory.UNIT_VOLUME}\n ])\n\n # --- species ---\n # replaced species\n # (fba species are not replaced, because they need their boundaryConditions for the FBA,\n # and do not depend on the actual concentrations)\n create_species(model, [\n {A_ID: 'C', A_NAME: \"C\", A_VALUE: 0, A_UNIT: model_factory.UNIT_AMOUNT, A_HAS_ONLY_SUBSTANCE_UNITS: True,\n A_COMPARTMENT: \"extern\"},\n ])\n\n # --- parameters ---\n create_parameters(model, [\n # bounds\n {A_ID: 'ub_R1', A_VALUE: 1.0, A_UNIT: model_factory.UNIT_FLUX, A_NAME: 'ub_R1', A_CONSTANT: False},\n {A_ID: \"vR3\", A_NAME: \"vR3 (FBA flux)\", A_VALUE: 0.1, A_UNIT: model_factory.UNIT_FLUX, A_CONSTANT: False}\n ])\n\n # --- reactions ---\n # dummy reaction in top model\n r1 = create_reaction(model, rid=\"R3\", name=\"R3 dummy\", fast=False, reversible=True,\n reactants={}, products={\"C\": 1}, compartment=\"extern\")\n # assignment rule\n create_assignment_rules(model, [{A_ID: \"vR3\", A_VALUE: \"R3\"}])\n\n # --- replacements ---\n # replace compartments\n comp.replace_elements(model, 'extern', ref_type=comp.SBASE_REF_TYPE_PORT,\n replaced_elements={'fba': ['extern_port'], 'update': ['extern_port'], 'model': ['extern_port']})\n\n comp.replace_elements(model, 'cell', ref_type=comp.SBASE_REF_TYPE_PORT,\n replaced_elements={'fba': ['cell_port']})\n\n # replace parameters\n comp.replace_elements(model, 'ub_R1', ref_type=comp.SBASE_REF_TYPE_PORT,\n replaced_elements={'bounds': ['ub_R1_port'], 'fba': ['ub_R1_port']})\n comp.replace_elements(model, 'vR3', ref_type=comp.SBASE_REF_TYPE_PORT,\n replaced_elements={'update': ['vR3_port']})\n\n # 
replace species\n comp.replace_elements(model, 'C', ref_type=comp.SBASE_REF_TYPE_PORT,\n replaced_elements={'fba': ['C_port'], 'update': ['C_port'],\n 'model': ['C_port']})\n\n # replace reaction by fba reaction\n comp.replaced_by(model, 'R3', ref_type=comp.SBASE_REF_TYPE_PORT,\n submodel='fba', replaced_by=\"R3_port\")\n\n # replace units\n for uid in ['s', 'kg', 'm3', 'm2', 'mM', 'item_per_m3', 'm', 'per_s', 'item_per_s']:\n comp.replace_element_in_submodels(model, uid, ref_type=comp.SBASE_REF_TYPE_UNIT,\n submodels=['bounds', 'fba', 'update', 'model'])\n\n # write SBML file\n sbml_io.write_and_check(doc, sbml_file)", "title": "" }, { "docid": "8a61b3ec62b3cdb766201f4d60c90ea9", "score": "0.51260775", "text": "def __init__(self, modelfits='/global/project/projectdirs/desi/science/td/timedomain-github/snmodels/vincenzi_2019_models.fits'):\n # Set up core-collapse models.\n modtab = Table.read(modelfits)\n \n # Blacklist models with odd-looking spectra for some epochs (Eddie Sepeku).\n blacklist = [b'SN2013by', b'SN2013fs', b'SN2009bw', b'SN2012aw', b'SN2009kr', b'ASASSN14jb', b'SN2013am', b'SN2008ax', b'SN2008fq', b'SN2009ip', b'iPTF13bvn', b'SN2008D', b'SN1994I', b'SN2007gr', b'SN2009bb', b'SN2007ru']\n select = ~np.in1d(modtab['Name'].value, blacklist)\n modtab = modtab[select]\n\n sntypes = np.unique(modtab['Type'])\n self.models = {}\n for sntype in sntypes:\n self.models[sntype] = []\n for nm in modtab['Name'][modtab['Type'] == sntype]:\n # Get the corresponding sncosmo model with host dust correction applied.\n if nm.startswith('SN'):\n model_name = 'v19-{}-corr'.format(nm[2:].lower())\n else:\n model_name = 'v19-{}-corr'.format(nm.lower())\n self.models[sntype].append(model_name)\n\n # Add Ia models.\n self.models['Ia'] = ['hsiao']\n \n # Add kilonova models.\n self.models['kN'] = []\n knfiles = sorted(glob('/global/u2/l/lehsani/kilonova/models_0.2_0.8/*knova*.csv'))\n for knfile in knfiles:\n kn = KNSource(knfile)\n sncosmo.register(kn, kn.name, force=True)\n self.models['kN'].append(kn.name)", "title": "" }, { "docid": "9ad07bd3f5ada935f0138b4b2f61e43d", "score": "0.51237166", "text": "def __init__(self, community_id, models, extracellular_compartment_id = \"e\", empty_flag = False):\n self.community_id = community_id\n self.models = models\n self.extracellular_compartment_id = extracellular_compartment_id\n self.empty_flag = empty_flag\n self.model_dic = self.__model_dic()\n self.cmodel = self.__create_cmodel()\n self.objectives = self.cmodel.get_objective()\n self.pex_cons = self.__get_pex_constraints()\n self.biomass_reactions = self.__biomass_reactions()", "title": "" }, { "docid": "75ba048ca022f541a1ee1e10167a7962", "score": "0.5115951", "text": "def __init__(self):\n self.label = \"Apply model\"\n self.description = 'Applies a model to a series of data rasters obtain a response raster'\n self.canRunInBackground = False\n self.category = \"Modelling\"", "title": "" }, { "docid": "5eea97c6320b8410c80c39d70ab96c84", "score": "0.51052773", "text": "def __init__(self, *args):\r\n self.__stencils = {}\r\n self.__shapes = {}\r\n #self.add_stencil(\"Basic Shapes.vss\")\r\n for stencil in args:\r\n self.add_stencil(stencil)", "title": "" }, { "docid": "9c650c7e58dccfdea82af9a8d8eccb70", "score": "0.5098148", "text": "def main():\n # sde = SDE()\n # sde.import_and_export()\n market = Market()\n market.update_marketData()", "title": "" }, { "docid": "f3408a0e1bd4491b367d3a628b5fbcd4", "score": "0.5096738", "text": "def __init__(self,\n sbml_file,\n extra_cellular_id,\n periplasm_id,\n 
cytoplasm_id,\n model_dir='..',\n json_model=None,\n groups_size=150,\n kon=0.8,\n koff=0.8,\n rates='0:0:0:1',\n enzymes=1000,\n metabolites=600000):\n\n self.groups_size = groups_size\n self.parameter_writer = XMLParametersWriter(model_dir=model_dir)\n self.coder = ModelCodeWriter(model_dir=model_dir)\n self.parser = SBMLParser(sbml_file,\n extra_cellular_id,\n periplasm_id,\n cytoplasm_id,\n json_model=json_model,\n default_konSTP=kon,\n default_konPTS=kon,\n default_koffSTP=koff,\n default_koffPTS=koff,\n default_metabolite_amount=metabolites,\n default_enzyme_amount=enzymes,\n default_rate=rates,\n default_reject_rate=rates,\n default_interval_time=rates)\n\n special_compartment_ids = [extra_cellular_id, periplasm_id, cytoplasm_id]\n\n c_external_enzyme_sets = {cid: [MEMBRANE]\n for cid in self.parser.get_compartments()\n if cid not in special_compartment_ids}\n c_external_enzyme_sets[periplasm_id] = [INNER, TRANS]\n\n e_external_enzyme_sets = {periplasm_id: [OUTER, TRANS]}\n\n # Compartment model structures\n self.periplasm = ModelStructure(periplasm_id,\n self.parser,\n membranes=[OUTER, INNER, TRANS])\n\n self.extra_cellular = ModelStructure(extra_cellular_id,\n self.parser,\n external_enzyme_sets=e_external_enzyme_sets)\n\n self.cytoplasm = ModelStructure(cytoplasm_id,\n self.parser,\n external_enzyme_sets=c_external_enzyme_sets)\n\n self.organelles = {comp_id: ModelStructure(comp_id, self.parser, [MEMBRANE])\n for comp_id in self.parser.get_compartments()\n if comp_id not in special_compartment_ids}", "title": "" }, { "docid": "294fb43d0e5c8ae0155134b298fd02f7", "score": "0.5083324", "text": "def __init__(self, model_file_path='../model/model_final', target_var='SalePrice'): \n self.model_file_path = model_file_path\n self.load_model()\n self.target_var = target_var", "title": "" }, { "docid": "597249eb3dedd47cc6937d9357251e99", "score": "0.5083237", "text": "def construct(self, geom):\n\n main_shape = geom.shapes.Tubs(self.name, rmin = self.halfDimension[\"rmin\"],\n rmax = self.halfDimension[\"rmax\"], dz = self.halfDimension[\"dz\"])\n main_lv = geom.structure.Volume(self.name+\"_lv\", material=self.Material, shape=main_shape)\n if isinstance(self.Sensitive,str):\n main_lv.params.append((\"SensDet\",self.Sensitive))\n self.add_volume(main_lv)\n\n straw_shape = geom.shapes.Tubs(self.name+\"_straw\", rmin = self.halfSTDimension[\"rmin\"],\n rmax = self.halfSTDimension[\"rmax\"], dz = self.halfSTDimension[\"dz\"])\n straw_lv = geom.structure.Volume(self.name+\"_straw_lv\", material=self.STMaterial, shape=straw_shape)\n\n wire_shape = geom.shapes.Tubs(self.name+\"_wire\", self.halfWireDimension[\"rmin\"],\n rmax = self.halfWireDimension[\"rmax\"], dz = self.halfWireDimension[\"dz\"])\n wire_lv = geom.structure.Volume(self.name+\"_wire_lv\", material=self.WireMaterial, shape=wire_shape)\n\n straw_pla = geom.structure.Placement( self.name+\"_straw_pla\", volume = straw_lv )\n wire_pla = geom.structure.Placement( self.name+\"_wire_pla\", volume = wire_lv )\n main_lv.placements.append( straw_pla.name )\n main_lv.placements.append( wire_pla.name )", "title": "" }, { "docid": "46b3b31f7f083d4ab5495942c3a43018", "score": "0.5074051", "text": "def __init__(self, make, model, year):\n super().__init__(make, model, year)\n #new 'Battery' instance above defined as an attribute within 'ElectricCar' instance\n self.battery = Battery()", "title": "" }, { "docid": "e02764166a3d23a0857f6965f383a596", "score": "0.50672626", "text": "def stadardization_model(corr):\n class 
ModelStandardization_mystand( ModelStandardization ):\n STANDARDIZATION = corr\n\n return ModelStandardization_mystand()", "title": "" }, { "docid": "a7963a1de6ca2f53960b2a73a0bcab7a", "score": "0.5061793", "text": "def build_car(self, model):", "title": "" }, { "docid": "1634cd3a5d6f896a8f6d2f44ecf2a1fb", "score": "0.5059821", "text": "def __init__(self):\r\n\r\n super(Model, self).__init__()\r\n\r\n # Define private dictionary attributes.\r\n\r\n # Define private list attributes.\r\n\r\n # Define private scalar attributes.\r\n\r\n # Define public dictionary attributes.\r\n\r\n # Define public list attributes.\r\n self.lst_derate_criteria = [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]\r\n\r\n # Define public scalar attributes.\r\n self.application = 0 # Application index.\r\n self.base_hr = 0.0 # Base hazard rate.\r\n self.piE = 0.0 # Environment pi factor.\r\n self.reason = \"\" # Overstress reason.\r", "title": "" }, { "docid": "cfcf82f9b272d87c28d4128972cdbdc4", "score": "0.5057847", "text": "def pwr_assembly():\n\n model = openmc.model.Model()\n\n # Define materials.\n fuel = openmc.Material(name='Fuel')\n fuel.set_density('g/cm3', 10.29769)\n fuel.add_nuclide('U234', 4.4843e-6)\n fuel.add_nuclide('U235', 5.5815e-4)\n fuel.add_nuclide('U238', 2.2408e-2)\n fuel.add_nuclide('O16', 4.5829e-2)\n\n clad = openmc.Material(name='Cladding')\n clad.set_density('g/cm3', 6.55)\n clad.add_nuclide('Zr90', 2.1827e-2)\n clad.add_nuclide('Zr91', 4.7600e-3)\n clad.add_nuclide('Zr92', 7.2758e-3)\n clad.add_nuclide('Zr94', 7.3734e-3)\n clad.add_nuclide('Zr96', 1.1879e-3)\n\n hot_water = openmc.Material(name='Hot borated water')\n hot_water.set_density('g/cm3', 0.740582)\n hot_water.add_nuclide('H1', 4.9457e-2)\n hot_water.add_nuclide('O16', 2.4672e-2)\n hot_water.add_nuclide('B10', 8.0042e-6)\n hot_water.add_nuclide('B11', 3.2218e-5)\n hot_water.add_s_alpha_beta('c_H_in_H2O')\n\n # Define the materials file.\n model.materials = (fuel, clad, hot_water)\n\n # Instantiate ZCylinder surfaces\n fuel_or = openmc.ZCylinder(x0=0, y0=0, r=0.39218, name='Fuel OR')\n clad_or = openmc.ZCylinder(x0=0, y0=0, r=0.45720, name='Clad OR')\n\n # Create boundary planes to surround the geometry\n pitch = 21.42\n min_x = openmc.XPlane(x0=-pitch/2, boundary_type='reflective')\n max_x = openmc.XPlane(x0=+pitch/2, boundary_type='reflective')\n min_y = openmc.YPlane(y0=-pitch/2, boundary_type='reflective')\n max_y = openmc.YPlane(y0=+pitch/2, boundary_type='reflective')\n\n # Create a fuel pin universe\n fuel_pin_universe = openmc.Universe(name='Fuel Pin')\n fuel_cell = openmc.Cell(name='fuel', fill=fuel, region=-fuel_or)\n clad_cell = openmc.Cell(name='clad', fill=clad, region=+fuel_or & -clad_or)\n hot_water_cell = openmc.Cell(name='hot water', fill=hot_water, region=+clad_or)\n fuel_pin_universe.add_cells([fuel_cell, clad_cell, hot_water_cell])\n\n\n # Create a control rod guide tube universe\n guide_tube_universe = openmc.Universe(name='Guide Tube')\n gt_inner_cell = openmc.Cell(name='guide tube inner water', fill=hot_water,\n region=-fuel_or)\n gt_clad_cell = openmc.Cell(name='guide tube clad', fill=clad,\n region=+fuel_or & -clad_or)\n gt_outer_cell = openmc.Cell(name='guide tube outer water', fill=hot_water,\n region=+clad_or)\n guide_tube_universe.add_cells([gt_inner_cell, gt_clad_cell, gt_outer_cell])\n\n # Create fuel assembly Lattice\n assembly = openmc.RectLattice(name='Fuel Assembly')\n assembly.pitch = (pitch/17, pitch/17)\n assembly.lower_left = (-pitch/2, -pitch/2)\n\n # Create array indices for guide tube locations in 
lattice\n template_x = np.array([5, 8, 11, 3, 13, 2, 5, 8, 11, 14, 2, 5, 8,\n 11, 14, 2, 5, 8, 11, 14, 3, 13, 5, 8, 11])\n template_y = np.array([2, 2, 2, 3, 3, 5, 5, 5, 5, 5, 8, 8, 8, 8,\n 8, 11, 11, 11, 11, 11, 13, 13, 14, 14, 14])\n\n # Create 17x17 array of universes\n assembly.universes = np.tile(fuel_pin_universe, (17, 17))\n assembly.universes[template_x, template_y] = guide_tube_universe\n\n # Create root Cell\n root_cell = openmc.Cell(name='root cell', fill=assembly)\n root_cell.region = +min_x & -max_x & +min_y & -max_y\n\n # Create root Universe\n model.geometry.root_universe = openmc.Universe(name='root universe')\n model.geometry.root_universe.add_cell(root_cell)\n\n model.settings.batches = 10\n model.settings.inactive = 5\n model.settings.particles = 100\n model.settings.source = openmc.Source(space=openmc.stats.Box(\n [-pitch/2, -pitch/2, -1], [pitch/2, pitch/2, 1], only_fissionable=True))\n\n plot = openmc.Plot()\n plot.origin = (0.0, 0.0, 0)\n plot.width = (21.42, 21.42)\n plot.pixels = (300, 300)\n plot.color_by = 'material'\n model.plots.append(plot)\n\n return model", "title": "" }, { "docid": "7b2a7740fe2af300e9f4af151c64dc95", "score": "0.5050282", "text": "def create_submodel(self, verbose = True, vverbose = False):\n\n with open(self.basemodel, 'rb') as f: hspfmodel = pickle.load(f)\n\n comid = self.gagecomid\n upcomids = self.upcomids\n filename = hspfmodel.filename + comid\n picklefile = hspfmodel.filepath + filename\n\n submodel = HSPFModel()\n submodel.build_from_existing(hspfmodel, filename, directory = \n hspfmodel.filepath[:-1],\n verbose = vverbose)\n\n # find the subbasins between the outlet and the upstream comids and\n # store in an updown dictionary\n\n updown = {comid: 0}\n\n current = 0\n\n while current != len(updown):\n\n # see if the current length changes to check if done\n\n current = len(updown)\n\n # iterate throught the subbasins and see if any need to be added\n\n for up, down in hspfmodel.updown.items():\n\n if (up not in updown and # not already there\n up not in upcomids and # between the boundaries\n down in updown): # downstream is there\n \n updown[up] = down\n \n # overwrite the old updown dictionary\n\n submodel.updown = updown\n\n # overwrite the inlets and outlets\n\n submodel.inlets = [hspfmodel.updown[c] for c in upcomids]\n submodel.outlets = [comid]\n\n # overwrite the old subbasin dictionary\n\n submodel.subbasins = {comid: subbasin for comid, subbasin in \n submodel.subbasins.items() if comid in updown}\n\n # build with the updated model subbasin info\n\n submodel.build()\n\n # add in the modules\n\n if self.temp: submodel.add_temp()\n\n if self.snow: \n \n densities = [o.RDENPF \n for o in hspfmodel.perlnds + hspfmodel.implnds]\n depths = [o.packsnow / o.RDENPF \n for o in hspfmodel.perlnds + hspfmodel.implnds]\n\n depth = sum(depths) / len(depths)\n density = sum(densities) / len(densities)\n\n submodel.add_snow(depth = depth, density = density) \n\n if self.hydrology: submodel.add_hydrology()\n \n # add the flowgage data to the model\n\n for identifier in hspfmodel.flowgages:\n if identifier == self.gageid:\n start_date, tstep, data = hspfmodel.flowgages[identifier]\n submodel.add_timeseries('flowgage', identifier, start_date, \n data, tstep = tstep)\n\n # add the watershed time series dictionaries for the model\n\n timeseries = {'inflow': hspfmodel.inflows,\n 'temperature': hspfmodel.temperatures,\n 'dewpoint': hspfmodel.dewpoints,\n 'wind': hspfmodel.windspeeds,\n 'solar': hspfmodel.solars,\n 'snowfall': 
hspfmodel.snowfalls,\n 'snowdepth': hspfmodel.snowdepths,\n }\n\n for tstype, d in timeseries.items():\n for identifier in d: \n start_date, tstep, data = d[identifier]\n submodel.add_timeseries(tstype, identifier, start_date, data, \n tstep = tstep)\n\n # add the subbasin timeseries as needed\n\n for identifier in hspfmodel.precipitations:\n if identifier in submodel.subbasins.keys():\n start_date, tstep, data = hspfmodel.precipitations[identifier]\n submodel.add_timeseries('precipitation', identifier, start_date,\n data, tstep = tstep)\n\n # add the landuse timeseries as needed\n\n landuse_keys = {'Corn': 'cereals',\n 'Soybeans': 'legumes',\n 'Pasture/grass': 'pasture',\n 'Other grain': 'cereals',\n 'Hay/alfalfa': 'alfalfa',\n 'Water/wetland': 'wetlands',\n 'Fallow land': 'fallow',\n 'Forest': 'others',\n 'Developed': 'others',\n 'Impervious': 'others',\n 'Other': 'others',\n }\n\n ltypes = [landuse_keys[i] for i in hspfmodel.landuse]\n\n for identifier in hspfmodel.evaporations:\n if identifier in ltypes:\n start_date, tstep, data = hspfmodel.evaporations[identifier]\n submodel.add_timeseries('evaporation', identifier, start_date,\n data, tstep = tstep)\n\n # add the influent flows as needed\n \n for upcomid in upcomids:\n\n # find the upstream gage number\n\n \n upgage = [v for k, v in \n hspfmodel.subbasin_timeseries['flowgage'].items() \n if k == upcomid][0]\n incomid = hspfmodel.updown[upcomid]\n\n # find the outlet flows from the previous upstream calibration\n\n t = (self.directory, self.HUC8, upgage)\n flowfile = '{}/{}/calibrations/{}/outletflows'.format(*t)\n \n # get the time series and add it to the model\n\n if not os.path.isfile(flowfile): \n raise RuntimeError('warning: upstream calibration of gage ' +\n '{} does not exist\\n'.format(upgage))\n with open(flowfile, 'rb') as f: times, data = pickle.load(f)\n\n tstep = math.ceil((times[1] - times[0]).total_seconds() / 60)\n\n submodel.add_timeseries('inflow', '{}'.format(incomid), times[0], \n data, tstep = tstep)\n\n # assign the inflows from upstream to any subbasins\n\n otype = 'Reach'\n\n submodel.assign_operation_timeseries('inflow', incomid, 'Reach', \n '{}'.format(incomid))\n \n # assign as needed\n\n for tstype, identifier in hspfmodel.watershed_timeseries.items():\n \n submodel.assign_watershed_timeseries(tstype, identifier)\n\n for tstype, d in hspfmodel.subbasin_timeseries.items():\n\n for subbasin, identifier in d.items():\n \n if subbasin in submodel.subbasins:\n\n submodel.assign_subbasin_timeseries(tstype, subbasin,\n identifier)\n\n for tstype, d in hspfmodel.landuse_timeseries.items():\n\n for landtype, identifier in d.items():\n \n if landtype in submodel.landuse:\n\n submodel.assign_landuse_timeseries(tstype, landtype,\n identifier)\n\n for tstype, d1 in hspfmodel.operation_timeseries.items():\n\n for subbasin, d2 in d1.items():\n\n for otype, identifier in d2.items():\n\n if subbasin in submodel.subbasins:\n\n submodel.assign_operation_timeseries(tstype, subbasin,\n otype, identifier)\n\n with open(picklefile, 'wb') as f: pickle.dump(submodel, f)\n\n self.basemodel = picklefile", "title": "" }, { "docid": "2bf96a316ed8b50e59e35c107ca30910", "score": "0.5044699", "text": "def __init__(self, Model, settings):\n KratosMultiphysics.Process.__init__(self)\n\n # The value can be a double or a string (function)\n default_settings = KratosMultiphysics.Parameters(\n \"\"\"\n {\n \"help\" : \"This sets the initial conditions in terms of imposed strain, stress or deformation gradient\",\n \"mesh_id\" : 0,\n 
\"model_part_name\" : \"please_specify_model_part_name\",\n \"dimension\" : 3,\n \"imposed_strain\" : [\"0*t\",\"0*t\",\"0*t\",0,0,0],\n \"imposed_stress\" : [0,0,0,0,0,0],\n \"imposed_deformation_gradient\" : [[1,0,0],[0,1,0],[0,0,1]],\n \"interval\" : [0.0, 1e30]\n }\n \"\"\"\n )\n\n # assign this here since it will change the \"interval\" prior to validation\n self.interval = KratosMultiphysics.IntervalUtility(settings)\n\n settings.ValidateAndAssignDefaults(default_settings)\n self.model_part = Model[settings[\"model_part_name\"].GetString()]\n self.dimension = settings[\"dimension\"].GetInt()\n\n # init strain\n self.strain_functions = components_to_functions(settings[\"imposed_strain\"])\n nr_comps = len(self.strain_functions)\n self.imposed_strain = KratosMultiphysics.Vector(nr_comps)\n\n # init stress\n self.stress_functions = components_to_functions(settings[\"imposed_stress\"])\n nr_comps = len(self.stress_functions)\n self.imposed_stress = KratosMultiphysics.Vector(nr_comps)\n\n # init deformation gradient\n aux_matrix = settings[\"imposed_deformation_gradient\"]\n self.deformation_functions = []\n for row in aux_matrix.values():\n aux_vector = components_to_functions(row)\n self.deformation_functions.append(aux_vector)\n nrows = aux_matrix.size()\n ncols = aux_matrix[0].size()\n self.imposed_deformation_gradient = KratosMultiphysics.Matrix(nrows, ncols)", "title": "" }, { "docid": "60d10862cdba968122eaf36625483b31", "score": "0.50441337", "text": "def setShadeModel(*args):\n return _coin.SoLazyElement_setShadeModel(*args)", "title": "" }, { "docid": "d116762b520f239d193616f2b238969b", "score": "0.50420004", "text": "def __init__(self, Mv):\n \n # get the gravitational constant (the value is 4.498502151575286e-06)\n self.G = const.G.to(u.kpc**3/u.Msun/u.Gyr**2).value\n \n # initialize the virial mass global variable \n self.Mvir = Mv\n \n ## Cosmology Same as Patel 2020\n self.h = 0.7 # Hubble constant at z=0 / 100 \n self.omegaM = 0.27\n self.DelVir = 359 # default z=0 overdensity for this cosmology", "title": "" }, { "docid": "1129bdd69386cff2ceb1c8cb6f8afae2", "score": "0.50415975", "text": "def setup_prod():\n setup_general()", "title": "" }, { "docid": "bc28a96787b33b44f751dbe4c93bd9f9", "score": "0.5040365", "text": "def generate_catalogue(cube_side, mass_params, z, h):\n\n print(\"Generating catalogue for a volume of ({:.2f} Mpc/h)^3\\n\".format(cube_side))\n\n catalogue_volume = cube_side**3\n\n # Get the bin width and generate the bins.\n bin_width = mass_params[2]\n mass_range = 10 ** np.arange(mass_params[0], mass_params[1], mass_params[2]) #log[Msun]\n\n # Generate the mass function itself - this is from the colossus toolbox\n local_mass_function = mass_function.massFunction(mass_range, z, mdef='200m', model='tinker08', q_out='dndlnM') \\\n * np.log(10) / h # dn/dlog10M\n\n # We determine the Cumulative HMF starting from the high mass end, multiplied by the bin width.\n # This effectively gives the cumulative probability of a halo existing.\n cumulative_mass_function = np.flip(np.cumsum(np.flip(local_mass_function, 0)), 0) * bin_width\n\n ########################################################################\n # Interpolation Tests\n # Interpolator for the testing - we will update this with the volume in a second.\n # This is essentially for a volume of size unity.\n interpolator = sp.interpolate.interp1d(cumulative_mass_function, mass_range)\n\n sample_index = int(np.floor(len(cumulative_mass_function) / 2)) # index of the half way point\n num_test = 
cumulative_mass_function[sample_index] # The value of the cum function at this index\n mass_test = interpolator(num_test) # Interpolate to get the mass that this predicts\n # Check that these values are equivalent.\n assert mass_range[sample_index] == mass_test, \\\n \"Interpolation method incorrect: Back interpolation at midpoint failed\"\n # Check first element is equivalent to the total to 10 SF accuracy\n assert np.round(cumulative_mass_function[0], 10) ==\\\n np.round(np.sum(local_mass_function) * bin_width, 10), \"Final cum sum element != total sum\"\n ########################################################################\n\n # Multiply by volume\n cumulative_mass_function = cumulative_mass_function * catalogue_volume\n\n # Get the maximum cumulative number.\n max_number = np.floor(cumulative_mass_function[0])\n range_numbers = np.arange(max_number)\n\n # Update interpolator\n interpolator = sp.interpolate.interp1d(cumulative_mass_function, mass_range)\n mass_catalog = interpolator(range_numbers[range_numbers >= np.amin(cumulative_mass_function)])\n\n print(\"Number of halos generated: {:d}\\n\".format(len(mass_catalog)))\n\n mass_catalog = np.log10(mass_catalog)\n return mass_catalog", "title": "" }, { "docid": "3967922f2cab6aa214c6a5c1206ec360", "score": "0.5034182", "text": "def __init__(self, x=0, Np=10, error_factor=1.0, dist='Gaussian', Energy=None, relement='Au', NrDep='False', H=1.0, HvvgtR=False,\n Rsig=0.0, norm=1.0, norm_err=0.01, sbkg=0.0, cbkg=0.0, abkg=0.0, D=1.0, phi=0.1, U=-1.0, SF='None',Nalf=200,term='Total',\n mpar={'Layers': {'Material': ['Au', 'H2O'], 'Density': [19.32, 1.0], 'SolDensity': [1.0, 1.0],\n 'Rmoles': [1.0, 1.0], 'R': [1.0, 0.0]}}):\n if type(x)==list:\n self.x=np.array(x)\n else:\n self.x=x\n self.norm=norm\n self.norm_err = norm_err\n self.sbkg=sbkg\n self.cbkg=cbkg\n self.abkg=abkg\n self.dist=dist\n self.Rsig=Rsig\n self.Np=Np\n self.H=H\n self.HvvgtR=HvvgtR\n self.Nalf=Nalf\n self.Energy=Energy\n self.relement=relement\n self.NrDep=NrDep\n #self.rhosol=rhosol\n self.error_factor=error_factor\n self.D=D\n self.phi=phi\n self.U=U\n self.term=term\n self.__mpar__=mpar #If there is any multivalued parameter\n self.SF=SF\n self.choices={'HvvgtR':['True','False'],\n 'dist':['Gaussian','LogNormal'],'NrDep':['True','False'],\n 'SF':['None','Hard-Sphere', 'Sticky-Sphere'],\n 'term': ['SAXS-term', 'Cross-term', 'Resonant-term',\n 'Total']\n } #If there are choices available for any fixed parameters\n self.__cf__=Chemical_Formula()\n self.__fit__=False\n self.output_params={'scaler_parameters':{}}\n self.__mkeys__=list(self.__mpar__.keys())\n self.init_params()", "title": "" }, { "docid": "27c8b444ddf946a871eb505614b83a58", "score": "0.50339866", "text": "def __init__(self):\n from desisim.io import read_basis_templates\n \n balflux, balwave, balmeta = read_basis_templates(objtype='BAL')\n self.balflux = balflux\n self.balwave = balwave\n self.balmeta = balmeta", "title": "" }, { "docid": "0c5b7ebda2ef1b74500cdd4c5f229e27", "score": "0.5030364", "text": "def __init__(self):\n \n self.fdn = csmFoundation()\n self.transportation = 0.0\n self.roadsCivil = 0.0\n self.portStaging = 0.0\n self.installation = 0.0\n self.electrical = 0.0\n self.engPermits = 0.0\n self.pai = 0.0\n self.scour = 0.0", "title": "" }, { "docid": "6c59dc482900e1b624eb3025101a1959", "score": "0.5028235", "text": "def __init__(self, env, sub_model):\n super(ResidualModel, self).__init__()\n self.env = env\n self.model = hydra.utils.instantiate(sub_model)", "title": "" }, { "docid": 
"f4c539b7e75e8572870810e761acafa8", "score": "0.5015839", "text": "def cg():\n\n L = 250 # bar length\n m = 4 # number of requests\n w = [187, 119, 74, 90] # size of each item\n b = [1, 2, 2, 1] # demand for each item\n\n # creating models and auxiliary lists\n master = Model(SOLVER)\n lambdas = []\n constraints = []\n\n # creating an initial pattern (which cut one item per bar)\n # to provide the restricted master problem with a feasible solution\n for i in range(m):\n lambdas.append(master.add_var(obj=1, name='lambda_%d' % (len(lambdas) + 1)))\n\n # creating constraints\n for i in range(m):\n constraints.append(master.add_constr(lambdas[i] >= b[i], name='i_%d' % (i + 1)))\n\n # creating the pricing problem\n pricing = Model(SOLVER)\n\n # creating pricing variables\n a = []\n for i in range(m):\n a.append(pricing.add_var(obj=0, var_type=INTEGER, name='a_%d' % (i + 1)))\n\n # creating pricing constraint\n pricing += xsum(w[i] * a[i] for i in range(m)) <= L, 'bar_length'\n\n pricing.write('pricing.lp')\n\n new_vars = True\n while (new_vars):\n\n ##########\n # STEP 1: solving restricted master problem\n ##########\n\n master.optimize()\n master.write('master.lp')\n\n # printing dual values\n print_solution(master)\n print('pi = ', end='')\n print([constraints[i].pi for i in range(m)])\n print('')\n\n ##########\n # STEP 2: updating pricing objective with dual values from master\n ##########\n\n pricing.objective = 1\n for i in range(m):\n a[i].obj = -constraints[i].pi\n\n # solving pricing problem\n pricing.optimize()\n\n # printing pricing solution\n z_val = pricing.objective_value()\n print('Pricing:')\n print(' z = {z_val}'.format(**locals()))\n print(' a = ', end='')\n print([v.x for v in pricing.vars])\n print('')\n\n ##########\n # STEP 3: adding the new columns\n ##########\n\n # checking if columns with negative reduced cost were produced and\n # adding them into the restricted master problem\n if pricing.objective_value() < - EPS:\n coeffs = [a[i].x for i in range(m)]\n column = Column(constraints, coeffs)\n lambdas.append(master.add_var(obj=1, column=column, name='lambda_%d' % (len(lambdas) + 1)))\n\n print('new pattern = {coeffs}'.format(**locals()))\n\n # if no column with negative reduced cost was produced, then linear\n # relaxation of the restricted master problem is solved\n else:\n new_vars = False\n\n pricing.write('pricing.lp')\n # pdb.set_trace()\n\n print_solution(master)", "title": "" }, { "docid": "51cb817be985002f692d35e089526f32", "score": "0.5011754", "text": "def __init__(self, ngrps=2, nints=2, teff=5700.0, logg=4.0, feh=0.0, alpha=0.0, jmag=9.0, stellar_model='phoenix', filter='CLEAR', subarray='SUBSTRIP256', run=True, add_planet=False, scale=1., **kwargs):\n # Retrieve stellar model\n if stellar_model.lower() == 'phoenix':\n wav, flx = ma.get_phoenix_model(feh, alpha, teff, logg)\n elif stellar_model.lower() == 'atlas':\n wav, flx = ma.get_atlas_model(feh, teff, logg)\n\n # Scale model spectrum to user-input J-band\n flx = ma.scale_spectrum(wav, flx, jmag)\n\n # Initialize base class\n super().__init__(ngrps=ngrps, nints=nints, star=[wav, flx], subarray=subarray, filter=filter, **kwargs)\n\n # Add planet\n if add_planet:\n self.planet = hu.PLANET_DATA\n self.tmodel = hu.transit_params(self.time.jd)\n self.tmodel.t0 = np.mean(self.time.jd)\n self.tmodel.teff = teff\n self.tmodel.logg = logg\n self.tmodel.feh = feh\n self.tmodel.alpha = alpha\n\n # Run the simulation\n if run:\n self.create()", "title": "" }, { "docid": "6a05009c7e19f6c93fff49a0de3225fb", 
"score": "0.50089544", "text": "def __init__(self, coreRadius=0.0, rAC50=0.0, coreStrandCount=0, radius=0.0, material=\"aluminum\", rDC20=0.0, sizeDescription='', rAC75=0.0, gmr=0.0, ratedCurrent=0.0, strandCount=0, rAC25=0.0, WireArrangements=None, ConcentricNeutralCableInfos=None, *args, **kw_args):\n #: (if there is a different core material) Radius of the central core.\n self.coreRadius = coreRadius\n\n #: AC resistance per unit length of the conductor at 50 oC.\n self.rAC50 = rAC50\n\n #: (if used) Number of strands in the steel core.\n self.coreStrandCount = coreStrandCount\n\n #: Outside radius of the wire.\n self.radius = radius\n\n #: Wire material. Values are: \"aluminum\", \"copper\", \"other\", \"steel\", \"acsr\"\n self.material = material\n\n #: DC resistance per unit length of the conductor at 20 oC.\n self.rDC20 = rDC20\n\n #: Describes the wire guage or cross section (e.g., 4/0, #2, 336.5).\n self.sizeDescription = sizeDescription\n\n #: AC resistance per unit length of the conductor at 75 oC.\n self.rAC75 = rAC75\n\n #: Geometric mean radius. If we replace the conductor by a thin walled tube of radius GMR, then its reactance is identical to the reactance of the actual conductor.\n self.gmr = gmr\n\n #: Current carrying capacity of the wire under stated thermal conditions.\n self.ratedCurrent = ratedCurrent\n\n #: Number of strands in the wire.\n self.strandCount = strandCount\n\n #: AC resistance per unit length of the conductor at 25 oC.\n self.rAC25 = rAC25\n\n self._WireArrangements = []\n self.WireArrangements = [] if WireArrangements is None else WireArrangements\n\n self._ConcentricNeutralCableInfos = []\n self.ConcentricNeutralCableInfos = [] if ConcentricNeutralCableInfos is None else ConcentricNeutralCableInfos\n\n super(WireType, self).__init__(*args, **kw_args)", "title": "" }, { "docid": "a547e87cba133b63b251f82a2dbf2774", "score": "0.49989748", "text": "def canada_model_cli():\n pass", "title": "" }, { "docid": "9ee6a2dbf18053a2ef3b8991df176cb5", "score": "0.4996126", "text": "def for_model(self):", "title": "" }, { "docid": "ed169bf514baefb9d0ea3e8fe334cd8b", "score": "0.49942312", "text": "def __init__(self, \n Omega_c, Omega_b, Omega_l, h, n_s, A_s=None, sigma_8=None,\n Omega_g=0.0, Omega_n_mass=0.0, Omega_n_rel=0.0, \n w0=-1., wa=0., N_nu_mass=0, N_nu_rel = 3.046, mnu=0.0):\n # Set parameter values\n self.Omega_c = Omega_c\n self.Omega_b = Omega_b\n self.Omega_l = Omega_l\n self.h = h\n self.n_s = n_s\n self.A_s = A_s\n self.sigma_8 = sigma_8\n self.Omega_g = Omega_g\n self.Omega_n_mass = Omega_n_mass\n self.Omega_n_rel = Omega_n_rel\n self.w0 = w0\n self.wa = wa\n self.N_nu_mass = N_nu_mass\n self.N_nu_rel = N_nu_rel\n self.mnu = mnu\n self.sigma_8 = sigma_8\n \n # Set density parameters according to consistency relations\n self.Omega_m = self.Omega_c + self.Omega_b + self.Omega_n_mass\n self.Omega_k = 1. 
- ( self.Omega_m + self.Omega_l \n + self.Omega_g + self.Omega_n_rel )", "title": "" }, { "docid": "bc4743d680f30dab73d7a5e7a95ac6a3", "score": "0.4993503", "text": "def run_model(self, kwargs):\r\n \r\n loc = kwargs.pop(\"lookup shortage loc\")\r\n speed = kwargs.pop(\"lookup shortage speed\")\r\n kwargs['shortage price effect lookup'] = [self.f(x/10, speed, loc) for x in range(0,100)]\r\n \r\n speed = kwargs.pop(\"lookup price substitute speed\")\r\n begin = kwargs.pop(\"lookup price substitute begin\")\r\n end = kwargs.pop(\"lookup price substitute end\")\r\n kwargs['relative price substitute lookup'] = [self.priceSubstite(x, speed, begin, end) for x in range(0,100, 10)]\r\n \r\n scale = kwargs.pop(\"lookup returns to scale speed\")\r\n speed = kwargs.pop(\"lookup returns to scale scale\")\r\n kwargs['returns to scale lookup'] = [self.returnsToScale(x, speed, scale) for x in range(0, 101, 10)]\r\n \r\n scale = kwargs.pop(\"lookup approximated learning speed\")\r\n speed = kwargs.pop(\"lookup approximated learning scale\")\r\n start = kwargs.pop(\"lookup approximated learning start\")\r\n kwargs['approximated learning effect lookup'] = [self.approxLearning(x, speed, scale, start) for x in range(0, 101, 10)] \r\n \r\n super(ScarcityModel, self).run_model(kwargs)", "title": "" }, { "docid": "6af6ffd1cf00f5021f8de2ef98d8f58e", "score": "0.49905622", "text": "def main(args):\n print('Reading galaxy catalog')\n halocat_galcat_merged = pd.read_hdf('../'+args.catalog_to_use,\\\n key='halocat_galcat_merged')\n print('Plotting SMHM')\n plot_SMHM(halocat_galcat_merged,args.host_halo_mass_to_plot,\\\n args.populate_mock_key)", "title": "" }, { "docid": "75315723e31accaeefa82eab6f1de578", "score": "0.498815", "text": "def _configure(self):\n Component._configure(self)\n pylith.perf.Material.Material.__init__(self, self.inventory.label, self.inventory.size)\n return", "title": "" }, { "docid": "c67776bc83722ed0d7c8fcef236986c7", "score": "0.49846137", "text": "def model():\n pass", "title": "" }, { "docid": "b1240b372e0e828f142843a9972882e1", "score": "0.49844307", "text": "def __init__(self, name, parent=None):\n\t\t\n\t\tself.name = name\n\t\tconfig_data = courses_config[self.name]\n\t\t\n\t\tself.orgEntity = config_data['orgEntity']\n\t\tself.orgKey = config_data['orgKey']\n\t\t\n\t\tTopicRecord.dowrites = 1\n\t\tChapterRecord.dowrites = 1\n\t\t\n\t\tBackPackModelObject.__init__ (self, parent)\n\t\t\t\n\t\t# curriculum children are Unit instances\n\t\tself.curriculum = Curriculum (self.name)", "title": "" }, { "docid": "8a56536094ca1303e2f6d5ae779e4ddc", "score": "0.4982808", "text": "def set_model_logic(start_serial, stop_serial, df):\n\n time_step_in_days = 2.0\n commodity = \"Rice\"\n\n model = Model(\n starttime=start_serial,\n stoptime=stop_serial,\n dt=1.0 * time_step_in_days,\n )\n\n # INITIALISE VARIABLES\n\n # DISPLACEMENT\n hp_pop = model.constant(\"Host Population\")\n idp_pop, idp_pop_iv = create_model_stock(model, \"IDP Population\")\n aap_pop, aap_pop_iv = create_model_stock(model, \"Affected Area Population\")\n disp_rate = model.flow(\"Displacement Rate\")\n ret_rate = model.flow(\"Return Rate\")\n\n t_disp = model.constant(\"Displacement Time\")\n t_ret = model.constant(\"Return Time\")\n danger = model.converter(\"Perceived Danger\")\n\n\n # FARMER\n # physical\n f_stock, f_stock_iv = create_model_stock(model, \"Farmer Stock\")\n f_prod = model.flow(\"Farmer Production\")\n f2t_vol = model.flow(\"Farmer to Trader Volume\")\n f_supply = model.converter(\"Farmer Supply\")\n 
f2t_demand = model.converter(\"Farmer to Trader Demand\")\n f2t_leadtime = model.constant(\"Farmer to Trader Leadtime\")\n f_loss = model.flow(\"Farmer Loss Rate\")\n t_f_loss = model.constant(\"Farmer Loss Time\")\n # price\n baseline_price = model.constant(\"Baseline Price\")\n f_ds_ratio = model.converter(\"Farmer Demand-to-Supply Ratio\")\n f_price = model.converter(\"Farmer Price\")\n # cash\n t2f_cashflow = model.flow(\"Trader to Farmer Cashflow\")\n\n # TRADER\n # physical\n t_stock, t_stock_iv = create_model_stock(model, \"Trader Stock\")\n t2w_vol = model.flow(\"Trader to Wholesaler Volume\")\n t_supply = model.converter(\"Trader Supply\")\n t2w_leadtime = model.constant(\"Trader to Wholesaler Leadtime\")\n # price\n t_ds_ratio = model.converter(\"Trader Demand-to-Supply Ratio\")\n t_price = model.converter(\"Trader Price\")\n # cash\n t_cash, t_cash_iv = create_model_stock(model, \"Trader Cash\")\n w2t_cashflow = model.flow(\"Wholesaler to Trader Cashflow\")\n\n wholesaler = SupplyChainActor(\n model,\n \"Wholesaler\",\n upstream_actor=\"Trader\",\n downstream_actor=\"Retailer\"\n )\n\n retailer = SupplyChainActor(\n model,\n \"Retailer\",\n upstream_actor=\"Wholesaler\",\n downstream_actor=\"Consumer\"\n )\n\n retailer.connect_to_upstream(wholesaler)\n\n # CONSUMER\n hp_income_baseline = model.constant(\"Host Population Income Baseline\")\n idp_income_baseline = model.constant(\"IDP Income Baseline\")\n c_needs = model.constant(f\"{commodity} Needs Per Capita\")\n c_max_income_frac = model.constant(f\"Maximum Fraction of Income Spent on {commodity}\")\n hp_income = model.converter(\"Host Population Income\")\n idp_income = model.converter(\"IDP Income\")\n hp_pc_demand = model.converter(\"HP Per Capita Demand\")\n idp_pc_demand = model.converter(\"IDP Per Capita Demand\")\n hp_demand = model.converter(\"HP Demand\")\n idp_demand = model.converter(\"IDP Demand\")\n c_demand = model.converter(\"Retailer to Consumer Demand\")\n\n # DATA\n data_prod_usda = create_model_data_variable(model, df, \"Production (USDA)\")\n data_deaths_ucdp = create_model_data_variable(model, df, \"Deaths (UCDP)\")\n data_ip_iom = create_model_data_variable(model, df, \"IDP Population (IOM)\")\n data_ruralinflation_nbs = create_model_data_variable(model, df, \"Rural Inflation\")\n\n # CONNECT STOCKS AND FLOWS\n\n # displacement\n idp_pop.equation = disp_rate - ret_rate\n aap_pop.equation = ret_rate - disp_rate\n\n # physical\n f_stock.equation = f_prod - f2t_vol - f_loss\n t_stock.equation = f2t_vol - t2w_vol\n\n # cash\n t_cash.equation = w2t_cashflow - t2f_cashflow\n\n # DEFINE THE REST\n\n # displacement\n danger.equation = smooth_model_variable(model, data_deaths_ucdp, 30.0, 0.0) / 90.0\n disp_rate.equation = aap_pop * danger / t_disp\n ret_rate.equation = idp_pop * (1.0 - danger) / t_ret\n\n # farmer\n f_prod.equation = data_prod_usda\n f2t_vol.equation = sd.min(f2t_demand, f_supply)\n f_ds_ratio.equation = f2t_demand / f_supply\n f_price.equation = baseline_price\n f2t_demand.equation = wholesaler.demand()\n f_supply.equation = f_stock / f2t_leadtime\n f_loss.equation = f_stock / t_f_loss\n\n # trader\n t2w_vol.equation = sd.min(wholesaler.demand(), t_supply)\n t_ds_ratio.equation = wholesaler.demand() / t_supply\n t_price.equation = f_price\n t_supply.equation = t_stock / t2w_leadtime\n t2f_cashflow.equation = f_price * f2t_vol\n w2t_cashflow.equation = t_price * t2w_vol\n\n\n # consumer\n hp_income.equation = hp_income_baseline\n idp_income.equation = idp_income_baseline\n hp_pc_demand.equation = 
sd.min(c_needs, hp_income * c_max_income_frac / wholesaler.price())\n hp_demand.equation = hp_pc_demand * hp_pop\n idp_pc_demand.equation = sd.min(c_needs, idp_income * c_max_income_frac / wholesaler.price())\n idp_demand.equation = idp_pc_demand * idp_pop\n c_demand.equation = hp_demand + idp_demand\n\n retailer.demand().equation = c_demand\n\n # CONSTANTS\n\n # initial values\n idp_pop_iv.equation = 1500000.0\n aap_pop_iv.equation = 1900000.0\n f_stock_iv.equation = 100000.0\n t_stock_iv.equation = 100000.0\n t_cash_iv.equation = 0.0\n\n wholesaler.stock.initial_value = 100000.0\n wholesaler.cash.initial_value = 0.0\n retailer.stock.initial_value = 100000.0\n retailer.cash.initial_value = 0.0\n\n # time constants\n t_disp.equation = 30.0\n t_ret.equation = 6.0 * 30.0\n t_f_loss.equation = 7.0\n\n # leadtimes\n f2t_leadtime.equation = 1.0\n t2w_leadtime.equation = 1.0\n wholesaler.leadtime.equation = 1.0\n retailer.leadtime.equation = 1.0\n\n # others\n baseline_price.equation = 200.0\n\n # consumer\n hp_pop.equation = 1000000.0\n c_max_income_frac.equation = 0.5\n c_needs.equation = 10.0 / 30.0\n hp_income_baseline.equation = 4000.0 / 30.0\n idp_income_baseline.equation = 2000.0 / 30.0\n\n retailer.connect_eggs(wholesaler)\n\n return model", "title": "" }, { "docid": "c06bb52057148e139bbda0742193149d", "score": "0.4982488", "text": "def __init__(self, species, qty):\n super(DomesticMelonOrder, self).__init__(species, qty)\n self.tax = 0.08", "title": "" }, { "docid": "04e0bbf703ec278076d432ada05de292", "score": "0.49807692", "text": "def __init__(self, mu, sigma, corr, rf, risk_aversion=None, short_sell=True):\n\n # Asseert data indexes and organize\n self._assert_indexes(mu, sigma, corr)\n\n # Save inputs as attributes\n self.mu = mu\n self.sigma = sigma\n self.corr = corr\n self.rf = rf\n self.risk_aversion = risk_aversion\n self.short_selling = short_sell\n\n # Compute atributes\n self.n_assets = self._n_assets()\n self.cov = self._get_cov_matrix()\n\n # Get the optimal risky porfolio\n self.mu_p, self.sigma_p, self.risky_weights, self.sharpe_p = self._get_optimal_risky_portfolio()\n\n # get the minimal variance portfolio\n self.mu_mv, self.sigma_mv, self.mv_weights, self.sharpe_mv = self._get_minimal_variance_portfolio()\n\n # Get the investor's portfolio and build the complete set of weights\n self.weight_p, self.complete_weights, self.mu_c, self.sigma_c, self.certain_equivalent \\\n = self._investor_allocation()", "title": "" }, { "docid": "51f23f93c80d32888dadf1994a0b0f31", "score": "0.49710152", "text": "def __init__(self,\n size: Size,\n stuffing: Stuffing = Stuffing.POLYESTER_Fiberfill,\n fabric: Fabric = Fabric.ACRYLIC,\n has_glow: bool = True,\n name: str = \"Dancing Skeleton\",\n description: str = \"Actually this skeleton is not terrible, it is cute as coco.\",\n product_id: str = \"\"\n ) -> None:\n self._check_input(size, stuffing, fabric, has_glow, name, description, product_id)\n if len(product_id) == 0:\n product_id = \"S%04dD\" % DancingSkeleton._generate_id\n self._increment_id()\n self._has_glow = has_glow\n super().__init__(name, description, product_id, size, stuffing, fabric)", "title": "" }, { "docid": "d7691b30843657f3a37af9a89b211ecb", "score": "0.49695474", "text": "def main():\n env = bootstrap('../development.ini')\n settings = env['registry'].settings\n location_file_dir = settings.location_file_dir\n pearl_harbor_dir = os.path.join(location_file_dir, 'pearl_harbor')\n pearl_harbor_data = os.path.join(pearl_harbor_dir, 'data')\n location_file = 
os.path.join(pearl_harbor_dir, 'location.json')\n\n if os.path.exists(location_file):\n message = 'File already exists:\\n %s\\nRemove? (y/n) ' % location_file\n if raw_input(message).lower() == 'y':\n os.unlink(location_file)\n else:\n print 'Cancelled.'\n return\n\n model = settings.Model.create()\n\n # Map file path is relative to package root\n map_file = os.path.join('location_files', 'pearl_harbor', 'data',\n 'pearl_harbor.bna')\n model.add_bna_map(map_file, {\n 'refloat_halflife': 1 * 3600, # seconds\n })\n\n start_time = datetime(2013, 1, 1, 1, 0)\n model.time_step = 900\n model.start_time = start_time\n model.duration = timedelta(days=1)\n model.uncertain = False\n\n # adding a wind mover\n\n series = np.zeros((3,), dtype=gnome.basic_types.datetime_value_2d)\n series[0] = (start_time, ( 4, 180) )\n series[1] = (start_time+timedelta(hours=12), ( 2, 270) )\n series[2] = (start_time+timedelta(hours=24), ( 4, 180) )\n \n wind = WebWind(timeseries=series, units='knots')\n model.environment += wind\n w_mover = WebWindMover(wind)\n model.movers += w_mover\n\n # adding a random mover\n random_mover = WebRandomMover(diffusion_coef=10000)\n model.movers += random_mover\n\n # adding a grid current mover:\n\n curr_file=os.path.join( pearl_harbor_data, \"ch3d2013.nc\")\n topology_file=os.path.join( pearl_harbor_data, \"PearlHarborTop.dat\")\n model.movers += gnome.movers.GridCurrentMover(curr_file,topology_file)\n\n\n # adding a spill\n\n spill = WebPointSourceRelease(num_elements=1000,\n start_position=(\n -157.97064, 21.331524, 0.0),\n release_time=start_time)\n\n model.spills += spill\n\n serialized_model = ModelSchema().bind().serialize(model.to_dict())\n model_json = json.dumps(serialized_model, default=util.json_encoder,\n indent=4)\n\n with open(location_file, 'wb') as f:\n f.write(model_json)\n\n env['closer']()", "title": "" }, { "docid": "745e24caf916ae6f0eb397731c90d1d7", "score": "0.4967229", "text": "def initCatalog():\n # catalog es utilizado para interactuar con el modelo\n catalog = model.newCatalog()\n return catalog", "title": "" }, { "docid": "52f7e9357ca7a239a92e68d1390c5df2", "score": "0.49666688", "text": "def doParametersOfInterest(self):\n\t\t# --- POI and other parameters ----\n\t\t\n\t\tmaxmix = {\n\t\t\t\"a_tilde\" : 1.0,\n\t\t\t\"b_tilde\" : 1.0,\n\t\t}\n\t\t\n\t\tself.modelBuilder.doVar(\"muF[1.0,0.0,5.0]\")\n\t\tself.modelBuilder.doVar(\"muV[1.0,0.0,5.0]\")\n\t\tself.modelBuilder.doVar(\"cpmixing[0.0,0.0,1.0]\") # CP mixing angle in units of pi/2\n\t\t\n\t\tself.modelBuilder.factory_('expr::cosalpha(\"cos(@0*{pi}/2)\", cpmixing)'.format(pi=math.pi))\n\t\tself.modelBuilder.factory_('expr::sinalpha(\"sin(@0*{pi}/2)\", cpmixing)'.format(pi=math.pi))\n\t\t\n\t\tself.modelBuilder.factory_('expr::a(\"@0\", cosalpha)')\n\t\tself.modelBuilder.factory_('expr::b(\"@0\", sinalpha)')\n\t\t\n\t\tself.modelBuilder.factory_('expr::sm_scaling(\"@0*@0-@0*@1*{a_tilde}/{b_tilde}\", a, b)'.format(**maxmix))\n\t\tself.modelBuilder.factory_('expr::ps_scaling(\"@1*@1-@0*@1*{b_tilde}/{a_tilde}\", a, b)'.format(**maxmix))\n\t\tself.modelBuilder.factory_('expr::mm_scaling(\"@0*@1/({a_tilde}*{b_tilde})\", a, b)'.format(**maxmix))\n\t\t\n\t\tfor production in [\"muF\", \"muV\"]:\n\t\t\tfor decay in 
[\"muF\"]:\n\t\t\t\tself.modelBuilder.factory_('expr::{production}_{decay}(\"@0*@1\", {production}, {decay})'.format(\n\t\t\t\t\t\tproduction=production, decay=decay)\n\t\t\t\t)\n\t\t\t\tfor cp in [\"sm_scaling\", \"ps_scaling\", \"mm_scaling\"]:\n\t\t\t\t\tself.modelBuilder.factory_('expr::{production}_{decay}_{cp}(\"@0*@1*@2\", {production}, {decay}, {cp})'.format(\n\t\t\t\t\t\t\tproduction=production, decay=decay, cp=cp)\n\t\t\t\t\t)\n\t\t\n\t\tself.modelBuilder.doSet(\"POI\", \"muF,muV,cpmixing\")", "title": "" }, { "docid": "1c431bf8b97b271aab0bd250a8981500", "score": "0.49630284", "text": "def __init__(self,submodels):\n self.submodels = submodels\n self.obstypes = [obstype for submodel in submodels\n for obstype in submodel.obstypes]\n gauges = list()\n for submodel in submodels:\n gauges.extend(\n gauge for gauge in submodel.gauges if gauge not in gauges\n )\n super().__init__(gauges)", "title": "" }, { "docid": "7321a20d59302f62a6127f10cc6c27af", "score": "0.4953475", "text": "def SBML_setSpecies(self):\n if self._debug:\n print('SBML_setSpecies')\n reagList = self.model_obj.__species__ + self.model_obj.__fixed_species__\n for reagent in range(len(reagList)):\n s = self.sbml_model.createSpecies()\n s.setId(reagList[reagent])\n s.setName(reagList[reagent])\n s.setCompartment(self.model_compartment_name)\n if reagList[reagent] in self.model_obj.__fixed_species__:\n s.setBoundaryCondition(True)\n s.setConstant(True)\n else:\n s.setBoundaryCondition(False)\n\n if reagent < len(self.model_obj.__species__):\n reagName = reagList[reagent] + '_init'\n else:\n reagName = reagList[reagent]\n\n if self.sbml_level == 1:\n s.setInitialAmount(getattr(self.model_obj, reagName))\n else:\n s.setInitialConcentration(getattr(self.model_obj, reagName))", "title": "" }, { "docid": "413cc3068285fa798ccedccfa13621eb", "score": "0.49522227", "text": "def simple_model(neuron_instance):\n\n h, rxd, data, save_path = neuron_instance\n dend = h.Section(name='dend')\n dend.diam = 2\n dend.nseg = 5 \n dend.L = 5\n ecs = rxd.Extracellular(-10, -10, -10, 10, 10, 10, dx=3)\n cyt = rxd.Region(dend, name='cyt', nrn_region='i')\n k = rxd.Species([cyt, ecs], name='k', d=1, charge=1,\n initial=lambda nd: 140 if nd.region == cyt else 3)\n decay = rxd.Rate(k, -0.1*k)\n model = (dend, cyt, ecs, k, decay)\n yield (neuron_instance, model)", "title": "" }, { "docid": "824acd1b90426fb7a92c88542e1c33e0", "score": "0.49509418", "text": "def generate_instance(self):\n config = self._generate_config()\n manager_stop = self._generate_manager()\n\n output_file = open(self.output_file,'w')\n text = \"VEHICLE\"\n output_file.write(text + \"\\n\")\n text = \"NUMBER CAPACTIY\"\n output_file.write(text + \"\\n\")\n output_file.write(\"150 \"+ str(config.capacity_cst) + \"\\n\")\n\n\n # config.dump_to_file_capacity(output_file)\n manager_stop.dump_to_file(output_file)", "title": "" }, { "docid": "bf87f8a6bc4c0c1ba05b2913fe859fc2", "score": "0.49496803", "text": "def _construct_model(self, model):", "title": "" }, { "docid": "f3a4b69a019519f5a7e93ad5996a7d49", "score": "0.49493036", "text": "def __init__(self, config):\n self.config = config # md control\n # self.model = model # dynamic variable & ff parameters\n return", "title": "" }, { "docid": "7e11a39ffce7792f58a49a943eefbf13", "score": "0.4947336", "text": "def SBML_buildBasicModel(\n self,\n mod,\n filename,\n slvl=2,\n dir=None,\n substance=(1, 0),\n volume=(1, 0),\n time=(1, 0),\n arules=None,\n notes=None,\n ):\n self.SBML_createModel(mod, filename, slvl, dir)\n 
self.SBML_setCompartment()\n self.SBML_setNotes(txt=notes)\n self.SBML_setUnits(substance=substance, volume=volume, time=time)\n if arules != None:\n self.SBML_setAssignmentRules(arules)\n self.SBML_setSpecies()\n self.SBML_setReactions()\n self.SBML_setModel()", "title": "" }, { "docid": "8fd4bb2e043b5c55db8595d6334db530", "score": "0.49463186", "text": "def gen_ssmodel(self):\n\t\tprint \"generating full neural model\"\n\n\t\t#Generate field meshgrid\n simulation_field_space_x,simulation_field_space_y=pb.meshgrid(self.simulation_space_x_y,self.simulation_space_x_y)\n\n\n K=0\n for i in range(len(self.kernel.Psi)):\n K+=self.kernel.weights[i]*self.kernel.Psi[i](simulation_field_space_x,simulation_field_space_y)\n \n self.K=K\n\n\n\t\t#calculate field disturbance covariance matrix and its Cholesky decomposition\n self.Sigma_e_c=sp.linalg.cholesky(self.Sigma_e,lower=1) \n\n #calculate Cholesky decomposition of observation noise covariance matrix\n Sigma_varepsilon_c=sp.linalg.cholesky(self.Sigma_varepsilon,lower=1)\n self.Sigma_varepsilon_c=Sigma_varepsilon_c\n\n #Calculate sensors at each spatial locations, it's not the same as C in the IDE model\t\n t0=time.time()\n gamma_space=pb.array(zip(simulation_field_space_x.flatten(),simulation_field_space_y.flatten()))\n N1,D1 = gamma_space.shape\n sensor_space=self.obs_locns\n N2,D2 = sensor_space.shape\n diff = sensor_space.reshape(N2,1,D2) - gamma_space.reshape(1,N1,D1)\n C=np.exp(-np.sum(np.square(diff),-1)*(1./self.sensor_kernel.width))\n self.C=C", "title": "" } ]
e3c73bcf5a8acca28636ed972e9037de
Set up the game and initialize the variables.
[ { "docid": "5763ce967360b1ff9e7daaca59da4982", "score": "0.66307664", "text": "def setup(self):\n\n # Sprite lists\n self.player_list = arcade.SpriteList()\n self.wall_list = arcade.SpriteList(use_spatial_hash=True,\n spatial_hash_cell_size=128)\n self.enemy_list = arcade.SpriteList()\n\n # Set up the player\n resource = \":resources:images/animated_characters/\" \\\n \"female_person/femalePerson_idle.png\"\n self.player = arcade.Sprite(resource, scale=SPRITE_SCALING)\n self.player.center_x = SPRITE_SIZE * 5\n self.player.center_y = SPRITE_SIZE * 1\n self.player_list.append(self.player)\n\n # Set enemies\n resource = \":resources:images/animated_characters/zombie/zombie_idle.png\"\n enemy = arcade.Sprite(resource, scale=SPRITE_SCALING)\n enemy.center_x = SPRITE_SIZE * 4\n enemy.center_y = SPRITE_SIZE * 7\n self.enemy_list.append(enemy)\n\n spacing = SPRITE_SIZE * 3\n for column in range(10):\n for row in range(15):\n sprite = arcade.Sprite(\":resources:images/tiles/grassCenter.png\",\n scale=SPRITE_SCALING)\n\n x = (column + 1) * spacing\n y = (row + 1) * sprite.height\n\n sprite.center_x = x\n sprite.center_y = y\n if random.randrange(100) > 30:\n self.wall_list.append(sprite)\n\n self.physics_engine = arcade.PhysicsEngineSimple(self.player,\n self.wall_list)\n\n # --- Path related\n # This variable holds the travel-path. We keep it as an attribute so\n # we can calculate it in on_update, and draw it in on_draw.\n self.path = None\n # Grid size for calculations. The smaller the grid, the longer the time\n # for calculations. Make sure the grid aligns with the sprite wall grid,\n # or some openings might be missed.\n grid_size = SPRITE_SIZE\n\n # Calculate the playing field size. We can't generate paths outside of\n # this.\n playing_field_left_boundary = -SPRITE_SIZE * 2\n playing_field_right_boundary = SPRITE_SIZE * 35\n playing_field_top_boundary = SPRITE_SIZE * 17\n playing_field_bottom_boundary = -SPRITE_SIZE * 2\n\n # This calculates a list of barriers. By calculating it here in the\n # init, we are assuming this list does not change. In this example,\n # our walls don't move, so that is ok. If we want moving barriers (such as\n # moving platforms or enemies) we need to recalculate. This can be an\n # time-intensive process depending on the playing field size and grid\n # resolution.\n\n # Note: If the enemy sprites are the same size, we only need to calculate\n # one of these. We do NOT need a different one for each enemy. The sprite\n # is just used for a size calculation.\n self.barrier_list = arcade.AStarBarrierList(enemy,\n self.wall_list,\n grid_size,\n playing_field_left_boundary,\n playing_field_right_boundary,\n playing_field_bottom_boundary,\n playing_field_top_boundary)", "title": "" } ]
[ { "docid": "3956c41832a0e4cb1cf11a8add517eef", "score": "0.8164389", "text": "def setup(self):\n # Create your sprites and sprite lists here\n self.game: Game = Game(SCREEN_WIDTH, SCREEN_HEIGHT, TILE_SIZE, 1, grid_layers = 4)\n self.game.game_message = \"Lead the Rabbit home\"\n\n # show the menu so that we see the instructions\n self.game.menu.button_list[0].text = \"Start\"\n self.game.menu.is_visible = True", "title": "" }, { "docid": "8773700bfd9a95656c265c6bd313d6e9", "score": "0.77574474", "text": "def setup_game(self):", "title": "" }, { "docid": "d6f2e0104a864f573e1ebcfb83e821ec", "score": "0.7394462", "text": "def __init__(self):\n\n self.frameCount = 0\n self._initScreen()\n self._initObjects()\n self._initControls()\n self._initLevel()\n self._start()\n print \"DEBUG: Initializing Game\"\n pass", "title": "" }, { "docid": "332678f4bd4fda854a458ca93a320cbd", "score": "0.7383125", "text": "def game_setup(self):\n self.deck = Shoe(6)\n self.initial_draw()\n self.pot = ask_for_bet(self.player.money)\n show_table(self.player, self.dealer, self.pot)\n self.surrender_and_insurance()", "title": "" }, { "docid": "f2042be52d22e6802ebadd2ec82edd98", "score": "0.73794293", "text": "def setup(self):\n # Set up the player\n self.player_sprite = arcade.Sprite(\"Sprites/Jugador/Jugador.jpg\", SPRITE_SCALING)\n self.player_sprite.center_x = 100\n self.player_sprite.center_y = 100\n self.player_list = arcade.SpriteList()\n self.player_list.append(self.player_sprite)\n\n # Listado de habitaciones\n self.rooms = []\n self.rooms.append(setup_pueblo())\n\n #Contador de habitación\n self.current_room = 0\n\n #Fisicas\n self.physics_engine = arcade.PhysicsEngineSimple(self.player_sprite, self.rooms[self.current_room].wall_list)", "title": "" }, { "docid": "0e17cf1d9db4f5bc13da1d42d12d6088", "score": "0.7377979", "text": "def init(cls):\n\n cls.configs = yaml.load( file('../local/config.yaml') )\n cls.is_online = False\n cls.state = State.playing\n cls.classes = classes\n cls.guiclasses = guiclasses\n\n # set up pygame and init\n pygame.init()\n\n # Set up the window\n cls.screen = pygame.display.set_mode(\n tuple(cls.configs['options']['resolution']),\n 0,\n 32)\n classes.screen = cls.screen\n guiclasses.screen = cls.screen", "title": "" }, { "docid": "4351bfac5d4a145db8bf589197643b31", "score": "0.7341448", "text": "def setup_new_game(self):\r\n self._player = Player()\r\n self._stats = GameStats(self._bb_settings)\r\n self._scoreboard = Scoreboard(self._bb_settings, self._screen)", "title": "" }, { "docid": "651a0727ec3ed20eb715acef2a022e23", "score": "0.7279792", "text": "def game_initialize():\n\tglobal SURFACE_MAIN, SURFACE_MAP, PLAYER, FOV_CALCULATE, CLOCK, ASSETS, CAMERA, PREFERENCES\n\n\tpygame.init()\n\tpygame.key.set_repeat(555, 85)\t\n\n\tlibtcod.namegen_parse(\"data\\\\namegen\\\\jice_celtic.cfg\")\n\tlibtcod.namegen_parse(\"data\\\\namegen\\\\jice_fantasy.cfg\")\n\tlibtcod.namegen_parse(\"data\\\\namegen\\\\jice_mesopotamian.cfg\")\n\tlibtcod.namegen_parse(\"data\\\\namegen\\\\jice_norse.cfg\")\n\tlibtcod.namegen_parse(\"data\\\\namegen\\\\jice_region.cfg\")\n\tlibtcod.namegen_parse(\"data\\\\namegen\\\\jice_town.cfg\")\n\tlibtcod.namegen_parse(\"data\\\\namegen\\\\mingos_demon.cfg\")\n\tlibtcod.namegen_parse(\"data\\\\namegen\\\\mingos_dwarf.cfg\")\n\tlibtcod.namegen_parse(\"data\\\\namegen\\\\mingos_norse.cfg\")\n\tlibtcod.namegen_parse(\"data\\\\namegen\\\\mingos_standard.cfg\")\n\tlibtcod.namegen_parse(\"data\\\\namegen\\\\mingos_town.cfg\")\n\n\tCLOCK = 
pygame.time.Clock()\n\n\tCAMERA = obj_Camera()\n\n\tSURFACE_MAIN = pygame.display.set_mode((CAMERA.width, CAMERA.height))\n\n\tSURFACE_MAP = pygame.Surface((constants.GAME_WIDTH, constants.GAME_HEIGHT))\n\n\tFOV_CALCULATE = True\n\ttry:\n\t\tload_preferences()\n\t\tprint(\"Preferences Loaded\")\n\texcept:\n\t\tPREFERENCES = struc_Preferences()\n\t\tprint(\"Preferences generated\")\n\tASSETS = struc_Assets()", "title": "" }, { "docid": "76c8e437aac97895bfa712a9afbeab57", "score": "0.7257215", "text": "def setUp(self):\n self.game = BuildGame()\n self.effects = []", "title": "" }, { "docid": "d068bc4b98bded3fdd08601cee6eab4a", "score": "0.72419715", "text": "def setup(self):\n header_print(self.data['intro'])\n header_print(self.data['help'])\n random.shuffle(self.data['draw'])\n random.shuffle(self.data['locations'])\n random.shuffle(self.data['events'])\n random.shuffle(self.data['aces'])\n random.shuffle(self.data['personalities'])\n self.stats = {\n 'round': 0,\n 'powers': {\n 'MOONS': 6,\n 'SUNS': 6,\n 'WAVES': 6,\n 'LEAVES': 6,\n 'WYRMS': 6,\n 'KNOTS': 6,\n },\n 'hand': self.data['draw'][:],\n 'discard': [],\n 'active': [],\n 'opponent': {},\n }", "title": "" }, { "docid": "0af3e58ec8db639028b0b0b4a90f0a3b", "score": "0.7219072", "text": "def setup(self):\n self.score = 0\n self.lives = 3\n self.state = GameStates.RUNNING\n self.focus_word = None\n \n self.star_list = set()\n self.word_list = set()\n\n for _ in range(5):\n self.create_word()\n for _ in range(25):\n self.create_star()", "title": "" }, { "docid": "02cd43c71124297797ff0517d062266d", "score": "0.7186087", "text": "def start(self):\n self.__init__()\n self.set_n_players()\n self.init_players()\n self.init_territory_selection_phase()\n self.init_troop_deployment_phase()\n # self.game_phase()", "title": "" }, { "docid": "35d9007e5308beb4ca63688f8c359bac", "score": "0.7121074", "text": "def setup(self):\n setup = RandomWordGenerator().get()\n self.formatted_word = ConvertWord().convert_to_dict(setup)\n self.underscore_word = HangmanUnderscoreDiagram(\n setup).create_hidden_word()\n self.failed_guesses = 0\n print(\"Hello\")\n self.has_won = False\n self.start_game(True)", "title": "" }, { "docid": "0205d8ab0974420fb0840c87b900bc03", "score": "0.708886", "text": "def initialize(self):\n result = pygame.init()\n pygame.font.init()\n pygame.display.set_caption('gomoku TDD')\n self.screen = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT))\n self.clock = pygame.time.Clock()\n self.smallfont = pygame.font.Font(None, 40)\n self.isinitialized = True", "title": "" }, { "docid": "d94eba530b8e96bd031c4417b4c40147", "score": "0.7068139", "text": "def setUp(self):\n self.game = TTTBoard(3)", "title": "" }, { "docid": "94b4f8d583dd1a15725be374e8aa4bc2", "score": "0.7007462", "text": "def initializeGame(self):\n # Fill deck with cards and shuffle it\n self.deck.fill(104)\n self.deck.shuffle()\n #print \"Deck initialized\"\n\n # Initialize the field\n self.field.initialize(self.deck.draw(4))\n self.field.sortField()\n #self.field.printField()\n\n # Set players to initial state again\n # Distribute cards and set bulls to 0\n for p in self.players:\n p.bulls = 0\n p.setHand(self.deck.draw(10))", "title": "" }, { "docid": "69eadfadb2a57d6cc305543291044957", "score": "0.6989646", "text": "def start_game(self):\n self.board = Board(num_tableaus=self.tableau_qty, num_decks=self.decks, deal_3=self.deal_3)\n self.board.init_move_dict()\n self.board.deal(self.deck)\n\n if self.api_use:\n self.init_game_api()\n elif self.commandline:\n 
self.init_cl_game()\n else:\n self.init_pygame()", "title": "" }, { "docid": "0656598321e57f2db8785ef86f3fc429", "score": "0.698927", "text": "def setup(self):\n\n # Sprite lists\n self.all_sprite_list = arcade.SpriteList()\n self.player_list = arcade.SpriteList()\n self.coin_list = arcade.SpriteList()\n self.deathclaw_list = arcade.SpriteList()\n # Score\n self.score = 0\n\n # Set up the player\n # Character image from https://www.pngkit.com/\n self.player_sprite = arcade.Sprite(\"Vault_Boy.png\", SPRITE_SCALING_PLAYER)\n self.player_sprite.center_x = 50\n self.player_sprite.center_y = 50\n self.player_list.append(self.player_sprite)\n\n # Create the ENEMY\n for i in range(DEATHCLAW_COUNT):\n\n # Create the ENEMY instance\n # ENEMY image from https://fallout.fandom.com/\n deathclaw = Deathclaw(\"Deathclaw.png\", SPRITE_SCALING_DEATHCLAW)\n\n # Position the coin\n deathclaw.center_x = random.randrange(SCREEN_WIDTH)\n deathclaw.center_y = random.randrange(SCREEN_HEIGHT)\n\n # Add the coin to the lists\n self.deathclaw_list.append(deathclaw)\n\n for i in range(COIN_COUNT):\n\n # Create the Nuka Cola instance\n # Nuka Cola image fromh https://www.cleanpng.com/free/nuka-cola.html\n coin = Coin(\"Nuka_Kola.png\", SPRITE_SCALING_COIN)\n\n # Position the coin\n coin.center_x = random.randrange(SCREEN_WIDTH)\n coin.center_y = random.randrange(SCREEN_HEIGHT)\n\n # Add the coin to the lists\n self.coin_list.append(coin)", "title": "" }, { "docid": "e04bb8a5634f4f1d0d968486f9081e13", "score": "0.69834745", "text": "def setup(self):\n \n self.explosions_list = arcade.SpriteList()\n\n self.explosion_texture_list = []\n \n # Set up the score\n self.score = 0\n self.countdown = 1000\n\n for i in range(EXPLOSION_TEXTURE_COUNT):\n # Files from http://www.explosiongenerator.com are numbered sequentially.\n # This code loads all of the explosion0000.png to explosion0270.png files\n # that are part of this explosion.\n texture_name = f\"images/explosion/explosion{i:04d}.png\"\n\n self.explosion_texture_list.append(arcade.load_texture(texture_name))\n \n # create 10 balls\n for i in range(10):\n myball = make_ball()\n self.ball_list.append(myball)", "title": "" }, { "docid": "e1de58c24db8cf359ff89674c5639168", "score": "0.696734", "text": "def init_pygame(self):\n # Startup the pygame system\n pygame.init()\n # Create our window\n self.screen = pygame.display.set_mode((Settings.width, Settings.height))\n # Set the title that will display at the top of the window.\n pygame.display.set_caption(self.title)\n # Create the clock\n self.clock = pygame.time.Clock()\n self.last_checked_time = pygame.time.get_ticks()\n # Startup the joystick system\n pygame.joystick.init()\n # For each joystick we find, initialize the stick\n for i in range(pygame.joystick.get_count()):\n pygame.joystick.Joystick(i).init()\n # Set the repeat delay for key presses\n pygame.key.set_repeat(Settings.key_repeat)\n # Create statistics font\n self.statistics_font = pygame.font.Font(None,30)", "title": "" }, { "docid": "4d39ffb75325ecf9b736925a6d1cecce", "score": "0.6965951", "text": "def start(self):\n self.player = Player()\n self.dealer = Dealer()\n self.pot = 0\n self.side_bet = 0\n start_game()", "title": "" }, { "docid": "71ab411855b63608cbcfd60862bbb126", "score": "0.6867193", "text": "def game_initialize():\n global SURFACE_MAIN #global vars will be all caps\n # init pygame\n pygame.init()\n SURFACE_MAIN = pygame.display.set_mode((constants.GAME_WIDTH, constants.GAME_HEIGHT))\n constants.initialize_sprites()", "title": "" }, { "docid": 
"104e2185bf3bf543b423903e9d3262ea", "score": "0.68361783", "text": "def initGameState(self):\n print(\"Setting game state: \")\n self.playGUI = GUI()\n self.playGUI.drawBoard(self.player)", "title": "" }, { "docid": "2020c3959217704cebf8ac35292d4b49", "score": "0.683391", "text": "def on_init(self):\n pygame.init()\n self.background.load_from_file()\n self.hero.load_from_file()\n self.enemy.load_from_file()\n\n # Some music and sound fx\n # frequency, size, channels, buffersize\n # pygame.mixer.pre_init(44100, 16, 2, 4096)\n self.effect = pygame.mixer.Sound('sounds/bounce.wav')\n pygame.mixer.music.load('sounds/music.wav')\n pygame.mixer.music.play(-1)\n\n self.hero.screen = self.background.screen\n self.enemy.screen = self.background.screen\n self.clock = pygame.time.Clock()\n pygame.display.set_caption(\n 'Angry Floating Guy! World: {} (w to change world, arrows to move, Esc to quit).'.format(self.current_world.name))\n\n self._running = True", "title": "" }, { "docid": "ec7a0c81368b69922d526b87e816b3df", "score": "0.6814734", "text": "def setup(self):\n\n self.characters = arcade.SpriteList()\n self.dungeon_sprites = arcade.SpriteList(\n use_spatial_hash=True, spatial_hash_cell_size=16\n )\n\n self.player = Item(ord(\"@\"), arcade.csscolor.WHITE)\n self.player.x = 0\n self.player.y = 0\n self.characters.append(self.player)\n\n # Size of the map\n map_width = MAP_WIDTH\n map_height = MAP_HEIGHT\n\n # Some variables for the rooms in the map\n room_max_size = 10\n room_min_size = 6\n max_rooms = 30\n\n self.game_map = GameMap(map_width, map_height)\n self.game_map.make_map(\n max_rooms, room_min_size, room_max_size, map_width, map_height, self.player\n )\n\n # Draw all the tiles in the game map\n for y in range(self.game_map.height):\n for x in range(self.game_map.width):\n wall = self.game_map.tiles[x][y].block_sight\n sprite = Item(WALL_CHAR, arcade.csscolor.BLACK)\n if wall:\n sprite.block_sight = True\n else:\n sprite.block_sight = False\n\n sprite.x = x\n sprite.y = y\n\n self.dungeon_sprites.append(sprite)\n\n recalculate_fov(\n self.player.x, self.player.y, FOV_RADIUS, self.dungeon_sprites\n )", "title": "" }, { "docid": "898d1bdd3fd199dc5ee5cb07f15df2bf", "score": "0.68145746", "text": "def __init__(self):\n\n super().__init__()\n self.setup_janggi_game()\n self._game_state = 'UNFINISHED'\n self._player_turn = 'BLUE'", "title": "" }, { "docid": "abf5f97e061b958647ca2e9096b94844", "score": "0.6753492", "text": "def setUp(self):\n self.gameBoard = Grid((100, 100), Cell)", "title": "" }, { "docid": "dc927efa1f803392f953c6e4801797ce", "score": "0.67371064", "text": "def __init__(self):\n\t\tpygame.init()\n\t\tself.settings = Settings()\n\n\t\tself.screen = pygame.display.set_mode((0, 0), pygame.FULLSCREEN)\n\t\tself.settings.screen_width = self.screen.get_rect().width \n\t\tself.settings.screen_height = self.screen.get_rect().height\n\t\tpygame.display.set_caption(\"Pigeon Drop!\")\n\n\t\t# Create an instance to store game statistics,\n\t\t# and create a scoreboard.\n\t\tself.stats = GameStats(self)\n\t\tself.sb = Scoreboard(self)\n\n\t\tself.pigeon = Pigeon(self)\n\t\tself.droppings = pygame.sprite.Group()\n\t\tself.autos = pygame.sprite.Group()\n\n\t\tself._create_fleet()\n\n\t\t# Make the Play button.\n\t\tself.play_button = Button(self, \"Play\")", "title": "" }, { "docid": "54f700774ec5b085ce661e7fa454dbab", "score": "0.6727859", "text": "def init(seed=None):\n\tglobal _game\n\n\tfrom .game import Game\n\tfrom .prompt import install_words\n\n\t_game = 
Game(seed)\n\tload_advent_dat(_game)\n\tinstall_words(_game)\n\t_game.start()\n\treturn _game", "title": "" }, { "docid": "0a0650d977c78fc25ed12ad48615e281", "score": "0.6699672", "text": "def main():\n g = Game(800, 600)\n g.start()", "title": "" }, { "docid": "d2d7491d84d90efe4f414673fed4354b", "score": "0.66952485", "text": "def setup(self):\n # inicializamos el juego\n\n # Sprite lists\n self.player_list = arcade.SpriteList() # sera lista de personajes\n self.coin_list = arcade.SpriteList() # sera lista de monedas\n self.bullet_list = arcade.SpriteList() # lista de disparos\n\n # Set up the player\n self.score = 0\n\n # Image from kenney.nl\n # cargamos el sprite del jugador\n self.player_sprite = arcade.Sprite(\"character.png\", SPRITE_SCALING_PLAYER)\n # establecemos el inicio de posicion de nuestro jugador\n self.player_sprite.center_x = 50\n self.player_sprite.center_y = 70\n # lo agregamos a la lista de nuestros jugadores\n self.player_list.append(self.player_sprite)\n\n # Create the coins\n for i in range(COIN_COUNT):\n\n # Create the coin instance\n # Coin image from kenney.nl\n # cargamos las monedas\n coin = arcade.Sprite(\"coin_01.png\", SPRITE_SCALING_COIN)\n\n # Position the coin\n coin.center_x = random.randrange(SCREEN_WIDTH)\n coin.center_y = random.randrange(120, SCREEN_HEIGHT)\n\n # Add the coin to the lists\n # lo agregamos a la lista\n self.coin_list.append(coin)\n\n # Set the background color\n # esto aun nose para que sirve\n arcade.set_background_color(arcade.color.AMAZON)", "title": "" }, { "docid": "f2e38a2f48c517dc1d5ff4d49036be37", "score": "0.6681422", "text": "def init(self):\n\n pygame.init()\n pygame.joystick.init()\n self.controller = pygame.joystick.Joystick(0)\n self.controller.init()\n self.x=0\n self.y=0", "title": "" }, { "docid": "1d99aa2f8e52683283d3e9f9fd5507ee", "score": "0.6677753", "text": "def setup(self):\n\n # Sprite lists\n self.player_list = arcade.SpriteList()\n\n # https://opengameart.org/content/animated-top-down-survivor-player\n # Set up the player\n self.player_sprite = Player(\"survivor-idle_rifle_0.png\", 0.5)\n self.player_sprite.center_x = SCREEN_WIDTH / 2\n self.player_sprite.center_y = SCREEN_HEIGHT / 2\n self.player_list.append(self.player_sprite)\n self.wall_list = arcade.SpriteList()\n self.chest_list = arcade.SpriteList()\n self.bullet_list = arcade.SpriteList()\n\n self.score = 0\n\n\n # Set up the player\n # https://opengameart.org/content/animated-top-down-survivor-player\n\n\n\n # -- Set up several columns of walls\n for x in range(-700, 1700, 100):\n # sprite form https://opengameart.org/content/bush-png\n wall = arcade.Sprite(\"bush_11.png\", SPRITE_SCALING)\n wall.center_x = x\n wall.center_y = -300\n self.wall_list.append(wall)\n for x in range(-700, 1700, 100):\n # sprite form https://opengameart.org/content/bush-png\n wall = arcade.Sprite(\"bush_11.png\", SPRITE_SCALING)\n wall.center_x = x\n wall.center_y = 1025\n self.wall_list.append(wall)\n for y in range(-300, 1025, 100):\n # sprite form https://opengameart.org/content/bush-png\n wall = arcade.Sprite(\"bush_11.png\", SPRITE_SCALING)\n wall.center_x = -700\n wall.center_y = y\n self.wall_list.append(wall)\n for y in range(-300, 1025, 100):\n # sprite form https://opengameart.org/content/bush-png\n wall = arcade.Sprite(\"bush_11.png\", SPRITE_SCALING)\n wall.center_x = 1700\n wall.center_y = y\n self.wall_list.append(wall)\n\n\n# https://www.pinterest.com/pin/258042253625289337\n\n self.physics_engine = arcade.PhysicsEngineSimple(self.player_sprite, 
self.wall_list)\n\n # Set the background color\n arcade.set_background_color(arcade.color.BRITISH_RACING_GREEN)\n\n # Set the viewport boundaries\n # These numbers set where we have 'scrolled' to.\n self.view_left = 0\n self.view_bottom = 0", "title": "" }, { "docid": "875729fd86f2b97dc69f1f05aa7f388e", "score": "0.6669637", "text": "def setup(self):\n\n # Used to keep track of our scrolling\n self.view_bottom = 0\n self.view_left = 0\n\n # Keep track of the score\n self.score = 0\n\n # Create the Sprite lists\n self.player_list = arcade.SpriteList()\n self.wall_list = arcade.SpriteList()\n self.coin_list = arcade.SpriteList()\n\n # Set up the player, specifically placing it at these coordinates.\n # image_source = \":resources:images/animated_characters/female_adventurer/femaleAdventurer_idle.png\"\n self.player_list = arcade.SpriteList()\n self.player_sprite = Player()\n self.player_sprite.center_x = 256\n self.player_sprite.center_y = 256\n self.player_list.append(self.player_sprite)\n\n # --- Load in a map from the tiled editor ---\n\n # Name of map file to load\n map_name = r\"Math_Game\\floor_is_lava.tmx\"\n # Name of the layer in the file that has our platforms/walls\n platforms_layer_name = 'Platforms'\n\n # Read in the tiled map\n my_map = arcade.tilemap.read_tmx(map_name)\n\n # -- Platforms\n self.wall_list = arcade.tilemap.process_layer(map_object=my_map,\n layer_name='Platforms',\n base_directory=r'C:\\Users\\katel\\Desktop\\CSE310\\group_project\\Math_Game\\platformer-art-complete-pack-0\\Base pack\\Tiles',\n scaling=TILE_SCALING,\n use_spatial_hash=True, hit_box_algorithm=\"Simple\", hit_box_detail=4.5)\n\n # --- Other stuff\n # Set the background color\n if my_map.background_color:\n arcade.set_background_color(my_map.background_color)\n\n # Create the 'physics engine'\n self.physics_engine = arcade.PhysicsEnginePlatformer(self.player_sprite,\n self.wall_list,\n GRAVITY)", "title": "" }, { "docid": "37d5681d11fce16f71e1ce356acb3cf6", "score": "0.66560024", "text": "def setUp(self):\r\n self.spaceship = SpaceShipGame()\r\n self.spaceship.init()", "title": "" }, { "docid": "efcb7a4e0a72df48cbe98c4c50737659", "score": "0.66408247", "text": "def setup_play(self, reset=False):\n\n if reset:\n for sprite in self.all_sprites:\n sprite.kill()\n else:\n self.score = 0\n self.lives = START_LIVES\n self.game_state = InGameState.READY\n\n self.all_sprites = pg.sprite.Group()\n self.walls = pg.sprite.Group()\n self.blocks = pg.sprite.Group()\n self.diamonds = pg.sprite.Group()\n self.moving_blocks = pg.sprite.Group()\n self.enemies = pg.sprite.Group()\n self.stunned_enemies = pg.sprite.Group()\n\n # level = Level(path.join(level_dir, '1.txt'))\n level = Level(path.join(level_dir, 'c64_level1.txt'))\n level.load_level(self)\n LOGGER.debug(f\"No. enemies: {len(self.enemies)}, No. 
blocks: {len(self.blocks)}\")\n\n self.make_boundary_wall(level.grid_height, level.grid_width)\n\n self.timer = TIME_LIMIT\n pg.time.set_timer(TIMER, 1000)\n\n self.target_no_kills = 5\n self.kill_bonus = None\n self.diamond_bonus = None", "title": "" }, { "docid": "1a81ce9b838cf1027c1e428c457c8d39", "score": "0.66376364", "text": "def start_game(self):\n\n\t\tpass", "title": "" }, { "docid": "e90dcd04e4a9ae933f7f04288699505b", "score": "0.66170704", "text": "def setup(self):\n\n self.total_time = 0.0\n\n self.background = arcade.load_texture(\"images\\\\background-1_0 (1).png\")\n\n # Create the Sprite lists\n self.all_sprites_list = arcade.SpriteList()\n self.player_list = arcade.SpriteList()\n self.wall_list = arcade.SpriteList()\n self.coin_list = arcade.SpriteList()\n self.myobject_list = arcade.SpriteList()\n\n # Set up the player\n self.gameover = 0\n self.score = 0\n self.lives = 4\n self.collision_time = 0\n self.numobj = STARTING_OBJECTS_COUNT\n self.ncoins = COIN_COUNT\n self.player_sprite = VehicleSprite(\"images\\\\bugatti.png\",\n CHARACTER_SCALING)\n self.player_sprite.angle = 90\n # self.player_sprite.change_y = 1\n self.all_sprites_list.append(self.player_sprite)\n\n self.create_buddies()\n self.create_treasure()\n\n # Make the mouse disappear when it is over the window.\n # So we just see our object, not the pointer.\n\n # Set the background color\n arcade.set_background_color(arcade.color.ASH_GREY)\n\n # Set up the player, specifically placing it at these coordinates.\n # self.player_sprite = arcade.Sprite(\"images\\\\carcar.png\", CHARACTER_SCALING)\n # self.player_sprite.center_x = 500\n # self.player_sprite.center_y = 110\n # self.player_sprite.angle = 90\n # self.player_sprite.change_y = 1\n # self.player_list.append(self.player_sprite)\n\n # Create the 'physics engine'\n self.physics_engine = arcade.PhysicsEngineSimple(self.player_sprite,\n self.wall_list)\n\n # Set the viewport boundaries\n # These numbers set where we have 'scrolled' to.\n self.view_left = 0\n self.view_bottom = 0\n\n # For draw\n self.line_start = 0", "title": "" }, { "docid": "acc8caa3e51a5309bc3495e712b74873", "score": "0.6608156", "text": "def initialize(self) -> None:\n self.simulation = self.initialize_simulation()\n width, height = get_window_resolution()\n display_dim = ((0, width), (0, height))\n self.coord_mapper = CoordinateMapper2D(*self.simulation.dim, *display_dim)\n self.simple_pygame.all_sprites.empty()\n self.initialize_visualization()", "title": "" }, { "docid": "85a06e74924e42e92edb802abd61a330", "score": "0.6591549", "text": "def __init__(self):\n self.screen_width = 1200\n self.screen_height = 800\n self.bg_color = (0, 230, 0)\n\n # Glove Settings\n self.glove_move_speed = 0.25\n self.glove_size = 100\n\n # Ball Settings\n self.ball_move_speed = 0.25\n self.ball_size = 40", "title": "" }, { "docid": "44be389a1c2b1a58e0dd04eec2e6916d", "score": "0.65776414", "text": "def init(self):\n\n pygame.init()\n pygame.display.set_mode((640, 480))\n pygame.display.set_caption(\"Gears 4 Geeks\")\n pygame.joystick.init()\n self.controller = pygame.joystick.Joystick(0)\n self.controller.init()\n self.ser = serial.Serial('COM4', 9600)\n\n #ADAFRUIT_IO_KEY = 'd1a1bd3737714fa488e0364c775a4b4d' ##This will only be good until the end of the competition\n #self.aio = Client(ADAFRUIT_IO_KEY)", "title": "" }, { "docid": "7ce9b9e23e307284880325001a9655d9", "score": "0.65706956", "text": "def setupStage(self):\n print(\"(\" + str(self.HOST) + \", \" + str(self.PORT) +\"):: Initiating setup stage\", 
file=self.logs)\n\n # Holds player map and name information here. Will be used to create objects later.\n mapVotes = []\n playerNames = {}\n colors = [\"red\", \"blue\"]\n\n gameState = {\n \"ready\": False,\n \"game\": None \n }\n while True:\n # Continuously saves logging information to a text file:\n self.logs.close()\n self.logs = open(str(self.filepath)+\"/_logs/\"+ str(self.PORT) + \".txt\", \"a+\")\n\n # Gets all the events from the game window. A.k.a., do stuff here.\n inboundData = self.socket.recvfrom(1024) # Gets bundle of data from clients\n data = inboundData[0] # Separates data from address\n address = inboundData[1]\n\n # Keeps track of how often the server recieves information from each client.\n updatedTime = time.time() \n self.clientUpdateTimes[str(address)] = updatedTime\n\n ########\n self.bitsIn += sys.getsizeof(data)\n\n address = inboundData[1] # Separates address from data\n data = pickle.loads(data) # Unpickles data back into a python dict\n\n command = data['command']\n if command != None: \n # Takes in information from both players\n if command == \"SUBMIT\":\n pName = data['playerName']\n mVote = data['mapVote']\n\n mapVotes.append(mVote)\n playerNames[str(address)] = pName\n \n # Both votes are in. Chooses a map, builds the Board object.\n if len(mapVotes) == 2:\n # Only chooses one map for both players\n if self.map == None:\n mapTuple = random.choice(mapVotes)\n size = mapTuple[0]\n m = mapTuple[1]\n\n if size == \"SMALL\":\n randomMap = MapGenerator((5,7), m)\n self.map = randomMap.getMap()\n mapString = self.map\n width = randomMap.getDimensions()[0]\n height = randomMap.getDimensions()[1]\n tokens = randomMap.getTokens()\n if size == \"MEDIUM\":\n randomMap = MapGenerator((7,9), m)\n self.map = randomMap.getMap()\n mapString = self.map\n width = randomMap.getDimensions()[0]\n height = randomMap.getDimensions()[1]\n tokens = randomMap.getTokens()\n if size == \"BIG\":\n randomMap = MapGenerator((10,12), m)\n self.map = randomMap.getMap()\n mapString = self.map\n width = randomMap.getDimensions()[0]\n height = randomMap.getDimensions()[1]\n tokens = randomMap.getTokens()\n if size == \"HUGE\":\n randomMap = MapGenerator((12,15), m)\n self.map = randomMap.getMap()\n mapString = self.map\n width = randomMap.getDimensions()[0]\n height = randomMap.getDimensions()[1]\n tokens = randomMap.getTokens()\n if size == \"RANDOM\":\n randWidth = random.randint(5, 13)\n randHeight = random.randint(5, 13)\n\n randomMap = MapGenerator((randWidth,randHeight), m)\n self.map = randomMap.getMap()\n mapString = self.map\n width = randomMap.getDimensions()[0]\n height = randomMap.getDimensions()[1]\n tokens = randomMap.getTokens()\n\n # Builds the game board\n self.board = Board(width, height, mapString)\n\n # Both players' names have been entered, creates Player objects.\\\n # Appends player objects to state variable. 
\n if len(playerNames) == 2 and len(colors) > 0:\n p = Player(playerNames[str(address)], colors.pop(), None, tokens, address)\n self.players.append(p)\n \n # Player objects and Board object have both been created.\n # Builds the Game object, stores it, then tells the PlayerViews its ready.\n if len(self.players) == 2 and self.board != None:\n self.game = Game(self.board, self.players[0], self.players[1])\n gameState['game'] = self.game\n gameState['ready'] = True\n\n # Sends data to both players simultaneously\n for client in self.clients:\n outboundData = pickle.dumps(gameState)\n self.socket.sendto(outboundData, client)\n break\n\n # Packages up data and sends it back to the client\n outboundData = pickle.dumps(gameState)\n\n ######\n self.bitsOut += sys.getsizeof(outboundData)\n\n self.socket.sendto(outboundData, address)\n \n # Check client connections here\n self.checkClientConnections(time.time())", "title": "" }, { "docid": "7d474528e9d8f517b7d17680b348b736", "score": "0.6566866", "text": "def init_game_setting(self):\r\n pass", "title": "" }, { "docid": "cb3365d5538ce4bc1d236a6ecb0d00bb", "score": "0.6558296", "text": "def __init__(self):\n\n # Window starting position\n x = 200\n y = 30\n os.environ[\"SDL_VIDEO_WINDOW_POS\"] = \"%d,%d\" % (x, y)\n\n pygame.init()\n # Init window\n self.window = Window()\n # Flag that defines if the program is running or not\n self.running = True\n if Settings.MENU_ENABLED:\n self.main_menu = MainMenu(self.window)\n self.main_loop()", "title": "" }, { "docid": "aadeb2e51bb6bfdabe9f8fac4cbc537b", "score": "0.65457934", "text": "def setup(self):\n self.total_time = 0.0\n self.timer_text = None\n arcade.set_background_color(arcade.color.WHITE)", "title": "" }, { "docid": "1e0eeb84e21a232497764c6f98e71fb5", "score": "0.65436804", "text": "def main():\n field = Field(10, 10)\n snake = Snake((0, 0))\n game = Game(field, snake)\n game.start()", "title": "" }, { "docid": "6707c71df18641862ff91d54774889f8", "score": "0.65426224", "text": "def setup(self):\n build_world.start_level(self)", "title": "" }, { "docid": "041dab956c4e42b43e04780650c6ab9e", "score": "0.65425974", "text": "def initialize():\n\n global PLAYER # this means we use the global var PLAYER and cannot have a local var named PLAYER\n global LEVEL_COUNTER\n\n LEVEL_COUNTER = 1\n \n coordinates = generate_coords()\n\n PLAYER = Stark()\n tree = Tree()\n ww = WhiteWalker()\n crown = Crown()\n gray_gem = GrayGem()\n clear_board()\n GAME_BOARD.create(\"Snow\",\"Snow\")\n GAME_BOARD.draw_msg(\"Level \" + str(LEVEL_COUNTER) + \". 
Winter is coming.\")\n generate_level(coordinates, [PLAYER, ww, gray_gem, crown, tree, tree, gray_gem, tree, tree, gray_gem, tree])\n\n # for i in range(0,NUM_ELTS):\n # place_on_board(elts[i], coordinates[i][0], coordinates[i][1])", "title": "" }, { "docid": "b5fdd02410aefe9f7cf96519fba74bfb", "score": "0.65379816", "text": "def setupNewGame(self):\r\n self.level = 1\r\n self.num_cows = 2\r\n self.num_farmers = 1\r\n self.levelHeading = Text(self.gameDisplay, 120, 425, 175, self.light_orange, \"Farm 1\")\r\n self.shield_indicator.image = self.greenShield\r\n updatedHeading = self.levelHeading\r\n self.startUX[0] = updatedHeading", "title": "" }, { "docid": "946ac1509799c927602adfa52c72feca", "score": "0.6533264", "text": "def init_game(self):\n self.blind_manager = BlindManager(hands_per_level=10,\n bots=self.living_bot_names())", "title": "" }, { "docid": "5b3654c2b20257aa82090e6ac00e6aa7", "score": "0.65258765", "text": "def load_game(self):\n game = Game(self.w, self.h, self.screen)\n game.run()", "title": "" }, { "docid": "a43e92b3418d0a3671e2a1cf01bd2b56", "score": "0.65238756", "text": "def init_vars(self):\n\n load_dotenv()\n self.smart_cube = True if os.environ.get(\"SMART_CUBE\") == \"True\" else False\n self.gen_parsed_to_cubedb = True if os.environ.get(\"GEN_PARSED_TO_CUBEDB\") == \"True\" else False\n self.name_of_solve = os.environ.get(\"NAME_OF_SOLVE\")\n self.time_solve = os.environ.get(\"TIME_SOLVE\")\n self.comms_unparsed_bool = True if os.environ.get(\"COMMS_UNPARSED\") == \"True\" else False\n self.gen_with_move_count = True if os.environ.get(\"GEN_WITH_MOVE_COUNT\") == \"True\" else False\n self.diff_to_solved_state = float(os.environ.get(\"DIFF_BETWEEN_ALGS\"))\n self.parse_to_lp = True if os.environ.get(\"PARSE_TO_LETTER_PAIR\") == \"True\" else False\n self.gen_with_moves = True if os.environ.get(\"GEN_WITH_MOVE_COUNT\") == \"True\" else False\n self.buffer_ed = self.get_buffer_ed(os.environ.get(\"EDGES_BUFFER\"))\n self.buffer_cor = self.get_buffer_cor(os.environ.get(\"CORNER_BUFFER\"))\n self.path_to_lp = os.environ.get(\"PATH_LETTER_PAIR_FILE\")\n self.dict_lp = self.load_letter_pairs_dict()", "title": "" }, { "docid": "0fd5e3a34e920d83be22012d9feeb6dd", "score": "0.6522762", "text": "def setUp(self):\r\n self.caption = \"mirra extending classes\" # window name\r\n self.size = 640, 480 #window size\r\n self.pos = 100,100 # window top left location\r\n self.fullScreen = 0 # if fullScreen is on it will overwrite your pos and size to match the display's resolution\r\n self.frameRate = 15 # set refresh framerate\r", "title": "" }, { "docid": "b911432ad77007e82f7b5bbcd8836058", "score": "0.65204126", "text": "def __init__(self):\n pygame.init()\n\n self.settings = Settings()\n\n self.screen = pygame.display.set_mode(\n (self.settings.screen_width, self.settings.screen_height))\n pygame.display.set_caption(\"52 Card Trick\")\n self.CardSet = CardSet(self)", "title": "" }, { "docid": "e4c7509560856646276227cd3c6a4074", "score": "0.6516524", "text": "def setUp(self):\n\n self.board = Board(3, 3)", "title": "" }, { "docid": "89cf360e139cc7bdba8d124c0a7ad4d8", "score": "0.6516285", "text": "def initialize_game_params(self):\r\n\r\n self.is_new_game = True\r\n self.is_game_over = False\r\n self.is_game_lost = False\r\n self.is_left_mouse_down = False\r\n self.is_right_mouse_down = False\r\n self.num_of_hidden_non_mines_tiles = self.rows * self.cols - self.num_of_mines", "title": "" }, { "docid": "3ea76d55bd43e9ab984101561951f2a6", "score": "0.6511197", "text": "def 
setUp(self):\n self.player = ship.Player(\n constants.PLAYER_START_PLACE,\n constants.PLAYER_WIDTH,\n constants.PLAYER_HEIGHT,\n constants.PLAYER_IMG,\n constants.PLAYER_HEALTH\n )\n\n self.alien = ship.Alien(\n [320, 300],\n 30,\n 30,\n constants.GREEN_ALIEN_IMG,\n 1\n )\n\n self.alien.shooting([320, 300], 5, False)\n\n self.player.shooting([self.player.position[0] + 3, self.player.position[1]], 1, True)", "title": "" }, { "docid": "332b88663fe7d53af8e80737950739d7", "score": "0.6507396", "text": "def __init__(self):\n\t\t# Screen setting.\n\t\tself.screen_width = 1200\n\t\tself.screen_height = 800\n\t\tself.bg_color = (230, 230, 230)\t\n\n\t\t#Ship setting\n\t\tself.ship_speed_factor = 10\t\t\n\t\tself.ship_limit = 3\t\t\t# number ship \n\n\t\t# Bullet setting.\n\t\tself.bullet_speed_factor = 3\n\t\tself.bullet_width = 3\n\t\tself.bullet_height = 15\n\t\tself.bullet_color = (60,60,60) #dark gray bullet\n\t\tself.bullets_allowed = 6\t\t# number bullet in screen\n\n\t\t#Alien setting.\n\t\tself.alien_speed_factor = 3\n\t\tself.fleet_drop_speed = 50\n\t\t# fleet_direction of 1 represents right; -1 represents left. \n\t\tself.fleet_direction = 1\n\n\t\t# Scoring\n\t\tself.alien_points = 50\n\n\t\t# How quickly the game speed ups\n\t\tself.speedup_scale = 1.1\n\t\tself.iniitialize_dynamic_settings()\n\t\t# How quickly score increase.\n\t\tself.score_scale = 1.5", "title": "" }, { "docid": "4a0a37f90385dd89f2994154b5d24944", "score": "0.6501459", "text": "def startup(self):\n self.prev_gray = None\n self.frame_idx = 1\n self.tracks = []\n self.fps = []\n self.vid_info = None\n self.track_new_points_count = 0", "title": "" }, { "docid": "7c9c06745971f04f7a5a6fc62e584161", "score": "0.64820516", "text": "def __init__(self):\n self.__grid = create_grid(\n Settings.SIZE_X, Settings.SIZE_Y, MarkerType.NONE)\n\n self.__turn = 0\n self.__state = GameState.PLAYING\n self.__winner = MarkerType.NONE\n self.__loser = MarkerType.NONE\n\n # Separate counter for turns, because __turn depends on starting player\n self.__turns_played = 0", "title": "" }, { "docid": "d9ba9411b9f12679a8bc536d94912571", "score": "0.64732724", "text": "def setup(self):\n self.ui_manager.purge_ui_elements()\n\n global text_prompt\n global fails\n\n # Text elements\n self.ui_manager.add_ui_element(arcade.gui.UILabel(\n \"\"\"You need to crack the terminal passcode.\\n\nEnter \"\"\" + str(global_n) + \"\"\" unique digits and try to guess the code!\"\"\",\n center_x=self.left_column_x * 2,\n center_y=self.y_slot * 3,\n id=\"fcg\"\n\n ))\n\n self.ui_manager.add_ui_element(arcade.gui.UILabel(\n text_prompt,\n center_x=self.left_column_x * 2,\n center_y=self.y_slot * 2.25,\n id=\"mathGame_UI2\"\n\n ))\n\n self.ui_manager.add_ui_element(arcade.gui.UILabel(\n \"Tries left: \" + str(20 - fails),\n center_x=self.left_column_x * 2,\n center_y=self.y_slot / 1.7,\n id=\"Tries\"\n\n ))\n\n # Input Box\n ui_input_box = arcade.gui.UIInputBox(\n center_x=self.left_column_x * 2,\n center_y=self.y_slot * 1.5,\n width=300,\n id=\"InputBox\"\n )\n ui_input_box.text = ''\n ui_input_box.cursor_index = len(ui_input_box.text)\n self.ui_manager.add_ui_element(ui_input_box)\n\n # Buttons\n button = EnterCodeButton(\n center_x=self.left_column_x * 2,\n center_y=self.y_slot,\n input_box=ui_input_box,\n minigame=self,\n ui_manager=self.ui_manager,\n n=global_n,\n choice_list=chosen_list\n )\n self.ui_manager.add_ui_element(button)\n\n button = ExitButton(\n center_x=self.right_column_x * 1.25,\n center_y=self.y_slot * 3.7,\n minigame=self\n )\n 
self.ui_manager.add_ui_element(button)", "title": "" }, { "docid": "6aa916be2fd937c6cb716d3025adc3dd", "score": "0.64697623", "text": "def setup(self):\n # Initialize the drawing environment (create main windows, etc)\n glutInit(sys.argv)\n glutInitDisplayMode(GLUT_SINGLE | GLUT_RGB | GLUT_DEPTH)\n glutInitWindowSize(WINDOW_WIDTH, WINDOW_HEIGHT)\n glutCreateWindow(name)\n\n glShadeModel(GL_SMOOTH)\n\n glClearDepth(1.0)\n glDepthFunc(GL_LESS) # The Type Of Depth Test To Do\n glEnable(GL_DEPTH_TEST) # Enables Depth Testing\n glShadeModel(GL_SMOOTH) # Enables Smooth Color Shading\n\n glMatrixMode(GL_PROJECTION)\n glLoadIdentity() # Reset The Projection Matrix\n\n # Calculate The Aspect Ratio Of The Window\n gluPerspective(45.0, float(WINDOW_WIDTH)/float(WINDOW_HEIGHT), 0.1, 100.0)\n\n glMatrixMode(GL_MODELVIEW)\n\n # Set up keyboard listeners.\n glutKeyboardFunc(self.on_key)", "title": "" }, { "docid": "70b23db89e61198c0f3c476b2d7764c4", "score": "0.6467842", "text": "def setUp(self):\n self.player = Player()", "title": "" }, { "docid": "4c39ca734f8b91d1aee2b393a5df598a", "score": "0.6457494", "text": "def setup_members(self):\n ### cell\n self.cell_size = 8\n self.cell_row = 80\n self.cell_col = 100\n self.color_alive = \"black\"\n self.color_dead = \"white\"\n\n ### world\n self.init_modes = {} # read modes from json file\n self.init_world = {} # begining status\n self.world = {} # world's map\n # current status of world\n self.world_status = GOL(self.cell_row, self.cell_col)\n self.world_setable = True\n self.world_alive = False\n\n # widgets\n self.toolbar_height = 40\n self.world_size = [self.cell_size * self.cell_row,\n self.cell_size * self.cell_col]\n self.window_size = self.world_size\n self.window_size[0] += self.toolbar_height\n\n # resource\n self.saver_icon = \"save.gif\"\n self.run_icon = \"run.gif\"\n self.pause_icon = \"pause.gif\"\n self.stop_icon = \"stop.gif\"\n self.modes_file = \"gol.json\"\n self.modes_names = []", "title": "" }, { "docid": "84c5d5de24868699578c5df0dd92a9e5", "score": "0.6449371", "text": "def setup(self, level):\r\n\r\n # Used to keep track of our scrolling\r\n self.view_bottom = 0\r\n self.view_left = 0\r\n\r\n # Keep track of the score\r\n self.score = 0\r\n\r\n # Keep track of lives\r\n # self.lives = 5\r\n\r\n # Create the Sprite lists\r\n self.player_list = arcade.SpriteList()\r\n self.foreground_list = arcade.SpriteList()\r\n self.background_list = arcade.SpriteList()\r\n self.wall_list = arcade.SpriteList()\r\n self.coin_list = arcade.SpriteList()\r\n\r\n # Set up the player, specifically placing it at these coordinates.\r\n image_source = \"images/Alice/Alice7_front.png\"\r\n self.player_sprite = arcade.Sprite(image_source, CHARACTER_SCALING)\r\n self.player_sprite.center_x = PLAYER_START_X\r\n self.player_sprite.center_y = PLAYER_START_Y\r\n self.player_list.append(self.player_sprite)\r\n\r\n # --- Load in a map from the tiled editor ---\r\n\r\n # Name of the layer in the file that has our platforms/walls\r\n platforms_layer_name = 'Platforms'\r\n moving_platforms_layer_name = 'Moving Platforms'\r\n # Name of the layer that has items for pick-up\r\n coins_layer_name = 'Coins'\r\n # Name of the layer that has items for foreground\r\n foreground_layer_name = 'Foreground'\r\n # Name of the layer that has items for background\r\n background_layer_name = 'Background'\r\n # Name of the layer that has items we shouldn't touch\r\n dont_touch_layer_name = \"Don't Touch\"\r\n\r\n # Map name\r\n map_name = f\"map4_level_{level}.tmx\"\r\n\r\n # Read in the 
tiled map\r\n my_map = arcade.tilemap.read_tmx(map_name)\r\n\r\n # Calculate the right edge of the my_map in pixels\r\n self.end_of_map = my_map.map_size.width * GRID_PIXEL_SIZE\r\n\r\n # -- Background\r\n self.background_list = arcade.tilemap.process_layer(my_map,\r\n background_layer_name,\r\n TILE_SCALING)\r\n\r\n # -- Foreground\r\n self.foreground_list = arcade.tilemap.process_layer(my_map,\r\n foreground_layer_name,\r\n TILE_SCALING)\r\n\r\n # -- Platforms\r\n self.wall_list = arcade.tilemap.process_layer(map_object=my_map,\r\n layer_name=platforms_layer_name,\r\n scaling=TILE_SCALING,\r\n use_spatial_hash=True)\r\n # -- Moving Platforms\r\n moving_platforms_list = arcade.tilemap.process_layer(my_map, moving_platforms_layer_name, TILE_SCALING)\r\n for sprite in moving_platforms_list:\r\n self.wall_list.append(sprite)\r\n\r\n # -- Coins\r\n self.coin_list = arcade.tilemap.process_layer(my_map,\r\n coins_layer_name,\r\n TILE_SCALING,\r\n use_spatial_hash=True)\r\n\r\n # -- Don't Touch Layer\r\n self.dont_touch_list = arcade.tilemap.process_layer(my_map,\r\n dont_touch_layer_name,\r\n TILE_SCALING,\r\n use_spatial_hash=True)\r\n\r\n # --- Other stuff\r\n # Set the background color\r\n if my_map.background_color:\r\n arcade.set_background_color(my_map.background_color)\r\n\r\n # Create the 'physics engine'\r\n self.physics_engine = arcade.PhysicsEnginePlatformer(self.player_sprite,\r\n self.wall_list,\r\n GRAVITY)", "title": "" }, { "docid": "2d9816345ac7805f9405790a66da0c26", "score": "0.6441203", "text": "def setup(self):\n\n # Set up the Cameras\n viewport = (0, 0, self.window.width, self.window.height)\n self.camera = arcade.SimpleCamera(viewport=viewport)\n self.gui_camera = arcade.SimpleCamera(viewport=viewport)\n\n # Map name\n map_name = \":resources:tiled_maps/map_with_ladders.json\"\n\n # Layer Specific Options for the Tilemap\n layer_options = {\n LAYER_NAME_PLATFORMS: {\n \"use_spatial_hash\": True,\n },\n LAYER_NAME_MOVING_PLATFORMS: {\n \"use_spatial_hash\": False,\n },\n LAYER_NAME_LADDERS: {\n \"use_spatial_hash\": True,\n },\n LAYER_NAME_COINS: {\n \"use_spatial_hash\": True,\n },\n }\n\n # Load in TileMap\n self.tile_map = arcade.load_tilemap(map_name, TILE_SCALING, layer_options)\n\n # Initiate New Scene with our TileMap, this will automatically add all layers\n # from the map as SpriteLists in the scene in the proper order.\n self.scene = arcade.Scene.from_tilemap(self.tile_map)\n\n # Keep track of the score\n self.score = 0\n\n # Shooting mechanics\n self.can_shoot = True\n self.shoot_timer = 0\n\n # Set up the player, specifically placing it at these coordinates.\n self.player_sprite = PlayerCharacter()\n self.player_sprite.center_x = (\n self.tile_map.tile_width * TILE_SCALING * PLAYER_START_X\n )\n self.player_sprite.center_y = (\n self.tile_map.tile_height * TILE_SCALING * PLAYER_START_Y\n )\n self.scene.add_sprite(LAYER_NAME_PLAYER, self.player_sprite)\n\n # Calculate the right edge of the my_map in pixels\n self.end_of_map = self.tile_map.width * GRID_PIXEL_SIZE\n\n # -- Enemies\n enemies_layer = self.tile_map.object_lists[LAYER_NAME_ENEMIES]\n\n for my_object in enemies_layer:\n cartesian = self.tile_map.get_cartesian(\n my_object.shape[0], my_object.shape[1]\n )\n enemy_type = my_object.properties[\"type\"]\n if enemy_type == \"robot\":\n enemy = RobotEnemy()\n elif enemy_type == \"zombie\":\n enemy = ZombieEnemy()\n enemy.center_x = math.floor(\n cartesian[0] * TILE_SCALING * self.tile_map.tile_width\n )\n enemy.center_y = math.floor(\n (cartesian[1] + 1) * 
(self.tile_map.tile_height * TILE_SCALING)\n )\n if \"boundary_left\" in my_object.properties:\n enemy.boundary_left = my_object.properties[\"boundary_left\"]\n if \"boundary_right\" in my_object.properties:\n enemy.boundary_right = my_object.properties[\"boundary_right\"]\n if \"change_x\" in my_object.properties:\n enemy.change_x = my_object.properties[\"change_x\"]\n self.scene.add_sprite(LAYER_NAME_ENEMIES, enemy)\n\n # Add bullet spritelist to Scene\n self.scene.add_sprite_list(LAYER_NAME_BULLETS)\n\n # --- Other stuff\n # Set the background color\n if self.tile_map.background_color:\n self.window.background_color = self.tile_map.background_color\n\n # Create the 'physics engine'\n self.physics_engine = arcade.PhysicsEnginePlatformer(\n self.player_sprite,\n platforms=self.scene[LAYER_NAME_MOVING_PLATFORMS],\n gravity_constant=GRAVITY,\n ladders=self.scene[LAYER_NAME_LADDERS],\n walls=self.scene[LAYER_NAME_PLATFORMS],\n )", "title": "" }, { "docid": "6834ee80b5a8768280d919c99bf26a5a", "score": "0.6434447", "text": "def setup(self):\n self.player = arcade.Sprite(\"tank.png\", 0.5)\n self.num_coins = 30\n\n # Initialize brick_list as a SpriteList\n # Then use nested for loops to place rows of bricks. \n # (Hint: Outer for loop is y coordinate skipping every 150 pixels\n # Inner for loop is x coordinate skipping every 64 pixels)\n # Append bricks to brick_list\n\n\n\n\n \n # Initialize coin_list as a SpriteList\n self.coin_list = arcade.SpriteList()\n \n # PSEUDOCODE for how to place coins. \n # for each i in range of number of coins \n # create coin Sprite\n # set boolean variable successfully_placed to False\n # while not successfully_placed\n # set center_x and center_y randomly\n # compute collision lists for coin with bricks\n # AND coin with other coins (2 lists)\n # if both lists have 0 length, then we have successfully placed the coin\n # add coin to coin_list\n \n \n \n \n \n \n \n # initialize physics_engine", "title": "" }, { "docid": "22073cb86b548bd7aa8ab34e523b6572", "score": "0.6433896", "text": "def setup(self):\n\n # Create the Sprite lists\n self.sprite_list = arcade.SpriteList()\n\n r = 60\n for x in rand_range(0, 100 * math.pi, scale=math.pi / 5):\n star = arcade.Sprite(\"../../resources/arcade/gold_1.png\")\n star.center_x = SCREEN_WIDTH / 2 + r * math.cos(x)\n star.center_y = SCREEN_HEIGHT / 2 + r * math.sin(x)\n star.seed = scale_generator(x=random() * math.pi, offset=.5, step=.01)\n star.scale = next(star.seed)\n self.sprite_list.append(star)\n r += 3", "title": "" }, { "docid": "ea777f4769348b29ef8c98d8ba2109ac", "score": "0.64236575", "text": "def setUp(self):\r\n self.spaceship = SpaceShipGame()", "title": "" }, { "docid": "ea777f4769348b29ef8c98d8ba2109ac", "score": "0.64236575", "text": "def setUp(self):\r\n self.spaceship = SpaceShipGame()", "title": "" }, { "docid": "ea777f4769348b29ef8c98d8ba2109ac", "score": "0.64236575", "text": "def setUp(self):\r\n self.spaceship = SpaceShipGame()", "title": "" }, { "docid": "ea777f4769348b29ef8c98d8ba2109ac", "score": "0.64236575", "text": "def setUp(self):\r\n self.spaceship = SpaceShipGame()", "title": "" }, { "docid": "ea777f4769348b29ef8c98d8ba2109ac", "score": "0.64236575", "text": "def setUp(self):\r\n self.spaceship = SpaceShipGame()", "title": "" }, { "docid": "fec745c478de3ff327552f7edca647db", "score": "0.6420831", "text": "def setUp(self):\n\n self.sold = Soldier(0, 0)\n self.R = Random(seed)", "title": "" }, { "docid": "b3f578c7fed4d6b351c3595091ed55c1", "score": "0.6419482", "text": "def main():\n g = 
DemoGame(800, 600)\n g.start()", "title": "" }, { "docid": "944ca57bb03c227960b99967b99dfe32", "score": "0.6415772", "text": "def start(self):\n pygame.init()\n self.screen = pygame.display.set_mode((self.width, self.height))\n pygame.display.set_caption(\"PyStroke\")\n self.engines = [GameEngine(self.screen, self.e_e)] # add others here\n self.engine = self.engines[0]\n self.run()", "title": "" }, { "docid": "d59a37df8a7fe9b043bd00f4df69a8f8", "score": "0.64034855", "text": "def __init__(self):\n #Screen settings\n self.screen_width=1200\n self.screen_height=800\n self.bg_color=(230,230,230)\n #ship settings\n self.ship_limit=1\n #bullet settings\n self.bullet_width=300\n self.bullet_height=15\n self.bullet_color=(60,60,60)\n self.bullets_allowed=3\n #Alien settings\n self.fleet_drop_speed = 20\n \n \n #how quickly the game speeds up\n self.speedup_scale=1.1\n #how quickly the point values increase\n self.score_scale=1.5\n \n self.initialize_dynamic_settings()", "title": "" }, { "docid": "5c5aa112a96d3bcd6cf22f2a0996df71", "score": "0.63967246", "text": "def ready(self):\r\n\t\t# Remove attract mode from mode queue - Necessary?\r\n\t\tself.game.modes.remove(self)\r\n\t\t# Initialize game\t\r\n\t\tself.game.start_game()\r\n\t\t# Add the first player\r\n\t\tself.game.add_player()\r\n #self.game.add_player()\r\n\t\t# Start the ball. This includes ejecting a ball from the trough.\r\n\t\tself.game.start_ball()", "title": "" }, { "docid": "15a3fa4330760fb2d31506163e50273f", "score": "0.6395593", "text": "def setup(self):\n # Create your sprites and sprite lists here\n self.wall_list = arcade.SpriteList()\n for x in range(128, SCREEN_WIDTH, 196):\n for y in range(128, SCREEN_HEIGHT, 196):\n wall = arcade.Sprite(\"building.png\",.3)\n wall.center_x = x\n wall.center_y = y\n # wall.angle = 45\n self.wall_list.append(wall)\n self.player_sprite = arcade.Sprite(\"taxi.png\")\n self.player_sprite.center_x = 50\n self.player_sprite.center_y = 50\n self.player_sprite.scale = .2\n self.player_list = arcade.SpriteList()\n self.player_list.append(self.player_sprite)\n\n #Spawns people and makes list\n self.person = arcade.Sprite(\"person.png\")\n self.person.scale = .2\n self.person.center_x = random.randrange(SCREEN_WIDTH)\n self.person.center_y = random.randrange(SCREEN_HEIGHT)\n #Spawns target\n self.target = arcade.Sprite(\"target.png\")\n self.target.scale = .5\n self.target.center_x = random.randrange(60,SCREEN_WIDTH)\n self.target.center_y = random.randrange(60,SCREEN_HEIGHT)\n color_list = [\"BLUE\",\"RED\"]", "title": "" }, { "docid": "6fb594b736454b3248a1646e21447958", "score": "0.63796514", "text": "def initialise(self):\n self.set_up()", "title": "" }, { "docid": "7eef679870f9b9257f328c0e702a2060", "score": "0.6377583", "text": "def init_game(self):\n self.view.carregar_jogadores_possiveis(self._possible_players_list())\n self.view.put_view_in_main_loop()", "title": "" }, { "docid": "6f06bb7f659d9f9536ca96ded727d79a", "score": "0.6373506", "text": "def run(self):\n pygame.init()\n pygame.display.set_caption(\"Genetic Game\")\n self.screen = pygame.display.set_mode((self.SCREEN_W, self.SCREEN_H), 0, 32)\n\n self.ominus_sprites = [OminusSprite(self.screen, o, PLAYERS_COLORS[o.id]) for o in self.model.get_players()]\n for o in self.ominus_sprites:\n self.agent_group.add(o)\n\n self.wall_sprites = [WallSprite(self.screen, w) for w in self.model.get_walls()]\n for w in self.wall_sprites:\n self.terrain_group.add(w)", "title": "" }, { "docid": "59942af24376fb5026cb81340b8a4822", "score": "0.6373452", 
"text": "def __init__(self):\n #screen Settings\n self.screen_width = 1024\n self.screen_height = 768\n self.bg_color = (32, 32, 32)\n\n #rocket settings\n self.rocket_speed = 1\n\n #laser Settings\n self.laser_speed = 1.0\n self.laser_width = 3\n self.laser_height = 15\n self.laser_color = (0, 255, 255)\n self.lasers_allowed = 3", "title": "" }, { "docid": "62f53bd434d32af1c87016a779056339", "score": "0.63714504", "text": "def __init__(self):\n #Screen configuration\n self.screen_width = 1200\n self.screen_height = 680\n self.bg_color = (0,20,50)\n \n #Hero configuration\n #Increase of ship speed to 1.5 pixels instead of 1\n #self.hero_speed_factor = 1.5\n self.hero_limit = 3\n \n #Syringes (bullets) configuration\n #self.bullet_speed_factor = 1\n self.bullets_allowed = 5\n \n #Covids configuration\n self.covid_vertical_speed_factor = 1\n #The value of the movement is negative because it is increasing\n # from the right to the left\n #self.covid_horizontal_speed_factor = -10\n #The pandemy direction equals 1 means to the bottom; -1 means to the top\n # The randint ensures an randomly direction when starting the game\n #if randint(0,1) == 1:\n # self.pandemy_direction = 1\n #else:\n # self.pandemy_direction = -1\n\n #The rate that increases the game speed\n self.speedup_scale = 1.1\n \n self.initialize_dynamic_settings()", "title": "" }, { "docid": "dd5240037eab4029c3f040a6c1aba1cc", "score": "0.63671416", "text": "def __init__(self):\n self.screen = pg.display.get_surface()\n self.screen_rect = self.screen.get_rect()\n self.clock = pg.time.Clock()\n self.fps = 60\n self.keys = pg.key.get_pressed()\n self.done = False\n # ship = random.choice(list(prepare.GFX[\"ships\"].values()))\n ship = list(prepare.GFX[\"ships\"].values())[7] # pick first ship available\n self.player = actors.Player((0, 0), ship)\n self.level = level.Level(self.screen_rect.copy(), self.player)\n\n self.energyloss_counter = 0\n self.energygain_counter = 0", "title": "" }, { "docid": "0f7194f91226989dda2cdda5e9fc3c6c", "score": "0.63631403", "text": "def __init__(self):\n game_engine = get_gameengine()\n if game_engine is not None:\n self = game_engine\n else:\n ## The targeted frames per second\n self.target_fps = 200\n\n ## The start time\n self.time = time.time()\n\n ## A list of all registered game objects\n self.game_objects = list()\n\n ## A list of colliders\n self.colliders = list()\n\n ## Manage the user inputs\n self.input_manager = InputManager(self)\n\n ## Determines the state of the Game Engine\n self.running = False\n\n ## Variable to pause the Game Engine\n self.paused = False\n\n self.time_node = pm.PyNode('time1')\n # end if", "title": "" }, { "docid": "2b1aec276551d5c6cf8e7446310d182a", "score": "0.6361806", "text": "def __init__(self):\n super().__init__(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)\n self.player_count: int = None\n self.player_hand_0: arcade.SpriteList = None\n self.player_hand_1: arcade.SpriteList = None\n self.deck: arcade.SpriteList = None\n self.pile: arcade.SpriteList = None", "title": "" }, { "docid": "72672d21106099e15bfae6aee9a6e0a0", "score": "0.63469946", "text": "def start_game(self) -> None:\n self.init_game()\n self.play()", "title": "" }, { "docid": "30a48c448c1c746bc829dfa3391d2526", "score": "0.6346134", "text": "def initialize(self):\n\n # --------- BEGIN YOUR CODE ----------\n\n # This is exactly the same as Human.initialize, just copy the code over\n\n # --------- END YOUR CODE ----------\n pass", "title": "" }, { "docid": "472542e9894153ef4925551611ca0402", "score": 
"0.6342365", "text": "def start(self):\n # asserts preconditions are met\n #assert self.validGameSettings()\n\n #draws initial welcome screen\n #self._text = GLabel(text=\"Press 'S' to Play\")\n #self._text.draw(self.view)\n\n # initializing instance variables\n self.setState(STATE_INACTIVE)\n self.setWave(None)\n self.setText(None)\n self.lastkeys = 0 #ADD MORE ATTRIBUTES\n\n # draws iniital welcome screen\n self.welcomeScreen()", "title": "" }, { "docid": "d7fefa3e89f5460cda184529b474d2fc", "score": "0.63385844", "text": "def init(self):\n self._last = None\n self._game = None\n self._state = STATE_INACTIVE\n self._message = GLabel(text='Breakout\\n\\nClick To Begin\\n\\nGood Luck',\n font_size=24,x=GAME_WIDTH / 2.0, y=GAME_HEIGHT*(2.0/3.0),\n halign='center', valign='middle', linecolor=colormodel.WHITE)\n self._touch = None\n self._countdownTime = 0\n self._countdownMessage = GLabel(text='3', font_size=40,x=GAME_WIDTH / 2.0,\n y=GAME_HEIGHT*(2.0/3.0), halign='center',\n valign='middle', linecolor=colormodel.WHITE)\n self._pausedMessage = GLabel()\n self._sound = True\n self._soundImage = GImage(x=GAME_WIDTH-32, y=0, width=32, height=22,\n source='whitevolumeon.png')\n self._background = GRectangle(x=0, y=0, width=GAME_WIDTH, height=GAME_HEIGHT,\n fillcolor=colormodel.BLACK, linecolor=colormodel.BLACK)", "title": "" }, { "docid": "7628ed4be734b19f77e1a20b4184fae4", "score": "0.6336813", "text": "def __init__(self):\n pygame.init()\n self.settings = Settings()\n self.screen = pygame.display.set_mode(\n (self.settings.screen_width, self.settings.screen_height)\n )\n pygame.display.set_caption(\"Sideways Shooter\")\n self.stats = GameStats(self)\n self.sideways_ship = SidewaysShip(self)\n self.bullets = pygame.sprite.Group()\n self.aliens = pygame.sprite.Group()\n self._create_fleet()", "title": "" }, { "docid": "424833a78c0bfdeed90d07499d7cd6f7", "score": "0.6334929", "text": "def __init__(self):\n\n # Screen's settings\n self.screen_width = 1200\n self.screen_height = 800\n self.bg_color = (230, 230, 230)\n\n # Bluebee Settings\n self.bb_speed = 1.0\n\n # Moving test.\n self.counter = 0\n self.max_left = 400\n self.max_up = 300\n self.max_right = 400\n self.max_down = 300", "title": "" }, { "docid": "15c02130f22707d1974ffa12a4a11c5b", "score": "0.6327699", "text": "def start() -> None:\n\n # PREPARE\n clone_game_files()\n\n # SIMULATE\n turns = run.simulation()\n\n # LOG\n logs = read.combine_logs(turns)\n\n # CALCULATE\n results = calculate.results(logs)\n\n # DISPLAY\n visualize.charts(results)\n\n # CLEAN\n remove_cloned_files()", "title": "" }, { "docid": "ed037633b0e87e59f92106836bcb74fe", "score": "0.6324671", "text": "def setUp(self):\n d = self.deck = TestDeck()\n self.game = test_setup.two_player_lead('Laborer', deck=d)\n self.p1, self.p2 = self.game.players", "title": "" }, { "docid": "305651a134f8726b49cb090804b95f21", "score": "0.6319136", "text": "def __init__(self) -> None:\n self.win = self.__init_window()\n self.BACKGROUND = pygame.transform.scale(pygame.image.load(os.path.join(\"assets\", \"background\", \"background.png\")), (WIDTH, HEIGHT))\n self.highscore = 0\n self.gamemode = Gamemodes.startscreen\n self.clock = pygame.time.Clock()", "title": "" }, { "docid": "ded65e67ac273ec31aaf5cec21a1abb6", "score": "0.63133985", "text": "def __init__(self):\n pygame.init()\n\n self.settings = Settings()\n\n self.scenes = {\"menu\": MenuScene(),\n \"settings\": SettingsScene(),\n \"score\": ScoreScene(),\n \"game\": GameScene(),\n \"pause\": PauseScene(),\n \"game_over\": 
GameOverScene(),\n \"logo\": LogoScene()}\n self.scene_name = \"logo\" # start scene\n self.previous_scene_name = None\n self.scene = self.scenes[self.scene_name]\n\n self.__manager = ResourceManager()\n self.__display = self.settings.get_display()\n self.__display.fill(BACKGROUND_COLOR)\n pygame.display.flip()", "title": "" }, { "docid": "3a7fbbf00153d93349b3f3406c324732", "score": "0.63096815", "text": "def setUp(self):\r\n global ship_image\r\n self.spaceshipgame = SpaceShipGame()\r\n self.spaceshipgame.init()\r\n pos = [1,1]\r\n vel = [1,1]\r\n angle = 0\r\n image = ship_image\r\n\r\n center = [1,1]\r\n size = 1\r\n info = ImageInfo(center, size)\r\n\r\n self.ship = Ship( pos, vel, angle, image, info)", "title": "" }, { "docid": "3a7fbbf00153d93349b3f3406c324732", "score": "0.63096815", "text": "def setUp(self):\r\n global ship_image\r\n self.spaceshipgame = SpaceShipGame()\r\n self.spaceshipgame.init()\r\n pos = [1,1]\r\n vel = [1,1]\r\n angle = 0\r\n image = ship_image\r\n\r\n center = [1,1]\r\n size = 1\r\n info = ImageInfo(center, size)\r\n\r\n self.ship = Ship( pos, vel, angle, image, info)", "title": "" }, { "docid": "e22ba600cbf922333b0d8230d4715ba2", "score": "0.63095695", "text": "def setUp(self):\n self.delegate = AlwaysHitDelegate(\"\")\n self.environment = BattleEnvironment()", "title": "" } ]
eff4806a2046b7b3e0818da3f6b39388
create instantiate of a DeploymentRequest This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response.
[ { "docid": "1f6251f81e8736df9255b2bec99d496b", "score": "0.5890666", "text": "def create_namespaced_deployment_request_instantiate_with_http_info(self, body, name, namespace, **kwargs):\n\n all_params = ['body', 'name', 'namespace', 'pretty']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method create_namespaced_deployment_request_instantiate\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `create_namespaced_deployment_request_instantiate`\")\n # verify the required parameter 'name' is set\n if ('name' not in params) or (params['name'] is None):\n raise ValueError(\"Missing the required parameter `name` when calling `create_namespaced_deployment_request_instantiate`\")\n # verify the required parameter 'namespace' is set\n if ('namespace' not in params) or (params['namespace'] is None):\n raise ValueError(\"Missing the required parameter `namespace` when calling `create_namespaced_deployment_request_instantiate`\")\n\n\n collection_formats = {}\n\n resource_path = '/oapi/v1/namespaces/{namespace}/deploymentconfigs/{name}/instantiate'.replace('{format}', 'json')\n path_params = {}\n if 'name' in params:\n path_params['name'] = params['name']\n if 'namespace' in params:\n path_params['namespace'] = params['namespace']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = ['BearerToken']\n\n return self.api_client.call_api(resource_path, 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1DeploymentRequest',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)", "title": "" } ]
[ { "docid": "c8faee09c9ac08cfa320b38d31c94c48", "score": "0.74152315", "text": "def create_namespaced_deployment_request_instantiate(self, body, name, namespace, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.create_namespaced_deployment_request_instantiate_with_http_info(body, name, namespace, **kwargs)\n else:\n (data) = self.create_namespaced_deployment_request_instantiate_with_http_info(body, name, namespace, **kwargs)\n return data", "title": "" }, { "docid": "22cc9aacc27517e01158df0d46f927ff", "score": "0.6730303", "text": "def create_deployment(self, namespace, deployment):\n api_instance = client.AppsV1Api()\n return api_instance.create_namespaced_deployment(namespace, deployment)", "title": "" }, { "docid": "aa1b4570dc15c2b5106b6a1ea306d229", "score": "0.6238238", "text": "def make_object(self, data):\n return Deployment(**data)", "title": "" }, { "docid": "aa1b4570dc15c2b5106b6a1ea306d229", "score": "0.6238238", "text": "def make_object(self, data):\n return Deployment(**data)", "title": "" }, { "docid": "aa1b4570dc15c2b5106b6a1ea306d229", "score": "0.6238238", "text": "def make_object(self, data):\n return Deployment(**data)", "title": "" }, { "docid": "aae454be2bf98674ea749dc3fb2108cd", "score": "0.61750245", "text": "def __create_deployment(self, deployment_name):\n\n # try to create a new deployment\n try:\n\n # create a new deployment\n command = str('kubectl run ' + deployment_name + \" --image=kubipy-image:latest --image-pull-policy='Never'\")\n os.system(command)\n\n # return True\n return True\n \n # handle exception\n except:\n\n # return False\n return False", "title": "" }, { "docid": "6dc547c9ca59763bf72028d4b3a478f1", "score": "0.5856244", "text": "def create(\n cls,\n deployment_def: Union[Callable, str],\n init_args: Optional[Tuple[Any]] = None,\n init_kwargs: Optional[Dict[Any, Any]] = None,\n ray_actor_options: Optional[Dict] = None,\n placement_group_bundles: Optional[List[Dict[str, float]]] = None,\n placement_group_strategy: Optional[str] = None,\n max_replicas_per_node: Optional[int] = None,\n deployment_def_name: Optional[str] = None,\n ):\n\n if not callable(deployment_def) and not isinstance(deployment_def, str):\n raise TypeError(\"@serve.deployment must be called on a class or function.\")\n\n if not (init_args is None or isinstance(init_args, (tuple, list))):\n raise TypeError(\"init_args must be a tuple.\")\n\n if not (init_kwargs is None or isinstance(init_kwargs, dict)):\n raise TypeError(\"init_kwargs must be a dict.\")\n\n if inspect.isfunction(deployment_def):\n if init_args:\n raise ValueError(\"init_args not supported for function deployments.\")\n elif init_kwargs:\n raise ValueError(\"init_kwargs not supported for function deployments.\")\n\n if not isinstance(deployment_def, (Callable, str)):\n raise TypeError(\n f'Got invalid type \"{type(deployment_def)}\" for '\n \"deployment_def. 
Expected deployment_def to be a \"\n \"class, function, or string.\"\n )\n # Set defaults\n if init_args is None:\n init_args = ()\n if init_kwargs is None:\n init_kwargs = {}\n if ray_actor_options is None:\n ray_actor_options = {}\n if deployment_def_name is None:\n if isinstance(deployment_def, str):\n deployment_def_name = deployment_def\n else:\n deployment_def_name = deployment_def.__name__\n\n config = cls(\n deployment_def_name,\n pickle_dumps(\n deployment_def,\n f\"Could not serialize the deployment {repr(deployment_def)}\",\n ),\n pickle_dumps(init_args, \"Could not serialize the deployment init args\"),\n pickle_dumps(init_kwargs, \"Could not serialize the deployment init kwargs\"),\n ray_actor_options,\n placement_group_bundles,\n placement_group_strategy,\n max_replicas_per_node,\n )\n\n config._deployment_def = deployment_def\n config._init_args = init_args\n config._init_kwargs = init_kwargs\n\n return config", "title": "" }, { "docid": "ad7fb4037e77fb841e5a1c66668787be", "score": "0.5843691", "text": "def make_object(self, data):\n return DeploymentTarget(**data)", "title": "" }, { "docid": "ad7fb4037e77fb841e5a1c66668787be", "score": "0.5843691", "text": "def make_object(self, data):\n return DeploymentTarget(**data)", "title": "" }, { "docid": "ad7fb4037e77fb841e5a1c66668787be", "score": "0.5843691", "text": "def make_object(self, data):\n return DeploymentTarget(**data)", "title": "" }, { "docid": "ca29d0b4bfbbb6008d5170a0fa6ed1d5", "score": "0.57843614", "text": "def create(self):\n evaluate_request(\n self.__v1_api.create_namespaced_service(\n namespace=self.__namespace, body=self.load_gen_service, async_req=True\n ),\n allowed_statuses=[409],\n )\n evaluate_request(\n self.__v1_apps_api.create_namespaced_deployment(\n namespace=self.__namespace,\n body=self.load_gen_deployment,\n async_req=True,\n ),\n allowed_statuses=[409],\n )\n # Make a second call to patch the configuration to make sure changes in replicas are applied\n # TODO: Investigate making a utility method similar to `kubectl apply`\n evaluate_request(\n self.__v1_apps_api.patch_namespaced_deployment(\n namespace=self.__namespace,\n body=self.load_gen_deployment,\n name=get_name(self.load_gen_deployment),\n async_req=True,\n )\n )", "title": "" }, { "docid": "148044ed07fcebbb2c1a2dd4d981eacd", "score": "0.57293147", "text": "def create_deployment(ApiId=None, Description=None, StageName=None):\n pass", "title": "" }, { "docid": "d26c29d6ec490cbe79d5fcc56f07c708", "score": "0.5728097", "text": "def Deploy(self, request, global_params=None):\n config = self.GetMethodConfig('Deploy')\n return self._RunMethod(\n config, request, global_params=global_params)", "title": "" }, { "docid": "235b3bae1d74d97304abed8dea43936e", "score": "0.5602673", "text": "def Deploy(apig,api_id: str,stage_name: str,stage_purpose: str,deployment_purpose: str):\n\n\t\t\tresponse = apig.client.create_deployment(\n\t\t\t\trestApiId=api_id,\n\t\t\t\tstageName=stage_name,\n\t\t\t\tstageDescription=stage_purpose,\n\t\t\t\tdescription=deployment_purpose,\n\t\t\t\t#cacheClusterEnabled=True|False,\n\t\t\t\t#cacheClusterSize='0.5'|'1.6'|'6.1'|'13.5'|'28.4'|'58.2'|'118'|'237',\n\t\t\t\t#variables={\n\t\t\t\t#\t'string': 'string'\n\t\t\t\t#},\n\t\t\t\t#canarySettings={\n\t\t\t\t#\t'percentTraffic': 123.0,\n\t\t\t\t#\t'stageVariableOverrides': {\n\t\t\t\t#\t\t'string': 'string'\n\t\t\t\t#\t},\n\t\t\t\t#\t'useStageCache': True|False\n\t\t\t\t#},\n\t\t\t\t#tracingEnabled=True|False\n\t\t\t)\n\t\t\treturn response", "title": "" }, { "docid": 
"ce6d8cfb1e8c285c274bdb715ef48d27", "score": "0.55937594", "text": "def deployment(self):\n try:\n deployment = gcp.BareClusterDeployment(self.gcp_wrapper, self.config['deployment_name'],\n self.config['gce_zone'])\n info = deployment.get_info()\n errors = info['operation'].get('error')\n if errors:\n raise util.LauncherError('DeploymentContainsErrors', str(errors))\n return deployment\n except HttpError as e:\n if e.resp.status == 404:\n raise util.LauncherError('DeploymentNotFound',\n \"The deployment you are trying to access doesn't exist\") from e\n raise e", "title": "" }, { "docid": "5f08fcba693bd35d4db4a0cf8ec926d5", "score": "0.55091316", "text": "async def create_game_server_deployment(self,\n request: game_server_deployments.CreateGameServerDeploymentRequest = None,\n *,\n parent: str = None,\n game_server_deployment: game_server_deployments.GameServerDeployment = None,\n retry: retries.Retry = gapic_v1.method.DEFAULT,\n timeout: float = None,\n metadata: Sequence[Tuple[str, str]] = (),\n ) -> operation_async.AsyncOperation:\n # Create or coerce a protobuf request object.\n # Sanity check: If we got a request object, we should *not* have\n # gotten any keyword arguments that map to the request.\n has_flattened_params = any([parent, game_server_deployment])\n if request is not None and has_flattened_params:\n raise ValueError(\"If the `request` argument is set, then none of \"\n \"the individual field arguments should be set.\")\n\n request = game_server_deployments.CreateGameServerDeploymentRequest(request)\n\n # If we have keyword arguments corresponding to fields on the\n # request, apply these.\n if parent is not None:\n request.parent = parent\n if game_server_deployment is not None:\n request.game_server_deployment = game_server_deployment\n\n # Wrap the RPC method; this adds retry and timeout information,\n # and friendly error handling.\n rpc = gapic_v1.method_async.wrap_method(\n self._client._transport.create_game_server_deployment,\n default_timeout=60.0,\n client_info=DEFAULT_CLIENT_INFO,\n )\n\n # Certain fields should be provided within the metadata header;\n # add these here.\n metadata = tuple(metadata) + (\n gapic_v1.routing_header.to_grpc_metadata((\n (\"parent\", request.parent),\n )),\n )\n\n # Send the request.\n response = await rpc(\n request,\n retry=retry,\n timeout=timeout,\n metadata=metadata,\n )\n\n # Wrap the response in an operation future.\n response = operation_async.from_gapic(\n response,\n self._client._transport.operations_client,\n game_server_deployments.GameServerDeployment,\n metadata_type=common.OperationMetadata,\n )\n\n # Done; return the response.\n return response", "title": "" }, { "docid": "3f9e2f169726958f09efb705ba205833", "score": "0.54509884", "text": "def create_deployment_from_file():\n parser = ArgumentParser(description=\"Deployment JSON Descriptor\")\n if is_valid_file(parser,filename):\n f=open(filename,'r')\n json_object = json.load(f)\n\n new_deployment = Deployment()\n for value in json_object.values():\n for v in range(0,len(value)):\n new_deployment.description=value[v]['description']\n new_deployment.name = value[v]['name']\n new_deployment.region = value[v]['regionId']\n new_deployment.budget = value[v]['budget']\n result=new_deployment.create()\n print new_deployment.current_job", "title": "" }, { "docid": "361a44319e34d24d399054508a4dba1d", "score": "0.5314766", "text": "def CreateInstance(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"CreateInstance\", 
params, headers=headers)\n response = json.loads(body)\n model = models.CreateInstanceResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))", "title": "" }, { "docid": "f5e3e6698dd5446275e465730755add3", "score": "0.53035444", "text": "def create_namespaced_build_request_instantiate(self, body, name, namespace, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.create_namespaced_build_request_instantiate_with_http_info(body, name, namespace, **kwargs)\n else:\n (data) = self.create_namespaced_build_request_instantiate_with_http_info(body, name, namespace, **kwargs)\n return data", "title": "" }, { "docid": "f2ba182add2613fff44ac66cb271bebe", "score": "0.5297318", "text": "def deployment(ctx, image):\n body = {'template' : image}\n resp = consume_task(ctx.obj.vlab_api,\n endpoint='/api/2/inf/deployment',\n message='Deploying template {}'.format(image),\n body=body,\n timeout=3600,\n pause=20)\n data = resp.json()['content']\n typewriter(\"Successfully created the following machines:\")\n click.echo('\\t{}'.format('\\n\\t'.join(data.keys())))\n typewriter(\"\\nUse 'vlab connect deployment --name <name> --protocol <protocol>' to access a deployed machine\")", "title": "" }, { "docid": "696f2763c4cde4413e6a94516ef60867", "score": "0.5296554", "text": "def create_namespaced_deployment(body, namespace=\"default\", extv1Client=None):\n if extv1Client is None:\n # load the kubernetes config\n load_k8s_config()\n #create the api client\n extv1Client=client.ExtensionsV1beta1Api()\n\n deployment = None\n try:\n deployment = extv1Client.create_namespaced_deployment(namespace=namespace, body=body)\n log.debug(\"Deployment created: \\n{0}\\n\".format(deployment))\n except ApiException as e:\n log.error(\"Exception when calling ExtensionsV1beta1Api->create_namespaced_deployment: %s\\n\" % e)\n raise e\n\n return deployment", "title": "" }, { "docid": "95f4a962f5aadbed03366082f3f5a4e7", "score": "0.5293826", "text": "def create(self, name, filename, do_use=False):\n\n filename = os.path.expanduser(filename)\n print(\"file:\" + filename)\n\n with open(filename, \"rb\") as deploy_file:\n config = yaml.safe_load(deploy_file.read())\n\n try:\n deployment = api.Deployment.create(config, name)\n except jsonschema.ValidationError:\n print(_(\"Config schema validation error: %s.\") % sys.exc_info()[1])\n return(1)\n except exceptions.DeploymentNameExists:\n print(_(\"Error: %s\") % sys.exc_info()[1])\n return(1)\n\n self.list(deployment_list=[deployment])\n if do_use:\n self.use(deployment[\"uuid\"])", "title": "" }, { "docid": "768cd02652f3c2d3997d4a8999a4e438", "score": "0.52635235", "text": "def create(name, cluster, model, sha, env_file, env):\n # Create map of custom environment variables to use with this train job.\n envs = parse_cmd_envs(env_file_path=env_file, env_options=env)\n\n # Create payload.\n payload = project_payload({\n 'name': name,\n 'apiCluster': cluster,\n 'model': model,\n 'sha': sha,\n 'envs': json.dumps(envs)\n })\n\n try:\n # Create the deploy.\n resp = api.post('/deploy', payload=payload, stream=True)\n except KeyboardInterrupt:\n return\n\n # Stream the response logs.\n resp.log_stream()", "title": "" }, { "docid": "ca32146736a6f146e5dd539f10cd0f2f", "score": "0.5260751", "text": "def deployment(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"deployment\")", "title": "" }, 
{ "docid": "0aa30bb2fa60d44677cfefe8d66db21d", "score": "0.5154369", "text": "def make_object(self, data):\n return DeploymentImage(**data)", "title": "" }, { "docid": "0aa30bb2fa60d44677cfefe8d66db21d", "score": "0.5154369", "text": "def make_object(self, data):\n return DeploymentImage(**data)", "title": "" }, { "docid": "0aa30bb2fa60d44677cfefe8d66db21d", "score": "0.5154369", "text": "def make_object(self, data):\n return DeploymentImage(**data)", "title": "" }, { "docid": "0aa30bb2fa60d44677cfefe8d66db21d", "score": "0.5154369", "text": "def make_object(self, data):\n return DeploymentImage(**data)", "title": "" }, { "docid": "e7e62a0e6332b9ca108377885a6bdf61", "score": "0.5148939", "text": "def createDeploymentFromFile(filePath, namespace='default'):\n txClient = TxKubernetesClient()\n\n with open(filePath, 'r') as file:\n deployment = yaml.load(file)\n\n # create a deployment in a namespace\n d = txClient.call(txClient.extV1Beta1.create_namespaced_deployment,\n body=deployment,\n namespace=namespace,\n )\n return d", "title": "" }, { "docid": "007210337460416121b109c416bd824b", "score": "0.5039167", "text": "def _create_new_deployments(self, sm, group, request_dict):\n rm = get_resource_manager()\n new_deployments = request_dict.get('new_deployments')\n if not new_deployments:\n return\n with sm.transaction():\n if not group.default_blueprint:\n raise manager_exceptions.ConflictError(\n 'Cannot create deployments: group {0} has no '\n 'default blueprint set'.format(group.id))\n if not all(spec.get('skip_plugins_validation')\n for spec in new_deployments):\n rm.check_blueprint_plugins_installed(\n group.default_blueprint.plan)\n deployment_count = len(group.deployments)\n create_exec_group = models.ExecutionGroup(\n id=str(uuid.uuid4()),\n deployment_group=group,\n workflow_id='create_deployment_environment',\n visibility=group.visibility,\n concurrency=10,\n )\n sm.put(create_exec_group)\n self._prepare_sites(sm, new_deployments)\n for new_dep_spec in new_deployments:\n dep = self._make_new_group_deployment(\n rm, group, new_dep_spec, deployment_count, group.labels)\n group.deployments.append(dep)\n create_exec_group.executions.append(dep.create_execution)\n deployment_count += 1\n messages = create_exec_group.start_executions(sm, rm)\n workflow_executor.execute_workflow(messages)", "title": "" }, { "docid": "52aeb46eb05931bc19fd86699340acd7", "score": "0.50310636", "text": "def deployment(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"deployment\")", "title": "" }, { "docid": "2961de7404e616b49c17e55067617a76", "score": "0.5011048", "text": "def from_dictionary(cls,\n dictionary):\n if dictionary is None:\n return None\n\n # Extract variables from the dictionary\n deploy_fleet_params = cohesity_management_sdk.models.deploy_fleet_params.DeployFleetParams.from_dictionary(dictionary.get('deployFleetParams')) if dictionary.get('deployFleetParams') else None\n deploy_vms_to_aws_params = cohesity_management_sdk.models.deploy_vms_to_aws_params.DeployVMsToAWSParams.from_dictionary(dictionary.get('deployVmsToAwsParams')) if dictionary.get('deployVmsToAwsParams') else None\n deploy_vms_to_azure_params = cohesity_management_sdk.models.deploy_vms_to_azure_params.DeployVMsToAzureParams.from_dictionary(dictionary.get('deployVmsToAzureParams')) if dictionary.get('deployVmsToAzureParams') else None\n deploy_vms_to_gcp_params = cohesity_management_sdk.models.deploy_vms_to_gcp_params.DeployVMsToGCPParams.from_dictionary(dictionary.get('deployVmsToGcpParams')) if 
dictionary.get('deployVmsToGcpParams') else None\n replicate_snapshots_to_aws_params = cohesity_management_sdk.models.replicate_snapshots_to_aws_params.ReplicateSnapshotsToAWSParams.from_dictionary(dictionary.get('replicateSnapshotsToAwsParams')) if dictionary.get('replicateSnapshotsToAwsParams') else None\n replicate_snapshots_to_azure_params = cohesity_management_sdk.models.replicate_snapshots_to_azure_params.ReplicateSnapshotsToAzureParams.from_dictionary(dictionary.get('replicateSnapshotsToAzureParams')) if dictionary.get('replicateSnapshotsToAzureParams') else None\n\n # Return an object of this model\n return cls(\n deploy_fleet_params,\n deploy_vms_to_aws_params,\n deploy_vms_to_azure_params,\n deploy_vms_to_gcp_params,\n replicate_snapshots_to_aws_params,\n replicate_snapshots_to_azure_params\n)", "title": "" }, { "docid": "fb25da55f48886ed1cd003679994da21", "score": "0.497082", "text": "def create_server(self, request, tenant_id):\n try:\n content = json.loads(request.content.read())\n except ValueError:\n request.setResponseCode(400)\n return json.dumps(bad_request(\"Invalid JSON request body\"))\n\n try:\n creation = (self._region_collection_for_tenant(tenant_id)\n .request_creation(request, content, self.url))\n except ValueError:\n request.setResponseCode(400)\n return json.dumps(\n bad_request(\n \"OS-DCF:diskConfig must be either 'MANUAL' or 'AUTO'.\"))\n\n return creation", "title": "" }, { "docid": "79e0a1a026f7e4cdf27b38cb6667c206", "score": "0.4965557", "text": "def create(self,\n model: str,\n deployment_name: str,\n metadata: Optional[Dict] = None,\n training_data: Optional[Union['DataFrame', 'ndarray']] = None,\n training_target: Optional[Union['DataFrame', 'ndarray']] = None,\n experiment_run_id: Optional[str] = None) -> None:\n return super().create(model=model,\n deployment_name=deployment_name,\n metadata=metadata,\n training_data=training_data,\n training_target=training_target,\n experiment_run_id=experiment_run_id,\n deployment_type='online')", "title": "" }, { "docid": "d0ddeafc96fbba74619cae752d91b369", "score": "0.49553844", "text": "def deploy(self, deployment_yaml):\n\n cmd = \"%s %s\" % (KUBECTL_APPLY % (self.context), deployment_yaml)\n result = self.nuvoloso_helper.run_check_output(cmd)\n return result", "title": "" }, { "docid": "f1ac62f8345fb8e35281f13c5bf4aaf1", "score": "0.49248436", "text": "def create(self, request, *args, **kwargs):\n data = self.request.DATA\n packaged = 'upload' in data\n form = (NewPackagedForm(data) if packaged\n else NewManifestForm(data))\n\n if not form.is_valid():\n return Response(form.errors, status=HTTP_400_BAD_REQUEST)\n\n if not packaged:\n upload = FileUpload.objects.create(\n user=getattr(request, 'amo_user', None))\n # The hosted app validator is pretty fast.\n tasks.fetch_manifest(form.cleaned_data['manifest'], upload.pk)\n else:\n upload = form.file_upload\n # The packaged app validator is much heavier.\n tasks.validator.delay(upload.pk)\n\n log.info('Validation created: %s' % upload.pk)\n self.kwargs = {'pk': upload.pk}\n # Re-fetch the object, fetch_manifest() might have altered it.\n upload = self.get_object()\n serializer = self.get_serializer(upload)\n status = HTTP_201_CREATED if upload.processed else HTTP_202_ACCEPTED\n return Response(serializer.data, status=status)", "title": "" }, { "docid": "d1767a93be08f47e1a6a1e475a096097", "score": "0.49178505", "text": "def create_namespaced_deployment_config(self, namespace, body, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n 
return self.create_namespaced_deployment_config_with_http_info(namespace, body, **kwargs)\n else:\n (data) = self.create_namespaced_deployment_config_with_http_info(namespace, body, **kwargs)\n return data", "title": "" }, { "docid": "0cd42809ff676fd4e00e5b749670ae22", "score": "0.48909155", "text": "def deploy_dag() -> Response:\n\n json_request = request.get_json(cache=False)\n\n try:\n\n if json_request is None:\n return ApiResponse.error(\n status=ApiResponse.STATUS_BAD_REQUEST,\n error=f\"Did not receive any JSON request to deploy\",\n )\n\n ingestion_pipeline = IngestionPipeline(**json_request)\n\n deployer = DagDeployer(ingestion_pipeline)\n response = deployer.deploy()\n\n return response\n\n except ValidationError as err:\n logger.debug(traceback.format_exc())\n logger.error(\n f\"Request Validation Error parsing payload [{json_request}]. IngestionPipeline expected: {err}\"\n )\n return ApiResponse.error(\n status=ApiResponse.STATUS_BAD_REQUEST,\n error=f\"Request Validation Error parsing payload. IngestionPipeline expected: {err}\",\n )\n\n except Exception as exc:\n logger.debug(traceback.format_exc())\n logger.error(f\"Internal error deploying [{json_request}] due to [{exc}] \")\n return ApiResponse.error(\n status=ApiResponse.STATUS_SERVER_ERROR,\n error=f\"Internal error while deploying due to [{exc}] \",\n )", "title": "" }, { "docid": "9d07e6605fdefe7d821ed430f2465009", "score": "0.48740977", "text": "def deploy(self):\n logger.debug(\"======= deploy =======\")\n\n try:\n err, pkg_name, dest, email, passwd = \\\n self.__get_deploy_request_params()\n if (err):\n return HTTPBadRequest(err)\n\n result = deploy_package(pkg_name, dest, email, passwd)\n\n if result['status'] == 202:\n deployment_id = result['deployment_id']\n status_url = 'deployment/{0}'.format(deployment_id)\n contents = json.dumps(result, sort_keys=True)\n response = HTTPAccepted()\n response.headers = {\n 'Content-Type': 'application/json; charset=UTF-8',\n 'Location': status_url}\n response.body = contents\n return response\n elif result['status'] == 404:\n msg = 'Cannot find package \"{0}\" installed.'.format(pkg_name)\n return HTTPNotFound(detail=msg)\n else:\n return HTTPInternalServerError(detail=result['errors'])\n\n except Exception as e:\n stack_info = traceback.format_exc()\n logger.exception(stack_info)\n return HTTPInternalServerError(detail=stack_info)", "title": "" }, { "docid": "4a18028e12f5b6b9a07065a590a6844d", "score": "0.4848733", "text": "def deploy(self, script_file, requirements_file, port, deployment_name = None):\n\n # check if deployment name was given\n if deployment_name is None:\n\n # assign default\n deployment_name = \"kubipy-deployment\"\n\n # if it was given\n else:\n\n # take care of special characters\n deployment_name = deployment_name.replace('_', '-').replace('/', '-')\n\n # build info message\n info_message = \"\"\"\n ____________________________________________________________\n | To deploy your image, you will need to be logged in to |\n | Docker. So please, login if you have an account, or sign |\n | up, if you do not have an account yet. 
|\n ------------------------------------------------------------\n \n \"\"\"\n\n # print info about graphical interface\n print (info_message)\n\n # check script_file input\n if isinstance(script_file, str):\n\n # try to read in the first line\n try:\n\n # read in file\n with open(script_file) as file:\n first_line = file.readline()\n\n # print out first line\n info_message = \"\"\"\\\n This is the first line of the file you want to deploy:\n -------------------------\n {first_line}\n -------------------------\\\n \"\"\".format(first_line = first_line)\n\n # print info message\n print (info_message)\n\n # handle exception\n except:\n\n # handle exception\n raise Exception('I could not find the script_file')\n\n # if script_file input is the wrong format\n else:\n\n # raise Exception\n raise Exception(\"\"\"\n script_file should be a path and filename to your python api\n script, that you want to deploy.\n e.g. /your_folder/your_script.py\n \n \"\"\")\n\n # check requirements_file input\n if isinstance(requirements_file, str):\n\n # try to read in the first line\n try:\n\n # read in file\n with open(requirements_file) as file:\n first_line = file.readline()\n\n # print out first line\n info_message = \"\"\"\\\n This is the first line of your requirements file:\n -------------------------\n {first_line}\n -------------------------\\\n \"\"\".format(first_line = first_line)\n\n # print info message\n print (info_message)\n\n # handle exception\n except:\n\n # handle exception\n raise Exception('I could not find the script_file')\n\n # if script_file input is the wrong format\n else:\n\n # raise Exception\n raise Exception(\"\"\"\n script_file should be a path and filename to your python api\n script, that you want to deploy.\n e.g. /your_folder/your_requirements.txt\n \n \"\"\")\n\n # check port input\n if isinstance(port, str):\n\n # build info message\n info_message = \"\"\"\n ___________________________________________\n | API will be deployed on {port} |\n -------------------------------------------\n \n \"\"\".format(port = port)\n\n # print info about graphical interface\n print (info_message)\n\n # else if port input is the wrong fromat\n else:\n\n # raise Exception\n raise Exception('port should be just a string such as \"8000\"')\n\n # build Dockerfile\n built_dk = self.__build_dockerfile(script_file = script_file,\n requirements_file = requirements_file,\n port = port)\n\n # check if it worked\n if built_dk:\n\n # build info message\n info_message = \"\"\"\n ___________________________________________\n | Successfully built Dockerfile |\n -------------------------------------------\n \"\"\"\n\n # print info message\n print (info_message)\n\n # break the function if it didn't work\n else:\n\n # raise Exception\n raise Exception('I could not built a Dockerfile')\n \n # built docker image\n built_di = self.__build_image()\n\n # check if it worked\n if built_di:\n\n # build info message\n info_message = \"\"\"\n ___________________________________________\n | Successfully built Docker image |\n -------------------------------------------\n \"\"\"\n\n # print info message\n print (info_message)\n\n # break the function if it didn't work\n else:\n\n # raise Exception\n raise Exception('I could not built a Docker image')\n\n # check if deployment already exists\n exists_dp = self.__check_deployment(deployment_name = deployment_name)\n\n # delete if exists\n if exists_dp:\n\n # delete deployment\n self.delete_object(deployment = deployment_name)\n\n # create deployment\n created_dp = 
self.__create_deployment(deployment_name = deployment_name)\n\n # check if it worked\n if created_dp:\n\n # build info message\n info_message = \"\"\"\n ___________________________________________\n | Successfully created deployment |\n -------------------------------------------\n \"\"\"\n\n # print info message\n print (info_message)\n\n # break the function if it didn't work\n else:\n\n # raise Exception\n raise Exception('I could not create a deployment')\n\n # check if service already exists\n exists_sv = self.__check_service(deployment_name = deployment_name)\n\n # delete if exists\n if exists_sv:\n\n # delete service\n self.delete_object(service = deployment_name)\n\n # expose service\n exposed_sv = self.__expose_service(port, deployment_name = deployment_name)\n\n # check if it worked\n if exposed_sv:\n\n # build info message\n info_message = \"\"\"\n ___________________________________________\n | Successfully expose |\n -------------------------------------------\n \"\"\"\n\n # print info message\n print (info_message)\n\n # break the function if it didn't work\n else:\n\n # raise Exception\n raise Exception('I could not expose the service')\n\n # get the service url\n url_exposed = self.__get_url(deployment_name = deployment_name)\n\n # check if it worked\n if url_exposed:\n\n # info message\n info_message = \"\"\"\n ____________________________________________________________\n | Your deployment is ready and you can access the API via: |\n | {url} |\n ------------------------------------------------------------\n \"\"\".format(url = self.service_url)\n\n # print message\n print (info_message)\n\n # if it didn't work\n else:\n\n # raise Exception\n raise Exception('I could not get the url of the service')", "title": "" }, { "docid": "5e9204945c3b4803145bcda3a6b557f6", "score": "0.48196542", "text": "def canarize_deployment(args, input_yaml, labels={}):\n\n # append the -canary to the Deployment name\n output_yaml = copy.deepcopy(input_yaml)\n canary_deployment_name = input_yaml[\"metadata\"][\"name\"] + args.suffix\n output_yaml[\"metadata\"][\"name\"] = canary_deployment_name\n\n print(f\"# Creating canary Deployment {canary_deployment_name}\")\n\n # append the -canary to all the labels in the selector\n try:\n for (k, v) in input_yaml[\"spec\"][\"selector\"][\"matchLabels\"].items():\n output_yaml[\"spec\"][\"selector\"][\"matchLabels\"][k] = v + \\\n args.suffix\n except IndexError:\n pass\n\n for (k, v) in input_yaml[\"spec\"][\"template\"][\"metadata\"][\"labels\"].items():\n output_yaml[\"spec\"][\"template\"][\"metadata\"][\"labels\"][k] = v + args.suffix\n\n if args.image:\n for container in output_yaml[\"spec\"][\"template\"][\"spec\"][\"containers\"]:\n if image_except_tag(container[\"image\"]) == image_except_tag(args.image):\n print(f\"# Replacing Deployment image {args.image}\")\n container[\"image\"] = args.image\n\n if args.namespace:\n output_yaml[\"metadata\"][\"namespace\"] = args.namespace\n\n if len(labels) > 0:\n if len(output_yaml[\"metadata\"][\"labels\"]) > 0:\n output_yaml[\"metadata\"][\"labels\"].update(labels)\n else:\n output_yaml[\"metadata\"][\"labels\"] = labels\n\n return [output_yaml]", "title": "" }, { "docid": "b51d3524ad70eb10bab0b31bfa17cbd9", "score": "0.47847378", "text": "def create_cloud_domain_migration_request(self):\n try:\n self.logger.info('create_cloud_domain_migration_request called.')\n\n # Prepare query URL\n self.logger.info('Preparing query URL for create_cloud_domain_migration_request.')\n _url_path = 
'/public/remoteVaults/cloudDomainMigration'\n _query_builder = self.config.get_base_uri()\n _query_builder += _url_path\n _query_url = APIHelper.clean_url(_query_builder)\n\n # Prepare headers\n self.logger.info('Preparing headers for create_cloud_domain_migration_request.')\n _headers = {\n 'accept': 'application/json',\n 'content-type': 'application/json; charset=utf-8'\n }\n\n # Prepare and execute request\n self.logger.info(\n 'Preparing and executing request for create_cloud_domain_migration_request.')\n _request = self.http_client.post(\n _query_url,\n headers=_headers)\n AuthManager.apply(_request, self.config)\n _context = self.execute_request(_request, name='create_cloud_domain_migration_request')\n\n # Endpoint and global error handling using HTTP status codes.\n self.logger.info('Validating response for create_cloud_domain_migration_request.')\n if _context.response.status_code == 0:\n raise RequestErrorErrorException('Error', _context)\n self.validate_response(_context)\n\n except Exception as e:\n self.logger.error(e, exc_info=True)\n raise", "title": "" }, { "docid": "899b85253d31cf4dbd988aaaaa19a07a", "score": "0.477397", "text": "def predict_from_dataset(self, dataset: Dataset) -> DeploymentPrediction:\n if self.training_type not in ['regression', 'classification', 'multiclassification']:\n PrevisionException('Prediction not supported yet for training type {}', self.training_type)\n data = {\n 'dataset_id': dataset._id,\n }\n\n predict_start = client.request('/deployments/{}/deployment-predictions'.format(self._id),\n method=requests.post,\n data=data,\n message_prefix='Bulk predict')\n predict_start_parsed = parse_json(predict_start)\n return DeploymentPrediction.from_id(predict_start_parsed['_id'])", "title": "" }, { "docid": "ed979a745afa07b8d2f1edbc2f9339a4", "score": "0.4745488", "text": "def loadDeployment(deploymentData):\n #check that the deploymentData is valid\n if not (\"deploymentId\" in deploymentData and type(deploymentData[\"deploymentId\"])==str and len(deploymentData[\"deploymentId\"])>0):\n raise errors.BadRequestError(\"Deployment must have attribute 'deploymentId' (type=str and length>0)\")\n if not (\"name\" in deploymentData and type(deploymentData[\"name\"])==str and len(deploymentData[\"name\"])>0):\n raise errors.BadRequestError(\"Deployment must have attribute 'name' (type=str and length>0)\")\n if not (\"status\" in deploymentData and type(deploymentData[\"status\"])==str and len(deploymentData[\"status\"])>0):\n raise errors.BadRequestError(\"Deployment must have attribute 'status' (type=str and length>0)\")\n if not (\"dateCreated\" in deploymentData and type(deploymentData[\"dateCreated\"])==str and len(deploymentData[\"dateCreated\"])>0):\n raise errors.BadRequestError(\"Deployment must have attribute 'dateCreated' (type=str and length>0)\")\n if not (\"dateModified\" in deploymentData and type(deploymentData[\"dateModified\"])==str and len(deploymentData[\"dateModified\"])>0):\n raise errors.BadRequestError(\"Deployment must have attribute 'dateCreated' (type=str and length>0)\")\n if not (\"archived\" in deploymentData and type(deploymentData[\"archived\"])==bool):\n raise errors.BadRequestError(\"Deployment must have attribute 'archived' (type=bool)\")\n if not (\"goalSampleSize\" in deploymentData and type(deploymentData[\"goalSampleSize\"]) in [int, Decimal] and deploymentData[\"goalSampleSize\"]>0):\n raise errors.BadRequestError(\"Deployment must have attribute 'goalSampleSize' (type=int and value>0)\")\n if not (\"currentSampleSize\" 
in deploymentData and type(deploymentData[\"currentSampleSize\"]) in [int, Decimal] and deploymentData[\"currentSampleSize\"]>=0):\n raise errors.BadRequestError(\"Deployment must have attribute 'currentSampleSize' (type=int and value>=0)\")\n if not (\"facility\" in deploymentData and type(deploymentData[\"facility\"]) in [str, dict] and len(deploymentData[\"facility\"])>0):\n raise errors.BadRequestError(\"Deployment must have attribute 'facility' (type=str or dict and length>0)\")\n #construct the deployment\n d = Deployment()\n d.deploymentId = deploymentData[\"deploymentId\"]\n d.name = deploymentData[\"name\"]\n if \"description\" in deploymentData:\n d.description = deploymentData[\"description\"]\n d.status = deploymentData[\"status\"]\n d.dateCreated = deploymentData[\"dateCreated\"]\n d.dateModified = deploymentData[\"dateModified\"]\n d.archived = deploymentData[\"archived\"]\n d.goalSampleSize = int(deploymentData[\"goalSampleSize\"])\n d.currentSampleSize = int(deploymentData[\"currentSampleSize\"])\n d.facility = facility_db_access.loadFacility(deploymentData[\"facility\"])\n return d", "title": "" }, { "docid": "bc91a32b8ed3b0cc5090e17056b5043a", "score": "0.47425556", "text": "def _start_job(self, start_job_deployment: dict) -> None:\n # Standardize start job deployment.\n job_details = K8sExecutor._standardize_job_details(start_job_deployment=start_job_deployment)\n\n # Save details\n K8sDetailsWriter.save_job_details(job_details=job_details)\n\n # Create and apply k8s config\n k8s_job = self._create_k8s_job(job_details=job_details)\n client.BatchV1Api().create_namespaced_job(body=k8s_job, namespace=\"default\")", "title": "" }, { "docid": "d384e9e873acaef107132e294f7f9d6a", "score": "0.47337088", "text": "def create(definition: dict, version: str, parameter: tuple,\n region: str,\n disable_rollback: bool,\n dry_run: bool,\n force: bool,\n tag: List[str],\n timeout: int,\n keep_stacks: Optional[int],\n traffic: int,\n verbose: bool,\n remote: str,\n parameter_file: Optional[str]\n ):\n lizzy = setup_lizzy_client(remote)\n parameter = list(parameter) or []\n if parameter_file:\n parameter.extend(read_parameter_file(parameter_file))\n\n if not force: # pragma: no cover\n # supporting artifact checking would imply copying a large amount of code\n # from senza, so it should be considered out of scope until senza\n # and lizzy client are merged\n warning(\"WARNING: \"\n \"Artifact checking is still not supported by lizzy-client.\")\n\n with Action('Requesting new stack..') as action:\n new_stack, output = lizzy.new_stack(keep_stacks, traffic,\n definition, version,\n disable_rollback, parameter,\n region=region,\n dry_run=dry_run,\n tags=tag)\n\n stack_id = '{stack_name}-{version}'.format_map(new_stack)\n print(output)\n\n info('Stack ID: {}'.format(stack_id))\n\n if dry_run:\n info(\"Post deployment steps skipped\")\n exit(0)\n\n with Action('Waiting for new stack...') as action:\n if verbose:\n print() # ensure that new states will not be printed on the same line as the action\n\n last_state = None\n for state in lizzy.wait_for_deployment(stack_id, region=region):\n if state != last_state and verbose:\n click.echo(' {}'.format(state))\n else:\n action.progress()\n last_state = state\n\n # TODO be prepared to handle all final AWS CF states\n if last_state == 'ROLLBACK_COMPLETE':\n fatal_error(\n 'Stack was rollback after deployment. 
Check your application log for possible reasons.')\n elif last_state != 'CREATE_COMPLETE':\n fatal_error('Deployment failed: {}'.format(last_state))\n\n info('Deployment Successful')\n\n if traffic is not None:\n with Action('Requesting traffic change..'):\n try:\n lizzy.traffic(stack_id, traffic, region=region)\n except requests.ConnectionError as e:\n connection_error(e, fatal=False)\n except requests.HTTPError as e:\n agent_error(e, fatal=False)\n\n # TODO unit test this\n if keep_stacks is not None:\n versions_to_keep = keep_stacks + 1\n stacks_to_remove_counter = 1\n end_time = datetime.datetime.utcnow() + datetime.timedelta(seconds=timeout)\n while stacks_to_remove_counter > 0 and datetime.datetime.utcnow() <= end_time:\n try:\n all_stacks = lizzy.get_stacks([new_stack['stack_name']],\n region=region)\n except requests.ConnectionError as e:\n connection_error(e, fatal=False)\n error(\"Failed to fetch old stacks. \"\n \"Old stacks WILL NOT BE DELETED\")\n exit(1)\n except requests.HTTPError as e:\n agent_error(e, fatal=False)\n error(\"Failed to fetch old stacks. \"\n \"Old stacks WILL NOT BE DELETED\")\n exit(1)\n else:\n sorted_stacks = sorted(all_stacks,\n key=lambda stack: stack['creation_time'])\n stacks_to_remove = sorted_stacks[:-versions_to_keep]\n stacks_to_remove_counter = len(stacks_to_remove)\n with Action('Deleting old stacks..'):\n print()\n for old_stack in stacks_to_remove:\n old_stack_id = '{stack_name}-{version}'.format_map(\n old_stack)\n if old_stack['status'] in COMPLETE_STATES:\n click.echo(' {}'.format(old_stack_id))\n try:\n lizzy.delete(old_stack_id, region=region)\n stacks_to_remove_counter -= 1\n except requests.ConnectionError as e:\n connection_error(e, fatal=False)\n except requests.HTTPError as e:\n agent_error(e, fatal=False)\n else:\n click.echo(' > {} current status is {} trying '\n 'again later'.format(old_stack_id,\n old_stack['status']))\n if stacks_to_remove_counter > 0:\n time.sleep(5)\n\n if datetime.datetime.utcnow() > end_time:\n click.echo('Timeout waiting for related stacks to be ready.')", "title": "" }, { "docid": "3a412099e06317345559f8858120bad2", "score": "0.47257835", "text": "def show(self, uuid, **kwargs):\n resp, result = self.client.get(\"./deployments/%s\" % uuid, **kwargs)\n return base.Deployment(result)", "title": "" }, { "docid": "ab4de1e2c8f6f1ad5b1ef8545531aa1c", "score": "0.4714727", "text": "def generate_namespaced_deployment_config(self, name, namespace, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.generate_namespaced_deployment_config_with_http_info(name, namespace, **kwargs)\n else:\n (data) = self.generate_namespaced_deployment_config_with_http_info(name, namespace, **kwargs)\n return data", "title": "" }, { "docid": "666873cc8dffae880e12e718f658f36c", "score": "0.47108465", "text": "async def create_deployment(\n deployment: schemas.actions.DeploymentCreate,\n response: Response,\n worker_lookups: WorkerLookups = Depends(WorkerLookups),\n db: PrefectDBInterface = Depends(provide_database_interface),\n) -> schemas.responses.DeploymentResponse:\n\n async with db.session_context(begin_transaction=True) as session:\n if (\n deployment.work_pool_name\n and deployment.work_pool_name != DEFAULT_AGENT_WORK_POOL_NAME\n ):\n # Make sure that deployment is valid before beginning creation process\n work_pool = await models.workers.read_work_pool_by_name(\n session=session, work_pool_name=deployment.work_pool_name\n )\n if work_pool is None:\n raise HTTPException(\n 
status_code=status.HTTP_404_NOT_FOUND,\n detail=f'Work pool \"{deployment.work_pool_name}\" not found.',\n )\n try:\n deployment.check_valid_configuration(work_pool.base_job_template)\n except (MissingVariableError, jsonschema.exceptions.ValidationError) as exc:\n raise HTTPException(\n status_code=status.HTTP_409_CONFLICT,\n detail=f\"Error creating deployment: {exc!r}\",\n )\n\n # hydrate the input model into a full model\n deployment_dict = deployment.dict(exclude={\"work_pool_name\"})\n if deployment.work_pool_name and deployment.work_queue_name:\n # If a specific pool name/queue name combination was provided, get the\n # ID for that work pool queue.\n deployment_dict[\"work_queue_id\"] = (\n await worker_lookups._get_work_queue_id_from_name(\n session=session,\n work_pool_name=deployment.work_pool_name,\n work_queue_name=deployment.work_queue_name,\n create_queue_if_not_found=True,\n )\n )\n elif deployment.work_pool_name:\n # If just a pool name was provided, get the ID for its default\n # work pool queue.\n deployment_dict[\"work_queue_id\"] = (\n await worker_lookups._get_default_work_queue_id_from_work_pool_name(\n session=session,\n work_pool_name=deployment.work_pool_name,\n )\n )\n elif deployment.work_queue_name:\n # If just a queue name was provided, ensure that the queue exists and\n # get its ID.\n work_queue = await models.work_queues._ensure_work_queue_exists(\n session=session, name=deployment.work_queue_name\n )\n deployment_dict[\"work_queue_id\"] = work_queue.id\n\n deployment = schemas.core.Deployment(**deployment_dict)\n # check to see if relevant blocks exist, allowing us throw a useful error message\n # for debugging\n if deployment.infrastructure_document_id is not None:\n infrastructure_block = (\n await models.block_documents.read_block_document_by_id(\n session=session,\n block_document_id=deployment.infrastructure_document_id,\n )\n )\n if not infrastructure_block:\n raise HTTPException(\n status_code=status.HTTP_409_CONFLICT,\n detail=(\n \"Error creating deployment. Could not find infrastructure\"\n f\" block with id: {deployment.infrastructure_document_id}. This\"\n \" usually occurs when applying a deployment specification that\"\n \" was built against a different Prefect database / workspace.\"\n ),\n )\n\n if deployment.storage_document_id is not None:\n infrastructure_block = (\n await models.block_documents.read_block_document_by_id(\n session=session,\n block_document_id=deployment.storage_document_id,\n )\n )\n if not infrastructure_block:\n raise HTTPException(\n status_code=status.HTTP_409_CONFLICT,\n detail=(\n \"Error creating deployment. Could not find storage block with\"\n f\" id: {deployment.storage_document_id}. 
This usually occurs\"\n \" when applying a deployment specification that was built\"\n \" against a different Prefect database / workspace.\"\n ),\n )\n\n now = pendulum.now(\"UTC\")\n model = await models.deployments.create_deployment(\n session=session, deployment=deployment\n )\n\n if model.created >= now:\n response.status_code = status.HTTP_201_CREATED\n\n return schemas.responses.DeploymentResponse.from_orm(model)", "title": "" }, { "docid": "ed72b0ce912d483f2a13f476b19129fb", "score": "0.46907023", "text": "def create_task(deploy_uuid, tag):\n return objects.Task(deployment_uuid=deploy_uuid, tag=tag)", "title": "" }, { "docid": "7d5f3848fff4506962d52fbcf8521860", "score": "0.46852064", "text": "def deploy_issue_model(\n self,\n request: Optional[\n Union[contact_center_insights.DeployIssueModelRequest, dict]\n ] = None,\n *,\n name: Optional[str] = None,\n retry: OptionalRetry = gapic_v1.method.DEFAULT,\n timeout: Union[float, object] = gapic_v1.method.DEFAULT,\n metadata: Sequence[Tuple[str, str]] = (),\n ) -> operation.Operation:\n # Create or coerce a protobuf request object.\n # Quick check: If we got a request object, we should *not* have\n # gotten any keyword arguments that map to the request.\n has_flattened_params = any([name])\n if request is not None and has_flattened_params:\n raise ValueError(\n \"If the `request` argument is set, then none of \"\n \"the individual field arguments should be set.\"\n )\n\n # Minor optimization to avoid making a copy if the user passes\n # in a contact_center_insights.DeployIssueModelRequest.\n # There's no risk of modifying the input as we've already verified\n # there are no flattened fields.\n if not isinstance(request, contact_center_insights.DeployIssueModelRequest):\n request = contact_center_insights.DeployIssueModelRequest(request)\n # If we have keyword arguments corresponding to fields on the\n # request, apply these.\n if name is not None:\n request.name = name\n\n # Wrap the RPC method; this adds retry and timeout information,\n # and friendly error handling.\n rpc = self._transport._wrapped_methods[self._transport.deploy_issue_model]\n\n # Certain fields should be provided within the metadata header;\n # add these here.\n metadata = tuple(metadata) + (\n gapic_v1.routing_header.to_grpc_metadata(((\"name\", request.name),)),\n )\n\n # Send the request.\n response = rpc(\n request,\n retry=retry,\n timeout=timeout,\n metadata=metadata,\n )\n\n # Wrap the response in an operation future.\n response = operation.from_gapic(\n response,\n self._transport.operations_client,\n contact_center_insights.DeployIssueModelResponse,\n metadata_type=contact_center_insights.DeployIssueModelMetadata,\n )\n\n # Done; return the response.\n return response", "title": "" }, { "docid": "83b634c97e7afe7f1c7783396b29643e", "score": "0.46760336", "text": "def Create(self, request, global_params=None):\n config = self.GetMethodConfig('Create')\n return self._RunMethod(\n config, request, global_params=global_params)", "title": "" }, { "docid": "83b634c97e7afe7f1c7783396b29643e", "score": "0.46760336", "text": "def Create(self, request, global_params=None):\n config = self.GetMethodConfig('Create')\n return self._RunMethod(\n config, request, global_params=global_params)", "title": "" }, { "docid": "83b634c97e7afe7f1c7783396b29643e", "score": "0.46760336", "text": "def Create(self, request, global_params=None):\n config = self.GetMethodConfig('Create')\n return self._RunMethod(\n config, request, global_params=global_params)", "title": "" }, { "docid": 
"83b634c97e7afe7f1c7783396b29643e", "score": "0.46760336", "text": "def Create(self, request, global_params=None):\n config = self.GetMethodConfig('Create')\n return self._RunMethod(\n config, request, global_params=global_params)", "title": "" }, { "docid": "83b634c97e7afe7f1c7783396b29643e", "score": "0.46760336", "text": "def Create(self, request, global_params=None):\n config = self.GetMethodConfig('Create')\n return self._RunMethod(\n config, request, global_params=global_params)", "title": "" }, { "docid": "83b634c97e7afe7f1c7783396b29643e", "score": "0.46760336", "text": "def Create(self, request, global_params=None):\n config = self.GetMethodConfig('Create')\n return self._RunMethod(\n config, request, global_params=global_params)", "title": "" }, { "docid": "83b634c97e7afe7f1c7783396b29643e", "score": "0.46760336", "text": "def Create(self, request, global_params=None):\n config = self.GetMethodConfig('Create')\n return self._RunMethod(\n config, request, global_params=global_params)", "title": "" }, { "docid": "83b634c97e7afe7f1c7783396b29643e", "score": "0.46760336", "text": "def Create(self, request, global_params=None):\n config = self.GetMethodConfig('Create')\n return self._RunMethod(\n config, request, global_params=global_params)", "title": "" }, { "docid": "83b634c97e7afe7f1c7783396b29643e", "score": "0.46760336", "text": "def Create(self, request, global_params=None):\n config = self.GetMethodConfig('Create')\n return self._RunMethod(\n config, request, global_params=global_params)", "title": "" }, { "docid": "83b634c97e7afe7f1c7783396b29643e", "score": "0.46760336", "text": "def Create(self, request, global_params=None):\n config = self.GetMethodConfig('Create')\n return self._RunMethod(\n config, request, global_params=global_params)", "title": "" }, { "docid": "83b634c97e7afe7f1c7783396b29643e", "score": "0.46760336", "text": "def Create(self, request, global_params=None):\n config = self.GetMethodConfig('Create')\n return self._RunMethod(\n config, request, global_params=global_params)", "title": "" }, { "docid": "83b634c97e7afe7f1c7783396b29643e", "score": "0.46760336", "text": "def Create(self, request, global_params=None):\n config = self.GetMethodConfig('Create')\n return self._RunMethod(\n config, request, global_params=global_params)", "title": "" }, { "docid": "83b634c97e7afe7f1c7783396b29643e", "score": "0.46760336", "text": "def Create(self, request, global_params=None):\n config = self.GetMethodConfig('Create')\n return self._RunMethod(\n config, request, global_params=global_params)", "title": "" }, { "docid": "83b634c97e7afe7f1c7783396b29643e", "score": "0.46760336", "text": "def Create(self, request, global_params=None):\n config = self.GetMethodConfig('Create')\n return self._RunMethod(\n config, request, global_params=global_params)", "title": "" }, { "docid": "83b634c97e7afe7f1c7783396b29643e", "score": "0.46760336", "text": "def Create(self, request, global_params=None):\n config = self.GetMethodConfig('Create')\n return self._RunMethod(\n config, request, global_params=global_params)", "title": "" }, { "docid": "83b634c97e7afe7f1c7783396b29643e", "score": "0.46760336", "text": "def Create(self, request, global_params=None):\n config = self.GetMethodConfig('Create')\n return self._RunMethod(\n config, request, global_params=global_params)", "title": "" }, { "docid": "83b634c97e7afe7f1c7783396b29643e", "score": "0.46760336", "text": "def Create(self, request, global_params=None):\n config = self.GetMethodConfig('Create')\n return self._RunMethod(\n 
config, request, global_params=global_params)", "title": "" }, { "docid": "83b634c97e7afe7f1c7783396b29643e", "score": "0.46760336", "text": "def Create(self, request, global_params=None):\n config = self.GetMethodConfig('Create')\n return self._RunMethod(\n config, request, global_params=global_params)", "title": "" }, { "docid": "83b634c97e7afe7f1c7783396b29643e", "score": "0.46760336", "text": "def Create(self, request, global_params=None):\n config = self.GetMethodConfig('Create')\n return self._RunMethod(\n config, request, global_params=global_params)", "title": "" }, { "docid": "83b634c97e7afe7f1c7783396b29643e", "score": "0.46760336", "text": "def Create(self, request, global_params=None):\n config = self.GetMethodConfig('Create')\n return self._RunMethod(\n config, request, global_params=global_params)", "title": "" }, { "docid": "83b634c97e7afe7f1c7783396b29643e", "score": "0.46760336", "text": "def Create(self, request, global_params=None):\n config = self.GetMethodConfig('Create')\n return self._RunMethod(\n config, request, global_params=global_params)", "title": "" }, { "docid": "83b634c97e7afe7f1c7783396b29643e", "score": "0.46760336", "text": "def Create(self, request, global_params=None):\n config = self.GetMethodConfig('Create')\n return self._RunMethod(\n config, request, global_params=global_params)", "title": "" }, { "docid": "83b634c97e7afe7f1c7783396b29643e", "score": "0.46760336", "text": "def Create(self, request, global_params=None):\n config = self.GetMethodConfig('Create')\n return self._RunMethod(\n config, request, global_params=global_params)", "title": "" }, { "docid": "11dc854e2eedb67d370d96c9ade296dc", "score": "0.46733534", "text": "def _new(\n cls,\n project_id: str,\n name: str,\n main_model: Model,\n challenger_model: Model = None,\n type_violation_policy: str = 'best_effort',\n access_type: str = None,\n ) -> 'BaseExperimentDeployment':\n\n if access_type and access_type not in ['public', 'fine_grained', 'private']:\n raise PrevisionException('access type must be public, fine_grained or private')\n if type_violation_policy not in ['best_effort', 'strict']:\n raise PrevisionException('type_violation_policy must be best_effort or strict')\n main_model_experiment_version_id = main_model.experiment_version_id\n main_experiment = BaseExperimentVersion._from_id(main_model_experiment_version_id)\n main_experiment_id = main_experiment['experiment_id']\n data = {\n 'name': name,\n 'experiment_id': main_experiment_id,\n 'main_model_experiment_version_id': main_model_experiment_version_id,\n 'main_model_id': main_model._id,\n 'type_violation_policy': type_violation_policy\n }\n if access_type:\n data['access_type'] = access_type\n if challenger_model:\n challenger_model_experiment_version_id = challenger_model.experiment_version_id\n challenger_experiment = BaseExperimentVersion._from_id(main_model_experiment_version_id)\n challenger_experiment_id = challenger_experiment['experiment_id']\n if main_experiment_id != challenger_experiment_id:\n raise PrevisionException('main and challenger models must be from the same experiment')\n data['challenger_model_experiment_version_id'] = challenger_model_experiment_version_id\n data['challenger_model_id'] = challenger_model._id\n\n url = '/projects/{}/model-deployments'.format(project_id)\n resp = client.request(url,\n data=data,\n method=requests.post,\n message_prefix='ExperimentDeployment creation')\n json_resp = parse_json(resp)\n experiment_deployment = cls.from_id(json_resp['_id'])\n return experiment_deployment", 
"title": "" }, { "docid": "95981a399dc790e922d46f3ca77ac602", "score": "0.4671982", "text": "def duplicate_deployment(name, namespace=\"default\", extv1Client=None):\n if extv1Client is None :\n # load the kubernetes config\n load_k8s_config()\n #create the api client\n extv1Client=client.ExtensionsV1beta1Api()\n\n log.debug(\"looking for deployment '%s' in namespace '%s'...\"%(name, namespace))\n dep = None\n try:\n dep = get_namespaced_deployment(name = name, namespace = namespace, extv1Client=extv1Client)\n except K8sDeploymentNotFoundError as e:\n log.error(\"Deployment '%s' not found in namespace '%s'...\"%(name, namespace))\n raise e\n log.debug(\"Deployment '%s' found.\"%(dep.metadata.name))\n timestamp = getSimpleTimestamp()\n dep_copy = duplicate_deployment_config(dep, timestamp)\n log.debug(\"Creating a copy of '%s' with new name '%s' in namespace '%s'\"\n %(dep.metadata.name, dep_copy.metadata.name, namespace))\n deployment = create_namespaced_deployment(body=dep_copy, namespace=namespace, extv1Client=extv1Client)\n log.debug(\"Copy of deployment '%s' created: %s\" \n %(dep.metadata.name, deployment.metadata.name))\n return deployment", "title": "" }, { "docid": "2b75ca7b47d8f3fea14faeade32d7603", "score": "0.46586993", "text": "def init(\n cls,\n project: _ProjectTypeVar,\n usage_type: Literal[\"function\", \"layer\"] = \"function\",\n ) -> DeploymentPackage[_ProjectTypeVar]:\n s3_obj = DeploymentPackageS3Object(project, usage_type)\n if s3_obj.exists:\n if s3_obj.runtime == project.runtime:\n return s3_obj\n LOGGER.warning(\n \"runtime of deployment package found in S3 (%s) does not match \"\n \"requirement (%s); deleting & recreating...\",\n s3_obj.runtime,\n project.runtime,\n )\n s3_obj.delete()\n return cls(project, usage_type)", "title": "" }, { "docid": "437d30909e0b187e3cbae28a5ea0cfb2", "score": "0.46529797", "text": "def CreateAdaptiveDynamicStreamingTemplate(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"CreateAdaptiveDynamicStreamingTemplate\", params, headers=headers)\n response = json.loads(body)\n model = models.CreateAdaptiveDynamicStreamingTemplateResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))", "title": "" }, { "docid": "8bd4749af369d8ea7d583d9ed666cd3e", "score": "0.46441373", "text": "def CreateTagRetentionExecution(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"CreateTagRetentionExecution\", params, headers=headers)\n response = json.loads(body)\n model = models.CreateTagRetentionExecutionResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))", "title": "" }, { "docid": "3558862f8558b92ded636782545e3fd9", "score": "0.4620099", "text": "async def get_game_server_deployment(self,\n request: game_server_deployments.GetGameServerDeploymentRequest = None,\n *,\n name: str = None,\n retry: retries.Retry = gapic_v1.method.DEFAULT,\n timeout: float = None,\n metadata: Sequence[Tuple[str, str]] = (),\n ) -> game_server_deployments.GameServerDeployment:\n # Create or coerce a protobuf request object.\n # Sanity check: If we got a request object, we should *not* have\n # gotten any keyword arguments that map to the request.\n 
has_flattened_params = any([name])\n if request is not None and has_flattened_params:\n raise ValueError(\"If the `request` argument is set, then none of \"\n \"the individual field arguments should be set.\")\n\n request = game_server_deployments.GetGameServerDeploymentRequest(request)\n\n # If we have keyword arguments corresponding to fields on the\n # request, apply these.\n if name is not None:\n request.name = name\n\n # Wrap the RPC method; this adds retry and timeout information,\n # and friendly error handling.\n rpc = gapic_v1.method_async.wrap_method(\n self._client._transport.get_game_server_deployment,\n default_retry=retries.Retry(\ninitial=1.0,maximum=10.0,multiplier=1.3, predicate=retries.if_exception_type(\n core_exceptions.ServiceUnavailable,\n ),\n deadline=60.0,\n ),\n default_timeout=60.0,\n client_info=DEFAULT_CLIENT_INFO,\n )\n\n # Certain fields should be provided within the metadata header;\n # add these here.\n metadata = tuple(metadata) + (\n gapic_v1.routing_header.to_grpc_metadata((\n (\"name\", request.name),\n )),\n )\n\n # Send the request.\n response = await rpc(\n request,\n retry=retry,\n timeout=timeout,\n metadata=metadata,\n )\n\n # Done; return the response.\n return response", "title": "" }, { "docid": "3aa5e1369a6ccd42a79f1931f4668617", "score": "0.45987225", "text": "def deploy(deployment_name: str, environment: str = 'production'):\n alter_path()\n asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())\n\n tasks_module = importlib.import_module(\n f\"deployments.{deployment_name}.tasks\"\n )\n hosts_module = importlib.import_module(\n f\"deployments.{deployment_name}.hosts\"\n )\n\n host_registry: HostRegistry = getattr(hosts_module, \"host_registry\", None)\n if not host_registry:\n raise Exception(\"Can't find 'host_registry' in hosts file.\")\n\n hosts = host_registry.host_class_map.get(environment)\n\n if not hosts:\n print(colored(f\"No hosts defined for {environment}!\", \"red\"))\n sys.exit(1)\n else:\n print(\n colored(f\"Deploying {deployment_name} to {environment}\", \"green\")\n )\n\n for host in hosts:\n host.environment = environment\n\n task_registry: TaskRegistry = getattr(tasks_module, \"task_registry\", None)\n if not task_registry:\n raise Exception(\"Can't find 'task_registry' in tasks file.\")\n\n asyncio.run(\n host_registry.run_tasks(\n tasks=task_registry.task_classes, environment=environment\n )\n )", "title": "" }, { "docid": "2fea32326651bb911a0d95b1bd83a9e6", "score": "0.4576947", "text": "def post(self):\n sm = get_storage_manager()\n\n params = rest_utils.get_json_and_verify_params({\n 'source_deployment_id': {'type': str},\n 'inter_deployment_dependencies': {'type': list}\n })\n\n dependencies = params.get('inter_deployment_dependencies')\n\n if any(item.get('created_by') for item in dependencies):\n check_user_action_allowed('set_owner')\n if any(item.get('created_at') for item in dependencies):\n check_user_action_allowed('set_timestamp')\n\n if len(dependencies) > 0 and EXTERNAL_SOURCE in dependencies[0]:\n source_deployment = None\n else:\n source_deployment = sm.get(models.Deployment,\n params['source_deployment_id'])\n\n created_ids = []\n with sm.transaction():\n for dependency in dependencies:\n record = _create_inter_deployment_dependency(\n source_deployment, dependency, sm)\n created_ids += [record.id]\n\n return ListResponse(\n items=[{'id': i} for i in created_ids],\n metadata={'pagination': {\n 'total': len(created_ids),\n 'size': len(created_ids),\n 'offset': 0,\n }}\n )", "title": "" }, { "docid": 
"05939f00c583a6b07bf49c7be9538f76", "score": "0.45655584", "text": "def create(self, model_id: str, revision: str, instance_name: str,\n resource: ServiceResourceConfig,\n provider: ServiceProviderParameters):\n if provider.vendor != Vendor.EAS:\n raise NotSupportError(\n 'Not support vendor: %s ,only support EAS current.' %\n (provider.vendor))\n create_params = DeployServiceParameters(\n instance_name=instance_name,\n model_id=model_id,\n revision=revision,\n resource=resource,\n provider=provider)\n path = f'{self.endpoint}/api/v1/deployer/endpoint'\n body = asdict(create_params)\n r = requests.post(\n path, json=body, cookies=self.cookies, headers=self.headers)\n handle_http_response(r, logger, self.cookies, 'create_service')\n if r.status_code >= HTTPStatus.OK and r.status_code < HTTPStatus.MULTIPLE_CHOICES:\n if is_ok(r.json()):\n data = r.json()[API_RESPONSE_FIELD_DATA]\n return data\n else:\n raise RequestError(r.json()[API_RESPONSE_FIELD_MESSAGE])\n else:\n raise_for_http_status(r)\n return None", "title": "" }, { "docid": "f2e889d7f8f9cbfd8c394b2275953a07", "score": "0.45585397", "text": "def k8s_custom_deploy(name: str,\n apply_cmd: Union[str, List[str]],\n delete_cmd: Union[str, List[str]],\n deps: Union[str, List[str]],\n image_selector: str=\"\",\n live_update: List[LiveUpdateStep]=[],\n apply_dir: str=\"\",\n apply_env: Dict[str, str]={},\n apply_cmd_bat: Union[str, List[str]]=\"\",\n delete_dir: str=\"\",\n delete_env: Dict[str, str]={},\n delete_cmd_bat: Union[str, List[str]]=\"\",\n container_selector: str=\"\",\n image_deps: List[str]=[]) -> None:\n pass", "title": "" }, { "docid": "8cb801659557df9b1070b30fb03b998a", "score": "0.45562708", "text": "def create(cls, argv):\n request = cls(argv, dict());\n return request;", "title": "" }, { "docid": "0b10092a7ac1f7fd92a757f2f807613b", "score": "0.45510134", "text": "def make_object(self, data):\n return DeploymentMetric(**data)", "title": "" }, { "docid": "0b10092a7ac1f7fd92a757f2f807613b", "score": "0.45510134", "text": "def make_object(self, data):\n return DeploymentMetric(**data)", "title": "" }, { "docid": "0b10092a7ac1f7fd92a757f2f807613b", "score": "0.45510134", "text": "def make_object(self, data):\n return DeploymentMetric(**data)", "title": "" }, { "docid": "6014691e9c23047186d1a2c401f420e5", "score": "0.4536346", "text": "def deployment(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"deployment\")", "title": "" }, { "docid": "0a19a6fbd4c78e8218916c2c7877ce93", "score": "0.45356768", "text": "def create_namespaced_build_request_instantiate_with_http_info(self, body, name, namespace, **kwargs):\n\n all_params = ['body', 'name', 'namespace', 'pretty']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method create_namespaced_build_request_instantiate\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `create_namespaced_build_request_instantiate`\")\n # verify the required parameter 'name' is set\n if ('name' not in params) or (params['name'] is None):\n raise ValueError(\"Missing the required parameter `name` when calling 
`create_namespaced_build_request_instantiate`\")\n # verify the required parameter 'namespace' is set\n if ('namespace' not in params) or (params['namespace'] is None):\n raise ValueError(\"Missing the required parameter `namespace` when calling `create_namespaced_build_request_instantiate`\")\n\n\n collection_formats = {}\n\n resource_path = '/oapi/v1/namespaces/{namespace}/buildconfigs/{name}/instantiate'.replace('{format}', 'json')\n path_params = {}\n if 'name' in params:\n path_params['name'] = params['name']\n if 'namespace' in params:\n path_params['namespace'] = params['namespace']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = ['BearerToken']\n\n return self.api_client.call_api(resource_path, 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1BuildRequest',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)", "title": "" }, { "docid": "c9a72813396cb562f853625569649da9", "score": "0.45355946", "text": "def post(self, name, namespace):\n pod_manifest = request.get_json()\n CRUDService. \\\n get_instance(). 
\\\n create_pod(name, namespace, pod_manifest)\n return KubeApiResponseDto('Resource created.'), \\\n HttpStatusCode.Accepted.value", "title": "" }, { "docid": "690c624fece2f5c9ad89f659e25e4744", "score": "0.4534196", "text": "def start_job(self, deployment_path: str) -> None:\n # Load start_job_deployment.\n with open(deployment_path, \"r\") as fr:\n start_job_deployment = yaml.safe_load(fr)\n\n # Start job\n self._start_job(start_job_deployment=start_job_deployment)", "title": "" }, { "docid": "dfc7dc0a3be4d20a0344fdc02429f811", "score": "0.45341516", "text": "def __init__(self, deployment_input_field_1=None, deployment_input_field_2=None, local_vars_configuration=None): # noqa: E501 # noqa: E501\n if local_vars_configuration is None:\n local_vars_configuration = Configuration()\n self.local_vars_configuration = local_vars_configuration\n\n self._deployment_input_field_1 = None\n self._deployment_input_field_2 = None\n self.discriminator = None\n\n if deployment_input_field_1 is not None:\n self.deployment_input_field_1 = deployment_input_field_1\n if deployment_input_field_2 is not None:\n self.deployment_input_field_2 = deployment_input_field_2", "title": "" }, { "docid": "b3a7ff76f18e399c9d9d1b989be70475", "score": "0.45331883", "text": "def create_or_resume(name, spec, **_):\r\n\r\n # deploy mysql for barbican\r\n utils.ensure_mysql_cluster(\"barbican\", spec[\"mysql\"])\r\n\r\n # deploy barbican api\r\n utils.create_or_update('barbican/daemonset.yml.j2',\r\n name=name, spec=spec)\r\n utils.create_or_update('barbican/service.yml.j2',\r\n name=name, spec=spec)\r\n\r\n url = None\r\n if \"ingress\" in spec:\r\n utils.create_or_update('barbican/ingress.yml.j2',\r\n name=name, spec=spec)\r\n url = spec[\"ingress\"][\"host\"]\r\n identity.ensure_service(name=\"barbican\", service_type=\"key-manager\",\r\n url=url, desc=\"Barbican Service\")", "title": "" }, { "docid": "7617fac347be42843b3cab0d09f08ce2", "score": "0.4518388", "text": "def make_object(self, data):\n return DeploymentLog(**data)", "title": "" }, { "docid": "7617fac347be42843b3cab0d09f08ce2", "score": "0.4518388", "text": "def make_object(self, data):\n return DeploymentLog(**data)", "title": "" }, { "docid": "7617fac347be42843b3cab0d09f08ce2", "score": "0.4518388", "text": "def make_object(self, data):\n return DeploymentLog(**data)", "title": "" }, { "docid": "2a142d94837cf76c464d08b802653e98", "score": "0.4511214", "text": "def CreateTemplate(self, tag, bucket, task_dir):\n image_url = self._COMPUTE_API_ROOT + \\\n 'ubuntu-os-cloud/global/images/ubuntu-1404-trusty-v20160406'\n request_body = {\n 'name': self._GetTemplateName(tag),\n 'properties': {\n 'machineType': 'n1-standard-1',\n 'networkInterfaces': [{\n 'network': self._project_api_url + '/global/networks/default',\n 'accessConfigs': [{\n 'name': 'external-IP',\n 'type': 'ONE_TO_ONE_NAT'\n }]}],\n 'disks': [{\n 'type': 'PERSISTENT',\n 'boot': True,\n 'autoDelete': True,\n 'mode': 'READ_WRITE',\n 'initializeParams': {'sourceImage': image_url}}],\n 'canIpForward': False,\n 'scheduling': {\n 'automaticRestart': True,\n 'onHostMaintenance': 'MIGRATE',\n 'preemptible': False},\n 'serviceAccounts': [{\n 'scopes': [\n 'https://www.googleapis.com/auth/cloud-platform',\n 'https://www.googleapis.com/auth/cloud-taskqueue'],\n 'email': 'default'}],\n 'metadata': { 'items': [\n {'key': 'cloud-storage-path',\n 'value': bucket},\n {'key': 'task-dir',\n 'value': task_dir},\n {'key': 'startup-script-url',\n 'value': 'gs://%s/deployment/startup-script.sh' % bucket},\n {'key': 
'taskqueue-tag', 'value': tag}]}}}\n request = self._compute_api.instanceTemplates().insert(\n project=self._project, body=request_body)\n return self._ExecuteApiRequest(request)[0]", "title": "" }, { "docid": "3d2c19f9a8c3ff259c9f2cc7bed1be04", "score": "0.4505982", "text": "def CreateInstanceCustomizedDomain(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"CreateInstanceCustomizedDomain\", params, headers=headers)\n response = json.loads(body)\n model = models.CreateInstanceCustomizedDomainResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))", "title": "" }, { "docid": "bad3f96d2cb7f93f0fca470786d862f6", "score": "0.44951227", "text": "def __init__(__self__, *,\n deployment_duration_in_minutes: pulumi.Input[int],\n growth_factor: pulumi.Input[float],\n replicate_to: pulumi.Input[str],\n description: Optional[pulumi.Input[str]] = None,\n final_bake_time_in_minutes: Optional[pulumi.Input[int]] = None,\n growth_type: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):\n pulumi.set(__self__, \"deployment_duration_in_minutes\", deployment_duration_in_minutes)\n pulumi.set(__self__, \"growth_factor\", growth_factor)\n pulumi.set(__self__, \"replicate_to\", replicate_to)\n if description is not None:\n pulumi.set(__self__, \"description\", description)\n if final_bake_time_in_minutes is not None:\n pulumi.set(__self__, \"final_bake_time_in_minutes\", final_bake_time_in_minutes)\n if growth_type is not None:\n pulumi.set(__self__, \"growth_type\", growth_type)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if tags is not None:\n pulumi.set(__self__, \"tags\", tags)", "title": "" } ]
c708c456f86c5430638d2bf062dea899
r"""Updates a zone resource.
[ { "docid": "dd6d9c86894eb6ab1a1e288ee79a75af", "score": "0.0", "text": "def Patch(self, request, global_params=None):\n config = self.GetMethodConfig('Patch')\n return self._RunMethod(\n config, request, global_params=global_params)", "title": "" } ]
[ { "docid": "b908bbf5043f2fb197e9223c0fc925f6", "score": "0.66193604", "text": "def _update(self) -> None:\n import regenmaschine.exceptions as exceptions\n\n try:\n self._entity_json = self._client.zones.get(self.rainmachine_id)\n except exceptions.HTTPError as exc_info:\n _LOGGER.error('Unable to update info for zone \"%s\"',\n self.rainmachine_id)\n _LOGGER.debug(exc_info)", "title": "" }, { "docid": "8cf5126c9a67a676b1faf7f347f2e5ec", "score": "0.6341421", "text": "def update(self, req, id, *args, **kwargs):\n # get the context\n context = req.context\n try:\n # get the url\n url = req.url\n # if len(args) > 2:\n # raise BadRequest(resource=\"zone update\", msg=url)\n # get the body\n values = json.loads(req.body)\n values['id'] = id\n LOG.info(_(\"the in value body is %(body)s\"), {\"body\": values})\n LOG.info(_(\"the id is %(id)s\"), {\"id\": id})\n if kwargs.get('owners'):\n # check the in values\n valid_attributes = ['id', 'tenant_id', 'owners']\n recom_msg = self.validat_parms(values, valid_attributes)\n # from rpc server update the zones in db and device\n zones = self.manager.update_zone_owners(context, recom_msg,\n recom_msg['id'])\n else:\n # check the in values\n valid_attributes = ['id', 'tenant_id', 'default_ttl']\n recom_msg = self.validat_parms(values, valid_attributes)\n # from rpc server update the zones in db and device\n zones = self.manager.update_zone(context, recom_msg,\n recom_msg['id'])\n except Nca47Exception as e:\n self.response.status = e.code\n LOG.error(_LE('Error exception! error info: %' + e.message))\n LOG.exception(e)\n return tools.ret_info(e.code, e.message)\n except RemoteError as exception:\n self.response.status = 500\n message = exception.value\n return tools.ret_info(self.response.status, message)\n except Exception as exception:\n LOG.exception(exception)\n self.response.status = 500\n return tools.ret_info(self.response.status, exception.message)\n return zones", "title": "" }, { "docid": "2929ebbd94d7a479ddef9a5412a3e7ea", "score": "0.6042733", "text": "def update(self, ss_resource):\n raise NotImplementedError(\"update() method should be implemented.\")", "title": "" }, { "docid": "7e01de995e456f9fc1fa07e18241a524", "score": "0.6037626", "text": "def update_route53(self):\n self.log.debug('Updating Route53 information for {}'.format(self.account))\n\n # region Update zones\n existing_zones = DNSZone.get_all(self.account)\n zones = self.__fetch_route53_zones()\n for resource_id, data in zones.items():\n if resource_id in existing_zones:\n zone = DNSZone.get(resource_id)\n if zone.update(data):\n self.log.debug('Change detected for Route53 zone {}/{}'.format(\n self.account,\n zone.name\n ))\n zone.save()\n else:\n tags = data.pop('tags')\n DNSZone.create(\n resource_id,\n account_id=self.account.account_id,\n properties=data,\n tags=tags\n )\n\n self.log.debug('Added Route53 zone {}/{}'.format(\n self.account,\n data['name']\n ))\n\n db.session.commit()\n\n zk = set(zones.keys())\n ezk = set(existing_zones.keys())\n\n for resource_id in ezk - zk:\n zone = existing_zones[resource_id]\n\n db.session.delete(zone.resource)\n self.log.debug('Deleted Route53 zone {}/{}'.format(\n self.account.account_name,\n zone.name.value\n ))\n db.session.commit()\n # endregion\n\n # region Update resource records\n try:\n for zone_id, zone in DNSZone.get_all(self.account).items():\n existing_records = {rec.id: rec for rec in zone.records}\n records = self.__fetch_route53_zone_records(zone.get_property('zone_id').value)\n\n for data in records:\n if data['id'] 
in existing_records:\n record = existing_records[data['id']]\n if record.update(data):\n self.log.debug('Changed detected for DNSRecord {}/{}/{}'.format(\n self.account,\n zone.name,\n data['name']\n ))\n record.save()\n else:\n record = DNSRecord.create(\n data['id'],\n account_id=self.account.account_id,\n properties={k: v for k, v in data.items() if k != 'id'},\n tags={}\n )\n self.log.debug('Added new DNSRecord {}/{}/{}'.format(\n self.account,\n zone.name,\n data['name']\n ))\n zone.add_record(record)\n db.session.commit()\n\n rk = set(x['id'] for x in records)\n erk = set(existing_records.keys())\n\n for resource_id in erk - rk:\n record = existing_records[resource_id]\n zone.delete_record(record)\n self.log.debug('Deleted Route53 record {}/{}/{}'.format(\n self.account.account_name,\n zone_id,\n record.name\n ))\n db.session.commit()\n except:\n raise\n # endregion", "title": "" }, { "docid": "d9d6224a3a6d49357c502819ffbfea36", "score": "0.5900831", "text": "def zone(self, zone):\n\n self._zone = zone", "title": "" }, { "docid": "54990993e1c0163d478991a205b9ba6f", "score": "0.5839018", "text": "def set_zonefile(config, name, zonefile):\n path = \"/v1/names/{}/zonefile\".format(name)\n payload = { 'zonefile': zonefile.read() }\n url = create_url(config,path)\n r = requests.put(url, headers=make_headers(config), json=payload)\n output(config, url, r)", "title": "" }, { "docid": "bcccbdc61d08b63f83249b16aa12af5d", "score": "0.58341324", "text": "async def async_update(self):\n try:\n LOG.debug(f\"Updating {self.zone_info}\")\n status = await self._amp.zone_status(self._zone_id)\n if not status:\n return\n except Exception as e:\n # log up to two times within a specific period to avoid saturating the logs\n @limits(calls=2, period=10 * MINUTES)\n def log_failed_zone_update(e):\n LOG.warning(f\"Failed updating {self.zone_info}: {e}\")\n\n log_failed_zone_update(e)\n return\n\n LOG.debug(f\"{self.zone_info} status update: {status}\")\n self._status = status\n\n source_id = status.get(\"source\")\n if source_id:\n source_name = self._source_id_to_name.get(source_id)\n if source_name:\n self._source = source_name\n else:\n # sometimes the client may have not configured a source, but if the amplifier is set\n # to a source other than one defined, go ahead and dynamically create that source. 
This\n # could happen if the user changes the source through a different app or command.\n source_name = f\"Source {source_id}\"\n LOG.warning(\n f\"Undefined source id {source_id} for {self.zone_info}, adding '{source_name}'!\"\n )\n self._source_id_to_name[source_id] = source_name\n self._source_name_to_id[source_name] = source_id", "title": "" }, { "docid": "298b2e7c4e3a798c3d5d2e9c0ff50407", "score": "0.58222544", "text": "def update_resource(RoleArn=None, ResourceArn=None):\n pass", "title": "" }, { "docid": "f053cf63facc2cf646528d110db0b700", "score": "0.57523984", "text": "def set_zone(self, zone, x, y):\n\t\tself.zone = zone\n\t\tself.x = x\n\t\tself.y = y", "title": "" }, { "docid": "95f013b2d193b0b79f8627ffc30e08ee", "score": "0.5736969", "text": "def modify_zone(zone_name, endpoint=None, comment=None):\n\n if not zone_name:\n return None\n\n if comment and run_iadmin('modzone', [zone_name, 'comment', comment]):\n return None\n\n if endpoint and run_iadmin('modzone', [zone_name, 'conn', endpoint]):\n return None\n\n zone = get_zone_details(zone_name)\n\n return zone[0]", "title": "" }, { "docid": "d4f0ad099b6f906e2cc62d61299ebea2", "score": "0.56937206", "text": "def set_zone(self, zone):\n self._zone = zone", "title": "" }, { "docid": "db74f7a93128ca157e6bc775fea5988a", "score": "0.56497073", "text": "def _signal_zone_update(self):\n dispatcher_send(self.hass, f\"{SIGNAL_ZONE_UPDATE}-{self._zone.zone_id}\")", "title": "" }, { "docid": "ea346236f4546ea373fa1da437725483", "score": "0.564124", "text": "def update_record(acct:Account, zone:str, record:str) -> bool:\n headers = {\n \"X-Auth-Email\": acct.email,\n \"X-Auth-Key\": acct.api_key,\n \"Content-Type\": \"application/json\",\n }\n zone_id = \"\"\n record_id = \"\"\n\n _zone = get_zone(acct, zone)\n if _zone:\n zone_id = _zone.get(\"result\")[0].get(\"id\")\n if not zone_id:\n # lets just stop here, no need to further requesting\n return False\n\n _records = get_records(acct, zone_id, record)\n if _zone:\n record_id = _records.get(\"result\")[0].get(\"id\")\n if not record_id:\n # lets just stop here, no need to further requesting\n return False\n\n url = f\"https://api.cloudflare.com/client/v4/zones/{zone_id}/dns_records/{record_id}\"\n data = {\"type\": \"A\", \"name\": record, \"content\": acct.ip}\n r = requests.put(url, headers=headers, data=json.dumps(data))\n\n return r.status_code == 200", "title": "" }, { "docid": "ab9aae6eced19396e27567fc895b810f", "score": "0.5596219", "text": "def set_zone(self, zone: Zone,\n timeout=DEFAULT_TIMEOUT):\n req = \"/api/zones\"\n data = self._post_codec(req, timeout, zone)\n new_zone = Zone.from_dict(data)\n return new_zone", "title": "" }, { "docid": "88b0722c4f26420609410411b98f4224", "score": "0.5582042", "text": "def edit_record(self, id_, **kwargs):\n\n if self.is_valid_id(id_):\n response = self.session.put(f\"{self.api}{self.default_path}/zone/{self.domain}/record/{id_}\",\n json=kwargs).json()\n else:\n response = {'status': 'error', 'item': 'Not found',\n 'errors': {'content': ['Specified ID is invalid, no record found.']}}\n\n log_response(\"EDITING RECORD\", response)\n return response", "title": "" }, { "docid": "d5b49de66418cf1d6e3ff1fa05bb8512", "score": "0.55711555", "text": "def update_record_value(self, new_value, ttl=300):\n new_zone_version = None\n zone_id = self.__get_active_zone_id()\n try:\n # create new zone version\n new_zone_version = self.__api.domain.zone.version.new(\n self.api_key,\n zone_id\n )\n logging.debug('DNS working on a new zone (version %s)', 
new_zone_version)\n\n record_list = self.__api.domain.zone.record.list(\n self.api_key,\n zone_id,\n new_zone_version,\n self.record\n )\n\n logging.debug('Updating records :%s', record_list)\n\n # Update each record that matches the filter\n for a_record in record_list:\n # get record id\n a_record_id = a_record['id']\n a_record_name = a_record['name']\n a_record_type = a_record['type']\n\n # update record value\n new_record = self.record.copy()\n new_record.update({'name': a_record_name, 'type': a_record_type, 'value': new_value, 'ttl': ttl})\n updated_record = self.__api.domain.zone.record.update(\n self.api_key,\n zone_id,\n new_zone_version,\n {'id': a_record_id},\n new_record\n )\n except xmlrpc.client.Fault as e:\n # delete updated zone\n if new_zone_version is not None:\n self.__api.domain.zone.version.delete(\n self.api_key,\n zone_id,\n new_zone_version\n )\n raise\n else:\n # activate updated zone\n self.__api.domain.zone.version.set(\n self.api_key,\n zone_id,\n new_zone_version\n )", "title": "" }, { "docid": "82d87b2fb9b56f0a58d16cc1ac06b317", "score": "0.55356956", "text": "def upsert_route53_record(self, account_name, zone_id, recordset):\n session = self.get_session(account_name)\n client = session.client('route53')\n\n try:\n client.change_resource_record_sets(\n HostedZoneId=zone_id,\n ChangeBatch={\n 'Changes': [{\n 'Action': 'UPSERT',\n 'ResourceRecordSet': recordset,\n }]\n }\n )\n except client.exceptions.NoSuchHostedZone:\n logging.error(f'[{account_name}] Error trying to delete record: '\n f'unknown DNS zone {zone_id}')\n except Exception as e:\n logging.error(f'[{account_name}] unhandled exception: {e}')", "title": "" }, { "docid": "f268c8b33f6de1220103f1fd8b585c15", "score": "0.550737", "text": "def update(\n self, options, callback=None, errback=None, parent=True, **kwargs\n ):\n\n if not self.data:\n raise ReservationException(\"Reservation not loaded\")\n\n def success(result, *args):\n self.data = result\n self.id = result[\"id\"]\n self.address_id = result[\"address_id\"]\n self.mac = result[\"mac\"]\n self.options = result[\"options\"]\n\n if callback:\n return callback(self)\n else:\n return self\n\n return self._rest.update(\n self.id,\n options,\n callback=success,\n errback=errback,\n parent=parent,\n **kwargs\n )", "title": "" }, { "docid": "467321618ddf902ded11714253b2e8f5", "score": "0.5499926", "text": "def update_resource(resource, destroy_bash, rc, stderr, stdout):\n resource['destroy'] = {\n 'cmd': destroy_bash,\n 'rc': rc,\n 'stderr': stderr,\n 'stdout': stdout\n }\n return maccli.dao.api_resource.update(resource)", "title": "" }, { "docid": "b7d339d31f023470adca0be360977e54", "score": "0.5494631", "text": "async def update_switch(self, zone, state, momentary=None, times=None, pause=None):\n try:\n if self.client:\n if self.api_version == CONF_ZONE:\n return await self.client.put_zone(\n zone,\n state,\n momentary,\n times,\n pause,\n )\n\n # device endpoint uses pin number instead of zone\n return await self.client.put_device(\n ZONE_TO_PIN[zone],\n state,\n momentary,\n times,\n pause,\n )\n\n except self.client.ClientError as err:\n _LOGGER.warning(\"Exception trying to update panel: %s\", err)\n\n raise CannotConnect", "title": "" }, { "docid": "dcd24357bb21fe51de5d0a45de31d916", "score": "0.54757786", "text": "def update_resource(resource):\n\n known_resources = {'courses', 'programs'}\n\n if not resource in known_resources:\n return abort(400, 'Not a known data resource.')\n\n return \"200 - OK!\"", "title": "" }, { "docid": 
"22a3d5daedffb144e8c32576fcd709fe", "score": "0.5445498", "text": "def AddZoneResourceArg(parser, verb, positional=True):\n name = 'zone' if positional else '--zone'\n return concept_parsers.ConceptParser.ForResource(\n name,\n GetZoneResourceSpec(),\n 'Arguments and flags that define the Dataplex zone you want {}'.format(\n verb),\n required=True).AddToParser(parser)", "title": "" }, { "docid": "0b26a08be8d0fb7e2a00477c76e63294", "score": "0.54265845", "text": "def update_resource_record(resource_record, value, client=None):\n\n if not client: # pragma: no cover\n client = get_client()\n\n record_name = resource_record['Name']\n record_type = resource_record['Type']\n set_identifier = resource_record['SetIdentifier']\n current_value = resource_record['ResourceRecords'][0]['Value']\n\n if value == current_value:\n return True # Nothing to do, it is already up-to-date\n\n try:\n client.change_resource_record_sets(\n HostedZoneId='string',\n ChangeBatch={\n 'Comment': \"Updating DNS record via route53_dyndns\",\n 'Changes': [\n {\n 'Action': 'UPSERT',\n 'ResourceRecordSet': {\n 'Name': record_name,\n 'Type': record_type,\n 'SetIdentifier': set_identifier,\n 'ResourceRecords': [\n {\n 'Value': value\n },\n ],\n }\n },\n ]\n }\n )\n except Exception as e:\n # TODO - Log the error\n raise Route53Exception(e)\n\n return True", "title": "" }, { "docid": "db797c37ea00fdb66ad89c5c5d416965", "score": "0.5385896", "text": "def update(resource_type, resource_id, data):\n # type: (str, int, dict) -> dict\n url = '/'.join([CONF.server.rstrip('/'), 'api', '1.0', resource_type,\n str(resource_id), ''])\n\n return put(url, data).json()", "title": "" }, { "docid": "9a49ea8c469f3fba96dfddcc2f9edb3e", "score": "0.53846484", "text": "def patch_resource(self, namespace: typing.Optional[str] = None):\n pass # pragma: no cover", "title": "" }, { "docid": "f75f8c033821f9a3ecfc785e70696945", "score": "0.53627706", "text": "def _update_dnsrecord(self, session, record_id, record, opts):\n zone_id = opts['zone_id']\n request = Request(\n 'PUT',\n self._url + \"/zones/{0}/dns_records/{1}\".format(zone_id,\n record_id),\n json=record,\n auth=self._auth)\n res = _call(session, request)\n return (res['id'], res['content'])", "title": "" }, { "docid": "a85433d61bc5db142d57b6e75996cabb", "score": "0.5352522", "text": "def update(self, resource, data):\n pf = self.log_prefix()\n if not self.dry_run:\n resource.update(data)\n self.session.commit()\n self.logger.debug('%s update resource %s', pf, resource.to_str())", "title": "" }, { "docid": "ff71f377ad4bd1ea09e39ff554ee30ce", "score": "0.53504497", "text": "def update_timezone(self, timezone):\n\n body = {\n \"requests\": [\n {\n \"updateSpreadsheetProperties\": {\n \"properties\": {\"timeZone\": timezone},\n \"fields\": \"timeZone\",\n },\n },\n ]\n }\n\n res = self.batch_update(body)\n self._properties[\"timeZone\"] = timezone\n return res", "title": "" }, { "docid": "671e3d5b69fbfa76add61b1740018ed5", "score": "0.53392094", "text": "def update(context, resource, **kwargs):\n etag = kwargs.pop('etag')\n id = kwargs.pop('id')\n data = utils.sanitize_kwargs(**kwargs)\n uri = '%s/%s/%s' % (context.dci_cs_api, resource, id)\n r = context.session.put(uri, headers={'If-match': etag}, json=data)\n return r", "title": "" }, { "docid": "0587394b391db90025fda98a506062cc", "score": "0.5334158", "text": "def update(context, resource, **kwargs):\n etag = kwargs.pop(\"etag\")\n id = kwargs.pop(\"id\")\n data = utils.sanitize_kwargs(**kwargs)\n uri = \"%s/%s/%s\" % (context.dci_cs_api, 
resource, id)\n r = context.session.put(\n uri, timeout=HTTP_TIMEOUT, headers={\"If-match\": etag}, json=data\n )\n return r", "title": "" }, { "docid": "a9a09cdf45609a04823be89a32f77e79", "score": "0.5328376", "text": "def run_UpdateResourceW(regs):\n esp = regs[\"ESP\"]\n res_type = xrkutil.dbg_read_pwstring(esp + 8)\n res_name = xrkutil.dbg_read_pwstring(esp + 0xC)\n\n suck_api_call(\"UpdateResourceW\", regs, {\"res_type\": res_type, \"res_name\": res_name})", "title": "" }, { "docid": "e089f170d5d3289c07765070ceb05220", "score": "0.52920365", "text": "def api_location_update(logger, location_id, data, scheme=\"nrega\"):\n utc_now = pytz.utc.localize(datetime.datetime.utcnow())\n india_now = utc_now.astimezone(pytz.timezone(\"Asia/Calcutta\"))\n india_now_isoformat = india_now.isoformat()\n headers = get_authentication_header()\n data = {\n \"id\" : location_id,\n \"data_json\" : data\n }\n res = requests.patch(LOCATIONURL, headers=headers, data=json.dumps(data))", "title": "" }, { "docid": "75732b8addfe0cdcb894efe1041cabdb", "score": "0.5280714", "text": "def update(self, request, pk=None):\n schedule = Schedule.objects.get(pk=pk)\n\n schedule.date = request.data[\"date\"]\n schedule.description = request.data[\"description\"]\n schedule.time = request.data[\"time\"]\n\n show = Show.objects.get(pk=request.data[\"show\"])\n schedule.show = show\n\n schedule.save()\n\n return Response({}, status=status.HTTP_204_NO_CONTENT)", "title": "" }, { "docid": "b6714ae2ea4e6f63ba333b358a1c482c", "score": "0.525226", "text": "def update(self,request,pk=None):\n\n return Response({'http_method':'PUT'})", "title": "" }, { "docid": "f727328c52a5224d4cbdfda9008c914e", "score": "0.52393675", "text": "async def update(self):\n self._zones = self._test_json[\"zones\"]\n self._devices = self._test_json[\"devices\"]\n self._issues = _issues_via_v3_zones({\"data\": self._zones})\n self._version = _version_via_v3_zones({\"data\": self._zones}) # a hack\n\n await self._update() # now convert all the raw JSON", "title": "" }, { "docid": "7801ce7cbcdb445071ec378017a4086c", "score": "0.5226109", "text": "def update_dns_record(self, zone, record, record_type, record_value, is_alias=False, dry_run=False):\n changes = boto.route53.record.ResourceRecordSets(self.conn_r53, zone)\n change = changes.add_change(\"UPSERT\", record, record_type, ttl=60)\n if is_alias:\n # provide list of params as needed by function set_alias\n # http://boto.readthedocs.org/en/latest/ref/route53.html#boto.route53.record.Record.set_alias\n change.set_alias(*record_value)\n else:\n change.add_value(record_value)\n if dry_run:\n print(changes)\n else:\n changes.commit()\n return True", "title": "" }, { "docid": "5137349c2ca684b547e32657cdb0587e", "score": "0.52146596", "text": "def update_a(self, name, value, ttl=None, comment=\"\"):\n name = self.route53connection._make_qualified(name)\n old_record = self.get_a(name)\n ttl = ttl or old_record.ttl\n return self.update_record(resource_type='A',\n name=name,\n old_value=old_record.resource_records,\n new_value=value,\n old_ttl=old_record.ttl,\n new_ttl=ttl,\n comment=comment)", "title": "" }, { "docid": "8d4255cf031229d0d8e9ed07801b2089", "score": "0.51888937", "text": "def update(self, request, pk=None):\n \n return Response({'http_method':'PUT'})", "title": "" }, { "docid": "a458ee11743e01ac3ce8934f3b93b234", "score": "0.51835114", "text": "def put(self, privilege_id):\n current_privilege = DBPrivilege.query.get(privilege_id)\n args = dns_privilege_common_parser.parse_args()\n privilege_name 
= args['name']\n operation = args['operation']\n resource_type = args['resource_type']\n resource_id = args['resource_id']\n comment = args.get('comment', '')\n try:\n current_privilege.name = privilege_name\n current_privilege.operation = operation\n current_privilege.resource_type = resource_type\n current_privilege.resource_id = resource_id\n current_privilege.comment = comment\n db.session.add(current_privilege)\n except Exception as e:\n db.session.rollback()\n return get_response(RequestCode.OTHER_FAILED, '修改失败!\\n{e}'.format(e=str(e)))\n return get_response(RequestCode.SUCCESS, '修改成功!')", "title": "" }, { "docid": "bc312f060f7a5d920aa727f5d7a2c9cc", "score": "0.51795584", "text": "def testUpdate(self):\n data = {'name': 'toto2'}\n response = requests.put(url=self.url, json=data)\n headers = response.headers\n\n self.assertTrue(self.city == storage.get(City, self.city_id))\n self.assertEqual(response.status_code, 200, WRONG_STATUS_CODE_MSG)\n self.assertEqual(\n headers['Content-Type'], 'application/json', WRONG_TYPE_RETURN_MSG)\n json_data = response.json()\n storage.reload()\n city = storage.get(City, self.city_id)\n self.assertEqual(city.name, 'toto2')\n self.assertIn('name', json_data, MISSING_NAME_ATTR_MSG)\n self.assertIn('created_at', json_data, MISSING_CREATED_AT_ATTR_MSG)\n self.assertIn('updated_at', json_data, MISSING_UPDATED_AT_ATTR_MSG)\n self.assertIn('__class__', json_data, MISSING_CLASS_ATTR_MSG)\n self.assertIn('state_id', json_data, MISSING_STATE_ID_ATTR_MSG)\n self.assertEqual(json_data['name'], 'toto2')\n storage.delete(city)\n storage.save()", "title": "" }, { "docid": "8bb346989e3ba5eae401d59f12d8ed5b", "score": "0.5125676", "text": "def plan_resources_update(context, plan_id, resources):\n return IMPL.plan_resources_update(context, plan_id, resources)", "title": "" }, { "docid": "7a73ce00829c70863b00759d004fa840", "score": "0.51215047", "text": "def update(self, request, pk=None):\n return Response({'http_method': 'PUT'})", "title": "" }, { "docid": "7a73ce00829c70863b00759d004fa840", "score": "0.51215047", "text": "def update(self, request, pk=None):\n return Response({'http_method': 'PUT'})", "title": "" }, { "docid": "7a73ce00829c70863b00759d004fa840", "score": "0.51215047", "text": "def update(self, request, pk=None):\n return Response({'http_method': 'PUT'})", "title": "" }, { "docid": "7a73ce00829c70863b00759d004fa840", "score": "0.51215047", "text": "def update(self, request, pk=None):\n return Response({'http_method': 'PUT'})", "title": "" }, { "docid": "d801b3079fa67c5b855fd620a616f0f4", "score": "0.5120801", "text": "def update_placement(self, placementName: str, projectName: str, attributes: Dict = None) -> Dict:\n pass", "title": "" }, { "docid": "7c91c8c99b529a5b1f932043f0940afd", "score": "0.5118039", "text": "def test_update_resource(self):\n # create a valid http response with a successful status code.\n fake_response = requests.Response()\n fake_response.status_code = requests.codes.ok\n fake_response.headers = []\n # obtain the mock object that corresponds to the call of request()\n self.request_method_mock.return_value = fake_response\n resource_path = \"%s/%s\" % (netscaler_driver.VIPS_RESOURCE,\n TESTVIP_ID)\n resource_name = netscaler_driver.VIP_RESOURCE\n resource_body = self._get_testvip_httpbody_for_update()\n # call method under test: update_resource.\n self.testclient.update_resource(TEST_TENANT_ID, resource_path,\n resource_name, resource_body)\n resource_url = \"%s/%s\" % (self.testclient.service_uri, resource_path)\n # assert that 
requests.request() was called with the\n # expected params.\n self.request_method_mock.assert_called_once_with(\n 'PUT',\n url=resource_url,\n headers=mock.ANY,\n data=mock.ANY)", "title": "" }, { "docid": "7429b21319720bfe73bcfef06136b4b1", "score": "0.5116938", "text": "def _update_resource(\n cls, client: utils.MetadataClientWithOverride, resource: proto.Message,\n ) -> proto.Message:\n\n return client.update_context(context=resource)", "title": "" }, { "docid": "45e474177ed1d8d332bbd96ee9201601", "score": "0.5105461", "text": "def update(self, request, pk=None):\n\n return Response({'http_method': 'PUT'})", "title": "" }, { "docid": "45e474177ed1d8d332bbd96ee9201601", "score": "0.5105461", "text": "def update(self, request, pk=None):\n\n return Response({'http_method': 'PUT'})", "title": "" }, { "docid": "45e474177ed1d8d332bbd96ee9201601", "score": "0.5105461", "text": "def update(self, request, pk=None):\n\n return Response({'http_method': 'PUT'})", "title": "" }, { "docid": "530004acde4c3436899992ad02eb1eb5", "score": "0.50869197", "text": "def test_update_country(self):\n body = Country()\n response = self.client.open(\n '/Erraticturtle35/Tantakatu/1.0.0/countries/{country_id}'.format(country_id=789),\n method='PUT',\n data=json.dumps(body),\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "title": "" }, { "docid": "2026d72227ffd21138eb6a380f66b30e", "score": "0.50614107", "text": "def _add_zone_to_bind(self, zone):\n _, _, ret = self.rndc_target.write_zone_file(zone)\n assert ret == 0\n\n _, _, ret = self.rndc_target.addzone(zone)\n assert ret == 0", "title": "" }, { "docid": "f8374eba22aa4d087499950bab4cc5af", "score": "0.50612473", "text": "def update(self, respool, name, cpu, memory, shares='normal'):\n try:\n config = vim.ResourceConfigSpec()\n config.cpuAllocation = vim.ResourceAllocationInfo()\n config.cpuAllocation.expandableReservation = False\n config.cpuAllocation.limit = cpu\n config.cpuAllocation.reservation = cpu\n config.cpuAllocation.shares = vim.SharesInfo()\n config.cpuAllocation.shares.level = shares \n config.memoryAllocation = vim.ResourceAllocationInfo()\n config.memoryAllocation.expandableReservation = False\n config.memoryAllocation.limit = memory\n config.memoryAllocation.reservation = memory\n config.memoryAllocation.shares = vim.SharesInfo()\n config.memoryAllocation.shares.level = shares\n \n res = respool.UpdateConfig(name, config)\n self.logger.debug(\"Update resource pool %s\" % name)\n return res\n except vmodl.MethodFault as error:\n self.logger.error(error.msg, exc_info=True)\n raise VsphereError(error.msg)", "title": "" }, { "docid": "3df8678201f47047d9cf787855c065f7", "score": "0.50491494", "text": "def associate_zone(client, hosted_zone_id, region, vpc_id):\n try:\n return client.associate_vpc_with_hosted_zone(\n HostedZoneId=hosted_zone_id,\n VPC={\n 'VPCRegion': region,\n 'VPCId': vpc_id\n },\n Comment='Updated by Lambda DDNS'\n )\n except:\n publish_to_sns(SNS_CLIENT, ACCOUNT, REGION, \"Unexpected error:\" +\n str(sys.exc_info()[0]) + lineno())", "title": "" }, { "docid": "ecdc37464c6ff3ba9e2872facb71c2a6", "score": "0.5039758", "text": "def put(self, **params):\n return self.patch(**params)", "title": "" }, { "docid": "91b667a700c6ae5c5630c84983910eb0", "score": "0.503604", "text": "def rput(self, resource, **kw):\n return self.put(resource, **kw)", "title": "" }, { "docid": "28a3f5f4d4e66bc0f438737c596e96e5", "score": "0.5018837", "text": "def mark_zone(self, 
zone_start):\n row_end = self.get_end(zone_start)\n zone_end = self.get_rows(zone_start, row_end)\n new_zone = Zone(zone_start, zone_end)\n self.add_zone(new_zone)\n return new_zone", "title": "" }, { "docid": "f6ce6d2db6f9b5cba81c6b25120fd4ba", "score": "0.50127333", "text": "def _update_resource(\n cls,\n client: utils.MetadataClientWithOverride,\n resource: proto.Message,\n ) -> proto.Message:\n\n return client.update_artifact(artifact=resource)", "title": "" }, { "docid": "755a0a19c80fcc5753ed8a9276843770", "score": "0.5008044", "text": "def update(self, ip=None, ttl=None):\n if ip is not None:\n payload = '{{\"ipv4addr\":\"{0}\"}}'.format(ip)\n if ttl is not None:\n payload = '{{\"ttl\":{0}}}'.format(ttl)\n resp = self.infoblox_.put(self._ref_, payload)\n if resp.status_code != 200:\n try:\n return self.infoblox_.__caller__(\n 'Could not update A record for {0} - Status {1}'\n .format(self.name, resp.status_code), resp.status_code)\n except Exception:\n return resp.status_code\n return 0", "title": "" }, { "docid": "8505a302272a8553131e5778de7fe3e4", "score": "0.49867657", "text": "def updateloc(req, *args, **kwargs):\n return _matchloc(req, insert=True )", "title": "" }, { "docid": "7c2f6077dd3fbef0a1dd70757abee2fc", "score": "0.49825355", "text": "def put(self,request,pk):\n\n saved_plan = self.get_object(pk=pk)\n #features = request.data.get('plans')[0].pop('features')\n #Supports partial request - PATCH in Django might be broken.\n serializer = PlanSerializer(instance=saved_plan, data=request.data[\"plans\"], partial=True)\n\n #TODO: this did not raise exception when request.data was mistakenly supplie\n\n if serializer.is_valid(raise_exception=True):\n plan_saved = serializer.save()\n\n # plan_feature_serializer = PlanFeatureSerializer(instance=saved_plan.features, data=features, partial=True, many=True)\n # if plan_feature_serializer.is_valid(raise_exception=True):\n # plan_feature_saved = plan_feature_serializer.save()\n\n return Response({\n \"success\": \"Plan '{}' updated successfully\".format(plan_saved.plan_name),\n \"plans\":serializer.data\n }, \n status=200)", "title": "" }, { "docid": "c7b46a73c36c48b6f2431e53205c66e4", "score": "0.49727967", "text": "def update(cls, client, resource) :\n\t\ttry :\n\t\t\tif type(resource) is not list :\n\t\t\t\tupdateresource = dnssoarec()\n\t\t\t\tupdateresource.domain = resource.domain\n\t\t\t\tupdateresource.originserver = resource.originserver\n\t\t\t\tupdateresource.contact = resource.contact\n\t\t\t\tupdateresource.serial = resource.serial\n\t\t\t\tupdateresource.refresh = resource.refresh\n\t\t\t\tupdateresource.retry = resource.retry\n\t\t\t\tupdateresource.expire = resource.expire\n\t\t\t\tupdateresource.minimum = resource.minimum\n\t\t\t\tupdateresource.ttl = resource.ttl\n\t\t\t\treturn updateresource.update_resource(client)\n\t\t\telse :\n\t\t\t\tif (resource and len(resource) > 0) :\n\t\t\t\t\tupdateresources = [ dnssoarec() for _ in range(len(resource))]\n\t\t\t\t\tfor i in range(len(resource)) :\n\t\t\t\t\t\tupdateresources[i].domain = resource[i].domain\n\t\t\t\t\t\tupdateresources[i].originserver = resource[i].originserver\n\t\t\t\t\t\tupdateresources[i].contact = resource[i].contact\n\t\t\t\t\t\tupdateresources[i].serial = resource[i].serial\n\t\t\t\t\t\tupdateresources[i].refresh = resource[i].refresh\n\t\t\t\t\t\tupdateresources[i].retry = resource[i].retry\n\t\t\t\t\t\tupdateresources[i].expire = resource[i].expire\n\t\t\t\t\t\tupdateresources[i].minimum = resource[i].minimum\n\t\t\t\t\t\tupdateresources[i].ttl = 
resource[i].ttl\n\t\t\t\tresult = cls.update_bulk_request(client, updateresources)\n\t\t\treturn result\n\t\texcept Exception as e :\n\t\t\traise e", "title": "" }, { "docid": "3b51dd91b975397af5bf5ce05fbf84a1", "score": "0.4968589", "text": "def update(self, name, description, public):\n update_swarm = {}\n if name != None:\n update_swarm[\"name\"] = name\n if description != None:\n update_swarm[\"description\"] = description\n if public != None:\n update_swarm[\"public\"] = public\n update_swarm_json = json.dumps(update_swarm)\n conn = httplib.HTTPConnection(self.apikey.server)\n conn.request(\"PUT\", \"/swarms/%s\"%(self.id), update_swarm_json,\n {\"x-bugswarmapikey\":self.apikey.configuration})\n resp = conn.getresponse()\n txt = resp.read()\n conn.close()\n if resp.status >= 400:\n logging.warning('Swarm info response: ('+str(resp.status)+'): '+txt)\n return False\n logging.debug('Swarm info response: ('+str(resp.status)+'): '+txt)\n if name != None:\n self.name = name\n if description != None:\n self.description = description\n if public != None:\n self.public = public\n return True", "title": "" }, { "docid": "264b78e936e97bd00727c672e1662897", "score": "0.49667677", "text": "def update_record(\n name,\n value,\n zone,\n record_type,\n identifier=None,\n ttl=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n wait_for_sync=True,\n split_dns=False,\n private_zone=False,\n retry_on_rate_limit=None,\n rate_limit_retries=None,\n retry_on_errors=True,\n error_retries=5,\n):\n if region is None:\n region = \"universal\"\n\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n\n if split_dns:\n _zone = _get_split_zone(zone, conn, private_zone)\n else:\n _zone = conn.get_zone(zone)\n if not _zone:\n msg = \"Failed to retrieve zone {}\".format(zone)\n log.error(msg)\n return False\n _type = record_type.upper()\n\n if retry_on_rate_limit or rate_limit_retries is not None:\n if retry_on_rate_limit is not None:\n retry_on_errors = retry_on_rate_limit\n if rate_limit_retries is not None:\n error_retries = rate_limit_retries\n\n _value = _munge_value(value, _type)\n while error_retries > 0:\n try:\n old_record = _zone.find_records(name, _type, identifier=identifier)\n if not old_record:\n return False\n status = _zone.update_record(old_record, _value, ttl, identifier)\n return _wait_for_sync(status.id, conn, wait_for_sync)\n\n except DNSServerError as e:\n if retry_on_errors and _is_retryable_error(e):\n if \"Throttling\" == e.code:\n log.debug(\"Throttled by AWS API.\")\n elif \"PriorRequestNotComplete\" == e.code:\n log.debug(\n \"The request was rejected by AWS API. 
\"\n \"Route 53 was still processing a prior request.\"\n )\n time.sleep(3)\n error_retries -= 1\n continue\n raise\n return False", "title": "" }, { "docid": "d68d755d35a1f03438e9f4215ed61a30", "score": "0.49530244", "text": "def set_zone_mode(\n cls,\n ctl_id: str,\n zone_idx: int,\n mode: str = None,\n setpoint: float = None,\n until: dt = None,\n duration: int = None,\n **kwargs,\n ):\n # W --- 18:013393 01:145038 --:------ 2349 013 0004E201FFFFFF330B1A0607E4\n # W --- 22:017139 01:140959 --:------ 2349 007 0801F400FFFFFF\n\n if mode is None:\n if setpoint is None:\n raise ValueError(\"Invalid args: Both mode and setpoint are None\")\n elif until:\n mode = ZoneMode.TEMPORARY\n elif duration:\n mode = ZoneMode.COUNTDOWN\n else:\n mode = ZoneMode.PERMANENT # or: ZoneMode.ADVANCED\n elif isinstance(mode, int):\n mode = f\"{mode:02X}\"\n\n if mode in ZONE_MODE_MAP:\n mode = ZONE_MODE_MAP[mode]\n elif mode not in ZONE_MODE_LOOKUP:\n raise TypeError(f\"Invalid args: Unknown mode: {mode}\")\n\n if setpoint is None:\n if mode != ZoneMode.SCHEDULE:\n raise ValueError(f\"Invalid args: For {mode}, setpoint cant be None\")\n elif not isinstance(setpoint, (int, float)):\n raise ValueError(f\"Invalid args: setpoint={setpoint}, should be float\")\n\n if mode == ZoneMode.TEMPORARY and until is None:\n mode = ZoneMode.ADVANCED # until = dt.now() + td(hour=1)\n elif mode in (ZoneMode.SCHEDULE, ZoneMode.PERMANENT) and (\n until is not None or duration is not None\n ):\n raise ValueError(f\"Invalid args: For {mode}, until should be None\")\n\n assert mode in ZONE_MODE_LOOKUP, mode\n\n payload = \"\".join(\n (\n f\"{zone_idx:02X}\",\n temp_to_hex(setpoint), # None means max, if a temp is required\n ZONE_MODE_LOOKUP[mode],\n \"FFFFFF\" if duration is None else f\"{duration:06X}\",\n \"\" if until is None else dtm_to_hex(until),\n )\n )\n\n return cls(W_, _2349, payload, ctl_id, **kwargs)", "title": "" }, { "docid": "347fa978afa53680944e303392e62ffd", "score": "0.49470228", "text": "def _update_resource(resource_type, id):\n if resource_type not in schemas.keys():\n raise exceptions.NotFoundException('Resource of type \\'%s\\' not found' % resource_type)\n\n resource = None\n if 'slug' in schemas[resource_type]['properties'].keys():\n resource = db[resource_type].find_one({'slug': id})\n if not resource:\n resource = db[resource_type].find_one({'primaryKey.id': id})\n if not resource:\n raise exceptions.NotFoundException('Resource not found')\n\n if 'save_history' in flask.request.args:\n old_data = copy.deepcopy(resource)\n\n data = flask.request.get_json(force=True, silent=True)\n if not data or not isinstance(data, dict):\n raise exceptions.BadRequestException('Malformed JSON in PATCH data')\n\n # save keys that cannot be changed on update\n resource_id = resource.pop('_id')\n\n # restore keys that cannot be changed on update\n data['updatedAt'] = time_now()\n data['updatedBy'] = get_sub()\n data['createdAt'] = resource.pop('createdAt')\n data['createdBy'] = resource.pop('createdBy', 'Unknown')\n data['primaryKey'] = resource.pop('primaryKey')\n\n validate(data, schemas.get(resource_type))\n\n # _id is not part of the schema, so restore this after validation\n data['_id'] = resource_id\n\n if 'test' not in flask.request.args:\n update = resource2update(schemas[resource_type], data)\n db[resource_type].update_one({'_id': resource_id},\n update)\n\n if 'save_history' in flask.request.args:\n old_data.pop('_id')\n db[resource_type+'_history'].insert(old_data)\n\n return data, 200", "title": "" }, { "docid": 
"b140e9e324bcbf9d585e9ec24655b37b", "score": "0.4943501", "text": "def perform_update(self, serializer):\n reporting_time = arrow.get(serializer.validated_data.get('time'))\n duration = serializer.validated_data.get('duration')\n requested_slot = [\n reporting_time,\n reporting_time.shift(minutes=duration),\n serializer.validated_data.get('attender'),\n ]\n if self.validate_slot(requested_slot):\n serializer.save()\n else:\n raise APIException({'message': 'This slot has been already reserved !!!'})", "title": "" }, { "docid": "9aaa19876a28c74ef1a2424f0111e79e", "score": "0.49418437", "text": "def put(self,request,pk=None):\n return Response({\"method\":\"put\"})", "title": "" }, { "docid": "b5cdb914f7cf325934fa1dc0ad3206f6", "score": "0.4924634", "text": "def update(cls, client, resource) :\n\t\ttry :\n\t\t\tif type(resource) is not list :\n\t\t\t\tupdateresource = nsvariable()\n\t\t\t\tupdateresource.name = resource.name\n\t\t\t\tupdateresource.type = resource.type\n\t\t\t\tupdateresource.iffull = resource.iffull\n\t\t\t\tupdateresource.ifvaluetoobig = resource.ifvaluetoobig\n\t\t\t\tupdateresource.ifnovalue = resource.ifnovalue\n\t\t\t\tupdateresource.init = resource.init\n\t\t\t\tupdateresource.expires = resource.expires\n\t\t\t\tupdateresource.comment = resource.comment\n\t\t\t\treturn updateresource.update_resource(client)\n\t\t\telse :\n\t\t\t\tif (resource and len(resource) > 0) :\n\t\t\t\t\tupdateresources = [ nsvariable() for _ in range(len(resource))]\n\t\t\t\t\tfor i in range(len(resource)) :\n\t\t\t\t\t\tupdateresources[i].name = resource[i].name\n\t\t\t\t\t\tupdateresources[i].type = resource[i].type\n\t\t\t\t\t\tupdateresources[i].iffull = resource[i].iffull\n\t\t\t\t\t\tupdateresources[i].ifvaluetoobig = resource[i].ifvaluetoobig\n\t\t\t\t\t\tupdateresources[i].ifnovalue = resource[i].ifnovalue\n\t\t\t\t\t\tupdateresources[i].init = resource[i].init\n\t\t\t\t\t\tupdateresources[i].expires = resource[i].expires\n\t\t\t\t\t\tupdateresources[i].comment = resource[i].comment\n\t\t\t\tresult = cls.update_bulk_request(client, updateresources)\n\t\t\treturn result\n\t\texcept Exception as e :\n\t\t\traise e", "title": "" }, { "docid": "0768e6867199ac02943409275f7465ee", "score": "0.49196827", "text": "def update(cls, client, resource) :\n\t\ttry :\n\t\t\tif type(resource) is not list :\n\t\t\t\tupdateresource = dnsparameter()\n\t\t\t\tupdateresource.retries = resource.retries\n\t\t\t\tupdateresource.minttl = resource.minttl\n\t\t\t\tupdateresource.maxttl = resource.maxttl\n\t\t\t\tupdateresource.cacherecords = resource.cacherecords\n\t\t\t\tupdateresource.namelookuppriority = resource.namelookuppriority\n\t\t\t\tupdateresource.recursion = resource.recursion\n\t\t\t\tupdateresource.resolutionorder = resource.resolutionorder\n\t\t\t\tupdateresource.dnssec = resource.dnssec\n\t\t\t\tupdateresource.maxpipeline = resource.maxpipeline\n\t\t\t\tupdateresource.dnsrootreferral = resource.dnsrootreferral\n\t\t\t\tupdateresource.dns64timeout = resource.dns64timeout\n\t\t\t\tupdateresource.ecsmaxsubnets = resource.ecsmaxsubnets\n\t\t\t\tupdateresource.maxnegcachettl = resource.maxnegcachettl\n\t\t\t\tupdateresource.cachehitbypass = resource.cachehitbypass\n\t\t\t\tupdateresource.maxcachesize = resource.maxcachesize\n\t\t\t\tupdateresource.maxnegativecachesize = resource.maxnegativecachesize\n\t\t\t\tupdateresource.cachenoexpire = resource.cachenoexpire\n\t\t\t\tupdateresource.splitpktqueryprocessing = resource.splitpktqueryprocessing\n\t\t\t\tupdateresource.cacheecszeroprefix = 
resource.cacheecszeroprefix\n\t\t\t\treturn updateresource.update_resource(client)\n\t\texcept Exception as e :\n\t\t\traise e", "title": "" }, { "docid": "b8dba8265bf30d89baf299055cb0a277", "score": "0.49184713", "text": "def ground_zones_update(pcb_modules: \"List[PCBModule]\", zone_polygon: SimplePolygon,\n pcb_origin: P2D, tracing: str = \"\") -> None:\n # We are serching for XXX where XXX is either GND or /GND:\n # (zone (net NUMBER) (net_name XXX) (layer X.Cu) ... # X is B or F (i.e. Back/Front).\n # ...\n # (polygon\n # (pts\n # (xy X Y) ... # Replace these with the new ones.\n # )\n # )\n # (filled_polygon # The filled zone may not be present at first.\n # (pts\n # (xy X Y) ... # Keep these and let KiCad do the update.\n # )\n # )\n # )\n net_numbers_table: Dict[str, int] = PCBModule.net_numbers_table_get(pcb_modules)\n if \"GND\" in net_numbers_table:\n ground_net_number: int = net_numbers_table[\"GND\"]\n zone_size: int = len(zone_polygon)\n pcb_origin_x: float = pcb_origin.x\n pcb_origin_y: float = pcb_origin.y\n\n zone_pattern1 = f\" (zone (net {ground_net_number}) (net_name GND) (layer \"\n zone_pattern2 = f\" (zone (net {ground_net_number}) (net_name /GND) (layer \"\n pcb_module: PCBModule\n for pcb_module in pcb_modules:\n # Search through *preceeding_lines* looking for a ground zone and updating them\n # as appropriate. Construct *updated_lines* as we sweep through:\n updated_lines: List[str] = []\n preceding_lines: List[str] = pcb_module.preceding_lines\n zone_started: bool = False\n polygon_started: bool = False\n points_started: bool = False\n new_xys_inserted: bool = False\n xys_skipping: bool = False\n pcb_line_index: int\n pcb_line: str\n for pcb_line_index, pcb_line in enumerate(preceding_lines):\n if pcb_line.startswith(zone_pattern1) or pcb_line.startswith(zone_pattern2):\n # Start of a fill zone -- (zone ...:\n zone_started = True\n polygon_started = False\n points_started = False\n xys_skipping = False\n new_xys_inserted = False\n if tracing:\n print(f\"{tracing}Zone started: '{pcb_line}'\")\n elif pcb_line.startswith(\" (polygon\"): # )\n # Start of polygon outline (i.e. 
unfilled) -- (polygon ...:\n polygon_started = zone_started\n points_started = False\n xys_skipping = False\n elif pcb_line.startswith(\" (pts\"): # )\n # Start of points -- (pts ...)\n points_started = polygon_started\n xys_skipping = False\n elif pcb_line.startswith(\" (xy \"):\n # Start of xys -- (xy ...):\n xys_skipping = points_started\n # Insert new xys here if they have not already been inserted:\n if xys_skipping and not new_xys_inserted:\n index: int\n zone_xys: List[str] = [\n (f\"(xy {KicadPCB.number(pcb_origin_x + zone_polygon[index].x, 1)} \"\n f\"{KicadPCB.number(pcb_origin_y - zone_polygon[index].y, 1)})\")\n for index in range(zone_size)]\n\n # This is weird sometime we get duplicate points in *zone_xys*.\n # This code removes adjacent values that are duplicates:\n culled_zone_xys: List[str] = zone_xys[:1]\n zone_text: str\n for zone_text in zone_xys[1:]:\n if culled_zone_xys[-1] != zone_text:\n culled_zone_xys.append(zone_text)\n culled_zone_xys_size: int = len(culled_zone_xys)\n if tracing and zone_size != culled_zone_xys_size:\n print(f\"{tracing}zone_size={zone_size} != \"\n f\"{culled_zone_xys_size}= culled_zone_xys_size\")\n\n # Organize the the values in groups of 5:\n zones_quints: List[List[str]] = [\n culled_zone_xys[index:index + 5]\n for index in range(0, culled_zone_xys_size, 5)\n ]\n if tracing:\n print(f\"{tracing}zone_size={zone_size}\")\n print(f\"{tracing}zones_quints={zones_quints}\")\n\n # Append the lines of *zones_quints* to *updated_lines*:\n zones_quint: List[str]\n zone_index: int\n for zone_index, zones_quint in enumerate(zones_quints):\n quint_line: str = \" \" + ' '.join(zones_quint)\n updated_lines.append(quint_line)\n if tracing:\n print(f\"[{pcb_line_index}]\\tI[{zone_index}]\\t{quint_line}\")\n new_xys_inserted = True\n elif pcb_line.startswith(\" )\"): # End of (pts ...)\n new_xys_inserted = False\n xys_skipping = False\n points_started = False\n elif pcb_line.startswith(\" )\"): # End of (polygon ...)\n polygon_started = False\n elif pcb_line.startswith(\" )\"): # End of (zone ...)\n zone_started = False\n\n # Copy over *pcb_line* to *updated_lines* if we are not in *xys_skipping*:\n if tracing:\n zone_flag: str = 'Z' if zone_started else '-'\n polygon_flag: str = 'G' if polygon_started else '-'\n points_flag: str = 'P' if points_started else '-'\n skipping_flag: str = 'S' if xys_skipping else '-'\n print(f\"[{pcb_line_index}]\\t\"\n f\"{zone_flag}{polygon_flag}{points_flag}{skipping_flag}\\t\"\n f\"{pcb_line}\")\n if not xys_skipping:\n updated_lines.append(pcb_line)\n\n # Replace the updated_lines:\n pcb_module.preceding_lines = updated_lines", "title": "" }, { "docid": "b90e981a8720785eaa0fdb0891c83bd2", "score": "0.4917183", "text": "def put(self, user_id, current_user, flight_id):\n\n flight = Flights.query.filter_by(id=flight_id).first()\n created_by = flight.created_by\n if user_id == created_by:\n # Obtain the new name of the flight from the request data\n edited = request.get_json()\n flight.name = edited['name']\n flight.origin = edited['origin']\n flight.location = edited['destination']\n flight.date = edited['date']\n flight.time = edited['time']\n flight.save()\n\n response = {\n 'id': flight.id, 'name' : flight.name, 'origin' : flight.origin,\n 'destination' : flight.destination, 'date' : flight.date, 'time' : flight.time\n }\n\n msg = {\"message\": \"Flight details updated successfully\"}\n return make_response(jsonify(msg)), 200\n response = {\"message\": \"You can only modify the flights you created\"}\n return 
jsonify(response), 401", "title": "" }, { "docid": "b304669a393449e37fdc6baac67fc67b", "score": "0.49152347", "text": "def update(self, resource, uri=None, force=False, timeout=-1, custom_headers=None, default_values={}):\n if not resource:\n logger.exception(RESOURCE_CLIENT_RESOURCE_WAS_NOT_PROVIDED)\n raise ValueError(RESOURCE_CLIENT_RESOURCE_WAS_NOT_PROVIDED)\n\n logger.debug('Update async (uri = %s, resource = %s)' %\n (self._uri, str(resource)))\n\n if not uri:\n uri = resource['uri']\n\n if force:\n uri += '?force=True'\n\n resource = self.merge_default_values(resource, default_values)\n\n return self.__do_put(uri, resource, timeout, custom_headers)", "title": "" }, { "docid": "9c7325d82e7324cb3baef7efe347ffb4", "score": "0.49144873", "text": "def testUpdateSegment(self):\n I = item.Item(2, 2, CornerPoint=[0,0])\n S1 = skyline.SkylineSegment(0, 2, 2)\n S2 = skyline.SkylineSegment(2, 0, 6)\n\n res = self.S._update_segment(self.S.skyline[0], 0, I)\n self.assertCountEqual(res, [S1, S2])", "title": "" }, { "docid": "dda2977cf097f7da09155af5064de252", "score": "0.49078995", "text": "def test_resource_manager_updation(self):\n resources = self._set_and_get_resources(['test_resources/input/resource_one.json',\n 'test_resources/input/resource_one_updated.json'])\n self.assertEqual(len(resources), 1)\n self.assertEqual(resources.resources.pop().resource_creation_timestamp, 1526331464.49)", "title": "" }, { "docid": "8b1ef3925b697c69a3d4adce2955e385", "score": "0.48944154", "text": "def test_05_test_settings_for_zone(self):\n config_name = \"enable.dynamic.scale.vm\"\n configs = Configurations.list(\n self.apiclient,\n name=config_name,\n zoneid=self.zone.id\n )\n self.assertIsNotNone(configs, \"Fail to get zone setting %s \" % config_name)\n\n orig_value = str(configs[0].value)\n new_value = 'true'\n\n Configurations.update(\n self.apiclient,\n name=config_name,\n value=new_value,\n zoneid=self.zone.id\n )\n\n configs = Configurations.list(\n self.apiclient,\n name=config_name,\n zoneid=self.zone.id\n )\n self.assertIsNotNone(configs, \"Fail to get ol setting %s \" % config_name)\n\n self.assertEqual(new_value,\n (configs[0].value),\n \"Failed to set new config value\")\n\n Configurations.reset(\n self.apiclient,\n name=config_name,\n zoneid=self.zone.id\n )\n\n configs = Configurations.list(\n self.apiclient,\n name=config_name,\n zoneid=self.zone.id\n )\n self.assertIsNotNone(configs, \"Fail to get zone setting %s \" % config_name)\n\n self.assertEqual(orig_value,\n (configs[0].value),\n \"Failed to reset the value for zone\")", "title": "" }, { "docid": "32533953716ec19bf276f0340c20ff0b", "score": "0.48901463", "text": "def callback_2(status):\n async_dispatcher_send( hass, SIGNAL_ZONES_UPDATED, status[ZONES] )", "title": "" }, { "docid": "5976bb0b9d2d10617dadc00483787b44", "score": "0.48895377", "text": "def update(self,field,val):\n Location.objects.get(id = self.id).update(field = val)", "title": "" }, { "docid": "92e91c0ff40f589b608308c8027c572f", "score": "0.48791897", "text": "def updateZone(self, interval, world):\n #\n # Iterate through actors - use a list of the actors\n # in case the actor wants to update the list of\n # actors during this iteration\n for actor in list(self.actors):\n if actor.active:\n actor.updateActor(interval, world)\n #\n # Do physics if we need to\n if self._physics_objects:\n self._updatePhysics(interval)", "title": "" }, { "docid": "705fd86155b6ea89db86b56d565d9cc4", "score": "0.48785475", "text": "def update_account(self):\n return bind_api(\n 
api=self,\n path='/accounts/{account_id}',\n method='PUT',\n post_container='account',\n allowed_param=['account_id', 'name'])", "title": "" }, { "docid": "6f9568618edcc42e80bc7e8c408f3e8d", "score": "0.48718858", "text": "def _update_resource(self, res_type, context, id, res_data):\n\n res_dict = self._encode_resource(resource_id=id,\n resource=res_data[res_type])\n status_code, res_info = self._request_backend(context, res_dict,\n res_type, 'UPDATE')\n res_dicts = self._transform_response(status_code, info=res_info,\n obj_name=res_type)\n LOG.debug(\"update_%(res_type)s(): %(res_dicts)s\",\n {'res_type': res_type, 'res_dicts': res_dicts})\n\n return res_dicts", "title": "" }, { "docid": "ae322bd1454219d29b576c4ea5a3d8b0", "score": "0.4870781", "text": "def create_zone(self):\n self.log.info(\"creating a zone\")\n resp = self.api.create_zone()\n self.log.debug(utils.resp_to_string(resp))\n if resp.status_code != 202:\n self.fail(\"failed to create zone (status=%s)\" % resp.status_code)\n return resp.json()[\"name\"], resp.json()[\"id\"]", "title": "" }, { "docid": "9423ccd2291943b99aa862c4d9c21a0b", "score": "0.48658797", "text": "def put(self):\n request = transforms.loads(self.request.get('request'))\n key = request.get('key')\n\n if not self.assert_xsrf_token_or_fail(\n request, 'put-unit', {'key': key}):\n return\n\n if not CourseOutlineRights.can_edit(self):\n transforms.send_json_response(\n self, 401, 'Access denied.', {'key': key})\n return\n\n unit = courses.Course(self).find_unit_by_id(key)\n if not unit:\n transforms.send_json_response(\n self, 404, 'Object not found.', {'key': key})\n return\n\n payload = request.get('payload')\n updated_unit_dict = transforms.json_to_dict(\n transforms.loads(payload), self.SCHEMA_DICT)\n\n errors = []\n self.apply_updates(unit, updated_unit_dict, errors)\n if not errors:\n course = courses.Course(self)\n assert course.update_unit(unit)\n course.save()\n transforms.send_json_response(self, 200, 'Saved.')\n else:\n transforms.send_json_response(self, 412, '\\n'.join(errors))", "title": "" }, { "docid": "20b74aac94ab85c6a30466317e8a109e", "score": "0.4857295", "text": "def test_route_update_by_admin(self):\n route_test_1 = Route.objects.create(\n name='route-test-1',\n created_by=self.user_admin,\n origin='origin-test',\n destination='destination-test'\n )\n\n payload = {\n 'name': 'rote-updated',\n 'origin': 'test-origin',\n 'destination': 'test-destination'\n }\n\n self.client.force_authenticate(self.user_admin)\n path = ROUTES_URL+str(route_test_1.id)+'/'\n response = self.client.put(path, payload)\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "title": "" }, { "docid": "4ecf11823bc5b6229da23562e49465e2", "score": "0.484399", "text": "def update_resource(self, transaction):\n # If-Match\n if transaction.request.if_match:\n if None not in transaction.request.if_match and str(transaction.resource.etag) \\\n not in transaction.request.if_match:\n transaction.response.code = defines.Codes.PRECONDITION_FAILED.number\n return transaction\n # If-None-Match\n if transaction.request.if_none_match:\n transaction.response.code = defines.Codes.PRECONDITION_FAILED.number\n return transaction\n\n method = getattr(transaction.resource, \"render_PUT\", None)\n\n try:\n resource = method(request=transaction.request)\n except NotImplementedError:\n transaction.response.code = defines.Codes.METHOD_NOT_ALLOWED.number\n return transaction\n\n if isinstance(resource, Resource):\n pass\n elif isinstance(resource, tuple) and len(resource) == 2:\n 
resource, callback = resource\n resource = self._handle_separate(transaction, callback)\n if not isinstance(resource, Resource): # pragma: no cover\n transaction.response.code = defines.Codes.INTERNAL_SERVER_ERROR.number\n return transaction\n else: # pragma: no cover\n # Handle error\n transaction.response.code = defines.Codes.INTERNAL_SERVER_ERROR.number\n return transaction\n\n if resource.etag is not None:\n transaction.response.etag = resource.etag\n\n transaction.response.code = defines.Codes.CHANGED.number\n transaction.response.payload = None\n resource.changed = True\n resource.observe_count += 1\n transaction.resource = resource\n\n return transaction", "title": "" }, { "docid": "5495ed84c50c941748110fa55e2fdc94", "score": "0.48428872", "text": "def update(self, domain_record, actual_ip):\n raise NotImplementedError", "title": "" }, { "docid": "23a335bda8f7907515f7b3d96f95e43c", "score": "0.48319024", "text": "def update(self, resource: Resource):\n\n # Remember, everything is immutable.\n\n # 1. Update resource tree\n resource_name = resource.name\n self.resources.set(resource)\n\n # 2. Find any refs that point to this resource *name*.\n changed_ref_tos = [\n ref.to\n for ref in self.refs.items.values()\n if ref.to == resource_name\n ]\n\n # 3. Update any views that use those refs\n self.views.update_views(changed_ref_tos)", "title": "" }, { "docid": "2d4488822b1d99c1c3ce929463083a69", "score": "0.48297885", "text": "def update(cls, client, resource) :\n\t\ttry :\n\t\t\tif type(resource) is not list :\n\t\t\t\tupdateresource = nsassignment()\n\t\t\t\tupdateresource.name = resource.name\n\t\t\t\tupdateresource.variable = resource.variable\n\t\t\t\tupdateresource.set = resource.set\n\t\t\t\tupdateresource.Add = resource.Add\n\t\t\t\tupdateresource.sub = resource.sub\n\t\t\t\tupdateresource.append = resource.append\n\t\t\t\tupdateresource.clear = resource.clear\n\t\t\t\tupdateresource.comment = resource.comment\n\t\t\t\treturn updateresource.update_resource(client)\n\t\t\telse :\n\t\t\t\tif (resource and len(resource) > 0) :\n\t\t\t\t\tupdateresources = [ nsassignment() for _ in range(len(resource))]\n\t\t\t\t\tfor i in range(len(resource)) :\n\t\t\t\t\t\tupdateresources[i].name = resource[i].name\n\t\t\t\t\t\tupdateresources[i].variable = resource[i].variable\n\t\t\t\t\t\tupdateresources[i].set = resource[i].set\n\t\t\t\t\t\tupdateresources[i].Add = resource[i].Add\n\t\t\t\t\t\tupdateresources[i].sub = resource[i].sub\n\t\t\t\t\t\tupdateresources[i].append = resource[i].append\n\t\t\t\t\t\tupdateresources[i].clear = resource[i].clear\n\t\t\t\t\t\tupdateresources[i].comment = resource[i].comment\n\t\t\t\tresult = cls.update_bulk_request(client, updateresources)\n\t\t\treturn result\n\t\texcept Exception as e :\n\t\t\traise e", "title": "" }, { "docid": "69031dd0987199445e82525939046b56", "score": "0.4823912", "text": "def update(self, id, realm, rep):\n\n path = \"/{realm}/client-templates/{id}\".format(\n id=id, realm=realm\n )\n params = {\n \n }\n headers = {\n \"Content-Type\": \"application/json\"\n }\n return self.session.request(\n method=\"PUT\",\n endpoint=self.session._admurl(path),\n params=params,\n headers=headers,\n data=rep,\n )", "title": "" }, { "docid": "0264be7bf3cf95f45953e0439d024679", "score": "0.48238653", "text": "def put (self, request,pk=None):\n \n return Response({'method':'PUT'})", "title": "" }, { "docid": "f7b32c4da2f5cf13fbf63e4b62b0e32a", "score": "0.48217115", "text": "def dns_update(self):\n if self.svc.node.collector_env.dbopensvc is None:\n 
return\n\n if self.ipname is None:\n self.log.debug(\"skip dns update: ipname is not set\")\n return\n\n try:\n self.conf_get(\"dns_update\")\n except ex.OptNotFound:\n self.log.debug(\"skip dns update: dns_update is not set\")\n return\n\n if not self.is_up():\n self.log.debug(\"skip dns update: resource is not up\")\n return\n\n if self.dns_name_suffix is None:\n self.log.debug(\"dns update: dns_name_suffix is not set\")\n\n try:\n self.getaddr()\n except ex.Error as exc:\n self.log.error(str(exc))\n return\n\n post_data = {\n \"content\": self.addr,\n }\n\n if self.dns_name_suffix:\n post_data[\"name\"] = self.dns_name_suffix\n\n try:\n data = self.svc.node.collector_rest_post(\n \"/dns/services/records\",\n post_data,\n path=self.dns_rec_name(),\n )\n except Exception as exc:\n raise ex.Error(\"dns update failed: \"+str(exc))\n if \"error\" in data:\n raise ex.Error(data[\"error\"])\n\n self.log.info(\"dns updated\")", "title": "" }, { "docid": "b6293afea967a998c8582a88aab923e4", "score": "0.48209825", "text": "def perform_update(self, serializer):\n raise EndpointNotImplemented()", "title": "" }, { "docid": "34c9aedb19254408859f71c869a982ab", "score": "0.48169306", "text": "async def set_mode(self, mode):\n allowed_modes = [ZONE_MODE.Off, ZONE_MODE.Override, ZONE_MODE.Timer]\n\n if self._has_pir:\n allowed_modes += [ZONE_MODE.Footprint]\n allowed_mode_strs = [IMODE_TO_MODE[i] for i in allowed_modes]\n\n if isinstance(mode, int) and mode in allowed_modes:\n mode_str = IMODE_TO_MODE[mode]\n elif isinstance(mode, str) and mode in allowed_mode_strs:\n mode_str = mode\n mode = MODE_TO_IMODE[mode_str]\n else:\n raise TypeError(f\"Zone.set_mode(): mode='{mode}' isn't valid.\")\n\n _LOGGER.debug(\n \"Zone(%s).set_mode(mode=%s, mode_str='%s')...\", self.id, mode, mode_str\n )\n\n if self._hub.api_version == 1:\n url = f\"zones/{self.id}/mode\" # v1 API uses strings\n resp = await self._hub.request(\"PUT\", url, data=mode_str)\n else: # self._hub.api_version == 3\n url = (\n f\"zone/{self.id}\"\n ) # v3 API uses dicts # TODO: check: is it PUT(POST?) 
vs PATCH\n resp = await self._hub.request(\"PATCH\", url, data={\"iMode\": mode})\n\n if resp: # for v1, resp = None?\n resp = resp[\"data\"] if resp[\"error\"] == 0 else resp\n _LOGGER.debug(\"Zone(%s).set_mode(): response = %s\", self.id, resp)", "title": "" }, { "docid": "549670aa385170d59c7946342e144bbf", "score": "0.47911164", "text": "def update(controller, dt, ship, moon):\n action = controller.get_action()\n is_crashed = check_crash(ship, moon)\n \n ship.update(dt, action, is_crashed)", "title": "" }, { "docid": "38f57ca5367a72ea91d33473cb5699c3", "score": "0.47900626", "text": "def put(self, request, pk=None):\n\n return Response({'method': 'put'})", "title": "" }, { "docid": "7785f6b35e057aa2de84b686b820aaab", "score": "0.47845304", "text": "def update(self, data=None, timeout=-1, custom_headers=None, force=False):\n\n resource = deepcopy(self.data)\n resource.update(data)\n\n self.data = self._helper.update(resource, self.URI, force, timeout, custom_headers)\n\n return self", "title": "" }, { "docid": "a259e0e575aa6ce4d0dadd5494e3b728", "score": "0.4781302", "text": "def update(self, client=None):\n try:\n return super(TimestampedResource, self).update(client)\n except exceptions.DoesNotExist as e:\n self._set('created', None)\n self._set('modified', None)\n raise e", "title": "" }, { "docid": "e43f24ed9c3a06c7f66551b759baf643", "score": "0.47758436", "text": "def assign(zone_name, domain_name, value, ttl):\n zone = CONNECTION.get_zone(zone_name)\n record = _find_record(zone, domain_name)\n record_type = 'A' if _is_ip_address(value) else 'CNAME'\n\n if record:\n record_info = _query(record)\n if record_info['type'] == record_type and \\\n record_info['value'] == value and \\\n record_info['ttl'] == ttl:\n # No need to assign the same values again\n return\n\n # There's no way to change the type of an existing record in one operation.\n # Instead, we first DELETE if necessary and then CREATE a new record.\n change_set = r53.record.ResourceRecordSets(CONNECTION, zone.id)\n if record:\n change_set.add_change_record('DELETE', record)\n change = change_set.add_change('CREATE', domain_name, record_type, ttl)\n change.add_value(value)\n change = change_set.commit()\n\n change_id = _get_change_id(change)\n while _get_change_state(CONNECTION.get_change(change_id)) == 'PENDING':\n time.sleep(10)", "title": "" }, { "docid": "ec5262734b8f0f91e29f436955951ac6", "score": "0.47746283", "text": "def update_day(zone_name, zone_job, day_of_week, value):\n if zone_name in ([n for [n] in use_database.read_all_garden_zone_names()]):\n zone_schedule[zone_name].update_day(zone_name, zone_job, day_of_week, value)\n elif zone_name in ([n for [n] in use_database.read_all_power_zone_names()]):\n power_zone_schedule[zone_name].update_day(zone_name, zone_job, day_of_week, value)", "title": "" }, { "docid": "4a000758ca6a85b7f71cdbfe57a6aa5e", "score": "0.47742543", "text": "def update_resource(\n project_id,\n location,\n dataset_id,\n fhir_store_id,\n resource_type,\n resource_id,\n):\n # Imports Python's built-in \"os\" module\n import os\n\n # Imports the google.auth.transport.requests transport\n from google.auth.transport import requests\n\n # Imports a module to allow authentication using a service account\n from google.oauth2 import service_account\n\n # Gets credentials from the environment.\n credentials = service_account.Credentials.from_service_account_file(\n os.environ[\"GOOGLE_APPLICATION_CREDENTIALS\"]\n )\n scoped_credentials = credentials.with_scopes(\n 
[\"https://www.googleapis.com/auth/cloud-platform\"]\n )\n # Creates a requests Session object with the credentials.\n session = requests.AuthorizedSession(scoped_credentials)\n\n # URL to the Cloud Healthcare API endpoint and version\n base_url = \"https://healthcare.googleapis.com/v1\"\n\n # TODO(developer): Uncomment these lines and replace with your values.\n # project_id = 'my-project' # replace with your GCP project ID\n # location = 'us-central1' # replace with the parent dataset's location\n # dataset_id = 'my-dataset' # replace with the parent dataset's ID\n # fhir_store_id = 'my-fhir-store' # replace with the FHIR store ID\n # resource_type = 'Patient' # replace with the FHIR resource type\n # resource_id = 'b682d-0e-4843-a4a9-78c9ac64' # replace with the FHIR resource's ID\n url = \"{}/projects/{}/locations/{}\".format(base_url, project_id, location)\n\n resource_path = \"{}/datasets/{}/fhirStores/{}/fhir/{}/{}\".format(\n url, dataset_id, fhir_store_id, resource_type, resource_id\n )\n\n # Sets required application/fhir+json header on the request\n headers = {\"Content-Type\": \"application/fhir+json;charset=utf-8\"}\n\n # The body shown works with a Patient resource and is not guaranteed\n # to work with other types of FHIR resources. If necessary,\n # supply a new body with data that corresponds to the resource you\n # are updating.\n body = {\"resourceType\": resource_type, \"active\": True, \"id\": resource_id}\n\n response = session.put(resource_path, headers=headers, json=body)\n response.raise_for_status()\n\n resource = response.json()\n\n print(\"Updated {} resource:\".format(resource[\"resourceType\"]))\n print(json.dumps(resource, indent=2))\n\n return resource", "title": "" } ]
6e7c3d3e2a05a9577615a6066acf2de1
This method is deprecated. Please switch to AddIsGameOver.
[ { "docid": "c420d43d69cc43b11061c0e151a99f80", "score": "0.5853067", "text": "def DeepSingleAgentEnvWithDiscreteActionsStateDataAddIsGameOver(builder, isGameOver):\n return AddIsGameOver(builder, isGameOver)", "title": "" } ]
[ { "docid": "0741ff7b41ba00efd002b94655689183", "score": "0.67969733", "text": "def game_over(self):\n raise NotImplementedError()", "title": "" }, { "docid": "1110ad2a849ae1de5830ea21087ddd3e", "score": "0.63831997", "text": "def game_over(self):\n # The game is over! Disable rolling more turns.\n self._turn_button.state(['disabled'])\n self._reset_button.focus()\n self.infotext['text'] += \"Game Over!\"", "title": "" }, { "docid": "2fe6c2586de121b67d9bb23a41d76816", "score": "0.6277449", "text": "def is_game_over(self, is_game_over):\n\n self._is_game_over = is_game_over", "title": "" }, { "docid": "7a45409e84e22dd2f3f431bfd50327ab", "score": "0.62484246", "text": "def game_over(self, game):\n\n for i in game.users.keys():\n self.client_callbacks[i].gameOver(game.get_scores()[0][0])\n self.ServerGames.remove(game.get_uuid())\n print \"Game over, game uuid: %s\" % game.get_uuid()", "title": "" }, { "docid": "aec399096e50a770f0fc5d0ce0f6a50b", "score": "0.6247514", "text": "def gameOver(self):\n\t\tself.window.show_view(GameOverView(self.score))", "title": "" }, { "docid": "29207c97f15441b8fcd9b34d53cbe0a7", "score": "0.622483", "text": "def game_over(self):\n if self.current_room.id == 0:\n print(\"Game Over\")\n return True\n return False", "title": "" }, { "docid": "81dc7d83f7e75f3d0cd5c005eb072dcb", "score": "0.6170105", "text": "def isGameOver(self):\n if self.get_moves():\n return False\n return True", "title": "" }, { "docid": "8d71657ea2bf172ece032cc5db6f2da0", "score": "0.6119336", "text": "def game_over():\n global game_on\n game_on = False\n score.game_over()\n # screen.onkey(replay, \"space\")", "title": "" }, { "docid": "f2b5f8c363efa8a6862b420f4be33c5e", "score": "0.60841423", "text": "def game_over(self):\n\n title_screen = Title(self)\n info_screen = InfoScreen(self, self.score, '', title_screen)\n self.change_state(info_screen)", "title": "" }, { "docid": "7dc9d4572068ef784b3f292fed618161", "score": "0.6058548", "text": "def checkGameOver(self):\n return self.keep_playing", "title": "" }, { "docid": "d90ee583f0df0fbab55e66d78af724cb", "score": "0.60327965", "text": "def isGameOver(self):\n\n if self.getResult() != 0:\n self.gameover = True\n return True\n elif 0 not in (self.board[0,:,:,0] + self.board[0,:,:,1]):\n self.gameover = True\n return True\n else:\n return False", "title": "" }, { "docid": "2c7c3da4824df6c4163666a0e1f8fe5c", "score": "0.5992917", "text": "def displayGameOverMenu(self):\n\n # make game over menu object.\n GOM = GameOverMenu(self.__settings.get_res_width(), self.__settings.get_res_height(), self.__screen)\n GOM.run_game_over_menu()\n\n # checks if user clicked 'Main Menu' button.\n if GOM.get_to_main_menu() == True:\n self.__goToMainMenu = True\n else:\n self.__goToMainMenu = False\n\n # checks if user clicked 'Play Again' button.\n if GOM.play_again(): # if true\n self.__play_again = True\n else:\n self.__play_again = False", "title": "" }, { "docid": "0bbfc30dfec37f0e74c160e309b92dce", "score": "0.5955247", "text": "def is_in_game(self):\n pass", "title": "" }, { "docid": "ba319b5bc49b039ad4e08f467c5696d8", "score": "0.593408", "text": "def is_game_over(self):\n if self.total_score <= 0:\n print(\"Better luck next time!\")\n print(\"Game over\")\n self.keep_playing = False", "title": "" }, { "docid": "21fd3a8bf7838446df49c7d67967d42e", "score": "0.5924194", "text": "def isGameOver(self):\n if not self.get_moves():\n return True\n return False", "title": "" }, { "docid": "299349df6beb7a3ef472caef85adbc03", "score": "0.58878446", "text": "def 
game_is_over(self):\r\n return self.game_clock.game_is_over()", "title": "" }, { "docid": "f46b37ed254a57911adb1c741428403c", "score": "0.58453447", "text": "def game_over(self):\n\n return self.game.game_over()", "title": "" }, { "docid": "7bbf102d4e23762b6c15ea8584264afd", "score": "0.5844271", "text": "def gameover(self) -> bool:\n return self.win or self.loss", "title": "" }, { "docid": "cf65422563f09e246dd75e929d8314e4", "score": "0.5840213", "text": "def game_over(self):\n self.ende = UiGameOverWindow()\n self.ende.show()\n\n self.close()", "title": "" }, { "docid": "0cd812b26778362480a83da67ac27b99", "score": "0.58365285", "text": "def GameOver(self):\n #decide who won and compensate for players not actually sliding the 10th cube into place\n if self.game_state.red_score > self.game_state.blue_score:\n if self.game_state.red_team is None:\n winner = 'red'\n else:\n winner = self.game_state.red_team\n\n self.game_state.red_score = 10\n\n elif self.game_state.blue_score > self.game_state.red_score:\n if self.game_state.blue_team is None:\n winner = 'blue'\n else:\n winner = self.game_state.blue_team\n\n self.game_state.blue_score = 10\n\n elif self.game_state.blue_score == self.game_state.red_score:\n winner = 'tie'\n\n #populate and commit a game state object\n foos_log = Game(winner=winner,\\\n blue_score=self.game_state.blue_score,\\\n red_score=self.game_state.red_score,\\\n red_off=self.game_state.red_off,\\\n red_def=self.game_state.red_def,\\\n blue_off=self.game_state.blue_off,\\\n blue_def=self.game_state.blue_def,\\\n started=self.game_state.game_started,\\\n ended=int(time()))\n\n db.session.add(foos_log)\n db.session.commit()\n\n #set a state object to the winner to update web ui with winning team detected\n self.game_state.game_winner = winner\n\n self.game_state.game_on = False\n self.game_state.blue_off = -1\n self.game_state.blue_def = -1\n self.game_state.red_off = -1\n self.game_state.red_def = -1\n self.game_state.red_team = ''\n self.game_state.blue_team = ''\n self.CommitState()", "title": "" }, { "docid": "ed0594ac57ad86dbb856f00a4b7427be", "score": "0.58238447", "text": "def __check_game_over(self):\r\n if self.__level.block_collides():\r\n self.__game_over = True\r\n self.__save_score()\r\n self.__finished()", "title": "" }, { "docid": "131e7e21ddb5de5b80e346809338694e", "score": "0.58088434", "text": "def check_game_over():\n not_visible = []\n for card in GAME.play_cards:\n if card.visible == False:\n not_visible.append(card.visible)\n else:\n continue\n \n if len(not_visible) == len(GAME.play_cards):\n window = card.turtle.screen\n play = window.textinput(\"Game Over!\", \"Would you like to play again? (Y/N): \" )\n if play.upper() == 'Y':\n # update leaderboard before restarting\n update_leaderboard()\n # reset guesses\n GAME.guesses = 0\n game_controller()\n else:\n # update leaderboard before quiting\n update_leaderboard()\n quit()", "title": "" }, { "docid": "86848d2ea068bf37cbf5a79aabf52f41", "score": "0.5808788", "text": "def game_over(self):\n self.screen.fill(WHITE)\n font = PG.font.SysFont('Helvitica', 50, True, False)\n final = font.render(\"GAME OVER\", True, BLACK)\n scoreboard = font.render(\"YOUR SCORE: \" + str(self.score), True, BLACK)\n question = font.render(\"Play Again? 
Y/N\", True, BLACK)\n self.screen.blit(final, [SIZE[0]/2 - 150, SIZE[1]/2 - 50])\n self.screen.blit(scoreboard, [SIZE[0]/2 - 175, SIZE[1]/2])\n self.screen.blit(question, [SIZE[0]/2 - 175, SIZE[1]/2 + 50])\n\n PG.display.flip()\n while True:\n for event in PG.event.get():\n if event.type == PG.QUIT:\n PG.quit()\n exit()\n if event.type == PG.KEYDOWN and event.key == PG.K_ESCAPE:\n PG.quit()\n exit()\n if event.type == PG.KEYDOWN and event.key == PG.K_y:\n #Play again\n return True\n if event.type == PG.KEYDOWN and event.key == PG.K_n:\n #Don't play again\n return False", "title": "" }, { "docid": "2b403d352698d750572da0e3a71d6094", "score": "0.5776626", "text": "def isGameOver(self,board):\n\t\tif(self.canMove(board,1) == False):\n\t\t\tif(self.canMove(board,-1) == False):\n\t\t\t\treturn True\n\t\treturn False", "title": "" }, { "docid": "31e4c95404474d3958d773070b3de3f2", "score": "0.57646906", "text": "def game_over(self):\n \n self.sound.play(\"gameover\")\n pygame.mouse.set_visible(True)\n back_button_position = 5, 400\n exit_button_position = 5, 440\n \n self.gui.put_widget(\"back\", back_button_position)\n self.gui.put_widget(\"exit\", exit_button_position)\n\n score_txt = \"Score: \"+str(self.state.score)\n score_surface = self.gui.get_text(score_txt)\n \n go_screen = pygame.image.load(GAMEOVER_SCREEN_IMG).convert_alpha()\n\n while True:\n\n self.screen.fill((0,0,255))\n self.screen.blit(go_screen, (0,0))\n self.screen.blit(score_surface,(0,0))\n self.gui.render(self.screen)\n\n events = self.gui.process(pygame.event.get())\n if events:\n\n for event in events:\n type, name = event\n if type == \"quit\":\n exit()\n elif type == \"cursor-over\":\n self.sound.play(\"select\")\n elif type == \"clicked\":\n self.sound.play(\"click\")\n sleep(1)\n if name == \"back\":\n self.gui.remove_widget(\"back\")\n self.gui.remove_widget(\"exit\")\n return\n elif name == \"exit\":\n exit()\n\n pygame.display.update()", "title": "" }, { "docid": "18b79da5ff59312b36d1bdefa20a74ed", "score": "0.57605654", "text": "def draw_game_over(self):\n output = \"Game Over\"\n arcade.draw_text(output, 240, 400, arcade.color.WHITE, 54)\n\n output = \"Click to restart\"\n arcade.draw_text(output, 310, 300, arcade.color.WHITE, 24)", "title": "" }, { "docid": "54fe4a5325a9dee24c184e0cc4faea97", "score": "0.57425076", "text": "def turn_over(self, player):\n return True", "title": "" }, { "docid": "a1eb1a4fb15b0f0b79036edb60db0faa", "score": "0.5738607", "text": "def game_over() -> None:\n print(\"Game Over!\")\n print(\"Vous aurez probablement plus de chances la prochaine fois...\")", "title": "" }, { "docid": "608c38e7adba72e7d13cbbf8645bd616", "score": "0.5716874", "text": "def is_game_over(self):\n def gameover(guesses, ships):\n return all(map(lambda shipcoord: shipcoord in guesses, flatten(ships)))\n\n return gameover(self.player2.guesses, self.player1.ships)", "title": "" }, { "docid": "5992661af77c7221a6d202f9d627942c", "score": "0.56981707", "text": "def lose_life(self):\r\n global game_over\r\n player.num_lives -= 1\r\n player_death_sound.play()\r\n if player.num_lives == 0:\r\n game_over = True\r\n blinking = Blinking() #used to blink cursor as user types in their name\r\n prompt_high_score_screen(blinking,screen, score)\r\n blinking.kill()\r\n else:\r\n player.shield = 100\r\n self.respawning = True", "title": "" }, { "docid": "c16e4cb6362766671bd89f0b5dd2c020", "score": "0.5694089", "text": "def game_over(self):\n\n display_string = \"GAME OVER!\"\n points_string = f\"Your points: {len(self.body())}\"\n 
self.window.erase()\n self.window.border()\n max_y, max_x = self.window.getmaxyx()\n y = max_y // 2\n x1 = (max_x - len(display_string)) // 2\n x2 = (max_x - len(points_string)) // 2\n self.window.addstr(y - 1, x1, display_string)\n self.window.addstr(y + 1, x2, points_string)\n self.window.refresh()\n self.alive = False\n time.sleep(2)", "title": "" }, { "docid": "51cccfc1c3e7791d3f40e7ca1ab0f617", "score": "0.5640243", "text": "def game_over(self, state):\n return True if state == 0 else False", "title": "" }, { "docid": "687c0eae46ae3b6fc700625332edc8b6", "score": "0.56252277", "text": "def GameOver(self):\n \n i = 36\n self.__gameover_affect.play()\n while i <= 240:\n img = self.LoadImagesHelper(\"Pictures/over/Pictures\" + str(i) + \".jpg\", 1280, 715)\n i += 1\n self.__screen.blit(img, (0,0))\n self.__clock.tick(30)\n pygame.display.flip()\n self.__screen.fill(BLACK)", "title": "" }, { "docid": "cf6063fa4cf6eafa62c5470191b311d1", "score": "0.561631", "text": "def over(self):\n\t\tself.gameOver=True\n\t\tself.can.create_line(0,0, 10*self.blocksize, 20*self.blocksize+5, fill=\"red\", width=6)\n\t\tself.can.create_line(10*self.blocksize, 0,0,20*self.blocksize+5, fill=\"red\", width=6)\n\t\tself.master.check_over()", "title": "" }, { "docid": "608726625a6277a82253e77fd9a11696", "score": "0.55831105", "text": "def game_over(self):\n points = self.points()\n if points == 21:\n return 'Blackjack!'\n elif points > 21:\n return 'Busted'\n else:\n return False", "title": "" }, { "docid": "3fbd9c20832fcf1d2580aa064726c55e", "score": "0.5560022", "text": "def game_over(self):\n\n self.delete(ALL)\n self.create_text(self.winfo_width() /2, self.winfo_height()/2,\n text=\"Game Over with score {0}\".format(self.score), fill=\"white\")", "title": "" }, { "docid": "4a5398257e4ed72e509f6ae498ca8949", "score": "0.55443573", "text": "def prep_game_over(self):\n\t\tself.game_over_image = self.font.render('Game Over', True, self.text_color, (110, 108, 108))\n\t\tself.game_over_rect = self.game_over_image.get_rect()\n\t\tself.game_over_rect.centery = self.screen_rect.centery - 35\n\t\tself.game_over_rect.centerx = self.screen_rect.centerx", "title": "" }, { "docid": "441d2c1850d6467cea57a018a57b6d53", "score": "0.5542856", "text": "def _game_over_functionality(self, sound_when_dead, gameover_text_str):\n\n self._play_dead_sound(sound_when_dead)\n self.dead = True\n self.game_over_text = gameover_text_str\n self.game_over_screen()", "title": "" }, { "docid": "f9159c3de55746e7394cd7672e669e77", "score": "0.54997027", "text": "def gameOverStateRender(self, display):\r\n display.fill((0, 0, 0))\r\n display.fill((200, 200, 200), ((131, 82), (655, 411)))\r\n display.blit(font.Font('freesansbold.ttf', 100).render(\"Game Over!\", True, (0, 0, 0)), (160, 140))\r\n self.gameOverButtons[0].drawButton(display, (245, 350))\r\n self.gameOverButtons[1].drawButton(display, (465, 350))", "title": "" }, { "docid": "79174a718067baad2bee4c73bafa52e0", "score": "0.5468527", "text": "def game_over(self):\n basic_font = pygame.font.SysFont(None, self.slot) # to refactor\n left = self.display.get_rect().centerx - len(\"GAME O\") * self.slot\n top = self.display.get_rect().centery\n self._print(\n basic_font, \"GAME OVER\", left, top, Cuatris.RED, Cuatris.GRAY\n )\n self._print(\n basic_font,\n \"RETURN TO START OVER\",\n left,\n top + self.slot,\n Cuatris.RED,\n Cuatris.GRAY,\n )", "title": "" }, { "docid": "388b83f7e189ac0da877de2c06f882d8", "score": "0.54560274", "text": "def game_over(self):\n self.goto(0.0, 0.0)\n 
self.write(\n \" Game Over\\nclick anywhere to close the window\",\n align=\"center\",\n font=FONT,\n )", "title": "" }, { "docid": "196757ab093bf586148bbd26548d973a", "score": "0.5452973", "text": "def game_over(self, winner):\n pid = winner.get_id()\n last_move = winner.last_move()\n wid = last_move[0]\n move_action = last_move[1]\n self.__obs(lambda obs: obs.game_over(pid, wid, move_action))", "title": "" }, { "docid": "ecceb52f24e5132f88ccd5768a574301", "score": "0.5443769", "text": "def gameOver(self):\n helv50 = font.Font(family='Helvetica', size=50, weight='bold')\n self.canvas.create_text(400, 400, text='Game Over', font=helv50, fill='red')\n self.canvas.pack()", "title": "" }, { "docid": "a290abc0f87b208f3632b0a51dfa7f62", "score": "0.5442499", "text": "def end_game_menu(self):\r\n # Game over menu\r\n game_over_menu = pygame_menu.Menu(self.WIDTH, self.HEIGHT, 'Game over', theme=pygame_menu.themes.THEME_DARK)\r\n\r\n # Depending on the game players, print the winner/loser\r\n if self.game.AI_activated and self.game.winner == 'White':\r\n WINNER = 'YOU WIN!'\r\n game_over_menu.add.label(WINNER, max_char=-1, font_size=70, font_color='Red')\r\n\r\n elif self.game.AI_activated and self.game.winner == 'Black':\r\n WINNER = 'YOU LOSE!'\r\n game_over_menu.add.label(WINNER, max_char=-1, font_size=70, font_color='Red')\r\n\r\n elif not self.game.AI_activated and self.game.winner == 'White':\r\n WINNER = 'THE WINNER IS ' + self.user_name_1.get_value().capitalize() + ' (White)'\r\n game_over_menu.add.label(WINNER, max_char=-1, font_size=70, font_color='Red')\r\n\r\n elif not self.game.AI_activated and self.game.winner == 'Black':\r\n WINNER = 'THE WINNER IS ' + self.user_name_2.get_value().capitalize() + ' (Black)'\r\n game_over_menu.add.label(WINNER, max_char=-1, font_size=70, font_color='Red')\r\n\r\n # Play again with the same settings\r\n game_over_menu.add.button('Play again', self.run_game, True, align=pygame_menu.locals.ALIGN_LEFT)\r\n # Go to main menu\r\n game_over_menu.add.button('Main menu', main_menu, align=pygame_menu.locals.ALIGN_LEFT)\r\n game_over_menu.mainloop(self.WIN)", "title": "" }, { "docid": "6f0d5688a07f925315a88caa72be34da", "score": "0.5434655", "text": "def game_over(self):\n _choice = self.disp.game_over_screen(self.stage.score, self.stage.player.character)\n if _choice == \"start\":\n self.g_o = False\n elif _choice == \"quick\":\n self.g_o = False\n self.skip_start = True\n else:\n self.quit_game()", "title": "" }, { "docid": "fdf0d5cc0f0df1335bbdc27d8c930b86", "score": "0.54312736", "text": "def ongameplayupdatingdisabled(self):\n self.Check( ID_ToggleAI, False )\n self.get_frame().get_toolbar().ToggleTool( ID_ToggleAI, False )", "title": "" }, { "docid": "fc3d2d997ad759fefa72b84187a331b1", "score": "0.5400444", "text": "def draw_game_over(self):\n arcade.draw_text(\"You lost!\", SCREEN_W/2, SCREEN_H/2, arcade.color.ANTIQUE_RUBY, font_size=64, font_name='arial', anchor_x='center')\n arcade.set_background_color(arcade.color.PURPLE_HEART)", "title": "" }, { "docid": "688c5dbf7295e03d47741e03c5d734e3", "score": "0.5397686", "text": "def draw_gameover_page(self):\n arcade.draw_texture_rectangle(SCREEN_WIDTH // 2, SCREEN_HEIGHT // 2,\n SCREEN_WIDTH,\n SCREEN_HEIGHT, self.gameover, 0)\n\n # Add messages depending on game outcome\n if self.total_time < 0.1:\n arcade.draw_text(\"Times Up!!!\", 375, 160, arcade.color.RED, 75)\n output = f\"Score: {self.score}\"\n arcade.draw_text(output, 570, 70, arcade.color.WHITE, 30)\n\n elif self.lives < 1:\n arcade.draw_text(\"You 
lost all your lives\", 400, 150, arcade.color.WHITE, 35)\n output = f\"Score: {self.score}\"\n arcade.draw_text(output, 570, 70, arcade.color.WHITE, 30)\n\n elif self.score < 25:\n arcade.draw_text(\"Sorry you lost!\", 200, 400, arcade.color.RED, 75)\n output = f\"Score: {self.score}\"\n arcade.draw_text(output, 570, 70, arcade.color.WHITE, 30)", "title": "" }, { "docid": "f58c450068c26fa381abc100bf4594ea", "score": "0.53893214", "text": "def game_over(self):\n return (self.x, self.y) not in self.actions", "title": "" }, { "docid": "d577a328f81f92b8f2ef8ba84277e8b7", "score": "0.538671", "text": "def gameOver(self):\n\t\treturn self.winner() or not self.getValidMoves()", "title": "" }, { "docid": "327db2da68687d8c64aed061aae7a343", "score": "0.53813183", "text": "def gameover( self ) -> bool:\n if len( self.actions ) == 0: return True\n if abs( self.utility() ) == math.inf: return True\n return False", "title": "" }, { "docid": "c4dbe174390d5feab84953eb2f43f5fe", "score": "0.53556", "text": "def IsDrawOver(self):\r\n return False", "title": "" }, { "docid": "d2698144478df75f60db07d4d1cd087c", "score": "0.5344385", "text": "def game_over():\n if points_1 >= 7:\n game_over_screen_1()\n elif points_2 >= 7:\n game_over_screen_2()\n else:\n return False", "title": "" }, { "docid": "749cfce239eb01b8e51ae882bbc7c56c", "score": "0.53433317", "text": "def game_over(reason):\n print(\"\\n\" + reason)\n print(\"Game Over!\")\n play_again()\n \"\"\"\n Ask player to play again or not by activating play_again() function\n \"\"\"", "title": "" }, { "docid": "37dd3773c8604d0be4a716e28065e8c5", "score": "0.5332427", "text": "def displayGameOver():\n font = pygame.font.SysFont('comicsansms', 36)\n text = font.render(ScoreBoard.gameOverMessage, 1, (109, 48, 252))\n textPos = text.get_rect(\n centerx=ScoreBoard.POSITION[0] + ScoreBoard.SIZE[0] * 0.5,\n centery=ScoreBoard.POSITION[1] + ScoreBoard.SIZE[1] * 0.5 + 35\n )\n screen.blit(text, textPos)", "title": "" }, { "docid": "c1081078f48a4f599c9ad8ada460ea33", "score": "0.53310555", "text": "def _game_over(self):\n game_over = False\n if self.head in self.snake[1:]: # head is position 0 in the list\n game_over = True\n if self.head.x > self.w - constants.BLOCK_SZ or self.head.x < 0 or \\\n self.head.y < 0 or self.head.y > self.h - constants.BLOCK_SZ:\n game_over = True \n return game_over", "title": "" }, { "docid": "07ce8295e345ae586d270b1ac2b3a646", "score": "0.5325378", "text": "def is_over(self, state: GameState) -> bool:\n raise NotImplementedError", "title": "" }, { "docid": "f931a49a08f15ac78b86b5cb154e7c32", "score": "0.53073853", "text": "def label_gameover():\n label = GLabel('Game over!')\n label.font = '-50'\n label.color = 'skyblue'\n return label", "title": "" }, { "docid": "235b2be5b96da804b02247557b72349d", "score": "0.5300991", "text": "def game_is_over(game):\n for p in game.players:\n if p.score >= 50:\n return True\n return False", "title": "" }, { "docid": "a936e63989036f875390f1bae547a9be", "score": "0.5284049", "text": "def on_move_fail(self, user, battle):", "title": "" }, { "docid": "cb0c28f56b3349fd2c5b9bef2fdd4574", "score": "0.52828914", "text": "def is_game_over(self):\n for player in self.players:\n for penguin in player.get_penguins():\n if len(self.get_reachable(penguin)) > 0:\n return False\n return True", "title": "" }, { "docid": "150dcc169c8ae0948b1aad82700086a7", "score": "0.5247563", "text": "def ongameplayupdatingenabled(self):\n self.Check( ID_ToggleAI, True )\n self.get_frame().get_toolbar().ToggleTool( ID_ToggleAI, 
True )", "title": "" }, { "docid": "469134f6e994f11813e74a2d9a5ef018", "score": "0.52322143", "text": "def test_game_over(self):\n\n UFO = Game('failure')\n UFO.lives = 0\n result = Game.check_status(UFO)\n\n self.assertEqual(result, game.STATUS_LOSE)", "title": "" }, { "docid": "d86227bf0bcca09f539a2417e8dcb690", "score": "0.5217142", "text": "def is_game_over(board):\n moves = max(available_moves(board))\n if moves == 0:\n return True\n p1w = has_won(board,1)\n p2w = has_won(board,2)\n if p1w == True:\n return True\n elif p2w == True:\n return True\n else:\n return False", "title": "" }, { "docid": "bdea1b45e94af3b6766f756a51b939bb", "score": "0.52171195", "text": "def end(self):\r\n end_message = games.Message(value = \"Game Over\",\r\n size = 90,\r\n color = color.red,\r\n x = games.screen.width/2,\r\n y = games.screen.height/2,\r\n lifetime = 5 * games.screen.fps,\r\n after_death = games.screen.quit,\r\n is_collideable = False)\r\n games.screen.add(end_message)", "title": "" }, { "docid": "046e0ca3a338716253d24e0bb0d7f337", "score": "0.5211163", "text": "def check_background_games(self, *args):\n if self.username is None or self.password is None:\n return\n if self.player.logged_in is False:\n return\n if not self.my_turn_games and not self.old_games and not self.opp_turn_games:\n self.load_current_games()\n\n games = list(self.player.current_games)\n if len(games) < 1:\n return\n\n for game in games:\n if game.over and game in self.old_games:\n # NOOP\n pass\n elif game.over and game not in self.old_games:\n Logger.info(f\"Game over and not in old games? {game.guid}\")\n self.notify_removed_game(game)\n self.old_games.append(game)\n if game in self.opp_turn_games:\n self.opp_turn_games.remove(game)\n if game in self.my_turn_games:\n self.my_turn_games.remove(game)\n\n elif game.my_turn and game in self.opp_turn_games:\n # New game for us to make a move!\n self.notify_my_move(game)\n self.my_turn_games.append(game)\n self.opp_turn_games.remove(game)\n elif not game.my_turn and game in self.my_turn_games:\n # Opponent is to move now.\n self.opp_turn_games.append(game)\n self.my_turn_games.remove(game)\n elif game not in self.my_turn_games and game not in self.opp_turn_games:\n self.notify_new_game(game)\n if game.my_turn:\n self.my_turn_games.append(game)\n else:\n self.opp_turn_games.append(game)", "title": "" }, { "docid": "a433776dfa836a53e73a050f6b53129b", "score": "0.52030104", "text": "def check_game_over(self):\n\n # check if we have at least K blocks saved, either locally or remotely\n lbs, rbs_array = self.local_blocks, self.remote_blocks\n rbs = []\n for i in range(len(rbs_array)):\n for j in range(len(rbs_array[i])):\n if len(rbs) > i:\n rbs[i] = rbs_array[j][i] or rbs[i]\n else:\n rbs.append(rbs_array[j][i])\n\n blocks_saved = [lb or rb for lb, rb in zip(lbs, rbs)]\n if sum(blocks_saved) < self.K:\n raise GameOver", "title": "" }, { "docid": "67f224eaef8f1a47a37f11a99ec4f380", "score": "0.51871765", "text": "def is_end_game(self):\n if Player.total == 2:\n try:\n if (\n self.board.rows % 2 == 1\n and self.counter.pawn[0].position\n == (self.board.cols - 1, self.board.rows - 1)\n ) or (\n self.board.rows % 2 == 0\n and self.counter.pawn[0].position == (0, self.board.rows - 1)\n ):\n # Player Loses\n self._winner = self.counter\n pygame.display.update()\n return True\n except IndexError:\n pass\n if self.counter.score == 6:\n # Player Loses\n self._winner = self.counter\n pygame.display.update()\n return True\n elif Player.array[1].score == 6:\n # Player Wins\n self._winner = 
Player.array[1]\n pygame.display.update()\n return True\n else:\n for player in Player.array:\n if player.score == 12 // (Player.total - 1):\n # That Player Wins\n self._winner = player\n pygame.display.update()\n return True\n return False", "title": "" }, { "docid": "b15c8022cb6c26e6f496d37d1be539ee", "score": "0.5184023", "text": "def game_over(ship_list, AI_ship_list):\n if player_win(AI_ship_list) or AI_win(ship_list): #either player wins or AI wins\n return True\n else:\n return False", "title": "" }, { "docid": "a8fe29a41182dee26fd5a80599d6a58a", "score": "0.51778793", "text": "def on_game_over(self, bj):\n self.print(\"on_game_over\")\n self.print(\"dealer:\", player.players[0].hand)\n res = self.wins - self.losses\n if self.positive_prog:\n res = -res\n self.print(\"res:\", res)\n if res < 0:\n self.betting.on_loss(abs(res))\n if self.anti_fallacy:\n self.af_trigger = True\n elif res > 0 and not self.af_trigger:\n self.betting.on_win(res)\n elif res > 0:\n self.af_trigger = False\n else:\n self.betting.on_tie()\n self.reset_results()", "title": "" }, { "docid": "73e8b9877e116344f91802bda7524df2", "score": "0.5169724", "text": "def game_over():\n print(GAME_OVER)\n time.sleep(1)\n play_again()", "title": "" }, { "docid": "320ac21dd8f220dad9aad9fd6cce1e81", "score": "0.5163603", "text": "def GameOver():\n Text(\"Game Over\",window_Width//2,window_Height//2,Window).Message_display()\n pg.display.update()\n while True:\n for event in pg.event.get():\n if event.type==pg.KEYDOWN:\n if event.type==pg.QUIT:\n pg.quit()\n quit()\n if event.key==pg.K_RETURN:\n mainloop()", "title": "" }, { "docid": "50baffd35696fd783f986962d4ae6f97", "score": "0.51631284", "text": "def isGameOver(self, boards):\n return self.deadTest(boards[0]) and self.deadTest(boards[1]) and self.deadTest(boards[2])", "title": "" }, { "docid": "36e8efc791fb6facf821fc60dcfc7de1", "score": "0.5156873", "text": "def game_over(self):\n if len(self.player_pieces) == 0:\n self.status = \"player_wins\"\n elif len(self.computer_pieces) == 0:\n self.status = \"computer_wins\"\n else:\n left_end = self.domino_snake[0][0]\n right_end = self.domino_snake[len(self.domino_snake) - 1][1]\n # If numbers on the ends are identical\n if left_end == right_end:\n # And appear within the snake 8 times, it's a draw\n if len([piece for piece in self.domino_snake if left_end in piece]) >= 8:\n self.status = \"draw\"", "title": "" }, { "docid": "dc3048bf2c6144bea4bda24cfce0bb2e", "score": "0.51562595", "text": "def game_exists(session):\n # Your code goes here\n return False", "title": "" }, { "docid": "c19eb72f284c63b3e7d5841cb51f71ea", "score": "0.5154593", "text": "def reloadGameOver(self, image):\n gameOver = pygame.image.load(image)\n gameOver = pygame.transform.scale(gameOver, self.dimension)\n self.screen.blit(gameOver, [0, 0])", "title": "" }, { "docid": "6f665502c69dfbda30141b512240f47d", "score": "0.5149441", "text": "def is_lost():\n return get_game_state() == -1", "title": "" }, { "docid": "70147f4542ef8e833626671e6d8a1aee", "score": "0.51396245", "text": "def isGameOver(S,p):\n if move_was_winning_move(S,p):\n if p == COMP:\n set_winner(COMP)\n else:\n set_winner(HUMAN)\n return True\n \n if move_still_possible(S) == False:\n set_winner(TIE)\n return True\n\n return False", "title": "" }, { "docid": "601bf74065e353d6278d0cb927e6535c", "score": "0.5125398", "text": "def is_in_pregame(self):\n return False", "title": "" }, { "docid": "3999841bbc9fd6f8572e813863d75042", "score": "0.5120661", "text": "def episode_over(self, winner):\n if 
winner == DRAW:\n print('Game over! It was a draw.')\n else:\n print('Game over! Winner: Player {0}'.format(winner))", "title": "" }, { "docid": "c4473ef49de1ea5b77bd6488517702e6", "score": "0.5120455", "text": "def game_over():\n WIN = Window.WIN\n WHITE = (255, 255, 255)\n BLACK = (0, 0, 0)\n\n end = True\n\n while end:\n for event in pygame.event.get():\n print(event)\n\n if event.type == pygame.QUIT:\n pygame.quit()\n quit()\n\n WIN.fill(BLACK)\n text = pygame.font.SysFont('Times New Roman', 100)\n Text, TextRect = text_ob(\"Game Over\", text, WHITE)\n TextRect.center = ((Window.WIDTH / 2), (Window.HEIGHT / 2))\n WIN.blit(Text, TextRect)\n pygame.display.update()", "title": "" }, { "docid": "f3158e57c22b57e615c7e8352bb52bb3", "score": "0.5109053", "text": "def display_game_over(self, screen):\n my_score, opp_score = self.get_my_score(self.board)\n \n screen.blit(OVER_BG, OVER_BG_RECT)\n\n # Decide win, lose, or draw\n if my_score > opp_score:\n screen.blit(TROPHY, TROPHY_RECT)\n elif my_score < opp_score:\n screen.blit(SAD, SAD_RECT)\n elif my_score == opp_score:\n screen.blit(TIE, TIE_RECT)\n\n # Game over text\n text_surf = BRIT_FONT_BIG.render(\"GAME OVER!\", True, PURPLE)\n text_rect = text_surf.get_rect()\n text_rect.center = (int(WINDOW_X/2), 50)\n \n # Score text\n score_surf = BRIT_FONT.render(\" %s -- %s \" %\n (str(my_score), str(opp_score)),\n True, BLACK)\n score_rect = score_surf.get_rect()\n score_rect.center = (int(WINDOW_X/2), 400)\n\n # Query text\n text2_surf = BRIT_FONT.render('What do you want to do?', True, BLACK)\n text2_rect = text2_surf.get_rect()\n text2_rect.center = (int(WINDOW_X/2), 450)\n\n # Display the elements\n screen.blit(text_surf, text_rect)\n screen.blit(text2_surf, text2_rect)\n screen.blit(score_surf, score_rect)\n screen.blit(self.play_again, self.play_again_rect)\n screen.blit(self.quit_game, self.quit_game_rect) \n\n # Set game over trigger\n self.gameover = True", "title": "" }, { "docid": "0011bd32a01349c31d52927eeae5ac06", "score": "0.50900185", "text": "def _checkGameState(self) -> int:\n if self.game.isFinished():\n self.winningPlayers = self.game.winningPlayers\n return END\n return CONTINUE", "title": "" }, { "docid": "10bfcc50ef53ef02e8959e291b302447", "score": "0.50871944", "text": "def game_over_text(self):\n over_text = self.score.over_font.render(\"GAME OVER\", True, (255, 255, 255))\n self.screen.blit(over_text, (200, 250))\n return self", "title": "" }, { "docid": "e18114991da85b4b7ecf89ea5b5ff97a", "score": "0.5079349", "text": "def loadGameOver(self, image):\n gameOver = pygame.image.load(image)\n gameOver = pygame.transform.scale(gameOver, self.dimension)\n self.screen.blit(gameOver, [0, 0])", "title": "" }, { "docid": "75dbb1e37944f7b02a94c3a4b543a5b2", "score": "0.5078623", "text": "def turnOver(self):\n if self.__faceDown: self.__faceDown = False\n else: self.__faceDown = True", "title": "" }, { "docid": "30eb8c857cb9507b2b1a7553e85cc432", "score": "0.5071968", "text": "def update_game_over_screen(screen_type):\n # If the game is over\n if bbm.check_game_over_condition():\n # Delete an existing save file given the perma-death nature of the game\n bbo.delete_save_file()\n # Update the game over screen background\n if init.game_state.game_over != \"Lost\":\n sc.sm.get_screen(\"over\").ids.background_over.this_source = bbo.get_path(\n \"../../GraphicFiles/won_background.png\")\n if init.game_state.game_over != \"Won\":\n sc.sm.get_screen(\"over\").ids.background_over.this_source = bbo.get_path(\n 
\"../../GraphicFiles/lost_background.png\")\n # Change the screen\n sc.sm.get_screen(screen_type).change_window(\"over\")", "title": "" }, { "docid": "b61f48e415cf13b15ef8a9d5d8692e7c", "score": "0.5068129", "text": "def loseGame(self):\n # ONLY set alive via loseGame\n self.alive = False\n self.targetable = False\n self.discard += self.hand\n del self.hand[:]\n return", "title": "" }, { "docid": "cb2e38cbd74fc6b93413c3de052b4ec2", "score": "0.50639844", "text": "def gameOverButtonChecker(self):\r\n for button in self.gameOverButtons:\r\n button.clicked()", "title": "" }, { "docid": "786e0bc3b168e8eb90aa91b0a09c624d", "score": "0.5060393", "text": "def end_game(self, winner, loser):\n self.running = False\n print(\"Game over! {} wins and {} loses\".format(winner, loser))\n if isinstance(winner, Ai):\n winner.append_win()\n if isinstance(loser, Ai):\n loser.append_loss()", "title": "" }, { "docid": "d5b51f364f472f2039d61a88f37f6138", "score": "0.50415915", "text": "def is_game_over(self, gamestate) -> bool:\n all_players_dead = gamestate.all_players_expelled()\n state_complete = gamestate.game_complete()\n return state_complete or all_players_dead", "title": "" }, { "docid": "d0e111eb9bf2a17c606cc311b47d3436", "score": "0.50329655", "text": "def game_over(self, matrix):\n if any(-1 in row for row in matrix):\n self.bomeshflag = True\n return\n if any(2048 in row for row in matrix):\n self.overflag = True\n self.winflag = True\n elif (\n not any(0 in row for row in matrix)\n and not self.horizontal_move_exits(matrix)\n and not self.vertical_move_exits(matrix)\n ):\n self.overflag = True\n else:\n self.overflag = False\n self.winflag = False", "title": "" }, { "docid": "adca3275dfa82295f0945c1169c3fcfb", "score": "0.5031085", "text": "def lose_message(self):\n messagebox.showwarning('Fail', 'You Lose..')", "title": "" }, { "docid": "caba0c5a11915fc6850fb21c5f64f93a", "score": "0.50257415", "text": "def add_game_over_text():\n with open(\"active-game-files/game_over.txt\", \"a\") as f:\n f.writelines(\"Game Over\")", "title": "" }, { "docid": "02b77f306b27fbfb88bf71071a0ac30c", "score": "0.50181514", "text": "def is_game_over(self, state):\n return self.is_player_win(state, \"x\") or self.is_player_win(state, \"o\") or self.is_board_filled()", "title": "" }, { "docid": "1f91e037784ea5097a3ce13c42f56e0d", "score": "0.50021744", "text": "def check_new_event(self):\n new_event_list = self.nav.get_fleet_events()\n for new_event in new_event_list:\n if self.is_in(new_event, self.list_event):\n self.list_event.append(new_event)\n if not self.is_my_fleet(new_event):\n log(\n \"{} ship(s) from ({}, {}) to ({}, {}) arriving in {} at {} to {}\".format(\n new_event[\"detail_fleet\"],\n new_event[\"origin_planet_name\"],\n new_event[\"origin_planet_coord\"],\n new_event[\"destination_planet_name\"],\n new_event[\"destination_planet_coord\"],\n new_event[\"remaining_time\"],\n new_event[\"arrival_time\"],\n self.mission[new_event[\"mission_type\"]],\n )\n )\n if self.is_under_attack(new_event):\n self.nav.send_message_enemy(new_event[\"player_id\"])\n\n is_event_over = False\n for old_event in self.list_event:\n if self.is_in(old_event, new_event_list):\n self.list_event.remove(old_event)\n is_event_over = True\n return is_event_over", "title": "" }, { "docid": "069a06ed8aa0e4ecc23e12d8ab0eebd6", "score": "0.49960214", "text": "def handleStatsLostEvent (self, event):\n raise NotImplementedException(\"SENPAIListener object must properly implement handleStatsLostEvent, or return False from handlesEvent() for 
event type 'Stats Lost'.\")", "title": "" }, { "docid": "ad2a3ba8428a6461e9b3f14551e8f966", "score": "0.49867088", "text": "def game_over():\r\n text = pygame.font.Font(\"freesansbold.ttf\", 70)\r\n game_over = text.render(\"Game Over\", True, (255, 255, 255))\r\n window.blit(game_over, (150, 250))", "title": "" }, { "docid": "5fb7c4650fa3df8200a10791b6ccda1d", "score": "0.4982617", "text": "def has_ended(game):\n # Your code goes here\n return True", "title": "" }, { "docid": "8c3bff4f48378281200f34b559763226", "score": "0.4982418", "text": "def gameOver(self):\n # font = pygame.font.Font(\"Serif\", 25)\n font = pygame.font.SysFont(\"serif\", 25)\n text = font.render(\"Game Over, click to restart\", True, BLACK)\n center_x = (TREE_WIDTH // 2) - (text.get_width() // 2)\n center_y = (TREE_HEIGHT // 2) - (text.get_height() // 2)\n self.screen.blit(text, [center_x, center_y])", "title": "" } ]
797776956240c714c4b1dc4f47bbfbd4
Obtain the value of node_vm_size.
[ { "docid": "25cfcaaaf05a56b0ced7a9b70d3dd850", "score": "0.91161466", "text": "def get_node_vm_size(self) -> str:\n return self._get_node_vm_size()", "title": "" } ]
[ { "docid": "0581d2bf606539b195ce91b0880ccd28", "score": "0.8251246", "text": "def vm_size(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"vm_size\")", "title": "" }, { "docid": "9f2c5a1a0b8fbc3b59c1289a3fdbb736", "score": "0.80528116", "text": "def vm_size(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"vm_size\")", "title": "" }, { "docid": "8edc4cedba4119326b15f4c7e3a3e459", "score": "0.804634", "text": "def vm_size(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"vm_size\")", "title": "" }, { "docid": "8edc4cedba4119326b15f4c7e3a3e459", "score": "0.804634", "text": "def vm_size(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"vm_size\")", "title": "" }, { "docid": "4043e6b695ff8b7e7ac708bb3f7ac136", "score": "0.7856617", "text": "def _get_node_vm_size(self, read_only: bool = False) -> str:\n default_value = \"Standard_DS2_v2\"\n # read the original value passed by the command\n raw_value = self.raw_param.get(\"node_vm_size\")\n # try to read the property value corresponding to the parameter from the `mc` object\n value_obtained_from_mc = None\n if self.mc and self.mc.agent_pool_profiles:\n agent_pool_profile = safe_list_get(\n self.mc.agent_pool_profiles, 0, None\n )\n if agent_pool_profile:\n value_obtained_from_mc = agent_pool_profile.vm_size\n # try to retrieve the value from snapshot\n value_obtained_from_snapshot = None\n # skip dynamic completion if read_only is specified\n if not read_only:\n snapshot = self.get_snapshot()\n if snapshot:\n value_obtained_from_snapshot = snapshot.vm_size\n\n # set default value\n if value_obtained_from_mc is not None:\n node_vm_size = value_obtained_from_mc\n elif raw_value is not None:\n node_vm_size = raw_value\n elif value_obtained_from_snapshot is not None:\n node_vm_size = value_obtained_from_snapshot\n else:\n node_vm_size = default_value\n\n # this parameter does not need validation\n return node_vm_size", "title": "" }, { "docid": "c858171aa72a1e06b587007d4b6d3eb5", "score": "0.7719487", "text": "def agent_vm_size(self) -> Optional[str]:\n return pulumi.get(self, \"agent_vm_size\")", "title": "" }, { "docid": "b901f20422dfc8c6ca58ffda1809561f", "score": "0.76124597", "text": "def v_size(self) -> int:\r\n\r\n return self.node_size", "title": "" }, { "docid": "5a45dd929d04d5c00d3d641aedd171b7", "score": "0.71676594", "text": "def nodeSize(self):\n size = graphs.graphMap(self,item='node',dtype=numpy.float32)\n size[:]=1\n return size", "title": "" }, { "docid": "9f4e477a172a9ff837eed37dc8aee546", "score": "0.7057961", "text": "def GetMeasurementVectorSize(self):\n return _itkKdTreePython.itkKdTreeLSVF3_GetMeasurementVectorSize(self)", "title": "" }, { "docid": "42c9d612c43bc6d080fa9367b2a6faea", "score": "0.7029785", "text": "def _list_machines__get_size(self, node_dict):\n return None", "title": "" }, { "docid": "f5dc47a7095b364aff86f06a4fb6b84b", "score": "0.70250833", "text": "def GetMeasurementVectorSize(self):\n return _itkKdTreePython.itkKdTreeLSVF2_GetMeasurementVectorSize(self)", "title": "" }, { "docid": "54cf92f0e0c6ee03fd58f0a177739dd0", "score": "0.69325584", "text": "def __size(self):\n # find the master root\n if self.parent is not None:\n return self.parent.__size()\n\n # get the total size of ourselves\n our_size = asizeof.asizeof(self)\n return {\n \"used\": our_size,\n \"max\": self.MAX_MEM_SIZE\n }", "title": "" }, { "docid": "159b0c66fac6e53d25b6609375a56c3b", "score": "0.6923734", "text": "def nodeSize(self):\n if graphs.isGridGraph(self.baseGraph):\n return 
graphs._ragNodeSize(self, self.baseGraph, self.labels, self.ignoreLabel)\n else:\n baseNodeSizes = self.baseGraph.nodeSize()\n return self.accumulateNodeFeatures(baseNodeSizes,acc='sum')", "title": "" }, { "docid": "3d51be58d12340bab55a1772392d1c66", "score": "0.6913255", "text": "def size_mb(self) -> float:\n return pulumi.get(self, \"size_mb\")", "title": "" }, { "docid": "3d51be58d12340bab55a1772392d1c66", "score": "0.6913255", "text": "def size_mb(self) -> float:\n return pulumi.get(self, \"size_mb\")", "title": "" }, { "docid": "3d51be58d12340bab55a1772392d1c66", "score": "0.6913255", "text": "def size_mb(self) -> float:\n return pulumi.get(self, \"size_mb\")", "title": "" }, { "docid": "004af5cac3d627a22f01046c4a8d561d", "score": "0.69043255", "text": "def memory_size(self):\n ret = self._get_attr(\"memorySize\")\n return ret", "title": "" }, { "docid": "004af5cac3d627a22f01046c4a8d561d", "score": "0.69043255", "text": "def memory_size(self):\n ret = self._get_attr(\"memorySize\")\n return ret", "title": "" }, { "docid": "992fe51341049d3962e1db09238a9f6f", "score": "0.68462014", "text": "def get_kernel_size(self):\n return self.kernel_size", "title": "" }, { "docid": "af78a0608a98c49475d509853f7eabeb", "score": "0.68446267", "text": "def virtual_size(self):\n return self._total_count", "title": "" }, { "docid": "b9373c428cd8a423ed84447ef0db341c", "score": "0.6819602", "text": "def os_disk_size_gb(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"os_disk_size_gb\")", "title": "" }, { "docid": "ba70ba19ce42201647b1def443f6ce77", "score": "0.6795912", "text": "def size(self):\n return self.number_nodes", "title": "" }, { "docid": "de95b41b4ce04ff4394ba344413d2e5d", "score": "0.6787783", "text": "def size_mb(self) -> Optional[float]:\n return pulumi.get(self, \"size_mb\")", "title": "" }, { "docid": "b4ad02124c29a081e21d4dd7ef5f34a4", "score": "0.678094", "text": "def memory_size_gb(self) -> pulumi.Output[float]:\n return pulumi.get(self, \"memory_size_gb\")", "title": "" }, { "docid": "6ad3efb1613bb09e53f726172b8df763", "score": "0.67673737", "text": "def vram_size(self):\n ret = self._get_attr(\"VRAMSize\")\n return ret", "title": "" }, { "docid": "5d41c587fbaafef307ca770023a0e571", "score": "0.67542845", "text": "def _size(self):\n if self.root != None:\n return self.size\n else:\n return 0", "title": "" }, { "docid": "8b9756dc3c3a30f1cbf9c7e74ed8e2a7", "score": "0.6752157", "text": "def __get_size(self):\n return self.__size", "title": "" }, { "docid": "8b9756dc3c3a30f1cbf9c7e74ed8e2a7", "score": "0.6752157", "text": "def __get_size(self):\n return self.__size", "title": "" }, { "docid": "4fcd5a8201712dcc2b7f3934ff7ba6ca", "score": "0.67511624", "text": "def memory_size(self) -> int:\n return pulumi.get(self, \"memory_size\")", "title": "" }, { "docid": "4fcd5a8201712dcc2b7f3934ff7ba6ca", "score": "0.67511624", "text": "def memory_size(self) -> int:\n return pulumi.get(self, \"memory_size\")", "title": "" }, { "docid": "c9be1e0d534038063aab64c93e4642c3", "score": "0.6745595", "text": "def get_size(self):\n\n return self.attr('size')", "title": "" }, { "docid": "5e02aa0302b79b198534d81f7813ebe1", "score": "0.67445916", "text": "def info_vd_size(self):\n ret = self._get_attr(\"infoVDSize\")\n return ret", "title": "" }, { "docid": "da3f6d4b01d837fecb75ec66fd815076", "score": "0.6734818", "text": "def get_size(self):\n return self.__size", "title": "" }, { "docid": "da3f6d4b01d837fecb75ec66fd815076", "score": "0.6734818", "text": "def get_size(self):\n return self.__size", 
"title": "" }, { "docid": "14183983cb1eab5b5f9f23d0a8434f6f", "score": "0.673199", "text": "def node_ipv4_cidr_size(self) -> int:\n return pulumi.get(self, \"node_ipv4_cidr_size\")", "title": "" }, { "docid": "464fe5df8a39f30a112bba899379066c", "score": "0.67250305", "text": "def global_size(self) -> ConfigNodePropertyInteger:\n return self._global_size", "title": "" }, { "docid": "72193d873b14712c225c258b4464bf3c", "score": "0.6724445", "text": "def get_size(self):\n return self.size", "title": "" }, { "docid": "72193d873b14712c225c258b4464bf3c", "score": "0.6724445", "text": "def get_size(self):\n return self.size", "title": "" }, { "docid": "72193d873b14712c225c258b4464bf3c", "score": "0.6724445", "text": "def get_size(self):\n return self.size", "title": "" }, { "docid": "72193d873b14712c225c258b4464bf3c", "score": "0.6724445", "text": "def get_size(self):\n return self.size", "title": "" }, { "docid": "72193d873b14712c225c258b4464bf3c", "score": "0.6724445", "text": "def get_size(self):\n return self.size", "title": "" }, { "docid": "72193d873b14712c225c258b4464bf3c", "score": "0.6724445", "text": "def get_size(self):\n return self.size", "title": "" }, { "docid": "72193d873b14712c225c258b4464bf3c", "score": "0.6724445", "text": "def get_size(self):\n return self.size", "title": "" }, { "docid": "e68242585c382810c13e342f1ac7710a", "score": "0.6709897", "text": "def disk_size_gb(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"disk_size_gb\")", "title": "" }, { "docid": "49305e75a5debb65cd13c7057de5ab97", "score": "0.6699148", "text": "def get_size(self):\r\n try:\r\n return self._size\r\n except AttributeError:\r\n self._size = self._calc_size()\r\n return self._size", "title": "" }, { "docid": "d1f99fb0105f80406702b710699f8dd9", "score": "0.6667501", "text": "def get_size( self ):\n return self._size", "title": "" }, { "docid": "b12221769f4910ced5dd569e05540c46", "score": "0.6647827", "text": "def get_size(self):\r\n return self._size", "title": "" }, { "docid": "436026e7ef2a8293eaad8436ae8e64b2", "score": "0.66400325", "text": "def size(self):\n return self.tree_size", "title": "" }, { "docid": "5d7c6a452d03181640b1626acfeea046", "score": "0.66165406", "text": "def get_size(self):\n\n return self._size", "title": "" }, { "docid": "64895ea2d97d1d67b8bb8d2922098bea", "score": "0.66082406", "text": "def allocated_size(self):\n ret = self._get_attr(\"allocatedSize\")\n return ret", "title": "" }, { "docid": "95aea20cb2e550c15a1c998a659830ce", "score": "0.6574859", "text": "def _select_vm_size(self) -> dict:\n if self._az_compute_node:\n for vm_size in self._az_vm_sizes:\n if vm_size['name'] == \\\n self._az_compute_node['compute']['vmSize']:\n print(self.format('Selected vm size: {}',\n vm_size['name']))\n return vm_size\n\n return self._select_object('vm_size', 'name', create=False)", "title": "" }, { "docid": "e3d513389b9cac521b79e2e7bb0a4269", "score": "0.6568504", "text": "def process_size(self):\n return self._config.get('process_size', None)", "title": "" }, { "docid": "60e8d47c8240c9e60306a3b5cceae5d7", "score": "0.65619016", "text": "def disk_size_gb(self) -> int:\n return pulumi.get(self, \"disk_size_gb\")", "title": "" }, { "docid": "60e8d47c8240c9e60306a3b5cceae5d7", "score": "0.65619016", "text": "def disk_size_gb(self) -> int:\n return pulumi.get(self, \"disk_size_gb\")", "title": "" }, { "docid": "7760ff31a9ea14872c670fc78c54f40c", "score": "0.6541752", "text": "def object_size(self):\n ret = self._get_attr(\"objectSize\")\n return ret", "title": "" }, { "docid": 
"07df01a655cce81b69df2699b51803f6", "score": "0.6541044", "text": "def real_free_size(self, node):\n return node[7]", "title": "" }, { "docid": "af9cae2bedac36e78adce4f083d4ad7c", "score": "0.65092057", "text": "def disk_size_gb(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"disk_size_gb\")", "title": "" }, { "docid": "af9cae2bedac36e78adce4f083d4ad7c", "score": "0.65092057", "text": "def disk_size_gb(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"disk_size_gb\")", "title": "" }, { "docid": "af9cae2bedac36e78adce4f083d4ad7c", "score": "0.65092057", "text": "def disk_size_gb(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"disk_size_gb\")", "title": "" }, { "docid": "f016eda0507f3958b394a4a8c85222bf", "score": "0.6506755", "text": "def size(self):\n ret = self._get_attr(\"size\")\n return ret", "title": "" }, { "docid": "80f9eb29ab4a5d52b9456b3d3a01ecfd", "score": "0.65032965", "text": "def disk_size_in_gb(self) -> Optional[int]:\n return pulumi.get(self, \"disk_size_in_gb\")", "title": "" }, { "docid": "5967a88338318d358402e24803669ede", "score": "0.65029514", "text": "def os_disk_size_gb(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"os_disk_size_gb\")", "title": "" }, { "docid": "5967a88338318d358402e24803669ede", "score": "0.65029514", "text": "def os_disk_size_gb(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"os_disk_size_gb\")", "title": "" }, { "docid": "5967a88338318d358402e24803669ede", "score": "0.65029514", "text": "def os_disk_size_gb(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"os_disk_size_gb\")", "title": "" }, { "docid": "17b236efaba30427f29c53c3d8635dc6", "score": "0.6495506", "text": "def Size(self):\n return _itkKdTreePython.itkKdTreeNodeLSVF2_Size(self)", "title": "" }, { "docid": "976dc1abc54b664df67207a8c1ba27d2", "score": "0.6495205", "text": "def size_in_bytes(self) -> pulumi.Output[Optional[float]]:\n return pulumi.get(self, \"size_in_bytes\")", "title": "" }, { "docid": "5436034e05ce726d85c9308594791b05", "score": "0.6493726", "text": "def size(self):\n return self[\"size\"]", "title": "" }, { "docid": "b1718fec63a6f969c6597afd7716ca43", "score": "0.64847517", "text": "def disk_size_gb(self) -> Optional[float]:\n return pulumi.get(self, \"disk_size_gb\")", "title": "" }, { "docid": "b1718fec63a6f969c6597afd7716ca43", "score": "0.64847517", "text": "def disk_size_gb(self) -> Optional[float]:\n return pulumi.get(self, \"disk_size_gb\")", "title": "" }, { "docid": "15f654bfee01ea90757d1165159ef9b2", "score": "0.6478794", "text": "def size(self):\n return self.curr_size", "title": "" }, { "docid": "b14ab596c85176214da1d3852ee10db8", "score": "0.6475292", "text": "def memory_used(self):\n return int(self.entry['memory.used'])", "title": "" }, { "docid": "3634814f025559a2fd416039b8ec3f94", "score": "0.6470458", "text": "def evaluate_M_size(self):\n return self.M_size", "title": "" }, { "docid": "e61caf13620555ac8611b9111fed81b5", "score": "0.6470017", "text": "def size(self):\n return self.tree.size", "title": "" }, { "docid": "db5aae87a1e0e2f598b2f9ee573d8970", "score": "0.64599276", "text": "def Size(self):\n return _itkKdTreePython.itkKdTreeNodeLSVF3_Size(self)", "title": "" }, { "docid": "864b744163c0e894f586a7026ea4cd1f", "score": "0.6450148", "text": "def _get_w2v_vector_size(self, nn_id):\n node_id = self._find_netconf_node_id(nn_id)\n _path, _cls = self.get_cluster_exec_class(node_id)\n cls = self.load_class(_path, _cls)\n cls._init_node_parm(node_id)\n if('vector_size' in 
cls.__dict__) :\n return cls.vector_size\n return 10", "title": "" }, { "docid": "45b58a6a25aba655b81e55a9f7e505ac", "score": "0.64441603", "text": "def size(self) -> int:\n if self.available:\n self._cached_state = self._state\n return self._state[\"size\"]\n return self._cached_state[\"size\"]", "title": "" }, { "docid": "a4a93a21b95653159ba3c1a5780a8c95", "score": "0.64395994", "text": "def size(self) -> float:\n return self.__size", "title": "" }, { "docid": "6babc7933fc3f332595ace97afdb29ee", "score": "0.6429004", "text": "def disk_size_gb(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"disk_size_gb\")", "title": "" }, { "docid": "6babc7933fc3f332595ace97afdb29ee", "score": "0.6429004", "text": "def disk_size_gb(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"disk_size_gb\")", "title": "" }, { "docid": "6babc7933fc3f332595ace97afdb29ee", "score": "0.6429004", "text": "def disk_size_gb(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"disk_size_gb\")", "title": "" }, { "docid": "6babc7933fc3f332595ace97afdb29ee", "score": "0.6429004", "text": "def disk_size_gb(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"disk_size_gb\")", "title": "" }, { "docid": "6babc7933fc3f332595ace97afdb29ee", "score": "0.6429004", "text": "def disk_size_gb(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"disk_size_gb\")", "title": "" }, { "docid": "6babc7933fc3f332595ace97afdb29ee", "score": "0.6429004", "text": "def disk_size_gb(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"disk_size_gb\")", "title": "" }, { "docid": "6babc7933fc3f332595ace97afdb29ee", "score": "0.6429004", "text": "def disk_size_gb(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"disk_size_gb\")", "title": "" }, { "docid": "6babc7933fc3f332595ace97afdb29ee", "score": "0.6429004", "text": "def disk_size_gb(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"disk_size_gb\")", "title": "" }, { "docid": "6babc7933fc3f332595ace97afdb29ee", "score": "0.6429004", "text": "def disk_size_gb(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"disk_size_gb\")", "title": "" }, { "docid": "6babc7933fc3f332595ace97afdb29ee", "score": "0.6429004", "text": "def disk_size_gb(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"disk_size_gb\")", "title": "" }, { "docid": "6babc7933fc3f332595ace97afdb29ee", "score": "0.6429004", "text": "def disk_size_gb(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"disk_size_gb\")", "title": "" }, { "docid": "6babc7933fc3f332595ace97afdb29ee", "score": "0.6429004", "text": "def disk_size_gb(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"disk_size_gb\")", "title": "" }, { "docid": "127a436161140c0f3e7521d639f10b1c", "score": "0.6424235", "text": "def size(self):\n return _vnl_vectorPython.vnl_vectorUS_size(self)", "title": "" }, { "docid": "6165f1e3ff605a95533058fbce7992e0", "score": "0.64229035", "text": "def get_server_load_value():\n\n return psutil.virtual_memory().percent", "title": "" }, { "docid": "f9c5d81ea3d447bddfc0be8620fbfb10", "score": "0.6409558", "text": "def size(self):\n if \"size\" in self._prop_dict:\n return self._prop_dict[\"size\"]\n else:\n return None", "title": "" }, { "docid": "6f804baf95b703d0a0f0faabe69de4b0", "score": "0.63922334", "text": "def getSize(self):\n return self.size", "title": "" }, { "docid": "6f804baf95b703d0a0f0faabe69de4b0", "score": "0.63922334", "text": "def getSize(self):\n 
return self.size", "title": "" }, { "docid": "6f804baf95b703d0a0f0faabe69de4b0", "score": "0.63922334", "text": "def getSize(self):\n return self.size", "title": "" }, { "docid": "6a3ca717a32b13b51c0279328d3bf2ae", "score": "0.63879496", "text": "def size_in_gb(self) -> int:\n return pulumi.get(self, \"size_in_gb\")", "title": "" }, { "docid": "6a3ca717a32b13b51c0279328d3bf2ae", "score": "0.63879496", "text": "def size_in_gb(self) -> int:\n return pulumi.get(self, \"size_in_gb\")", "title": "" }, { "docid": "4e0c3ec556d473a9731394d3b5b8d7bc", "score": "0.6385725", "text": "def getSize(self):\n try:\n if self.plugin.req.size:\n return self.plugin.req.size\n else:\n return self.size\n except:\n return self.size", "title": "" }, { "docid": "6bb5ab86c0599db9eb8908f7da75a6b5", "score": "0.6382742", "text": "def get_size(self, value=None):\n return 6", "title": "" }, { "docid": "3312a0e8c843fad8abef5050bafe6354", "score": "0.63582903", "text": "def getMemorySize():\n memsize = c_uint64(0)\n size = c_size_t(sizeof(memsize))\n\n libc.sysctlbyname.argtypes = [\n c_char_p, c_void_p, c_void_p, c_void_p, c_ulong\n ]\n libc.sysctlbyname(\n \"hw.memsize\",\n c_void_p(addressof(memsize)),\n c_void_p(addressof(size)),\n None,\n 0\n )\n\n return int(memsize.value)", "title": "" } ]
708e08f481bab3715f95ae75d509388e
Constructor, load edgeless MNIST dictionary. Also calculate each digit's max size.
[ { "docid": "fbd16a5b9c97236697cc4ed2f6b87374", "score": "0.8259669", "text": "def __init__(self):\n self.dictionary = load_white_edgeless_mnist_dictionary()\n\n self.digit_to_max_size = {d: 0 for d in range(0, 10)}\n\n for k in self.dictionary:\n for arr in self.dictionary[k]:\n self.digit_to_max_size[k] = \\\n max(self.digit_to_max_size[k], tuple(arr.shape)[1])", "title": "" } ]
[ { "docid": "6b75515e27977d064177dbb284d2c0b7", "score": "0.6917801", "text": "def __init__(self, batch_size=50, exluded_digits=(1, 5)):\n mnist = input_data.read_data_sets('MNIST_data', one_hot=True, reshape=False, validation_size=0)\n self.train_data = mnist.train.images # Returns np.array\n self.train_labels = np.asarray(mnist.train.labels, dtype=np.int32)\n self.data_len = self.train_data.shape[0]\n\n self.excluded_digits = exluded_digits\n self.split_full_oneshot(self.excluded_digits)\n self.full_batch_id = 0\n self.one_shot_batch_id = 0\n\n if (batch_size % 2) != 0:\n raise Exception(\"Batch size must be divisible by two!\")\n self.batch_size = batch_size", "title": "" }, { "docid": "639f3db9aa83e9a88df413979dbc3b21", "score": "0.67930144", "text": "def __init__(self, data_path, flatten=True):\n (train_x, train_y), (test_x, test_y) = mnist(data_path)\n if not flatten:\n train_x = train_x.reshape(-1, 28, 28, 1)\n test_x = test_x.reshape(-1, 28, 28, 1)\n self.train = (binarize(train_x), one_hot(train_y))\n self.test = (binarize(test_x), one_hot(test_y))\n self.batch_size = 0\n self.epoch_size = train_y.shape[0]", "title": "" }, { "docid": "74f6d48f05849efd34a3714808ee2a9d", "score": "0.64203674", "text": "def __init__(self, root: str):\n self.root = root\n\n self.train = load_pickle(root + '/train_medium.pkl')\n self.label = torch.tensor(load_pickle(root + '/label_medium.pkl'))\n\n self.num_classes = max(max(self.train)) + 1\n self.ohe_mapping = torch.eye(self.num_classes)", "title": "" }, { "docid": "64d890c30efeac0ddedc9de761608e45", "score": "0.60133725", "text": "def load_mnist(dataset=\"training\", digits=np.arange(10), path=\".\"):\n\tif dataset == \"training\":\n\t\tfname_img = os.path.join(path, 'train-images.idx3-ubyte')\n\t\tfname_lbl = os.path.join(path, 'train-labels.idx1-ubyte')\n\telif dataset == \"testing\":\n\t\tfname_img = os.path.join(path, 't10k-images.idx3-ubyte')\n\t\tfname_lbl = os.path.join(path, 't10k-labels.idx1-ubyte')\n\telse:\n\t\traise ValueError(\"dataset must be 'testing' or 'training'\")\n\n\tflbl = open(fname_lbl, 'rb')\n\tmagic_nr, size = struct.unpack(\">II\", flbl.read(8))\n\tlbl = array(\"b\", flbl.read())\n\tflbl.close()\n\n\tfimg = open(fname_img, 'rb')\n\tmagic_nr, size, rows, cols = struct.unpack(\">IIII\", fimg.read(16))\n\timg = array(\"B\", fimg.read())\n\tfimg.close()\n\n\tind = [ k for k in range(size) if lbl[k] in digits ]\n\tN = len(ind)\n\timages = np.zeros((N, rows, cols), dtype=np.uint8)\n\tlabels = np.zeros((N, 1), dtype=np.int8)\n\tfor i in range(len(ind)):\n\t\timages[i] = np.array(img[ ind[i]*rows*cols : (ind[i]+1)*rows*cols ]).reshape((rows, cols))\n\t\tlabels[i] = lbl[ind[i]]\n\n\treturn images, labels", "title": "" }, { "docid": "e23547e3fa552b0a6bfb629c48dd45ef", "score": "0.60079396", "text": "def load_mnist(size: int = None,\n border: int = _MNIST_BORDER,\n blank_corners: bool = False,\n nums: List[int] = None) \\\n -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:\n # DOCME: Fix up formatting above,\n # DOCME: Explain nums argument\n\n # JIT import since keras startup is slow\n from keras.datasets import mnist\n\n def _filter_mnist(x: np.ndarray, y: np.ndarray, nums: List[int] = None) \\\n -> Tuple[np.ndarray, np.ndarray]:\n xt = []\n yt = []\n items = len(y)\n for n in range(items):\n if nums is not None and y[n] in nums:\n xt.append(x[n])\n yt.append(y[n])\n xt = np.stack(xt)\n yt = np.stack(yt)\n return xt, yt\n\n def _rescale(imgarray: np.ndarray, size: int) -> np.ndarray:\n N = imgarray.shape[0]\n\n # Chop off 
border\n imgarray = imgarray[:, border:-border, border:-border]\n\n rescaled = np.zeros(shape=(N, size, size), dtype=np.float)\n for n in range(0, N):\n img = Image.fromarray(imgarray[n])\n img = img.resize((size, size), Image.LANCZOS)\n rsc = np.asarray(img).reshape((size, size))\n rsc = 256.*rsc/rsc.max()\n rescaled[n] = rsc\n\n return rescaled.astype(dtype=np.uint8)\n\n def _blank_corners(imgarray: np.ndarray) -> None:\n # Zero out corners\n sz = imgarray.shape[1]\n corner = (sz//2)-1\n for x in range(0, corner):\n for y in range(0, corner-x):\n imgarray[:, x, y] = 0\n imgarray[:, -(1+x), y] = 0\n imgarray[:, -(1+x), -(1+y)] = 0\n imgarray[:, x, -(1+y)] = 0\n\n (x_train, y_train), (x_test, y_test) = mnist.load_data()\n\n if nums:\n x_train, y_train = _filter_mnist(x_train, y_train, nums)\n x_test, y_test = _filter_mnist(x_test, y_test, nums)\n\n if size:\n x_train = _rescale(x_train, size)\n x_test = _rescale(x_test, size)\n\n if blank_corners:\n _blank_corners(x_train)\n _blank_corners(x_test)\n\n return x_train, y_train, x_test, y_test", "title": "" }, { "docid": "4a673ce26f0b0d25328978a10d478407", "score": "0.59981215", "text": "def load_mnist(num_train_samples=60000, num_test_samples=10000, res=28, normalization=False):\n # Fetch train (60.000 images) and test data (10.000 images)\n (train_images_raw, train_labels_raw), (test_images_raw, test_labels_raw) = tf.keras.datasets.mnist.load_data()\n\n # Convert data type\n train_images_raw = np.asarray(train_images_raw, dtype=np.float32)\n test_images_raw = np.asarray(test_images_raw, dtype=np.float32)\n\n # Convert data by squaring the pixel intensities\n # train_images_raw = np.square(train_images_raw)\n # test_images_raw = np.square(test_images_raw)\n\n # Cut data\n train_labels = train_labels_raw[:num_train_samples]\n test_labels = test_labels_raw[:num_test_samples]\n train_images = train_images_raw[:num_train_samples]\n test_images = test_images_raw[:num_test_samples]\n\n # Normalize data\n if normalization:\n train_images_mean = np.mean(train_images, 0)\n train_images_std = np.std(train_images, 0)\n std_eps = 1e-7\n train_images = (train_images - train_images_mean) / (train_images_std + std_eps)\n test_images = (test_images - train_images_mean) / (train_images_std + std_eps)\n\n # Resize images\n if res != 28:\n train_images = np.array(list(map(lambda x: resize(x, (res, res)), train_images)))\n test_images = np.array(list(map(lambda x: resize(x, (res, res)), test_images)))\n\n # Reshape and convert data\n train_images = train_images.reshape(-1, res * res).astype('float32')\n test_images = test_images.reshape(-1, res * res).astype('float32')\n\n return (train_images, train_labels), (test_images, test_labels)", "title": "" }, { "docid": "4a23bcd54999c5935ca4873f2c9c176e", "score": "0.5973087", "text": "def load_mnist_1D_large(filename = '../data/mnist.pkl.gz'):\n \n import keras.utils as ku\n\n # Load the data.\n training_data, validation_data, test_data = get_data(filename)\n\n # Return the values.\n return training_data[0], ku.to_categorical(training_data[1]), \\\n validation_data[0], ku.to_categorical(validation_data[1]), \\\n test_data[0], ku.to_categorical(test_data[1])", "title": "" }, { "docid": "028418c6c4041a815da7b1c79aa73ced", "score": "0.59730726", "text": "def load_mnist_2D(filename = ''):\n\n # Get the data.\n tr_d, tr_v, va_d, va_v, te_d, te_v = \\\n load_mnist_1D_large(filename = filename)\n\n # Reshape the data.\n training_inputs = np.array([x.reshape(28, 28, 1) for x in tr_d])\n validation_inputs = 
np.array([x.reshape(28, 28, 1) for x in va_d])\n test_inputs = np.array([x.reshape(28, 28, 1) for x in te_d])\n\n # Return the data.\n return training_inputs, tr_v, validation_inputs, va_v, \\\n test_inputs, te_v", "title": "" }, { "docid": "b5ad00e777d4e3e0857a30601d4acf8d", "score": "0.59380287", "text": "def load_mnist_dataset():\n # We first define a download function, supporting both Python 2 and 3.\n if sys.version_info[0] == 2:\n from urllib import urlretrieve\n else:\n from urllib.request import urlretrieve\n\n def download(foldername,filename, source='http://yann.lecun.com/exdb/mnist/'):\n print(\"Downloading %s\" % filename)\n urlretrieve(source + filename, foldername+filename)\n\n # We then define functions for loading MNIST images and labels.\n # For convenience, they also download the requested files if needed.\n\n def load_mnist_images(foldername,filename):\n if not os.path.exists(foldername+filename):\n download(foldername,filename)\n # Read the inputs in Yann LeCun's binary format.\n with gzip.open(foldername+filename, 'rb') as f:\n data = np.frombuffer(f.read(), np.uint8, offset=16)\n # The inputs are vectors now, we reshape them to monochrome 2D images,\n # following the shape convention: (examples, channels, rows, columns)\n data = data.reshape(-1, 1, 28, 28)\n # The inputs come as bytes, we convert them to float32 in range [0,1].\n # (Actually to range [0, 255/256], for compatibility to the version\n # provided at http://deeplearning.net/data/mnist/mnist.pkl.gz.)\n return data / np.float32(256)\n\n def load_mnist_labels(foldername,filename):\n if not os.path.exists(foldername+filename):\n download(foldername,filename)\n # Read the labels in Yann LeCun's binary format.\n with gzip.open(foldername+filename, 'rb') as f:\n data = np.frombuffer(f.read(), np.uint8, offset=8)\n # The labels are vectors of integers now, that's exactly what we want.\n return data\n\n # We can now download and read the training and test set images and labels.\n foldername = '../Data/MNIST/'\n X_train = load_mnist_images(foldername,'train-images-idx3-ubyte.gz')\n y_train = load_mnist_labels(foldername,'train-labels-idx1-ubyte.gz')\n X_test = load_mnist_images(foldername,'t10k-images-idx3-ubyte.gz')\n y_test = load_mnist_labels(foldername,'t10k-labels-idx1-ubyte.gz')\n\n # We reserve the last 10000 training examples for validation.\n X_train, X_val = X_train[:-10000], X_train[-10000:]\n y_train, y_val = y_train[:-10000], y_train[-10000:]\n\n # We just return all the arrays in order, as expected in main().\n # (It doesn't matter how we do this as long as we can read them again.)\n return X_train, y_train, X_val, y_val #, X_test, y_test", "title": "" }, { "docid": "724f738281f98ab6ae3be9c40c2387f3", "score": "0.59180367", "text": "def load_mnist_1D_small(filename = '../data/mnist.pkl.gz'):\n\n # Load the data.\n training_data, validation_data, test_data = get_data(filename)\n\n # Return the values.\n return training_data[0][0:500, :], training_data[1][0:500], \\\n validation_data[0][0:200, :], validation_data[1][0:200], \\\n test_data[0][0:100, :], test_data[1][0:100]", "title": "" }, { "docid": "d3c472ec63a2639bce0cda2954578cf0", "score": "0.5899108", "text": "def __init__(self, input_size = (1, 28, 28), num_classes = 10):\n super(CNN_MNIST, self).__init__()\n\n self.layer1 = nn.Sequential(\n nn.Conv2d(input_size[0], 32, kernel_size=5),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=2))\n\n self.layer2 = nn.Sequential(\n nn.Conv2d(32, 64, kernel_size=5),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=2))\n\n 
self.fc1 = nn.Linear(4 * 4 * 64, num_classes)", "title": "" }, { "docid": "c44b25eaf1606ad133fa08ffbc0a94c6", "score": "0.588867", "text": "def __init(self):\n # self.size_of_test_items = 24\n # self.size_of_training_items = 24", "title": "" }, { "docid": "6fe27c5856560bb794f78aa484eace31", "score": "0.58741045", "text": "def load_MNIST():\n\n with open('../data/X_train.pkl', 'rb') as f:\n X_train = pickle.load(f)\n X_train.reshape(-1, )\n with open('../data/Y_train.pkl', 'rb') as f:\n Y_train = pickle.load(f)\n with open('../data/X_test.pkl', 'rb') as f:\n X_test = pickle.load(f)\n with open('../data/Y_test.pkl', 'rb') as f:\n Y_test = pickle.load(f)\n\n num_classes = len(np.unique(Y_train))\n input_dim, n_samples = X_train.shape\n\n print(f'num_classes = {num_classes}')\n print(f'num_pixels = {input_dim}')\n print(f'num_training_samples = {n_samples}')\n\n tr_y_multi_class = np.zeros((num_classes, n_samples))\n for i in range(num_classes):\n tr_y_multi_class[i, np.where(Y_train == i)] = 1\n Y_train = np.asmatrix(tr_y_multi_class)\n\n input_dim, n_samples = X_test.shape\n te_y_multi_class = np.zeros((num_classes, n_samples))\n for i in range(num_classes):\n te_y_multi_class[i, np.where(Y_test == i)] = 1\n Y_test = np.asmatrix(te_y_multi_class)\n\n return X_train, Y_train, X_test, Y_test", "title": "" }, { "docid": "e63d0903c5254b5d821623914b81774b", "score": "0.5867644", "text": "def ld_mnist():\n\n def convert_types(image, label):\n image = tf.cast(image, tf.float32)\n image /= 255\n return image, label\n\n dataset, info = tfds.load(\n \"mnist\", with_info=True, as_supervised=True\n )\n\n mnist_train, mnist_test = dataset[\"train\"], dataset[\"test\"]\n mnist_train = mnist_train.map(convert_types).shuffle(10000).batch(128)\n mnist_test = mnist_test.map(convert_types).batch(128)\n return EasyDict(train=mnist_train, test=mnist_test)", "title": "" }, { "docid": "641dba05f815375836f7804b77778c47", "score": "0.5828651", "text": "def __init__(self, **kwargs):\r\n self.w = []\r\n self.epoch = 20000\r\n self.best_dimension = 0\r\n self.best_layers = 0", "title": "" }, { "docid": "525c90419bb6b117f828d4fa0a33e70d", "score": "0.58113134", "text": "def init():\n global width, height, map_width, map_height, epochs, batch_size, min_heat\n width = 1024\n height = 1024\n\n map_width = int(width / 2)\n map_height = int(height / 2)\n\n epochs = 100\n batch_size = 4\n\n # 200~255 to find heat point\n min_heat = 150", "title": "" }, { "docid": "d467c34bf424771289762f22284049f5", "score": "0.57916975", "text": "def initializeMNISTData():\n # Initialize the MNIST dataset\n print()\n print(\"Initializing MNIST dataset...\")\n mnistData = MNISTData()\n return mnistData", "title": "" }, { "docid": "0f6212476ccc871b906045ccfc2b6a5b", "score": "0.5735987", "text": "def Load_MNIST ():\n print(\"Collecting MNIST data .....\\n\")\n (X_train,y_train),(X_test,y_test) = \\\n keras.datasets.mnist.load_data()\n X_test,y_test = X_test[:6000],y_test[:6000]\n X_train,y_train = X_train[:10000],y_train[:10000]\n return X_train,y_train", "title": "" }, { "docid": "0711743930f73b962820c5d4678e6c57", "score": "0.5718131", "text": "def load_data():\n f = gzip.open('mnist.pkl.gz', 'rb')\n training_data, validation_data, test_data = cPickle.load(f)\n f.close() \n #sets up average stroke thickness=\n '''for i in range(len(training_data[0])):\n training_data[0][i] *= 255\n image_list = []\n tr_d = [np.reshape(x, (28, 28)) for x in training_data[0]]\n for data in tr_d:\n image = Image.fromarray(data)\n image = image.convert('L')\n 
image_list.append(image)\n #image_list[0].show()\n for i in range(10):\n print stroke_thickness(image_list[i])\n image_matrix = standardize_image(image_list[i])/255\n print image_matrix\n average_thick = average_thickness(image_list)\n print average_thick'''\n return (training_data, validation_data, test_data)", "title": "" }, { "docid": "13c9cf5c5eea987a7ac52cef23aec3ac", "score": "0.5712747", "text": "def __init__(self, n_clusters, max_iter = 100):\n self.n_clusters = n_clusters\n self.max_iter = max_iter\n self.centers = []\n self.labels_ = []\n self.input_shape = None\n self.__trained = False", "title": "" }, { "docid": "3a95b309e815b296fd71cc99b058ae2a", "score": "0.57126945", "text": "def __init__(self):\n self.train_data = pd.read_csv('mnist_train.csv').values.tolist()\n self.test_data = pd.read_csv('mnist_test.csv').values.tolist()\n self.train_label = []\n self.test_label = []\n self.predict_test = []\n self.test_error = {1: 0, 9: 0, 19: 0, 29: 0, 39: 0, 49: 0, 59: 0, 69: 0, 79: 0, 89: 0, 99: 0}\n self.train_error = {1: 0, 9: 0, 19: 0, 29: 0, 39: 0, 49: 0, 59: 0, 69: 0, 79: 0, 89: 0, 99: 0}", "title": "" }, { "docid": "fdd3ec5f8d228d1e73f22d8175681d6b", "score": "0.56758404", "text": "def __init__(\n self, label_map: Dict[int, str], max_num_images: Optional[int] = None\n ) -> None:\n self.label_map = label_map\n self.max_num_images = max_num_images", "title": "" }, { "docid": "dac85bcd81b01171117b2caf3caaf5a1", "score": "0.56731814", "text": "def __init__(self, sizes, cost=CrossEntropyCost):\n self.num_layers = len(sizes)\n self.sizes = sizes\n self.default_weight_initializer()\n self.cost = cost", "title": "" }, { "docid": "5dd16648913966a98c66234952a5d425", "score": "0.5667201", "text": "def __init__(self):\n\n self.train_batch_size = 32\n self.features = 26\n self.test_batch_size = 100\n self.training_set = None\n self.target_set = None\n self.dataset = None\n self.data_index = 0", "title": "" }, { "docid": "30a63d5b2568aabcd8a082dc4c89b7f8", "score": "0.5654114", "text": "def test_keras_mnist():\n data = fetch('mnist')\n check(data, (60000, 28*28), (10000, 28*28))", "title": "" }, { "docid": "2d324e061cda743d9f59dcd6a469a651", "score": "0.5637487", "text": "def __init__(self, batch_size, classes_per_set=10, samples_per_class=1, seed=2591, queries_per_class=1):\n np.random.seed(seed)\n self.x = np.load(\"/data/omniglot.npy\")\n self.x = np.reshape(self.x, newshape=(1622, 20, 28, 28, 1))\n self.x_train, self.x_test, self.x_val = self.x[:1200], self.x[1200:1411], self.x[1411:]\n self.normalization()\n self.batch_size = batch_size\n self.n_classes = self.x.shape[0]\n self.classes_per_set = classes_per_set\n self.samples_per_class = samples_per_class\n self.queries_per_class = queries_per_class\n\n print(\"train_shape\", self.x_train.shape, \"test_shape\", self.x_test.shape, \"val_shape\", self.x_val.shape)\n self.indexes = {\"train\": 0, \"val\": 0, \"test\": 0}\n self.datasets = {\"train\": self.x_train, \"val\": self.x_val, \"test\": self.x_test} #original data cached", "title": "" }, { "docid": "16fd5da8472bba679c90c5d0692789ce", "score": "0.5636208", "text": "def build_data(self):\n\n # load original mnist dataset and expand each number with embedded \"0\"s\n (x_train, y_train), (x_test, y_test) = mnist.load_data()\n embedding_img = x_train[1]\n x_train = np.array([self.expand_img(embedding_img, img) for img, label in zip(x_train, y_train)])\n x_test = np.array([self.expand_img(embedding_img, img) for img, label in zip(x_test, y_test)])\n\n if K.image_data_format() == 
'channels_first':\n x_train = x_train.reshape(x_train.shape[0], 1, self.img_rows, self.img_cols)\n x_test = x_test.reshape(x_test.shape[0], 1, self.img_rows, self.img_cols)\n else:\n x_train = x_train.reshape(x_train.shape[0], self.img_rows, self.img_cols, 1)\n x_test = x_test.reshape(x_test.shape[0], self.img_rows, self.img_cols, 1)\n\n # normalize and cast\n x_train = x_train.astype('float32')\n x_test = x_test.astype('float32')\n x_train /= 255\n x_test /= 255\n\n # convert class vectors to binary class matrices\n y_train = keras.utils.to_categorical(y_train, self.num_classes)\n y_test = keras.utils.to_categorical(y_test, self.num_classes)\n\n return x_train, x_test, y_train, y_test", "title": "" }, { "docid": "eefeb54eb29ae672b9b18cb88136fc36", "score": "0.55747116", "text": "def __init__(self):\r\n self.edges = defaultdict(list)\r\n self.weights = {}", "title": "" }, { "docid": "11480007ead7c784d88ba7315e0a0972", "score": "0.5571569", "text": "def __init__(self):\n self.edges = defaultdict(list)\n self.weights = {}", "title": "" }, { "docid": "f4a2743d9741d5ed04fb7ef6e12c8f01", "score": "0.556744", "text": "def __init__(self,sizes):\r\n\r\n self.numberLayers = len(sizes)\r\n \r\n #Initialization of weights and biases\r\n self.biases = [np.random.randn(y,1) for y in sizes[1:]]\r\n self.weights = [np.random.randn(y,x) for x,y in zip(sizes[:-1], sizes[1:])]", "title": "" }, { "docid": "99b0c7232881ec07d3fa0c824b4e8e8e", "score": "0.55670637", "text": "def __init__(self, filepath, batch_size=135):\n # initialize the cursors to keep track where we are in the Dataset\n self.train_cursor = 0\n self.test_cursor = 0\n self.train_batch_size = batch_size\n self.test_batch_size = batch_size // 10\n\n # initialize the idx arrays\n a_train_ = np.tile(np.arange(self.NUM_TRAIN_ITEMS_PER_CLASS), self.NUM_CLASSES).reshape(self.NUM_CLASSES, self.NUM_TRAIN_ITEMS_PER_CLASS)\n b_train_ = np.tile(np.arange(self.NUM_CLASSES) * self.NUM_ITEMS_PER_CLASS, self.NUM_TRAIN_ITEMS_PER_CLASS).reshape(self.NUM_TRAIN_ITEMS_PER_CLASS, self.NUM_CLASSES).T\n self.train_idxs = (a_train_ + b_train_).reshape(-1)\n\n a_test_ = np.tile(np.arange(self.NUM_TEST_ITEMS_PER_CLASS), self.NUM_CLASSES).reshape(self.NUM_CLASSES, self.NUM_TEST_ITEMS_PER_CLASS)\n b_test_ = np.tile(np.arange(self.NUM_CLASSES) * self.NUM_ITEMS_PER_CLASS, self.NUM_TEST_ITEMS_PER_CLASS).reshape(self.NUM_TEST_ITEMS_PER_CLASS, self.NUM_CLASSES).T\n self.test_idxs = (a_test_ + b_test_ + self.NUM_TRAIN_ITEMS_PER_CLASS).reshape(-1)\n\n # load the .mat file containing the dataset\n print('Loading the dataset...')\n data = h5py.File(filepath)\n self.dataset_images = data['imdb']['images']['data']\n self.dataset_labels = data['imdb']['images']['labels']\n print('Dataset loaded!')", "title": "" }, { "docid": "2062f729e21bb114be8c1404f045432e", "score": "0.55592626", "text": "def load_mnist_keras():\n train, test = tf.keras.datasets.mnist.load_data()\n train_data, train_labels = train\n test_data, test_labels = test\n\n train_data = np.array(train_data, dtype=np.float32) / 255\n test_data = np.array(test_data, dtype=np.float32) / 255\n\n train_data = train_data.reshape(train_data.shape[0], 28, 28, 1)\n test_data = test_data.reshape(test_data.shape[0], 28, 28, 1)\n\n train_labels = np.array(train_labels, dtype=np.int32)\n test_labels = np.array(test_labels, dtype=np.int32)\n\n train_labels = tf.keras.utils.to_categorical(train_labels, num_classes=10)\n test_labels = tf.keras.utils.to_categorical(test_labels, num_classes=10)\n\n assert train_data.min() == 0.\n assert 
train_data.max() == 1.\n assert test_data.min() == 0.\n assert test_data.max() == 1.\n \n return train_data, train_labels, test_data, test_labels", "title": "" }, { "docid": "0cd315c56a952e15095d7914445d6520", "score": "0.555627", "text": "def __init__(self, sizes, input, bits=None):\r\n\t\t\r\n\t\tif bits is not None:\r\n\t\t\tself.bits = bits\r\n\t\telse: \r\n\t\t\tself.bits = 8\r\n\t\t\r\n\t\tself.num_layers = len(sizes)\r\n\t\tself.sizes = sizes\r\n\t\tself.cost = Cost\r\n\t\tself.a_L = Criterion.a_r(sizes[-1])\r\n\t\tself.weight_initializer()\r\n\t\t\r\n\t\tself.x0 = b10_to_b2(input[0],self.bits)\r\n\t\tself.y0 = b10_to_b2(input[1],self.bits)\r\n\t\t\r\n\t\tself.last_input = np.append(self.x0,self.y0)\r\n\t\tself.new_input = np.zeros(2*self.bits)\r\n\t\tself.kappa = 0 # The network standard will start out being random\r\n\t\tself.kappas = []", "title": "" }, { "docid": "bcec21a38eedebcafc2f1aff04329793", "score": "0.5555787", "text": "def __init__(self, max_depth=1000, size_allowed=1, n_features=None, n_split=None):\n\n self.root = 1\n self.max_depth = max_depth\n self.size_allowed = size_allowed\n self.n_features = n_features\n self.n_split = n_split", "title": "" }, { "docid": "a3f3416c5d6ff5a28b9e781e248fa17b", "score": "0.5555058", "text": "def __init__(self, intrinsics_path, max_instances = 256, num_classes = 41):\n self.no_instance = 999\n self.max_instances = max_instances\n self.num_classes = num_classes\n self.critical_instance_count = False\n self.instance_matrix = np.zeros((num_classes, max_instances, 3)) + self.no_instance\n self.intrinisc_inv = np.linalg.inv(np.loadtxt(intrinsics_path))\n self.class_inst_to_number_map = None\n self.class_inst_cnt = 0", "title": "" }, { "docid": "8cab01e8c050457e700fa79ee4c394b0", "score": "0.5554475", "text": "def __init__(self, path):\n self.path = path\n self.train_set = {}\n self.test_set = {}\n self.classes = []\n self.classes_counts = []\n self.myarray = []\n self.imageList = []", "title": "" }, { "docid": "79a0720c22c8aebb454e7c6b1991f4c1", "score": "0.5553256", "text": "def __init__(self):\n super().__init__()\n# self.save_hyperparameters()\n\n self.dense = torch.nn.Linear(HIDDEN_SIZE*2, NUM_GAMES)\n self.ndcg = NDCGMetric(k=20)\n self.map = MAPMetric()", "title": "" }, { "docid": "9b04558fd859bb3eb3f42b8673c458e4", "score": "0.5539841", "text": "def __init__(self):\n\n prototxt = 'face_detector/deploy.prototxt'\n caffemodel = 'face_detector/res10_300x300_ssd_iter_140000.caffemodel'\n self.inWidth = 300\n self.inHeight = 300\n self.net = dnn.readNetFromCaffe(prototxt, caffemodel)", "title": "" }, { "docid": "e4e9c9835e8e0c32b91fd1ca3689f18f", "score": "0.55364", "text": "def load_mnist():\r\n f = open(\"C:\\Users\\Michael\\Desktop\\Research\\Data\\mnist\\mnist_noval.pkl\", 'rb')\r\n train_data, test_data = cPickle.load(f)\r\n f.close()\r\n \r\n train_images, train_labels = train_data\r\n test_images, test_labels = test_data\r\n \r\n return train_images, train_labels, test_images, test_labels", "title": "" }, { "docid": "d91700da5ef3464ebdb43c7a37bf1589", "score": "0.55361855", "text": "def __init__(self, learning_rate, batch_size):\n # Parameters\n self.learning_rate = learning_rate\n self.mnist = mnist\n self.batch_size = batch_size\n self.num_epochs = 50\n self.num_classes = 10\n self.input_size = 784\n self.input_weight, self.input_height = 28, 28\n self.batch_per_epoch = int(self.mnist.train.num_examples/self.batch_size)\n self.display_step = 1\n\n # Placeholders\n self.X = tf.placeholder(tf.float32, [None, 784]) # mnist data 
image of shape 28*28=784\n self.Y = tf.placeholder(tf.float32, [None, 10]) # 0-9 digits recognition => 10 classes", "title": "" }, { "docid": "899a55462958d4f6dd514b87239b3537", "score": "0.5519446", "text": "def build_image_embeddings(self):\n\n #用densenet生成一个imageembedding\n densenet_out = image_embedding.densenet_161(\n self.images,\n trainable=self.train_inception,\n is_training=self.is_training())\n self.densenet_variable = tf.get_collection(\n tf.GraphKeys.GLOBAL_VARIABLES, scope=\"densenet161\")\n\n\n\n # Save the embedding size in the graph.\n self.embedding_size=384\n tf.constant(self.embedding_size, name=\"embedding_size\")\n\n self.features = densenet_out\n self.L = 196\n self.D = 384", "title": "" }, { "docid": "b139d273e169514f393d7167b5c88331", "score": "0.55185604", "text": "def load_data():\n X, Y, testX, testY = mnist.load_data(one_hot=True)\n X = X.reshape([-1, 28, 28, 1])\n testX = testX.reshape([-1, 28, 28, 1])\n\n return X, Y, testX, testY", "title": "" }, { "docid": "d856e4cb34281300fc055622a1f71ec4", "score": "0.5513635", "text": "def load_mnist():\n train, test = tf.keras.datasets.mnist.load_data()\n train_data, train_labels = train\n test_data, test_labels = test\n\n train_data = np.array(train_data, dtype=np.float32) / 255\n test_data = np.array(test_data, dtype=np.float32) / 255\n\n train_labels = np.array(train_labels, dtype=np.int32)\n test_labels = np.array(test_labels, dtype=np.int32)\n\n assert train_data.min() == 0.\n assert train_data.max() == 1.\n assert test_data.min() == 0.\n assert test_data.max() == 1.\n assert train_labels.ndim == 1\n assert test_labels.ndim == 1\n\n return train_data, train_labels, test_data, test_labels", "title": "" }, { "docid": "6fd6d27bc522e0a741ca176164d845b3", "score": "0.55117446", "text": "def __init__(self, input_dimensions=2,number_of_nodes=4):\r\n if input_dimensions == 0 or number_of_nodes == 0:\r\n print()\r\n return\r\n else:\r\n self.input_dimension = input_dimensions + 1\r\n self.weights = np.ones((number_of_nodes,self.input_dimension))\r\n self.initialize_weights()", "title": "" }, { "docid": "2c940f2709486d5dfa5499ca01a7ca55", "score": "0.5506797", "text": "def MNIST(one_hot=True):\n return input_data.read_data_sets('MNIST_data/', one_hot=one_hot)", "title": "" }, { "docid": "2dd244f960a7df6d1aa3942755ae4afb", "score": "0.55056965", "text": "def __init__(self, batch_norm, n=2):\n super(AlexNetSmall, self).__init__(batch_norm)\n while n > 0:\n name = self.features[-1]._get_name()\n if name.__contains__(\"MaxPool\") or name.__contains__(\"Conv2d\"):\n n -= 1\n self.features.__delitem__(len(self.features) - 1)", "title": "" }, { "docid": "d38d9329cdbcbdfeab2077e8e29346cf", "score": "0.55013484", "text": "def setup():\r\n # import MNIST dataset from scikit_learn\r\n mnist = sklearn.datasets.load_digits()\r\n X, y = mnist[\"data\"], mnist[\"target\"]\r\n \r\n # normalize the image data\r\n X = X / 255\r\n \r\n # shuffle the data-set randomly\r\n shuffle_index = np.random.permutation(y.shape[0])\r\n X_new = X[shuffle_index, :] \r\n y_new = y[shuffle_index]\r\n \r\n # train and test sets of length 1499 and 297 respectively \r\n # hard-coded this but can easily just split it up based on percentiles\r\n X_train = X_new[1:1500, :]\r\n y_train = y_new[1:1500]\r\n X_test = X_new[1500:, :]\r\n y_test = y_new[1500:]\r\n y_test_temp = y_test\r\n \r\n # one-hot encode the labels for the training and test sets\r\n y_test_new = np.zeros((y_test.size, y_test.max()+1))\r\n y_test_new[np.arange(y_test.size), y_test] = 1\r\n y_test = 
y_test_new\r\n y_train_new = np.zeros((y_train.size, y_train.max()+1))\r\n y_train_new[np.arange(y_train.size), y_train] = 1\r\n y_train = y_train_new\r\n \r\n # set up the parameters for the model\r\n params = {}\r\n params[\"i_size\"], params[\"h_size\"], params[\"o_size\"] = X.shape[0], 35, 10\r\n params[\"act\"], params[\"loss\"] = [\"tanh\", \"softmax\"], \"ce\"\r\n params[\"eps\"], params[\"num_epochs\"], params[\"lambda\"] = 0.001, 2000, 0.1\r\n \r\n # set up a ffnn with sigmoid activation, cross-entropy loss to recognize MNIST digits\r\n mnist_classifier = nn_classifier(X_train, y_train, params)\r\n\r\n # train the network\r\n mnist_classifier.train() \r\n\r\n # make predictions on the test-set\r\n # then, generate a confusion matrix using seaborn's heatmap()\r\n results = mnist_classifier.predict(X_test)\r\n \r\n cm = confusion_matrix(y_test_temp, results)\r\n ax = plt.axes()\r\n sb.heatmap(cm, ax = ax)\r\n ax.set_title('MNIST Classification: Three-Layer FFNN with Tanh + Softmax + Cross-enropy')\r\n plt.show()", "title": "" }, { "docid": "9950151bdcb7a5c27844ce372e92b094", "score": "0.5497196", "text": "def __init__(self, input_size, output_size, activation):\n\n raise NotImplementedError", "title": "" }, { "docid": "84cf8caf42887620a27e23964172b25b", "score": "0.54940647", "text": "def __init__(self, path='../data/train', size=128, channel=3, normalize=0):\n self._size = size\n self._channel = channel\n self._imgs = []\n self._ids = []\n self._masks = []\n self._n = 0\n \n print(\"Extracting training image info ...\")\n start_time = time.time()\n for img_id in os.listdir(path):\n self._ids.append(img_id)\n img_file_list = os.listdir('{0}/{1}/images'.format(path, img_id))\n assert len(img_file_list) == 1, \"Multiple images found in one images id folder.\"\n assert img_file_list[0] == img_id + '.png', \"Image id and image name do not match.\"\n img = imread('{0}/{1}/images/{1}.png'.format(path, img_id, img_id))[:, :, :channel]\n ## Do normalization\n if normalize==1:\n img = (img.astype(np.float32) - img.mean())/max(1., img.std())\n elif normalize > 1:\n img = img.astype(np.float32)/normalize\n \n mask = np.zeros(img.shape[:2])\n for m in os.listdir('{0}/{1}/masks'.format(path, img_id)):\n mask_ = imread('{0}/{1}/masks/{2}'.format(path, img_id, m))\n assert mask_.shape == img.shape[:2], \"Image shape and mask shape do not match.\"\n mask = np.maximum(mask, mask_)\n mask = mask.astype(np.bool)\n self._imgs.append(img)\n self._masks.append(mask.astype(np.float32))\n self._n = len(self._imgs)\n \n print(\"Time Usage: {0} sec\".format(str(time.time() - start_time)))\n print len(self._ids), len(self._imgs), len(self._masks)", "title": "" }, { "docid": "2d5185a0666e611dbe359493ceb869ab", "score": "0.5493848", "text": "def __init__(self, train_size, loss_function=nn.MSELoss()):\n super(ELBO, self).__init__()\n self.train_size = train_size\n self.loss_function = loss_function", "title": "" }, { "docid": "23bbb778c79d3b2fd8cd15edfea805be", "score": "0.5484616", "text": "def load_binarized_mnist():\n\n (train_images, _), (test_images, _) = tf.keras.datasets.mnist.load_data()\n\n train_images = train_images.reshape(train_images.shape[0], 28, 28, 1).astype('float32')\n test_images = test_images.reshape(test_images.shape[0], 28, 28, 1).astype('float32')\n\n # Normalizing the images to the range of [0., 1.]\n train_images /= 255.\n test_images /= 255.\n\n # Binarization\n train_images[train_images >= .5] = 1.\n train_images[train_images < .5] = 0.\n test_images[test_images >= .5] = 1.\n 
test_images[test_images < .5] = 0.\n\n return train_images, test_images", "title": "" }, { "docid": "5b43ef1d0b3e4a7599e2fbdc877bc00c", "score": "0.5482675", "text": "def bmnist(root='../data/', batch_size=128, num_workers=4, download=True):\n data_transforms = transforms.Compose([transforms.ToTensor(),\n Binarize(threshold=0.5)])\n\n dataset = torchvision.datasets.MNIST(\n root, train=True, transform=data_transforms, download=download)\n test_set = torchvision.datasets.MNIST(\n root, train=False, transform=data_transforms, download=download)\n\n train_dataset = data.dataset.Subset(dataset, np.arange(45000))\n val_dataset = data.dataset.Subset(dataset, np.arange(45000, 50000))\n\n train_loader = data.DataLoader(\n train_dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers, \n pin_memory=True)\n val_loader = data.DataLoader(\n val_dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers,\n drop_last=False)\n test_loader = data.DataLoader(\n test_set, batch_size=batch_size, shuffle=False, num_workers=num_workers,\n drop_last=False)\n\n return train_loader, val_loader, test_loader", "title": "" }, { "docid": "f8562c697e7374a8443c23ac5ed4406c", "score": "0.54824346", "text": "def __init__(self, nx, layers):\n if type(nx) is not int:\n raise TypeError(\"nx must be an integer\")\n if nx < 1:\n raise ValueError(\"nx must be a positive integer\")\n if type(layers) is not list:\n raise TypeError(\"layers must be a list of positive integers\")\n if len(layers) is 0:\n raise TypeError(\"layers must be a list of positive integers\")\n\n self.nx = nx\n self.layers = layers\n self.__L = len(layers)\n self.__cache = {}\n self.__weights = {}\n for l in range(self.__L):\n if layers[l] <= 0 or type(layers[l]) is not int:\n raise TypeError(\"layers must be a list of positive integers\")\n if l is 0:\n he_init = np.random.randn(layers[l], nx)*np.sqrt(2/nx)\n self.__weights['W' + str(l+1)] = he_init\n if l > 0:\n he_init1 = np.random.randn(layers[l], layers[l-1])\n he_init2 = np.sqrt(2/layers[l-1])\n self.__weights['W' + str(l+1)] = he_init1 * he_init2\n self.__weights['b' + str(l + 1)] = np.zeros((layers[l], 1))", "title": "" }, { "docid": "265aad832adc385eb6683c520662de7e", "score": "0.5479937", "text": "def __init__(self, size):\r\n self._storage = []\r\n self._eps_dict = {}\r\n self._maxsize = size\r\n self._next_idx = 0\r\n self._hit_count = np.zeros(int(size))\r\n self._num_added = 0\r\n self._num_sampled = 0\r\n self._episodes_registry = {}", "title": "" }, { "docid": "98e110640c0a25cdbbbe2879a0cf18ce", "score": "0.5469335", "text": "def __init__(self, layers):\n self.weights = {}\n self.biases = {}\n self.layers = layers # length L\n self.L = len(layers)\n for i in range(2, len(layers) + 1):\n self.weights[i] = np.random.randn(layers[i - 1], layers[i - 2])\n self.biases[i] = np.random.randn(layers[i - 1], 1)", "title": "" }, { "docid": "96541f5ebe28bd1125d00623c0e2d35c", "score": "0.5462261", "text": "def loading():\n (x_train, y_train), (x_test, y_test) = mnist.load_data()\n assert x_train.shape == (60000, 28, 28)\n assert x_test.shape == (10000, 28, 28)\n assert y_train.shape == (60000,)\n assert y_test.shape == (10000,) \n\n temp = []\n for i in range(len(y_train)):\n temp.append(to_categorical(y_train[i], num_classes=10))\n y_train = np.array(temp)\n temp = []\n for i in range(len(y_test)): \n temp.append(to_categorical(y_test[i], num_classes=10))\n y_test = np.array(temp)\n \n return x_train, x_test, y_train, y_test", "title": "" }, { "docid": 
"e9d02cdb28352ca67d8f57a99e758887", "score": "0.54538333", "text": "def __init__(self, n_features: int = 5):\n self.n_features = int(n_features)\n self.n_samples = int(1e15)", "title": "" }, { "docid": "de6e0a34faab80de451ed1e33f655823", "score": "0.5447168", "text": "def load_MNIST_clusters(self, digits_used=[2, 8], image_set='training',\n images_base_dir='/home/escalafl/Databases/MNIST'):\n from . import mnist\n\n images, labels = mnist.read(digits_used, image_set, images_base_dir)\n labels = labels.reshape((-1,))\n\n num_images = len(labels)\n all_indices = numpy.arange(num_images)\n\n clusters = {}\n for i, digit in enumerate(digits_used):\n # print \"cluster \", i, \" for digit \", digit\n cluster_indices = all_indices[(labels == digit)]\n clusters[digit] = cluster_indices\n numpy.random.shuffle(clusters[digit])\n print(\"cluster %d for digit %d has %d images\" % (i, digit, len(cluster_indices)))\n return clusters, images", "title": "" }, { "docid": "3a0be626feccaf432dee79a6c4455cc3", "score": "0.5442395", "text": "def load_deep_lesion(self, dataset_dir, subset): #I don't think we need dataset directory.\n # Add classes. We have only one class to add.\n self.add_class(\"Lesion\", 1, \"Bone\") # \"Bone\"\n self.add_class(\"Lesion\", 2, \"Abdomen_notLiver_notKidney\") # \"Abdomen_notLiver_notKidney\"\n self.add_class(\"Lesion\", 3, \"Mediastinum\") # \"Mediastinum\"\n self.add_class(\"Lesion\", 4, \"Liver\") # \"Liver\"\n self.add_class(\"Lesion\", 5, \"Lung\") #\"Lung\"\n self.add_class(\"Lesion\", 6, \"Kidney\") #\"Kidney\"\n self.add_class(\"Lesion\", 7, \"Soft_tissue\") #Soft tissue: miscellaneous lesions in the body wall, muscle, skin, fat, limbs, head, and neck\n self.add_class(\"Lesion\", 8, \"Pelvis\") #\"Pelvis\"\n \n \n \n \n \n ##################### UNKNOWN CASES, WE WILL LEAVE THESE OUT. \n #self.add_class(\"-1\", 9, \"-1\") #Can i have a negative number here? This is straight from Deep Lesion format. \n \n#########################\n # Train or validation dataset?\n \n #assert subset in [\"train\", \"val\"]\n #dataset_dir = os.path.join(dataset_dir, subset)\n\n ###### ---> SAVED under \"Image\" in data.json, there is variable called \"train_val_test\" \n #which has a int value (0-2 or 1-3) indicating it is training, test or validation. \n # NEED TO FIGURE OUT HOW TO USE THIS TO ASSIGN TRAIN, VALIDATION, TEST. \n##########################\n\n\n # Load annotations\n # VGG Image Annotator (up to version 1.6) saves each image in the form:\n # { 'filename': '28503151_5b5b7ec140_b.jpg',\n # 'regions': {\n # '0': {\n # 'region_attributes': {},\n # 'shape_attributes': {\n # 'all_points_x': [...],\n # 'all_points_y': [...],\n # 'name': 'polygon'}},\n # ... more regions ...\n # },\n # 'size': 100202\n # }\n # We mostly care about the x and y coordinates of each region\n # Note: In VIA 2.0, regions was changed from a dict to a list.\n annotations = json.load(open(os.path.join(DEXTR_DIR, \"data.json\")))\n #annotations_seg = list(annotations.values()) # don't need the dict keys\n\n annotations_seg = annotations['annotations']\n \n #annotations_seg = segmentation[2]\n\n # The VIA tool saves images in the JSON even if they don't have any\n # annotations. 
Skip unannotated images.\n #annotations_seg = [a for a in annotations_seg if a['segmentation']]\n\n\n \n b=0\n for a in annotations_seg:\n image_info = annotations['images'][b]\n win = annotations_seg[b]['Windowing']\n image_id = annotations['images'][b]['id']\n image_cat = annotations['categories'][b]['category_id']\n\n \n \n \n ############\n ############\n ########## Copy Food.py\n polygons=[]\n objects=[]\n #for r in a['regions'].values():\n for r in a['regions']:\n polygons.append(r['shape_attributes'])\n # print(\"polygons=\", polygons)\n objects.append(r['region_attributes'])\n \n class_ids = [int(n['Lesion']) for n in objects]\n \n \n \n train_valid_test = annotations['images'][b]['Train_Val_Test']\n #### Must use index 'b' before here, because after this point it\n # will point to next image/index/\n b=b+1\n \n \n # Get the x, y coordinaets of points of the polygons that make up\n # the outline of each object instance. These are stores in the\n # shape_attributes (see json format above)\n # The if condition is needed to support VIA versions 1.x and 2.x.\n ######## polygons \n # needs to be a list of dictionaries for each lesion\n # so if there is one lesion.\n # polygons = list of size 1\n # dict of size 3. \n # all_point_x is list of variable size(depends on number of points)\n # same for y\n # name is str 'polygon'\n\n \n \n # load_mask() needs the image size to convert polygons to masks.\n # Unfortunately, VIA doesn't include it in JSON, so we must read\n # the image. This is only managable since the dataset is tiny.\n \n ###### LETS STORE PATH TO IMAGE INTO JSON FILE. \n \n ## --- RERUN. added to images under annotations. current_file_path\n #image_path = image_info['File_path']\n #image_path = os.path.join(dataset_dir, a['filename'])\n image_path = os.path.join(DEXTR_DIR,image_info['File_path'])\n \n \n #***************************************************************\n ########################################################### use this to import all png files in this directory and load them as blanks. NaN for segmentation. \n #############################################################\n #********************************************************************\n \n \n # use files_bg to add in non-segmentated images to the dataset. \n ##\n #\n #\n # polygons = [] ? or polygons = NaN?\n # Need to figure out what format will work so it will train on these background images and not throw an error. \n #\n #\n ##\n \n \n\n \n \n \n ################### image format should be: unit8 rgb \n #image = skimage.io.imread(image_path)\n ############################### WORKS! LOAD WITH DEFAULT WINDOWING.\n #image = util.load_im_with_default_windowing(win,image_path)\n \n ############################### LOAD WITH default 16bit format?\n image = cv2.imread(image_path, -1)\n\n \n ###############\n \n height, width = image.shape[:2]\n\n #### SEE IMAGE_INFO, INFO BELOW: I think it gets this from here\n #### SEE IMAGE_INFO, INFOb = randint(0,len(annotations_seg)) BELOW: I think it gets this from here\n #### SEE IMAGE_INFO, INFO BELOW: I think it gets this from here\n #### SEE IMAGE_INFO, INFO BELOW: I think it gets this from here\n #### SEE IMAGE_INFO, INFO BELOW: I think it gets this from here\n \n print(subset)\n print(int(image_cat))\n print(train_valid_test)\n #### PROBLEM: Most of the \"1\" training images are \"unknown\" and therefore worthless. There is 4831 in 3 and 4793 or so in 2. ZERO LABELS IMAGES IN 1. 
How lame is that?\n if subset == 'train' and train_valid_test==3 and int(image_cat) >= 0: #Last checks that there are no unknowns.\n print(\"Image Added for training\")\n print(\"Image Added for training\")\n print(\"Image Added for training\")\n print(\"Image Added for training\")\n print(\"Image Added for training\")\n \n\n self.add_image(\n ############ Replace balloon with CLASSES ABOVE, take from category. \n \"Lesion\",\n image_id=image_id, # id is set above. \n path=image_path,\n width=width, height=height,\n polygons=polygons,\n win=win,\n class_ids=class_ids) \n\n elif subset == 'val' and train_valid_test==2 and int(image_cat) >= 0: #Last checks that there are no unknowns.\n print(\"Image added for validation\")\n\n self.add_image(\n ############ Replace balloon with CLASSES ABOVE, take from category. \n \"Lesion\",\n image_id=image_id, # id is set above. \n path=image_path,\n width=width, height=height,\n polygons=polygons,\n win=win,\n class_ids=class_ids)\n \n else:\n print(\"No image added...\")\n print(\"Unknown, should say -1 below\")\n print(int(image_cat)) # 3 is saved for validation since we are using both training and validation (1&2) for training.\n print(\"train_valid_test\")\n print(train_valid_test)", "title": "" }, { "docid": "60cdcb51f8e1cf48c6a9e59ec9f785cd", "score": "0.54401577", "text": "def __init__(self, size):\n self.cache = None\n self.size = size\n self.layer_type = 'connected'\n self.weights = None\n self.biases = None", "title": "" }, { "docid": "a85196aa1934f41cab68bcc24a80439f", "score": "0.5435952", "text": "def get_and_process_MNIST_data(self):\n\n #mndata = MNIST() \n #self.train_images, self.train_labels = mndata.load_training() \n self.train_images, self.train_labels = np.reshape(mndata.train_images(),(60000,784)), mndata.train_labels()\n self.train_images, self.train_labels = self.train_images[:500], self.train_labels[:500] \n print(np.shape(self.train_images)) \n print(np.shape(self.train_labels)) \n ## labeling the pixels back \n self.train_images, self.train_labels = np.array([[1 if p > 0.5 else -1 for p in i] for i in self.train_images]), np.array(self.train_labels)\n \n ### i need to change the below code so it iterate through the matrix properly \n #self.train_images, self.train_labels = np.array([[1 if p > 0.5 else -1 for p in i] for i in self.train_images), np.array(self.train_labels)\n side_length = int(np.sqrt(self.train_images.shape[1]))\n self.orig_train_images = copy.deepcopy(self.train_images.reshape((self.train_images.shape[0], side_length, side_length)))\n self.noisy_train_images = np.zeros((self.train_images.shape[0], side_length, side_length))\n for im in np.arange(self.train_images.shape[0]):\n random_inds = random.sample(range(1, self.train_images.shape[1]), int(0.02 * self.train_images.shape[1]))\n self.train_images[im, random_inds] = np.where(self.train_images[im, random_inds] == -1, 1, -1)\n self.noisy_train_images[im, :, :] = self.train_images[im, :].reshape(side_length, side_length)\n self.side_length = side_length", "title": "" }, { "docid": "8839341faf50b58a58b84f4dd3341ed9", "score": "0.54333675", "text": "def __init__(self, hidden_layer_sizes=(100,), max_iter=200, verbose=False):\n super().__init__(\n hidden_layer_sizes=hidden_layer_sizes,\n max_iter=max_iter,\n verbose=verbose,\n )\n self._num_classes = 0\n self.epochs = 1", "title": "" }, { "docid": "163ea3ae9e52c21a4b1cb0837a3836b3", "score": "0.5431743", "text": "def __init__(self, path, **kwargs):\n super().__init__(path, **kwargs)\n\n self._largeImagePath = 
self._getLargeImagePath()\n # Read the root dzi file and check that the expected image files exist\n try:\n with builtins.open(self._largeImagePath) as fptr:\n if fptr.read(1024).strip()[:5] != '<?xml':\n msg = 'File cannot be opened via deepzoom reader.'\n raise TileSourceError(msg)\n fptr.seek(0)\n xml = ElementTree.parse(self._largeImagePath).getroot()\n self._info = etreeToDict(xml)['Image']\n except (ElementTree.ParseError, KeyError, UnicodeDecodeError):\n msg = 'File cannot be opened via deepzoom reader.'\n raise TileSourceError(msg)\n except FileNotFoundError:\n if not os.path.isfile(self._largeImagePath):\n raise TileSourceFileNotFoundError(self._largeImagePath) from None\n raise\n # We should now have a dictionary like\n # {'Format': 'png', # or 'jpeg'\n # 'Overlap': '1',\n # 'Size': {'Height': '41784', 'Width': '44998'},\n # 'TileSize': '254'}\n # and a file structure like\n # <rootname>_files/<level>/<x>_<y>.<format>\n # images will be TileSize+Overlap square; final images will be\n # truncated. Base level is either 0 or probably 8 (level 0 is a 1x1\n # pixel tile)\n self.sizeX = int(self._info['Size']['Width'])\n self.sizeY = int(self._info['Size']['Height'])\n self.tileWidth = self.tileHeight = int(self._info['TileSize'])\n maxXY = max(self.sizeX, self.sizeY)\n self.levels = int(math.ceil(\n math.log(maxXY / self.tileWidth) / math.log(2))) + 1\n tiledirName = os.path.splitext(os.path.basename(self._largeImagePath))[0] + '_files'\n rootdir = os.path.dirname(self._largeImagePath)\n self._tiledir = os.path.join(rootdir, tiledirName)\n if not os.path.isdir(self._tiledir):\n rootdir = os.path.dirname(rootdir)\n self._tiledir = os.path.join(rootdir, tiledirName)\n zeroname = '0_0.%s' % self._info['Format']\n self._nested = os.path.isdir(os.path.join(self._tiledir, '0', zeroname))\n zeroimg = PIL.Image.open(\n os.path.join(self._tiledir, '0', zeroname) if not self._nested else\n os.path.join(self._tiledir, '0', zeroname, zeroname))\n if zeroimg.size == (1, 1):\n self._baselevel = int(\n math.ceil(math.log(maxXY) / math.log(2)) -\n math.ceil(math.log(maxXY / self.tileWidth) / math.log(2)))\n else:\n self._baselevel = 0", "title": "" }, { "docid": "d7ce2ce3a4a5e6425ace05cec2bd6675", "score": "0.5430628", "text": "def __init__(self, size):\n self.size = size\n self.bias = npy.random.uniform(0, 1)\n self.weights = []\n for _ in range(0, self.size):\n self.weights.append(npy.random.uniform(0, 1))\n self.inputs = [] # storing inputs for later\n self.output = 0\n self.delta = 0", "title": "" }, { "docid": "beec6e6905611c730ece2313d52ff785", "score": "0.5412695", "text": "def __init__(self, nx, layers):\n if type(nx) != int:\n raise TypeError(\"nx must be an integer\")\n if nx < 1:\n raise ValueError(\"nx must be a positive integer\")\n\n if type(layers) != list or len(layers) == 0:\n raise TypeError(\"layers must be a list of positive integers\")\n\n self.L = len(layers)\n self.cache = {}\n self.weights = {}\n for i in range(0, self.L):\n if layers[i] < 0:\n raise TypeError(\"layers must be a list of positive integers\")\n if i == 0:\n self.weights[\"W\" + str(i + 1)] = np.random.randn(\n layers[i], nx)*np.sqrt(2/(nx))\n self.weights[\"b\" + str(i + 1)] = np.zeros((layers[i], 1))\n else:\n self.weights[\"W\" + str(i + 1)] = np.random.randn(\n layers[i], layers[i-1]) * np.sqrt(2/(layers[i-1]))\n self.weights[\"b\" + str(i + 1)] = np.zeros((layers[i], 1))", "title": "" }, { "docid": "e60b49cc3a81e8f67ea5d0c4724b419a", "score": "0.5407968", "text": "def __init__(self, pool_size):\n 
self.pool_size = pool_size\n if self.pool_size > 0: # create an empty pool\n self.num_imgs = 0\n self.images = []", "title": "" }, { "docid": "b1bdffdc896f875c5020d12597efd265", "score": "0.54054046", "text": "def make_DBN():\n dataset = datasets.fetch_mldata(\"MNIST Original\") # mnist original, uci 20070111 letter, datasets uci letter\n\n # Split our easy dataset so we can use it to train the DBN\n (trainX, testX, trainY, testY) = train_test_split(dataset.data/float(dataset.data.max()), # DBM only understands 0.0-1.0\n dataset.target.astype(\"int0\"),\n test_size=0.33)\n\n # Make Deep Belief Network\n dbn = DBN(\n # [nodes in input layer (pixel size of vectorized dataset),\n # nodes in hidden layer(s),\n # nodes in output layer (size of output dataset)]\n [trainX.shape[1], 1000, 300, 10], # out = 10,\n learn_rates=0.3,\n learn_rate_decays=0.9,\n epochs=10,\n verbose=1\n )\n # Teach DBM to recognize digits\n dbn.fit(trainX, trainY)\n\n # Predict using our test set\n preds = dbn.predict(testX)\n# print classification_report(testY, preds)\n\n\n # Examine random selection from our test set\n for i in np.random.choice(np.arange(0, len(testY)), size=(100,)):\n # Classify actual digit\n pred = dbn.predict(np.atleast_2d(testX[i]))\n\n # Reshape to 28x28\n image = (testX[i] * 255).reshape((28,28)).astype('uint8')\n\n # Show image and prediction\n seen = pred[0]\n actual = testY[i]\n if seen != actual:\n print \"Actual digit is {0}, saw {1}\".format(actual, seen)\n # plt.imshow(image)\n # plt.show()\n\n with open('dbn.pickle', 'wb') as f:\n pickle.dump(dbn, f)", "title": "" }, { "docid": "0a1e9ed9271e677b28f7cded66ee1d38", "score": "0.54044276", "text": "def load_data(path, ):\n\n # Read all EMNIST test and train data\n mndata = MNIST(path)\n\n X_train, y_train = mndata.load(path + '/emnist-byclass-train-images-idx3-ubyte',\n path + '/emnist-byclass-train-labels-idx1-ubyte')\n X_test, y_test = mndata.load(path + '/emnist-byclass-test-images-idx3-ubyte',\n path + '/emnist-byclass-test-labels-idx1-ubyte')\n\n # Read mapping of the labels and convert ASCII values to chars\n mapping = []\n\n with open(path + '/emnist-byclass-mapping.txt') as f:\n for line in f:\n mapping.append(chr(int(line.split()[1])))\n\n X_train = np.array(X_train)\n y_train = np.array(y_train)\n X_test = np.array(X_test)\n y_test = np.array(y_test)\n\n X_train = normalize(X_train)\n X_test = normalize(X_test)\n\n X_train = reshape_for_cnn(X_train)\n X_test = reshape_for_cnn(X_test)\n\n y_train = preprocess_labels(y_train, len(mapping))\n y_test = preprocess_labels(y_test, len(mapping))\n\n return X_train, y_train, X_test, y_test, mapping", "title": "" }, { "docid": "470bd542ba3e9473555070b6c85c7079", "score": "0.5402779", "text": "def __init__(self, numelem=100, cycles=1, maxweight=100, filename=\"data\", printonce=False, verbose=False):\n\n self.printonce = printonce\n self.verbose = verbose\n self.executiontime = 0\n self.__numelem = numelem\n self.weights = numpy.random.random_integers(1, maxweight, size=self.__numelem).astype(numpy.uint32)\n self.values = numpy.random.random_integers(1, maxweight*2, size=self.__numelem).astype(numpy.uint32)\n self.sumofweights = numpy.uint32(self.weights.sum())\n self.capacity = numpy.uint32(self.sumofweights/2)\n self.f0 = numpy.zeros(self.capacity+1).astype(numpy.uint32)\n self.f1 = numpy.zeros_like(self.f0)\n self.m_d = numpy.zeros_like(self.f0)\n self.cycles = cycles", "title": "" }, { "docid": "fa300c5743e388c73a00b979898ff5b4", "score": "0.53982687", "text": "def __init__(self, 
embed_size, model_path=None):\n super(EncoderCNN, self).__init__()\n resnet = models.resnet152(pretrained=True)\n modules = list(resnet.children())[:-1] # delete the last fc layer.\n self.resnet = nn.Sequential(*modules)\n self.linear = nn.Linear(resnet.fc.in_features, embed_size)\n self.bn = nn.BatchNorm1d(embed_size, momentum=0.01)\n self.init_weights(model_path)", "title": "" }, { "docid": "6c8950db980c8e5032def8fa6982637c", "score": "0.5389867", "text": "def __init__(self, layers, optimizer, minibatch_size=100, learning_rate=0.001):\n self._layers = layers\n self._optimizer = optimizer\n self._learning_rate = learning_rate\n self._minibatch_size = minibatch_size # size of minibatch samples\n # add widen and deepen functionality", "title": "" }, { "docid": "2b3e8c261a3147ecbf4c8933353e312e", "score": "0.53868204", "text": "def mnist_noniid_class(dataset, num_users, class_per_user):\n # 60,000 training imgs --> 300 imgs/shard X 200 shards\n # num_shards, num_imgs = 200, 300\n # idx_shard = [i for i in range(num_shards)]\n dict_users = {i: np.array([]) for i in range(num_users)}\n dict_class = {}\n\n idxs = np.arange(200*300)\n labels = dataset.train_labels.numpy()\n labels_his=Counter(labels)\n\n # sort labels\n idxs_labels = np.vstack((idxs, labels))\n idxs_labels = idxs_labels[:, idxs_labels[1, :].argsort()]\n\n total_class=len(np.unique(labels))\n print('total_class',total_class)\n per_class_avg_users=int(num_users/total_class*class_per_user)\n print('per_class_avg_users',per_class_avg_users)\n remain_users=num_users*class_per_user-per_class_avg_users*total_class\n print('remain_users',remain_users)\n last_class_users=per_class_avg_users+remain_users\n print('last_class_users',last_class_users)\n\n idx_accumulate=0\n shard=0\n idx_shard={}\n for i in range(total_class):\n if i==total_class-1:\n per_class_avg_users=last_class_users\n idxs_per_class = idxs_labels[0, idx_accumulate:idx_accumulate+labels_his[i]]\n # print('idxs_per_class',idxs_per_class)\n num_imgs_per_user=int(len(idxs_per_class)/per_class_avg_users) \n dict_class[i]=shard+np.arange(per_class_avg_users)\n #idx of per shard \n for k in range(per_class_avg_users):\n idx_shard[shard+k]= idxs_per_class[k*num_imgs_per_user:(k+1)*num_imgs_per_user]\n\n shard+=per_class_avg_users\n print('shard',shard)\n idx_accumulate+=labels_his[i] \n num_shard= [k for k in range(shard)]\n\n print(len(idx_shard)) \n print('dict_class',dict_class)\n # divide and assign class_per_user shards/client\n # ensure each user has class_per_user\n for i in range(num_users):\n # print('user',i)\n flag=1\n while flag:\n if len(num_shard)>class_per_user:\n rand_set = np.random.choice(num_shard, class_per_user, replace=False)\n # print('rand_set',rand_set)\n dict_keys=[]\n for j in rand_set:\n dict_keys.append([key for idx, key in enumerate(dict_class) if j in dict_class[key]])\n if len(np.unique(dict_keys))==class_per_user:\n flag=0\n for rand in rand_set:\n dict_users[i] = np.concatenate((dict_users[i], idx_shard[rand]), axis=0).astype(int)\n num_shard = list(set(num_shard) - set(rand_set))\n else:\n rand_set = num_shard\n flag=0\n for rand in rand_set:\n dict_users[i] = np.concatenate((dict_users[i], idx_shard[rand]), axis=0).astype(int)\n num_shard = list(set(num_shard) - set(rand_set))\n print('num_shard',num_shard)\n return dict_users", "title": "" }, { "docid": "c0483f18a170f8f89b313e6216a457e1", "score": "0.53723264", "text": "def load_mnist(images_path, labels_path):\n with open(labels_path, 'rb') as lbpath:\n struct.unpack('>II', 
lbpath.read(8))\n labels = np.fromfile(lbpath, dtype=np.uint8)\n with open(images_path, 'rb') as imgpath:\n struct.unpack('>IIII', imgpath.read(16))\n images = np.fromfile(imgpath, dtype=np.uint8).reshape(len(labels), 28, 28, 1)\n return images, labels", "title": "" }, { "docid": "369613a70b6bfe8f8d879b35c70f1343", "score": "0.53714097", "text": "def __init__(self, sizes):\n self.num_layers = len(sizes)\n self.sizes = sizes\n # play with biases to start from i/p layer, i.e., ... fory in sizes[:-1]\n self.biases = [np.random.randn(y, 1) for y in sizes[1:]] \n self.weights = [np.random.randn(y, x)\n for x, y in zip(sizes[:-1], sizes[1:])]", "title": "" }, { "docid": "bb8e4f5779759228f9c5a07b00fa6920", "score": "0.53709626", "text": "def load_mnist_data():\n\n file_name = 'dataset/mnist.pkl.gz'\n\n # pickle.load return ((train_x, train_y), (valid_x, valid_y), (test_x, test_y))\n with gzip.open(file_name, 'rb') as f:\n train_set, valid_set, test_set = pickle.load(f, encoding='latin1')\n\n train_x, train_y = train_set\n valid_x, valid_y = valid_set\n\n # concatenation valid set to train set\n train_x = np.vstack((train_x, valid_x))\n train_y = np.append(train_y, valid_y)\n\n test_x, test_y = test_set\n\n return (train_x, train_y, test_x, test_y)", "title": "" }, { "docid": "298a3ab7f183b172c7d1583b52e3318f", "score": "0.53653", "text": "def __init__(self, embed_size):\n super(EncoderCNN, self).__init__()\n resnet = models.resnet152(pretrained=True)\n modules = list(resnet.children())[:-1] # delete the last fc layer.\n self.resnet = nn.Sequential(*modules)\n self.linear = nn.Linear(resnet.fc.in_features, embed_size)\n self.bn = nn.BatchNorm1d(embed_size, momentum=0.01)\n self.init_weights()", "title": "" }, { "docid": "3f873d1046774486e26d2bd3671deee3", "score": "0.5363259", "text": "def __init__(self, hidden_layer_sizes=(100,), max_iter=200, verbose=False):\n super().__init__(\n hidden_layer_sizes=hidden_layer_sizes,\n max_iter=max_iter,\n verbose=verbose,\n )\n self.epochs = 1", "title": "" }, { "docid": "51980cef0426fc5da83574956d4cd948", "score": "0.5359663", "text": "def load_MiniImageNet():\n\n base_path = '/home/USER/Documents'\n if (not (os.path.exists(base_path))):\n base_path = '/home/ubuntu/Projects'\n if (not (os.path.exists(base_path))):\n base_path = '/home/USER/Projects'\n if (not (os.path.exists(base_path))):\n base_path = '/home/USER/Projects'\n \n data_path = base_path + '/MAML/input_data/miniImageNet/'\n train_tasks_file = open(data_path + 'miniImageNet_train_tasks.txt', 'rb')\n train_tasks = pickle.load(train_tasks_file)\n\n val_tasks_file = open(data_path + 'miniImageNet_val_tasks.txt', 'rb')\n val_tasks = pickle.load(val_tasks_file)\n\n test_tasks_file = open(data_path + 'miniImageNet_test_tasks.txt', 'rb')\n test_tasks = pickle.load(test_tasks_file)\n\n mtl_train_tasks = {}\n mtl_train_tasks['X'] = [np.concatenate((train_task['X_inner'], train_task['X_outer']), axis=0) for train_task in train_tasks]\n mtl_train_tasks['Y'] = [np.concatenate((train_task['Y_inner'], train_task['Y_outer']), axis=0) for train_task in train_tasks]\n\n return mtl_train_tasks, val_tasks, test_tasks", "title": "" }, { "docid": "7555885361549d9dd0b4334c765e7e1e", "score": "0.5358751", "text": "def __init__(self,test_size):\n self.letters = {0:'A',1:'B',2:'C',3:'D',4:'E',5:'F',6:'G',7:'H',8:'I',9:'J',10:'K',11:'L',12:'M',13:'N',14:'O',15:'P',16:'Q',17:'R',18:'S',19:'T',20:'U',21:'V',22:'W',23:'X', 24:'Y',25:'Z'}\n self.data_path = \"A_Z Handwritten Data.csv\"\n self.test_size = test_size\n 
self.X_train = None\n self.X_test = None\n self.y_train = None\n self.y_test = None\n self.model = None\n self.y_train_cat = None\n self.y_test_cat = None\n self.history = None\n self.cm = None\n self.y_pred = None", "title": "" }, { "docid": "975313f00f937f602974a2a3b83ba336", "score": "0.5351087", "text": "def parse_image():\r\n LABELS = ['t10k-images.idx3-ubyte',\r\n 't10k-labels.idx1-ubyte',\r\n 'train-images.idx3-ubyte',\r\n 'train-labels.idx1-ubyte']\r\n\r\n TRAIN_IMAGE = LABELS[2]\r\n TRAIN_LABELS = LABELS[3]\r\n TEST_IMAGE = LABELS[0]\r\n TEST_LABELS = LABELS[1]\r\n\r\n f_image = open('mnist/' + TRAIN_IMAGE, 'rb')\r\n f_label = open('mnist/' + TRAIN_LABELS, 'rb')\r\n\r\n i_magic, n_images, n_row, n_col = struct.unpack(\">4I\", f_image.read(16))\r\n l_magic, n_labels = struct.unpack(\">2I\", f_label.read(8))\r\n\r\n assert n_labels == n_images\r\n\r\n training = [tuple([np.array([[i/255.0]\r\n for _ in range(n_row)\r\n for i in struct.unpack(\">%dB\" % (n_col), f_image.read(n_col))]),\r\n vectorized_result(struct.unpack(\">B\", f_label.read(1))[0])])\r\n for _ in range(n_images-10000)]\r\n\r\n validation = [tuple([np.array([[i/255.0]\r\n for _ in range(n_row)\r\n for i in struct.unpack(\">%dB\" % (n_col), f_image.read(n_col))]),\r\n vectorized_result(struct.unpack(\">B\", f_label.read(1))[0])])\r\n for _ in range(n_images-10000, n_images)]\r\n\r\n f_image.close()\r\n f_label.close()\r\n\r\n f_image = open('mnist/' + TEST_IMAGE, 'rb')\r\n f_label = open('mnist/' + TEST_LABELS, 'rb')\r\n\r\n i_magic, n_images, n_row, n_col = struct.unpack(\">4I\", f_image.read(16))\r\n l_magic, n_labels = struct.unpack(\">2I\", f_label.read(8))\r\n\r\n testing = [tuple([np.array([i/255.0\r\n for _ in range(n_row)\r\n for i in struct.unpack(\">%dB\" % (n_col), f_image.read(n_col))]),\r\n vectorized_result(struct.unpack(\">B\", f_label.read(1))[0])])\r\n for _ in range(n_images)]\r\n\r\n with open('mnist_data', 'wb') as f:\r\n pickle.dump((training, validation, testing), f)\r\n f.close()\r\n\r\n return training, validation, testing", "title": "" }, { "docid": "b9457a09a51dfb9ffd8a3d4b88704925", "score": "0.5348022", "text": "def load_mnist_realval(path=data_path, one_hot=True, dequantify=False):\n if not os.path.isfile(path):\n data_dir = os.path.dirname(path)\n if not os.path.exists(os.path.dirname(path)):\n os.makedirs(data_dir)\n # download_dataset('http://www.iro.umontreal.ca/~lisa/deep/data/mnist'\n # '/mnist.pkl.gz', path)\n download_dataset('https://oneflow-public.oss-cn-beijing.aliyuncs.com/datasets/zhusuan_oneflow/mnist.pkl.gz',\n path)\n \n\n f = gzip.open(path, 'rb')\n if six.PY2:\n train_set, valid_set, test_set = pickle.load(f)\n else:\n train_set, valid_set, test_set = pickle.load(f, encoding='latin1')\n f.close()\n x_train, t_train = train_set[0], train_set[1]\n x_valid, t_valid = valid_set[0], valid_set[1]\n x_test, t_test = test_set[0], test_set[1]\n # x_train, t_train = train_set[0][:64*500], train_set[1][:64*500]\n # x_valid, t_valid = valid_set[0][:64*50], valid_set[1][:64*50]\n # x_test, t_test = test_set[0][:64*10], test_set[1][:64*10]\n if dequantify:\n x_train += np.random.uniform(0, 1. / 256,\n size=x_train.shape).astype('float32')\n x_valid += np.random.uniform(0, 1. / 256,\n size=x_valid.shape).astype('float32')\n x_test += np.random.uniform(0, 1. 
/ 256,\n size=x_test.shape).astype('float32')\n n_y = t_train.max() + 1\n t_transform = (lambda x: to_one_hot(x, n_y)) if one_hot else (lambda x: x)\n return x_train, t_transform(t_train), x_valid, t_transform(t_valid), \\\n x_test, t_transform(t_test)", "title": "" }, { "docid": "7a214882a9e05c68f2d56950e031e18b", "score": "0.5346792", "text": "def mnist_noniid(args, dataset, num_users, n_list, k_list):\n\n # 60,000 training imgs --> 200 imgs/shard X 300 shards\n num_shards, num_imgs = 10, 6000\n idx_shard = [i for i in range(num_shards)]\n dict_users = {}\n idxs = np.arange(num_shards*num_imgs)\n labels = dataset.train_labels.numpy()\n # sort labels\n idxs_labels = np.vstack((idxs, labels))\n idxs_labels = idxs_labels[:, idxs_labels[1, :].argsort()]\n idxs = idxs_labels[0, :]\n label_begin = {}\n cnt=0\n for i in idxs_labels[1,:]:\n if i not in label_begin:\n label_begin[i] = cnt\n cnt+=1\n\n classes_list = []\n for i in range(num_users):\n n = n_list[i]\n k = k_list[i]\n k_len = args.train_shots_max\n classes = random.sample(range(0,args.num_classes), n)\n classes = np.sort(classes)\n print(\"user {:d}: {:d}-way {:d}-shot\".format(i + 1, n, k))\n print(\"classes:\", classes)\n user_data = np.array([])\n for each_class in classes:\n # begin = i*10 + label_begin[each_class.item()]\n begin = i * k_len + label_begin[each_class.item()]\n user_data = np.concatenate((user_data, idxs[begin : begin+k]),axis=0)\n dict_users[i] = user_data\n classes_list.append(classes)\n\n return dict_users, classes_list\n #\n #\n #\n #\n #\n # # divide and assign 2 shards/client\n # for i in range(num_users):\n # rand_set = set(np.random.choice(idx_shard, n_list[i], replace=False))\n # idx_shard = list(set(idx_shard) - rand_set)\n # for rand in rand_set:\n # dict_users[i] = np.concatenate(\n # (dict_users[i], idxs[rand*num_imgs:(rand+1)*num_imgs]), axis=0)\n # return dict_users", "title": "" }, { "docid": "77550f7146ed3484d918c9895ced1ff5", "score": "0.5345506", "text": "def __init__(self, input_dict):\n self.instance = tik.Tik(tik.Dprofile())\n self.dtype = input_dict.get(\"x\").get(\"dtype\").lower()\n self.dsize = common_util.get_data_size(self.dtype)\n total_size = tbe_platform.cce_conf.get_soc_spec(tbe_platform.cce_conf.UB_SIZE)\n ub_size = (total_size - RESERVE_SIZE) // (2 * self.dsize)\n burnest_len = constant.BLOCK_SIZE // self.dsize\n ub_size = ((ub_size + burnest_len - 1) // burnest_len) * burnest_len\n self.one_max_size = ub_size\n x_len = get_shape_total_number(input_dict.get(\"x\").get(\"shape\"))\n x_len = ((x_len + burnest_len - 1) // burnest_len) * burnest_len\n hw = input_dict.get(\"y\").get(\"shape\")[2] * \\\n input_dict.get(\"y\").get(\"shape\")[3]\n mod = hw % burnest_len\n if mod != 0:\n x_len = x_len + burnest_len\n self.x_gm = self.instance.Tensor(self.dtype, (x_len,), name=\"x_gm\",\n scope=tik.scope_gm)\n self.y_gm = self.instance.Tensor(self.dtype, (x_len,), name=\"y_gm\",\n scope=tik.scope_gm)\n self.input_dict = input_dict", "title": "" }, { "docid": "2d71e32a8e187a45ca323630c79aaf6c", "score": "0.5339088", "text": "def get_train(self, even=None, min_blobs=1, max_blobs=1): # MT\n self.images, self.labels, self.areas = create_data_natural.generate_data(even, min_blobs, max_blobs, scalar_output=True)\n self.length = len(self.images)", "title": "" }, { "docid": "54e9dd8f1c7bf87977d7b32c5d7eb3ae", "score": "0.5338825", "text": "def __init__(self, inputSize, layers=[], costFunction=None):\n self.inputSize = inputSize\n self.layers = layers\n self.costFunction = costFunction\n 
self._weights = []\n self.d_weights = []\n self.layerCache=[]", "title": "" }, { "docid": "c7257ad3fdda6bd45ad2a3a9b6d81cf7", "score": "0.533793", "text": "def __init__(self, data, train_labels, train_indices, train_size):\n self.data = data\n self.train_labels = train_labels\n self.train_indices = train_indices\n self.train_size = train_size\n if len(self.classes) > train_size:\n raise ValueError(\"The train size can not be smaller than the number of classes.\")", "title": "" }, { "docid": "347c4319a0c62185d73a5416e8e5a6ff", "score": "0.5336137", "text": "def __init__(self, ext_mem: Dict, mem_size: int, adaptive_size=True,\n total_num_classes=-1):\n self.ext_mem = ext_mem\n self.mem_size = mem_size\n self.adaptive_size = adaptive_size\n self.total_num_classes = total_num_classes\n self.seen_classes = set()\n\n if not self.adaptive_size:\n assert self.total_num_classes > 0, \\\n \"\"\"When fixed exp mem size, total_num_classes should be > 0.\"\"\"", "title": "" }, { "docid": "65e6b5257df783629c930a5bc66ea7a1", "score": "0.53353137", "text": "def load_train(self):\n images, labels = self.load(os.path.join('mnist', 'train', 'images'),\n os.path.join('mnist', 'train', 'labels'))\n self.train_data = list(zip(images, labels))", "title": "" }, { "docid": "f9dc526202ac9c72af509fb05945b74e", "score": "0.53287077", "text": "def __init__(self):\n #self.lambda_mixture = 0.86752\n self.lambda_mixture = 0.86\n self.label_dict = {}\n self.num_class = 14\n self.prior = []\n self.vocab = 0\n self.class_vocab = []\n self.vocab_bigram = 0\n self.class_vocab_bigram = []\n self.label_dict_bigram = {}", "title": "" }, { "docid": "90e86efff4639867ee68d811d92584df", "score": "0.5319188", "text": "def __init__(self, images, labels, type_=\"mnist\"):\n images = self._normalize(images, type_)\n labels = (labels).astype(np.int64)\n self.images = torch.from_numpy(images)\n self.labels = torch.from_numpy(labels)\n self.num_samples = images.shape[0]", "title": "" }, { "docid": "56fbf6ca7b20aa2e66eb256130336d76", "score": "0.5316506", "text": "def mnist(batch_size=64, size=28, path_to_data='../../mnist_data'):\n all_transforms = transforms.Compose([\n transforms.Resize(size),\n transforms.ToTensor()\n ])\n\n train_data = datasets.MNIST(path_to_data, train=True, download=True,\n transform=all_transforms)\n test_data = datasets.MNIST(path_to_data, train=False,\n transform=all_transforms)\n\n train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True)\n test_loader = DataLoader(test_data, batch_size=batch_size, shuffle=True)\n\n return train_loader, test_loader", "title": "" }, { "docid": "3940b6b86239bd177b2ca011aa0ff5b5", "score": "0.5307942", "text": "def __init__(self):\n self.vert_dict = {}\n self.num_verticies = 0\n self.num_edges = 0", "title": "" }, { "docid": "85a7296f3c5e7fe74171f0b4fed755d0", "score": "0.5305711", "text": "def __init__(self, h5_path, input_size=None):\n self.input_size = input_size\n self.h5_path = h5_path\n self.x, self.y = self.load_list()", "title": "" }, { "docid": "95775e637e5125854d21d4c60cb1cfc7", "score": "0.530351", "text": "def __init__(self, batch_size, shuffle=False):\n train, _ = tf.keras.datasets.mnist.load_data()\n X, y = train\n X = X.astype(np.float32) / 255\n X = X.reshape((X.shape[0], -1))\n self.X, self.y = X, y\n self.batch_size, self.shuffle = batch_size, shuffle", "title": "" }, { "docid": "332ab19572c3da0b9e202cbb4ea76fe3", "score": "0.53026295", "text": "def __init__(self, hidden_width, depth, input_size, output_size):\n self.width = hidden_width\n 
self.depth = depth\n self.network = []\n\n # activations are in columns\n # minimum one hidden layer\n # depth = 0 => perceptron\n if depth == 0:\n self.network.append((np.random.normal(0, 1, (input_size + 1, output_size))))\n return\n\n # deep NN with normally distributed initial weights\n self.network.append(np.random.normal(0, 1, (input_size + 1, hidden_width)))\n for i in range(depth - 1):\n self.network.append(np.random.normal(0, 1, (self.network[-1].shape[1] + 1, hidden_width)))\n self.network.append(np.random.normal(0, 1, (self.network[-1].shape[1] + 1, output_size)))\n\n return", "title": "" }, { "docid": "b10f49cf4117d2060d9a12d7d58dc00b", "score": "0.5298141", "text": "def __init__( self\n , hidden_size\n , input_size):\n super(MLPEncodingCell, self).__init__()\n self._MLP = tf.keras.layers.Dense(hidden_size, activation='relu')\n self.hidden_size = hidden_size\n self.input_size = input_size\n self.state_size = tf.TensorShape([self.hidden_size])", "title": "" }, { "docid": "acf5a700e50e78ef46639c1208ec4c3f", "score": "0.52919877", "text": "def load_mnist():\r\n\r\n print('Loading train data...')\r\n train_data = torch.utils.data.DataLoader(\r\n torchvision.datasets.MNIST('mnist/', \r\n train=True, \r\n download=True,\r\n transform=torchvision.transforms.Compose([\r\n torchvision.transforms.ToTensor()\r\n ])),\r\n shuffle=True,)\r\n\r\n train_input = []\r\n train_label = []\r\n \r\n cnt = 0\r\n for batch, label in tqdm(train_data):\r\n train_input.append(batch.squeeze().numpy().reshape(784,))\r\n train_label.append(label.numpy())\r\n cnt += 1\r\n if cnt == 1300: break\r\n\r\n print('Loading test data...')\r\n test_data = torch.utils.data.DataLoader(\r\n torchvision.datasets.MNIST('mnist/', \r\n train=False, \r\n download=True,\r\n transform=torchvision.transforms.Compose([\r\n torchvision.transforms.ToTensor()\r\n ])),\r\n shuffle=True,)\r\n\r\n test_input = []\r\n test_label = []\r\n \r\n for batch, label in tqdm(test_data):\r\n test_input.append(batch.squeeze().numpy().reshape(784,))\r\n test_label.append(label.numpy())\r\n\r\n return np.array(train_input), np.array(train_label), np.array(test_input), np.array(test_label)", "title": "" }, { "docid": "878d7d61a85338a7814220a6687af10d", "score": "0.52839684", "text": "def __init__(self, embed_size):\n # pdb.set_trace()\n super(EncoderCNN, self).__init__()\n resnet = models.resnet152(pretrained=True)\n modules = list(resnet.children())[:-1] # delete the last fc layer.\n self.resnet = nn.Sequential(*modules)\n self.linear = nn.Linear(resnet.fc.in_features, embed_size)\n self.bn = nn.BatchNorm1d(embed_size, momentum=0.01)", "title": "" }, { "docid": "b6dfd6112fe6e2dc61fb6222112a8dcc", "score": "0.52795196", "text": "def load_train(self):\n images, labels = self.load(os.path.join('mnist', 'train', 'images'),\n os.path.join('mnist', 'train', 'labels'))\n self.train_data = zip(images, labels)", "title": "" }, { "docid": "307f7eefcfe692b04bf8051839437a14", "score": "0.5279294", "text": "def __init__(self, input_size, output_size, hidden_sizes=(64, 64)):\n super(Decoder, self).__init__()\n self.net = make_nn(input_size, output_size, hidden_sizes)", "title": "" } ]