Dataset preview columns: code (string, 20 to 1.05M characters), apis (sequence of fully qualified API names), extract_api (string, 75 to 5.24M characters).
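Each row below is one dataset record: the raw source file (code), the list of fully qualified library calls found in it (apis), and a serialized list of call records (extract_api). The short helper below is an editor-added sketch, not part of the dataset; it assumes each extract_api record begins with a (start, end) character span into the code string followed by the qualified call name, which matches the samples that follow but is an inference.

# Hypothetical helper (not from the dataset) for walking one row of this dump.
# Assumption: every extract_api record starts with a (start, end) offset pair
# into the row's code string, followed by the fully qualified API name.
import ast

def iter_api_calls(code: str, extract_api: str):
    records = ast.literal_eval(extract_api)  # the column is a printable Python literal
    for (start, end), qualified_name, *rest in records:
        yield qualified_name, code[start:end]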
#!/usr/bin/env python # -*- coding: utf-8 -*- """ test_faz ---------------------------------- Tests for `faz` module. """ import os import glob import time import unittest from faz import main, parser from faz.task import Task, TaskFailedException from faz.graph import CircularDependencyException FILE = """file=file999 # Using bash as the interpreter # file21, file22, $file <- touch file21 file22 echo "Output from the first task" echo $file touch $file # file3, file4 <- file2*, $file :force touch file3 file4 echo "Output from the last task" """ FILE1 = """ # Using bash as the interpreter # file1, file2 <- touch file1 file2 # file3, file4 <- file1, file2 echo "Hellow world! 1" > file3 echo "Hellow world! 1" > file4 # file5, file6 <- file3, file4 echo "Hellow world! 2" > file5 echo "Hellow world! 2" > file6 # file7, file8 <- file5, file6 echo "Hellow world! 3" > file7 echo "Hellow world! 3" > file8 # Now using python as the interpreter # file9, file10, file11 <- file5, file3 :python, force import sys a = [[range(3)], [range(4, 7)], [range(7, 10)]] f = open("file11", "w") for line in a: f.write(" ".join([str(i) for i in line])) f.close() open("file9", "w").write("Hello from python\\n") open("file10", "w").write("Hello from python\\n") # file22, file33 <- file1, file11 :ruby File.open("file22", 'w') { |file| file.write("Hi Ruby22!") } File.open("file33", 'w') { |file| file.write("Hi Ruby33!") } """ FILE2 = """ # Using bash as the interpreter # file1, file2 <- touch file3 file4 touch file1 file2 # file3, file4 <- file1, file2 echo "Hellow world! 1" > file3 echo "Hellow world! 1" > file4 # file5, file6 <- file3, file4 echo "Hellow world! 2" > file5 echo "Hellow world! 2" > file6 # file7, file8 <- file5, file6 echo "Hellow world! 3" > file7 echo "Hellow world! 
3" > file8 """ FILE3 = """ # Using bash as the interpreter # file1, file2 <- touch file3 file4 """ FILE4 = """ # Using bash as the interpreter # file3, file4 <- file1, file2 touch file3 file4 """ FILE5 = """ # Using bash as the interpreter # file1, file2 <- touch file5 touch file1 file2 # file3, file4 <- file1, file2 touch file3 file4 # file5 <- file3, file4 touch file5 """ FILE6 = """ # Using bash as the interpreter # file21, file22 <- touch file21 file22 # file3, file4 <- file2* touch file3 file4 """ FILE7 = """ test = 1 a = 2 b = 3 """ FILE8 = """ file=asdf # Using bash as the interpreter # file21, file22, $file <- touch file21 file22 touch $file # file3, file4 <- file2*, $file touch file3 file4 # file5, file6 <- file3, file4 touch $[OUTPUT1] touch $[OUTPUT2] """ FILE9 = """ # Using bash as the interpreter # file21, file22 <- file3 touch file21 file22 # file3, file4 <- file22, file21 touch file3 file4 """ FILE10 = """ #include: file1.txt #include: file2.txt # file3, file4 <- file1, file2 touch file3 file4 """ FILE11 = """ # Using bash as the interpreter # data/file1, data/file2 <- mkdir data touch data/file1 data/file2 # file3, file4 <- data/file2, data/file1 touch file3 file4 """ class TestFaz(unittest.TestCase): def setUp(self): pass def test_something(self): main.faz(FILE1) def tearDown(self): for fname in glob.glob("file*"): os.unlink(fname) class TestMain(unittest.TestCase): def setUp(self): f = open("fazfile", "w") f.write(FILE1) f.close() def test_something(self): main.main(arguments=[]) def tearDown(self): for fname in glob.glob("file*"): os.unlink(fname) os.unlink("fazfile") class TestInputFileDoesNotExist(unittest.TestCase): def setUp(self): pass @unittest.expectedFailure def test_something(self): main.main(arguments=["nonexistent_file"]) def tearDown(self): pass class TestMainDebug(unittest.TestCase): def setUp(self): f = open("fazfile", "w") f.write(FILE1) f.close() def test_something(self): main.main(arguments=["-d"]) def tearDown(self): for fname in glob.glob("file*"): os.unlink(fname) os.unlink("fazfile") class TestMissingInput(unittest.TestCase): def setUp(self): pass @unittest.expectedFailure def test_something(self): main.faz() def tearDown(self): pass class TestMissingInputs(unittest.TestCase): def setUp(self): pass def test_something(self): main.faz(FILE2) def tearDown(self): for fname in glob.glob("file*"): os.unlink(fname) class TestFazFileInDir(unittest.TestCase): def setUp(self): for fname in glob.glob(".faz/tmp*"): os.remove(fname) os.rmdir(".faz") f = open(".faz", "w") f.close() @unittest.expectedFailure def test_something(self): main.faz(FILE1) def tearDown(self): os.unlink(".faz") class TestOuputsNotCreated(unittest.TestCase): def setUp(self): pass @unittest.expectedFailure def test_something(self): main.faz(FILE3) def tearDown(self): pass class TestInputsDoNotExist(unittest.TestCase): def setUp(self): pass def test_something(self): main.faz(FILE4) def tearDown(self): pass class TestOutputsAreOlderThanInputs(unittest.TestCase): def setUp(self): pass def test_something(self): main.faz(FILE5) def tearDown(self): pass class TestWildcardInName(unittest.TestCase): def setUp(self): pass def test_something(self): main.faz(FILE6) def tearDown(self): pass class TestParser(unittest.TestCase): def setUp(self): pass def test_something(self): tasks = parser.parse_input_file(FILE1) self.failUnlessEqual(6, len(tasks)) def tearDown(self): pass class TestEnvironment(unittest.TestCase): def setUp(self): pass def test_something(self): env = 
parser.create_environment(FILE7.splitlines()) self.failUnlessEqual(env["test"], "1") self.failUnlessEqual(env["a"], "2") self.failUnlessEqual(env["b"], "3") def tearDown(self): pass class TestVariableExpansion(unittest.TestCase): def setUp(self): pass def test_something(self): main.faz(FILE8) def tearDown(self): for fname in glob.glob("file*"): os.unlink(fname) os.unlink('asdf') class TestCircularDependencyException(unittest.TestCase): def setUp(self): pass def test_something(self): with self.assertRaises(CircularDependencyException): main.faz(FILE9) def tearDown(self): pass class TestTaskMethods(unittest.TestCase): def setUp(self): self.filenames = ["file1", "file2", "file3", "file_1", "file_2", "file_3", "file__1", "file__2"] self.should_not_be_present = ["file4", "file5", "file6", "file7", "file8", "file9"] for filename in self.should_not_be_present: if os.path.exists(filename) and os.path.isfile(filename): os.unlink(filename) for filename in self.filenames: with open(filename, "w") as f: f.close() self.task = Task(["file[0-3]", "file_*"], ["file[4-6]"], ["touch file4\n", "touch file5\n", "touch file6\n", "echo $[test_var]\n", "echo $test_var\n"], ["force"], {"test_var": "test_var_value"}) def test_task(self): self.task() def test_code_variable_expansion(self): self.task.expand_variables() self.assertTrue(any([line for line in self.task.code if "test_var_value" in line])) def test_outputs_do_not_exist(self): task = Task(["file[0-3]", "file_*"], ["file99", "file234"], ["touch file4\n", "touch file5\n", "touch file6\n"], ["force"], {"test_var": "test_var_value"}) with self.assertRaises(TaskFailedException): task() def test_return_code_is_not_0(self): task = Task(["file[0-3]", "file_*"], ["file99", "file234"], ["touch file4\n", "touch file5\n", "touch file6\n", "ls non_existant_dir\n"], ["force"], {"test_var": "test_var_value"}) with self.assertRaises(TaskFailedException): task() def test_use_the_force(self): f = open("file22", "w") f.close() time.sleep(0.1) f = open("file33", "w") f.close() self.assertTrue(os.path.getmtime("file33") > os.path.getmtime("file22")) task = Task(["file22"], ["file33"], ["touch file33\n"], ["force"], {"test_var": "test_var_value"}) result = self.task.dependencies_are_newer(["file33"], ["file22"]) self.assertFalse(result) self.assertTrue(task.inputs == ["file22"]) self.assertTrue(task.outputs == ["file33"]) self.assertTrue(task.code == ["touch file33\n"]) self.assertTrue(task.options == ["force"]) self.assertTrue(task.interpreter == "bash") self.assertTrue(task.force) task() os.unlink("file22") os.unlink("file33") def test_files_exist(self): self.assertTrue(self.task.files_exist(["file1", "file2", "file3"])) def test_filename_shell_expansion(self): results = self.task.expand_filenames(["file[0-3]", "file_?", "file__*"]) for result, filename in zip(results, self.filenames): self.assertEqual(result, filename) def test_filename_variable_expansion(self): results = self.task.expand_filenames(["$test_var"]) self.assertEqual(results[0], "test_var_value") def test_nonexistant_file(self): results = self.task.expand_filenames(["file[4-9]"]) self.assertEqual(results[0], "NONEXISTENT") def test_dependencies_are_newer(self): for filename in ["old_file1", "old_file2"]: with open(filename, "w") as f: f.close() time.sleep(0.1) for filename in ["new_file1", "new_file2"]: with open(filename, "w") as f: f.close() result = self.task.dependencies_are_newer(["old_file1", "old_file2"], ["new_file1", "new_file2"]) self.assertTrue(result) [os.unlink(filename) for filename in 
["old_file1", "old_file2"]] [os.unlink(filename) for filename in ["new_file1", "new_file2"]] def test_dependencies_are_older(self): for filename in ["new_file1", "new_file2"]: with open(filename, "w") as f: f.close() time.sleep(0.1) for filename in ["old_file1", "old_file2"]: with open(filename, "w") as f: f.close() result = self.task.dependencies_are_newer(["old_file1", "old_file2"], ["new_file1", "new_file2"]) self.assertFalse(result) [os.unlink(filename) for filename in ["old_file1", "old_file2"]] [os.unlink(filename) for filename in ["new_file1", "new_file2"]] def tearDown(self): for filename in self.filenames: os.unlink(filename) class TestIncludeMechanism(unittest.TestCase): def setUp(self): with open("file1.txt", "w") as f: f.write("# file1 <- \ntouch file1\n") f.close() with open("file2.txt", "w") as f: f.write("# file2 <- \ntouch file2\n") f.close() def test_includes(self): main.faz(FILE10) self.assertTrue(os.path.isfile("file3")) self.assertTrue(os.path.isfile("file4")) def tearDown(self): for fname in ["file1", "file2", "file3", "file4", "file1.txt", "file2.txt"]: os.unlink(fname) class TestAbsPAth(unittest.TestCase): def setUp(self): pass def test_abspath(self): main.faz(FILE11) self.assertTrue(os.path.isdir(os.path.abspath("data"))) self.assertTrue(os.path.isfile("file3")) self.assertTrue(os.path.isfile("file4")) def tearDown(self): for fname in ["data/file1", "data/file2", "file3", "file4"]: os.unlink(os.path.abspath(fname)) os.rmdir('data') if __name__ == '__main__': unittest.main(verbosity=3)
[ "os.path.exists", "faz.task.Task", "faz.main.faz", "time.sleep", "faz.main.main", "os.rmdir", "os.remove", "os.path.isfile", "os.unlink", "unittest.main", "os.path.getmtime", "os.path.abspath", "glob.glob", "faz.parser.parse_input_file" ]
[((13012, 13038), 'unittest.main', 'unittest.main', ([], {'verbosity': '(3)'}), '(verbosity=3)\n', (13025, 13038), False, 'import unittest\n'), ((3141, 3156), 'faz.main.faz', 'main.faz', (['FILE1'], {}), '(FILE1)\n', (3149, 3156), False, 'from faz import main, parser\n'), ((3203, 3221), 'glob.glob', 'glob.glob', (['"""file*"""'], {}), "('file*')\n", (3212, 3221), False, 'import glob\n'), ((3424, 3447), 'faz.main.main', 'main.main', ([], {'arguments': '[]'}), '(arguments=[])\n', (3433, 3447), False, 'from faz import main, parser\n'), ((3494, 3512), 'glob.glob', 'glob.glob', (['"""file*"""'], {}), "('file*')\n", (3503, 3512), False, 'import glob\n'), ((3551, 3571), 'os.unlink', 'os.unlink', (['"""fazfile"""'], {}), "('fazfile')\n", (3560, 3571), False, 'import os\n'), ((3730, 3771), 'faz.main.main', 'main.main', ([], {'arguments': "['nonexistent_file']"}), "(arguments=['nonexistent_file'])\n", (3739, 3771), False, 'from faz import main, parser\n'), ((3987, 4014), 'faz.main.main', 'main.main', ([], {'arguments': "['-d']"}), "(arguments=['-d'])\n", (3996, 4014), False, 'from faz import main, parser\n'), ((4061, 4079), 'glob.glob', 'glob.glob', (['"""file*"""'], {}), "('file*')\n", (4070, 4079), False, 'import glob\n'), ((4118, 4138), 'os.unlink', 'os.unlink', (['"""fazfile"""'], {}), "('fazfile')\n", (4127, 4138), False, 'import os\n'), ((4288, 4298), 'faz.main.faz', 'main.faz', ([], {}), '()\n', (4296, 4298), False, 'from faz import main, parser\n'), ((4457, 4472), 'faz.main.faz', 'main.faz', (['FILE2'], {}), '(FILE2)\n', (4465, 4472), False, 'from faz import main, parser\n'), ((4519, 4537), 'glob.glob', 'glob.glob', (['"""file*"""'], {}), "('file*')\n", (4528, 4537), False, 'import glob\n'), ((4656, 4678), 'glob.glob', 'glob.glob', (['""".faz/tmp*"""'], {}), "('.faz/tmp*')\n", (4665, 4678), False, 'import glob\n'), ((4717, 4733), 'os.rmdir', 'os.rmdir', (['""".faz"""'], {}), "('.faz')\n", (4725, 4733), False, 'import os\n'), ((4851, 4866), 'faz.main.faz', 'main.faz', (['FILE1'], {}), '(FILE1)\n', (4859, 4866), False, 'from faz import main, parser\n'), ((4900, 4917), 'os.unlink', 'os.unlink', (['""".faz"""'], {}), "('.faz')\n", (4909, 4917), False, 'import os\n'), ((5071, 5086), 'faz.main.faz', 'main.faz', (['FILE3'], {}), '(FILE3)\n', (5079, 5086), False, 'from faz import main, parser\n'), ((5248, 5263), 'faz.main.faz', 'main.faz', (['FILE4'], {}), '(FILE4)\n', (5256, 5263), False, 'from faz import main, parser\n'), ((5434, 5449), 'faz.main.faz', 'main.faz', (['FILE5'], {}), '(FILE5)\n', (5442, 5449), False, 'from faz import main, parser\n'), ((5609, 5624), 'faz.main.faz', 'main.faz', (['FILE6'], {}), '(FILE6)\n', (5617, 5624), False, 'from faz import main, parser\n'), ((5784, 5814), 'faz.parser.parse_input_file', 'parser.parse_input_file', (['FILE1'], {}), '(FILE1)\n', (5807, 5814), False, 'from faz import main, parser\n'), ((6364, 6379), 'faz.main.faz', 'main.faz', (['FILE8'], {}), '(FILE8)\n', (6372, 6379), False, 'from faz import main, parser\n'), ((6426, 6444), 'glob.glob', 'glob.glob', (['"""file*"""'], {}), "('file*')\n", (6435, 6444), False, 'import glob\n'), ((6483, 6500), 'os.unlink', 'os.unlink', (['"""asdf"""'], {}), "('asdf')\n", (6492, 6500), False, 'import os\n'), ((7676, 7869), 'faz.task.Task', 'Task', (["['file[0-3]', 'file_*']", "['file[4-6]']", "['touch file4\\n', 'touch file5\\n', 'touch file6\\n', 'echo $[test_var]\\n',\n 'echo $test_var\\n']", "['force']", "{'test_var': 'test_var_value'}"], {}), '([\'file[0-3]\', \'file_*\'], [\'file[4-6]\'], [\'touch file4\\n\',\n 
\'touch file5\\n\', \'touch file6\\n\', """echo $[test_var]\n""",\n \'echo $test_var\\n\'], [\'force\'], {\'test_var\': \'test_var_value\'})\n', (7680, 7869), False, 'from faz.task import Task, TaskFailedException\n'), ((8236, 8392), 'faz.task.Task', 'Task', (["['file[0-3]', 'file_*']", "['file99', 'file234']", "['touch file4\\n', 'touch file5\\n', 'touch file6\\n']", "['force']", "{'test_var': 'test_var_value'}"], {}), "(['file[0-3]', 'file_*'], ['file99', 'file234'], ['touch file4\\n',\n 'touch file5\\n', 'touch file6\\n'], ['force'], {'test_var':\n 'test_var_value'})\n", (8240, 8392), False, 'from faz.task import Task, TaskFailedException\n'), ((8594, 8779), 'faz.task.Task', 'Task', (["['file[0-3]', 'file_*']", "['file99', 'file234']", "['touch file4\\n', 'touch file5\\n', 'touch file6\\n', 'ls non_existant_dir\\n']", "['force']", "{'test_var': 'test_var_value'}"], {}), '([\'file[0-3]\', \'file_*\'], [\'file99\', \'file234\'], [\'touch file4\\n\',\n \'touch file5\\n\', \'touch file6\\n\', """ls non_existant_dir\n"""], [\'force\'\n ], {\'test_var\': \'test_var_value\'})\n', (8598, 8779), False, 'from faz.task import Task, TaskFailedException\n'), ((9076, 9091), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (9086, 9091), False, 'import time\n'), ((9238, 9333), 'faz.task.Task', 'Task', (["['file22']", "['file33']", "['touch file33\\n']", "['force']", "{'test_var': 'test_var_value'}"], {}), "(['file22'], ['file33'], ['touch file33\\n'], ['force'], {'test_var':\n 'test_var_value'})\n", (9242, 9333), False, 'from faz.task import Task, TaskFailedException\n'), ((9839, 9858), 'os.unlink', 'os.unlink', (['"""file22"""'], {}), "('file22')\n", (9848, 9858), False, 'import os\n'), ((9867, 9886), 'os.unlink', 'os.unlink', (['"""file33"""'], {}), "('file33')\n", (9876, 9886), False, 'import os\n'), ((10719, 10734), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (10729, 10734), False, 'import time\n'), ((11363, 11378), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (11373, 11378), False, 'import time\n'), ((12269, 12285), 'faz.main.faz', 'main.faz', (['FILE10'], {}), '(FILE10)\n', (12277, 12285), False, 'from faz import main, parser\n'), ((12636, 12652), 'faz.main.faz', 'main.faz', (['FILE11'], {}), '(FILE11)\n', (12644, 12652), False, 'from faz import main, parser\n'), ((12963, 12979), 'os.rmdir', 'os.rmdir', (['"""data"""'], {}), "('data')\n", (12971, 12979), False, 'import os\n'), ((3235, 3251), 'os.unlink', 'os.unlink', (['fname'], {}), '(fname)\n', (3244, 3251), False, 'import os\n'), ((3526, 3542), 'os.unlink', 'os.unlink', (['fname'], {}), '(fname)\n', (3535, 3542), False, 'import os\n'), ((4093, 4109), 'os.unlink', 'os.unlink', (['fname'], {}), '(fname)\n', (4102, 4109), False, 'import os\n'), ((4551, 4567), 'os.unlink', 'os.unlink', (['fname'], {}), '(fname)\n', (4560, 4567), False, 'import os\n'), ((4692, 4708), 'os.remove', 'os.remove', (['fname'], {}), '(fname)\n', (4701, 4708), False, 'import os\n'), ((6458, 6474), 'os.unlink', 'os.unlink', (['fname'], {}), '(fname)\n', (6467, 6474), False, 'import os\n'), ((6700, 6715), 'faz.main.faz', 'main.faz', (['FILE9'], {}), '(FILE9)\n', (6708, 6715), False, 'from faz import main, parser\n'), ((11053, 11072), 'os.unlink', 'os.unlink', (['filename'], {}), '(filename)\n', (11062, 11072), False, 'import os\n'), ((11126, 11145), 'os.unlink', 'os.unlink', (['filename'], {}), '(filename)\n', (11135, 11145), False, 'import os\n'), ((11698, 11717), 'os.unlink', 'os.unlink', (['filename'], {}), '(filename)\n', (11707, 11717), 
False, 'import os\n'), ((11771, 11790), 'os.unlink', 'os.unlink', (['filename'], {}), '(filename)\n', (11780, 11790), False, 'import os\n'), ((11912, 11931), 'os.unlink', 'os.unlink', (['filename'], {}), '(filename)\n', (11921, 11931), False, 'import os\n'), ((12310, 12333), 'os.path.isfile', 'os.path.isfile', (['"""file3"""'], {}), "('file3')\n", (12324, 12333), False, 'import os\n'), ((12359, 12382), 'os.path.isfile', 'os.path.isfile', (['"""file4"""'], {}), "('file4')\n", (12373, 12382), False, 'import os\n'), ((12506, 12522), 'os.unlink', 'os.unlink', (['fname'], {}), '(fname)\n', (12515, 12522), False, 'import os\n'), ((12741, 12764), 'os.path.isfile', 'os.path.isfile', (['"""file3"""'], {}), "('file3')\n", (12755, 12764), False, 'import os\n'), ((12790, 12813), 'os.path.isfile', 'os.path.isfile', (['"""file4"""'], {}), "('file4')\n", (12804, 12813), False, 'import os\n'), ((7456, 7480), 'os.path.exists', 'os.path.exists', (['filename'], {}), '(filename)\n', (7470, 7480), False, 'import os\n'), ((7485, 7509), 'os.path.isfile', 'os.path.isfile', (['filename'], {}), '(filename)\n', (7499, 7509), False, 'import os\n'), ((7527, 7546), 'os.unlink', 'os.unlink', (['filename'], {}), '(filename)\n', (7536, 7546), False, 'import os\n'), ((9166, 9192), 'os.path.getmtime', 'os.path.getmtime', (['"""file33"""'], {}), "('file33')\n", (9182, 9192), False, 'import os\n'), ((9195, 9221), 'os.path.getmtime', 'os.path.getmtime', (['"""file22"""'], {}), "('file22')\n", (9211, 9221), False, 'import os\n'), ((12691, 12714), 'os.path.abspath', 'os.path.abspath', (['"""data"""'], {}), "('data')\n", (12706, 12714), False, 'import os\n'), ((12931, 12953), 'os.path.abspath', 'os.path.abspath', (['fname'], {}), '(fname)\n', (12946, 12953), False, 'import os\n')]
#!/usr/bin/env python
# -*- encoding: utf-8

import sys

from commands import git, sbt
from git_utils import (
    get_changed_paths,
    remote_default_branch,
    local_current_head,
    get_sha1_for_tag,
    remote_default_head,
)
from provider import current_branch, is_default_branch, repo
from release import check_release_file, parse_release_file, has_release, latest_version


def autoformat():
    local_head = local_current_head()

    if is_default_branch():
        latest_sha = get_sha1_for_tag(latest_version())
        commit_range = f"{latest_sha}..{local_head}"
    else:
        remote_head = remote_default_head()
        commit_range = f"{remote_head}..{local_head}"

    print(f"Working in branch: {current_branch()}")
    print(f"On default branch: {is_default_branch()}")
    print(f"Commit range: {commit_range}")

    sbt('scalafmt')
    check_release_file(commit_range)

    # If there are any changes, push to GitHub immediately and fail the
    # build.  This will abort the remaining jobs, and trigger a new build
    # with the reformatted code.
    if get_changed_paths():
        print("*** There were changes from formatting, creating a commit")

        git("config", "user.name", "Buildkite on behalf of Wellcome Collection")
        git("config", "user.email", "<EMAIL>")
        git("remote", "add", "ssh-origin", repo(), exit_on_error=False)

        # We checkout the branch before we add the commit, so we don't
        # include the merge commit that Buildkite makes.
        git("fetch", "ssh-origin")
        git("checkout", "--track", f"ssh-origin/{current_branch()}")
        git("add", "--verbose", "--update")
        git("commit", "-m", "Apply auto-formatting rules")
        git("push", "ssh-origin", f"HEAD:{current_branch()}")

        # We exit here to fail the build, so Buildkite will skip to the next
        # build, which includes the autoformat commit.
        sys.exit(1)
    else:
        print("*** There were no changes from auto-formatting")


if __name__ == '__main__':
    autoformat()
[ "commands.git", "provider.is_default_branch", "git_utils.local_current_head", "release.latest_version", "commands.sbt", "provider.current_branch", "git_utils.remote_default_head", "sys.exit", "git_utils.get_changed_paths", "provider.repo", "release.check_release_file" ]
[((420, 440), 'git_utils.local_current_head', 'local_current_head', ([], {}), '()\n', (438, 440), False, 'from git_utils import get_changed_paths, remote_default_branch, local_current_head, get_sha1_for_tag, remote_default_head\n'), ((449, 468), 'provider.is_default_branch', 'is_default_branch', ([], {}), '()\n', (466, 468), False, 'from provider import current_branch, is_default_branch, repo\n'), ((843, 858), 'commands.sbt', 'sbt', (['"""scalafmt"""'], {}), "('scalafmt')\n", (846, 858), False, 'from commands import git, sbt\n'), ((864, 896), 'release.check_release_file', 'check_release_file', (['commit_range'], {}), '(commit_range)\n', (882, 896), False, 'from release import check_release_file, parse_release_file, has_release, latest_version\n'), ((1084, 1103), 'git_utils.get_changed_paths', 'get_changed_paths', ([], {}), '()\n', (1101, 1103), False, 'from git_utils import get_changed_paths, remote_default_branch, local_current_head, get_sha1_for_tag, remote_default_head\n'), ((611, 632), 'git_utils.remote_default_head', 'remote_default_head', ([], {}), '()\n', (630, 632), False, 'from git_utils import get_changed_paths, remote_default_branch, local_current_head, get_sha1_for_tag, remote_default_head\n'), ((1189, 1261), 'commands.git', 'git', (['"""config"""', '"""user.name"""', '"""Buildkite on behalf of Wellcome Collection"""'], {}), "('config', 'user.name', 'Buildkite on behalf of Wellcome Collection')\n", (1192, 1261), False, 'from commands import git, sbt\n'), ((1270, 1308), 'commands.git', 'git', (['"""config"""', '"""user.email"""', '"""<EMAIL>"""'], {}), "('config', 'user.email', '<EMAIL>')\n", (1273, 1308), False, 'from commands import git, sbt\n'), ((1518, 1544), 'commands.git', 'git', (['"""fetch"""', '"""ssh-origin"""'], {}), "('fetch', 'ssh-origin')\n", (1521, 1544), False, 'from commands import git, sbt\n'), ((1623, 1658), 'commands.git', 'git', (['"""add"""', '"""--verbose"""', '"""--update"""'], {}), "('add', '--verbose', '--update')\n", (1626, 1658), False, 'from commands import git, sbt\n'), ((1667, 1717), 'commands.git', 'git', (['"""commit"""', '"""-m"""', '"""Apply auto-formatting rules"""'], {}), "('commit', '-m', 'Apply auto-formatting rules')\n", (1670, 1717), False, 'from commands import git, sbt\n'), ((1921, 1932), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1929, 1932), False, 'import sys\n'), ((508, 524), 'release.latest_version', 'latest_version', ([], {}), '()\n', (522, 524), False, 'from release import check_release_file, parse_release_file, has_release, latest_version\n'), ((1352, 1358), 'provider.repo', 'repo', ([], {}), '()\n', (1356, 1358), False, 'from provider import current_branch, is_default_branch, repo\n'), ((720, 736), 'provider.current_branch', 'current_branch', ([], {}), '()\n', (734, 736), False, 'from provider import current_branch, is_default_branch, repo\n'), ((772, 791), 'provider.is_default_branch', 'is_default_branch', ([], {}), '()\n', (789, 791), False, 'from provider import current_branch, is_default_branch, repo\n'), ((1594, 1610), 'provider.current_branch', 'current_branch', ([], {}), '()\n', (1608, 1610), False, 'from provider import current_branch, is_default_branch, repo\n'), ((1760, 1776), 'provider.current_branch', 'current_branch', ([], {}), '()\n', (1774, 1776), False, 'from provider import current_branch, is_default_branch, repo\n')]
import os
from unittest import main, mock, TestCase

from panamah_sdk.nfe import Nfe
from panamah_sdk.models.definitions import PanamahVenda, PanamahProduto


class TestNfe(TestCase):
    def test_parsing_file(self):
        models = Nfe.read_models_from_file(
            os.path.join(os.path.dirname(__file__), 'fixtures/NFe13190507128945000132650340000000111000000099.xml')
        )
        self.assertEqual(len(models), 9)
        produtoIndex = 0
        for model in models:
            if isinstance(model, PanamahVenda):
                self.assertEqual(len(model.itens), 6)
            if isinstance(model, PanamahProduto):
                if produtoIndex == 0:
                    self.assertEqual(len(model.eans), 1)
                    self.assertEqual(model.eans[0].id, '00854011370054')
                if produtoIndex == 1:
                    self.assertIsNone(model.eans)
                produtoIndex += 1

    def test_parsing_directory(self):
        models = Nfe.read_models_from_directory(
            os.path.join(os.path.dirname(__file__), 'fixtures')
        )
        self.assertEqual(len(models), 13)


if __name__ == '__main__':
    main()
[ "unittest.main", "os.path.dirname" ]
[((1187, 1193), 'unittest.main', 'main', ([], {}), '()\n', (1191, 1193), False, 'from unittest import main, mock, TestCase\n'), ((285, 310), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (300, 310), False, 'import os\n'), ((1062, 1087), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1077, 1087), False, 'import os\n')]
from os import listdir
from os.path import isfile, join
import argparse
import sys
import csv


# read CL arguments
def parse_args() -> dict:
    parser = argparse.ArgumentParser()
    parser.add_argument("-rp", help="bin directory path", required=True, \
        dest="bin_dir", metavar="")
    parser.add_argument("-wp", help="bin directory path", required=True, \
        dest="ds_write_path", metavar="")
    #parser.add_argument("-sz", help="number of basic blocks written to each file", required=True, \
    #    dest="bb_per_file", metavar="")
    args = vars(parser.parse_args())
    return args


# generator over single-level directory containing binaries
def get_bin_path(dir_path: str) -> str:
    for f in listdir(dir_path):
        yield join(dir_path, f)


# write one instruction per line, with empty row delimiter between basic blocks
def write_uncat_asm(ds_insns: list, write_path: str):
    with open(write_path, 'w') as fh:
        writer = csv.writer(fh, delimiter=',')
        for bb in ds_insns:
            for insn in bb:
                writer.writerow(insn)
            writer.writerow([])


# write two instructions per line, tab-seperated, in plain text format
# -last line will not be written, since insn pairs are required
def write_concat_asm(ds_insns: list, write_path: str):
    with open(write_path, 'w') as fh:
        for bb in ds_insns:
            bb_sz = len(bb)
            if bb_sz < 2:
                raise RuntimeError('basic block with fewer than minimum # of insns encountered at write')
            for insn_idx, insn in enumerate(bb):
                if (insn_idx == bb_sz-1):
                    break
                concat_insns = '\t'.join([' '.join(insn), ' '.join(bb[insn_idx+1])]) + '\n'
                fh.write(concat_insns)
[ "csv.writer", "os.listdir", "os.path.join", "argparse.ArgumentParser" ]
[((154, 179), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (177, 179), False, 'import argparse\n'), ((768, 785), 'os.listdir', 'listdir', (['dir_path'], {}), '(dir_path)\n', (775, 785), False, 'from os import listdir\n'), ((1018, 1047), 'csv.writer', 'csv.writer', (['fh'], {'delimiter': '""","""'}), "(fh, delimiter=',')\n", (1028, 1047), False, 'import csv\n'), ((810, 827), 'os.path.join', 'join', (['dir_path', 'f'], {}), '(dir_path, f)\n', (814, 827), False, 'from os.path import isfile, join\n')]
from sokoban import game
from HeuristicFunctionModeEnum import HeuristicFunctionMode
from a_star import A_star
from bfs import BFS
from dfs import DFS
from dls import DLS
from ids import IDS
from node import Node
from sokoban import Mode
from ucs import UCS
import arrow

"""
single level single algorithm time analysis
"""

level = 5
algorithm = 2
print(" level:", level, " algorithm:", algorithm)
game_obj = game('levels', level)
n = Node(None, None, 0, game_obj)
mode = Mode(algorithm)
time1 = arrow.utcnow()
if mode == Mode.BFS:
    bfs = BFS()
    actions, nodes = bfs.search(n)
    #print([action.movement for action in actions])
elif mode == Mode.DFS:
    dfs = DFS()
    actions, nodes = dfs.search(n)
    #print([action.movement for action in actions])
elif mode == Mode.UCS:
    ucs = UCS()
    actions, nodes = ucs.search(n)
    #print([action.movement for action in actions])
elif mode == Mode.A_STAR:
    a_star = A_star()
    actions, nodes = a_star.search(n, heuristic_function=HeuristicFunctionMode.MANHATTAN)
elif mode == Mode.DLS:
    dls = DLS(limit=5)
    actions, nodes = dls.search(n)
elif mode == Mode.IDS:
    ids = IDS(3)
    actions, nodes, depth = ids.search(n)
time2 = arrow.utcnow()
elapsed_time = (time2 - time1).total_seconds()
print(elapsed_time)
[ "sokoban.game", "ids.IDS", "arrow.utcnow", "ucs.UCS", "a_star.A_star", "bfs.BFS", "dls.DLS", "dfs.DFS", "sokoban.Mode", "node.Node" ]
[((411, 432), 'sokoban.game', 'game', (['"""levels"""', 'level'], {}), "('levels', level)\n", (415, 432), False, 'from sokoban import game\n'), ((437, 466), 'node.Node', 'Node', (['None', 'None', '(0)', 'game_obj'], {}), '(None, None, 0, game_obj)\n', (441, 466), False, 'from node import Node\n'), ((474, 489), 'sokoban.Mode', 'Mode', (['algorithm'], {}), '(algorithm)\n', (478, 489), False, 'from sokoban import Mode\n'), ((498, 512), 'arrow.utcnow', 'arrow.utcnow', ([], {}), '()\n', (510, 512), False, 'import arrow\n'), ((1201, 1215), 'arrow.utcnow', 'arrow.utcnow', ([], {}), '()\n', (1213, 1215), False, 'import arrow\n'), ((544, 549), 'bfs.BFS', 'BFS', ([], {}), '()\n', (547, 549), False, 'from bfs import BFS\n'), ((670, 675), 'dfs.DFS', 'DFS', ([], {}), '()\n', (673, 675), False, 'from dfs import DFS\n'), ((796, 801), 'ucs.UCS', 'UCS', ([], {}), '()\n', (799, 801), False, 'from ucs import UCS\n'), ((929, 937), 'a_star.A_star', 'A_star', ([], {}), '()\n', (935, 937), False, 'from a_star import A_star\n'), ((1062, 1074), 'dls.DLS', 'DLS', ([], {'limit': '(5)'}), '(limit=5)\n', (1065, 1074), False, 'from dls import DLS\n'), ((1143, 1149), 'ids.IDS', 'IDS', (['(3)'], {}), '(3)\n', (1146, 1149), False, 'from ids import IDS\n')]
from server.orm.command import Command
from server.service.command.delete.schema import DeleteCommandProcessorSchema
from server.service.slack.message import Message, MessageStatus, MessageVisibility
from server.service.validator.decorator import validate_schema


@validate_schema(DeleteCommandProcessorSchema)
def delete_command_processor(
    *,
    channel_id: str,
    command_to_delete: str,
) -> dict[str, any]:
    command = Command.find_one_by_name_and_chanel(command_to_delete, channel_id)
    Command.delete_command(command)

    message_content = f"Command {command_to_delete} successfully deleted."
    return {
        "message": Message(
            content=message_content,
            status=MessageStatus.INFO,
            visibility=MessageVisibility.NORMAL,
        )
    }
[ "server.service.slack.message.Message", "server.service.validator.decorator.validate_schema", "server.orm.command.Command.find_one_by_name_and_chanel", "server.orm.command.Command.delete_command" ]
[((266, 311), 'server.service.validator.decorator.validate_schema', 'validate_schema', (['DeleteCommandProcessorSchema'], {}), '(DeleteCommandProcessorSchema)\n', (281, 311), False, 'from server.service.validator.decorator import validate_schema\n'), ((433, 499), 'server.orm.command.Command.find_one_by_name_and_chanel', 'Command.find_one_by_name_and_chanel', (['command_to_delete', 'channel_id'], {}), '(command_to_delete, channel_id)\n', (468, 499), False, 'from server.orm.command import Command\n'), ((504, 535), 'server.orm.command.Command.delete_command', 'Command.delete_command', (['command'], {}), '(command)\n', (526, 535), False, 'from server.orm.command import Command\n'), ((645, 746), 'server.service.slack.message.Message', 'Message', ([], {'content': 'message_content', 'status': 'MessageStatus.INFO', 'visibility': 'MessageVisibility.NORMAL'}), '(content=message_content, status=MessageStatus.INFO, visibility=\n MessageVisibility.NORMAL)\n', (652, 746), False, 'from server.service.slack.message import Message, MessageStatus, MessageVisibility\n')]
#!/usr/bin/python
# encoding: utf-8

import sys

from workflow import Workflow, ICON_WEB, web

ICON_DEFAULT = 'icon.png'


def search(query):
    # search the ganks from gank.io
    url = 'http://gankio.herokuapp.com/search'
    # url = 'http://ganhuo.herokuapp.com/search'
    params = dict(keyword=query)
    r = web.post(url, params)

    # throw an error if request failed, Workflow will catch this and show it to the user
    r.raise_for_status()
    return r.json()


def main(wf):
    # The Workflow instance will be passed to the function
    # you call from `Workflow.run`. Not so useful, as
    # the `wf` object created in `if __name__ ...` below is global.
    #
    # Get query from Alfred
    query = wf.args[0]

    # Search ganks or load from cached data, 10 mins
    def wrapper():
        return search(query)

    ganks = wf.cached_data(query, wrapper, max_age=600)

    # Parse the JSON returned by pinboard and extract the ganks
    for gank in ganks:
        wf.add_item(title=gank['title'],
                    subtitle=gank['source'],
                    arg=gank['url'],
                    valid=True,
                    icon=ICON_DEFAULT)

    # Send output to Alfred. You can only call this once.
    # Well, you *can* call it multiple times, but Alfred won't be listening
    # any more...
    wf.send_feedback()


if __name__ == '__main__':
    # Create a global `Workflow` object
    wf = Workflow()
    wf = Workflow(update_settings={
        'github_slug': 'hujiaweibujidao/Gank-Alfred-Workflow',
        'frequency': 7
    })
    # Call your entry function via `Workflow.run()` to enable its helper
    # functions, like exception catching, ARGV normalization, magic
    # arguments etc.
    sys.exit(wf.run(main))
    if wf.update_available:
        wf.start_update()
[ "workflow.Workflow", "workflow.web.post" ]
[((315, 336), 'workflow.web.post', 'web.post', (['url', 'params'], {}), '(url, params)\n', (323, 336), False, 'from workflow import Workflow, ICON_WEB, web\n'), ((1435, 1445), 'workflow.Workflow', 'Workflow', ([], {}), '()\n', (1443, 1445), False, 'from workflow import Workflow, ICON_WEB, web\n'), ((1456, 1557), 'workflow.Workflow', 'Workflow', ([], {'update_settings': "{'github_slug': 'hujiaweibujidao/Gank-Alfred-Workflow', 'frequency': 7}"}), "(update_settings={'github_slug':\n 'hujiaweibujidao/Gank-Alfred-Workflow', 'frequency': 7})\n", (1464, 1557), False, 'from workflow import Workflow, ICON_WEB, web\n')]
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: <NAME>
# License: MIT

import sys

from dolfin import MPI
from gyptis import Geometry, dolfin, pi

comm = MPI.comm_world
rank = MPI.rank(comm)

dolfin.parameters["ghost_mode"] = "shared_vertex"
dolfin.parameters["ghost_mode"] = "shared_facet"

square_size = 1
cyl_size = 0.2
mesh_size = cyl_size / 50

# data_dir = None
data_dir = "/tmp"


def make_geo():
    model = Geometry(
        "Square", dim=2, data_dir=data_dir, options={"General.Verbosity": 4}
    )
    box = model.add_rectangle(
        -square_size / 2, -square_size / 2, 0, square_size, square_size
    )
    # cyl = model.add_rectangle(-cyl_size / 2, -cyl_size / 2, 0, cyl_size, cyl_size)
    cyl = model.add_circle(0, 0, 0, cyl_size)
    cyl, box = model.fragment(cyl, box)
    model.add_physical(box, "box")
    model.add_physical(cyl, "cyl")
    outer_bnds = model.get_boundaries("box")[:-1]
    cyl_bnds = model.get_boundaries("cyl")
    model.add_physical(outer_bnds, "outer_bnds", dim=1)
    model.add_physical(cyl_bnds, "cyl_bnds", dim=1)
    model.set_size("box", 1 * mesh_size)
    model.set_size("cyl", 1 * mesh_size)
    model.set_size("cyl_bnds", 1 * mesh_size, dim=1)
    return model


import time

model = make_geo()

mpi = bool(int(sys.argv[1]))
print("MPI: ", mpi)

if mpi:
    if rank == 0:
        print("meshing")
        sys.stdout.flush()
        model.build(
            interactive=False,
            generate_mesh=True,
            write_mesh=True,
            read_info=False,
            read_mesh=False,
            finalize=True,
            check_subdomains=True,
        )
        data = model.mesh_object
        # model = 3
    else:
        data = None
    # data = comm.bcast(data, root=0)
else:
    model = make_geo()
    model.build()
    data = model.mesh_object

print(data)
data = model.read_mesh_file()
print(data)

# dx = model.measure["dx"]
# ds = model.measure["ds"]
# dS = model.measure["dS"]
#
#
# if rank == 0:
#     print('Process {} computing:'.format(rank))
#     sys.stdout.flush()
#
#     model.build()
#     time.sleep(2)
#     print('Process {} done computing:'.format(rank))
#     sys.stdout.flush()
#     data = {'a': 7, 'b': 3.14}
#     comm.send(data, tag=11)
#     print('Process {} sent data:'.format(rank), data)
#     sys.stdout.flush()
# else:
#     print('Process {} waiting:'.format(rank))
#     sys.stdout.flush()
#     data = comm.recv(source=0, tag=11)
#     print('Process {} received data:'.format(rank), data)
#     sys.stdout.flush()
#
# size = 2
#
# if rank == 0:
#     data = {'x': 1, 'y': 2.0}
#     for i in range(1, size):
#         req = comm.isend(data, dest=i, tag=i)
#         req.wait()
#         print('Process {} sent data:'.format(rank), data)
#
# else:
#     req = comm.irecv(source=0, tag=rank)
#     data = req.wait()
#     print('Process {} received data:'.format(rank), data)
#
# mpi = False
# if mpi:
#     if rank ==0:
#         model.build()
#         req = comm.isend(data, dest=1, tag=11)
#         req.wait()
#     else:
#         model.build(
#             generate_mesh=False,
#             write_mesh=False,
#         )
# else:
#     model.build()
#
# dx = model.measure["dx"]
# ds = model.measure["ds"]
# dS = model.measure["dS"]
[ "dolfin.MPI.rank", "sys.stdout.flush", "gyptis.Geometry" ]
[((186, 200), 'dolfin.MPI.rank', 'MPI.rank', (['comm'], {}), '(comm)\n', (194, 200), False, 'from dolfin import MPI\n'), ((427, 505), 'gyptis.Geometry', 'Geometry', (['"""Square"""'], {'dim': '(2)', 'data_dir': 'data_dir', 'options': "{'General.Verbosity': 4}"}), "('Square', dim=2, data_dir=data_dir, options={'General.Verbosity': 4})\n", (435, 505), False, 'from gyptis import Geometry, dolfin, pi\n'), ((1371, 1389), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1387, 1389), False, 'import sys\n')]
import os
from typing import Union

from .AnaplanConnection import AnaplanConnection
from .FileUpload import FileUpload
from .StreamUpload import StreamUpload


class UploadFactory:
    _is_file: bool

    def __init__(self, data: str):
        self._is_file = os.path.isfile(data)

    def get_uploader(self, conn: AnaplanConnection, file_id: str) -> Union[FileUpload, StreamUpload]:
        if self._is_file:
            return FileUpload(conn, file_id)
        else:
            return StreamUpload(conn, file_id)
[ "os.path.isfile" ]
[((248, 268), 'os.path.isfile', 'os.path.isfile', (['data'], {}), '(data)\n', (262, 268), False, 'import os\n')]
from hat.util import aio
from hat.gateway import common


device_type = 'mock'


async def create(conf, client, event_type_prefix):
    device = MockDevice()
    device._async_group = aio.Group()
    device._client = client
    device._event_type_prefix = event_type_prefix
    return device


class MockDevice(common.Device):

    @property
    def closed(self):
        return self._async_group.closed

    async def async_close(self):
        await self._async_group.async_close()

    @property
    def client(self):
        return self._client

    @property
    def event_type_prefix(self):
        return self._event_type_prefix
[ "hat.util.aio.Group" ]
[((184, 195), 'hat.util.aio.Group', 'aio.Group', ([], {}), '()\n', (193, 195), False, 'from hat.util import aio\n')]
# _*_ coding: utf-8 _*_

"""
Geodesy calculation.
"""

import numpy as np


def haversine_np(lon1, lat1, lon2, lat2):
    """
    Calculate the great circle distance between two points
    on the earth (specified in decimal degrees)

    All args must be of equal length.

    :param lon1: point 1 longitudes.
    :param lat1: point 1 latitudes.
    :param lon2: point 2 longitudes.
    :param lat2: point 2 latitudes.
    :return: great circle distance in meters.
    """
    lon1, lat1, lon2, lat2 = map(np.radians, [lon1, lat1, lon2, lat2])

    dlon = lon2 - lon1
    dlat = lat2 - lat1

    a = np.sin(dlat/2.0)**2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon/2.0)**2

    c = 2 * np.arcsin(np.sqrt(a))
    return 6371.e3 * c


def area_weighted_mean(lon, lat, data):
    """Calculate the mean of gridded data on a sphere.

    Data points on the Earth's surface are often represented as a grid. As the
    grid cells do not have a constant area they have to be weighted when
    calculating statistical properties (e.g. mean).

    This function returns the weighted mean assuming a perfectly spherical globe.

    https://github.com/atmtools/typhon/blob/master/typhon/geographical.py

    Parameters:
        lon (ndarray): Longitude (M) angles [degree].
        lat (ndarray): Latitude (N) angles [degree].
        data (ndarray): Data array (N x M).

    Returns:
        float: Area weighted mean.
    """
    # Calculate coordinates and steradian (in rad).
    lon = np.deg2rad(lon)
    lat = np.deg2rad(lat)
    dlon = np.diff(lon)
    dlat = np.diff(lat)

    # Longitudal mean
    middle_points = (data[:, 1:] + data[:, :-1]) / 2
    norm = np.sum(dlon)
    lon_integral = np.sum(middle_points * dlon, axis=1) / norm

    # Latitudal mean
    lon_integral *= np.cos(lat)

    # Consider varying grid area (N-S).
    middle_points = (lon_integral[1:] + lon_integral[:-1]) / 2
    norm = np.sum(np.cos((lat[1:] + lat[:-1]) / 2) * dlat)

    return np.sum(middle_points * dlat) / norm
[ "numpy.sqrt", "numpy.diff", "numpy.sum", "numpy.deg2rad", "numpy.cos", "numpy.sin" ]
[((1478, 1493), 'numpy.deg2rad', 'np.deg2rad', (['lon'], {}), '(lon)\n', (1488, 1493), True, 'import numpy as np\n'), ((1504, 1519), 'numpy.deg2rad', 'np.deg2rad', (['lat'], {}), '(lat)\n', (1514, 1519), True, 'import numpy as np\n'), ((1531, 1543), 'numpy.diff', 'np.diff', (['lon'], {}), '(lon)\n', (1538, 1543), True, 'import numpy as np\n'), ((1555, 1567), 'numpy.diff', 'np.diff', (['lat'], {}), '(lat)\n', (1562, 1567), True, 'import numpy as np\n'), ((1655, 1667), 'numpy.sum', 'np.sum', (['dlon'], {}), '(dlon)\n', (1661, 1667), True, 'import numpy as np\n'), ((1773, 1784), 'numpy.cos', 'np.cos', (['lat'], {}), '(lat)\n', (1779, 1784), True, 'import numpy as np\n'), ((1687, 1723), 'numpy.sum', 'np.sum', (['(middle_points * dlon)'], {'axis': '(1)'}), '(middle_points * dlon, axis=1)\n', (1693, 1723), True, 'import numpy as np\n'), ((1956, 1984), 'numpy.sum', 'np.sum', (['(middle_points * dlat)'], {}), '(middle_points * dlat)\n', (1962, 1984), True, 'import numpy as np\n'), ((600, 618), 'numpy.sin', 'np.sin', (['(dlat / 2.0)'], {}), '(dlat / 2.0)\n', (606, 618), True, 'import numpy as np\n'), ((695, 705), 'numpy.sqrt', 'np.sqrt', (['a'], {}), '(a)\n', (702, 705), True, 'import numpy as np\n'), ((1903, 1935), 'numpy.cos', 'np.cos', (['((lat[1:] + lat[:-1]) / 2)'], {}), '((lat[1:] + lat[:-1]) / 2)\n', (1909, 1935), True, 'import numpy as np\n'), ((622, 634), 'numpy.cos', 'np.cos', (['lat1'], {}), '(lat1)\n', (628, 634), True, 'import numpy as np\n'), ((637, 649), 'numpy.cos', 'np.cos', (['lat2'], {}), '(lat2)\n', (643, 649), True, 'import numpy as np\n'), ((652, 670), 'numpy.sin', 'np.sin', (['(dlon / 2.0)'], {}), '(dlon / 2.0)\n', (658, 670), True, 'import numpy as np\n')]
# -*- coding: utf-8 -*-

"""
***************************************************************************
    __init__.py
    ---------------------
    Date                 : February 2016
    Copyright            : (C) 2016 by <NAME>
    Email                : jef at norbit dot de
***************************************************************************
*                                                                         *
*   This program is free software; you can redistribute it and/or modify  *
*   it under the terms of the GNU General Public License as published by  *
*   the Free Software Foundation; either version 2 of the License, or     *
*   (at your option) any later version.                                   *
*                                                                         *
***************************************************************************
"""

__author__ = '<NAME>'
__date__ = 'February 2016'
__copyright__ = '(C) 2016, <NAME>'

# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '176c06ceefb5f555205e72b20c962740cc0ec183'

import warnings

from PyQt5.uic.Compiler import indenter, compiler
from PyQt5.uic.objcreator import widgetPluginPath
from PyQt5.uic import properties, uiparser, Compiler
from PyQt5.uic import *

__PyQtLoadUiType = loadUiType


def __loadUiType(*args, **kwargs):
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", category=DeprecationWarning)
        return __PyQtLoadUiType(*args, **kwargs)


loadUiType = __loadUiType
[ "warnings.catch_warnings", "warnings.filterwarnings" ]
[((1375, 1400), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (1398, 1400), False, 'import warnings\n'), ((1410, 1472), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'DeprecationWarning'}), "('ignore', category=DeprecationWarning)\n", (1433, 1472), False, 'import warnings\n')]
from django.urls import path
from . import views

urlpatterns = [
    path('', views.ads_maker, name="ads-maker-index"),
    path('site/<int:pk>/', views.SiteDetail.as_view(), name='site-detail'),
    path('site/<int:site_pk>/sitemap/<int:pk>/', views.SitemapDetail.as_view(), name='sitemap-detail')
]
[ "django.urls.path" ]
[((72, 121), 'django.urls.path', 'path', (['""""""', 'views.ads_maker'], {'name': '"""ads-maker-index"""'}), "('', views.ads_maker, name='ads-maker-index')\n", (76, 121), False, 'from django.urls import path\n')]
from django.db import models
from django.db.models import Aggregate, Sum
from django.conf import settings
from datetime import datetime


class Profile(models.Model):
    id = models.AutoField(primary_key=True)
    user = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    phone_number = models.CharField(verbose_name="Numer telefonu", max_length=18)
    nip_number = models.CharField(
        verbose_name="Numer nip",
        max_length=13,
        null=True,
        blank=True,
    )
    business_name = models.CharField(
        verbose_name="Nazwa firmy",
        max_length=128,
        null=True,
        blank=True,
    )
    business_name_l = models.CharField(
        verbose_name="Nazwa firmy c.d.",
        max_length=128,
        null=True,
        blank=True,
    )
    company = models.BooleanField(verbose_name="Profil firmowy?", default=False)

    class Meta:
        ordering = ("user", )
        verbose_name_plural = "Profil użytkownika"

    def __str__(self):
        return "{}, {}, {}".format(self.user.username, self.user.first_name, self.user.last_name)


class Address(models.Model):
    id = models.AutoField(primary_key=True)
    user_id = models.OneToOneField(
        settings.AUTH_USER_MODEL,
        null=True,
        blank=True,
        verbose_name="Użytkownik",
        on_delete=models.CASCADE,
    )
    street = models.CharField(verbose_name="Ulica", max_length=128)
    house = models.CharField(verbose_name="Nr domu", max_length=8)
    door = models.CharField(verbose_name="Nr lokalu", null=True, blank=True, max_length=8)
    city = models.CharField(verbose_name="Miasto", max_length=64)
    post_code = models.CharField(verbose_name="Kod pocztowy", null=True, blank=True, max_length=6)

    class Meta:
        ordering = (
            "user_id",
            "-id",
        )
        verbose_name_plural = "Adresy"

    def __str__(self):
        return "{}, {}, {}".format(self.user_id, self.street, self.house)
[ "django.db.models.OneToOneField", "django.db.models.AutoField", "django.db.models.CharField", "django.db.models.BooleanField" ]
[((177, 211), 'django.db.models.AutoField', 'models.AutoField', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (193, 211), False, 'from django.db import models\n'), ((223, 295), 'django.db.models.OneToOneField', 'models.OneToOneField', (['settings.AUTH_USER_MODEL'], {'on_delete': 'models.CASCADE'}), '(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)\n', (243, 295), False, 'from django.db import models\n'), ((348, 410), 'django.db.models.CharField', 'models.CharField', ([], {'verbose_name': '"""Numer telefonu"""', 'max_length': '(18)'}), "(verbose_name='Numer telefonu', max_length=18)\n", (364, 410), False, 'from django.db import models\n'), ((465, 550), 'django.db.models.CharField', 'models.CharField', ([], {'verbose_name': '"""Numer nip"""', 'max_length': '(13)', 'null': '(True)', 'blank': '(True)'}), "(verbose_name='Numer nip', max_length=13, null=True, blank=True\n )\n", (481, 550), False, 'from django.db import models\n'), ((605, 692), 'django.db.models.CharField', 'models.CharField', ([], {'verbose_name': '"""Nazwa firmy"""', 'max_length': '(128)', 'null': '(True)', 'blank': '(True)'}), "(verbose_name='Nazwa firmy', max_length=128, null=True,\n blank=True)\n", (621, 692), False, 'from django.db import models\n'), ((750, 842), 'django.db.models.CharField', 'models.CharField', ([], {'verbose_name': '"""Nazwa firmy c.d."""', 'max_length': '(128)', 'null': '(True)', 'blank': '(True)'}), "(verbose_name='Nazwa firmy c.d.', max_length=128, null=True,\n blank=True)\n", (766, 842), False, 'from django.db import models\n'), ((892, 958), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'verbose_name': '"""Profil firmowy?"""', 'default': '(False)'}), "(verbose_name='Profil firmowy?', default=False)\n", (911, 958), False, 'from django.db import models\n'), ((1288, 1322), 'django.db.models.AutoField', 'models.AutoField', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (1304, 1322), False, 'from django.db import models\n'), ((1337, 1463), 'django.db.models.OneToOneField', 'models.OneToOneField', (['settings.AUTH_USER_MODEL'], {'null': '(True)', 'blank': '(True)', 'verbose_name': '"""Użytkownik"""', 'on_delete': 'models.CASCADE'}), "(settings.AUTH_USER_MODEL, null=True, blank=True,\n verbose_name='Użytkownik', on_delete=models.CASCADE)\n", (1357, 1463), False, 'from django.db import models\n'), ((1520, 1574), 'django.db.models.CharField', 'models.CharField', ([], {'verbose_name': '"""Ulica"""', 'max_length': '(128)'}), "(verbose_name='Ulica', max_length=128)\n", (1536, 1574), False, 'from django.db import models\n'), ((1587, 1641), 'django.db.models.CharField', 'models.CharField', ([], {'verbose_name': '"""Nr domu"""', 'max_length': '(8)'}), "(verbose_name='Nr domu', max_length=8)\n", (1603, 1641), False, 'from django.db import models\n'), ((1653, 1732), 'django.db.models.CharField', 'models.CharField', ([], {'verbose_name': '"""Nr lokalu"""', 'null': '(True)', 'blank': '(True)', 'max_length': '(8)'}), "(verbose_name='Nr lokalu', null=True, blank=True, max_length=8)\n", (1669, 1732), False, 'from django.db import models\n'), ((1828, 1882), 'django.db.models.CharField', 'models.CharField', ([], {'verbose_name': '"""Miasto"""', 'max_length': '(64)'}), "(verbose_name='Miasto', max_length=64)\n", (1844, 1882), False, 'from django.db import models\n'), ((1899, 1985), 'django.db.models.CharField', 'models.CharField', ([], {'verbose_name': '"""Kod pocztowy"""', 'null': '(True)', 'blank': '(True)', 'max_length': '(6)'}), "(verbose_name='Kod pocztowy', null=True, 
blank=True,\n max_length=6)\n", (1915, 1985), False, 'from django.db import models\n')]
from PyQt5 import QtWidgets
from PyQt5 import QtCore

import pyqtgraph as pg
import numpy as np
import socket
from ctypes import *
import time

from MainWindow import Ui_MainWindow

# PLC UDP Data Types import
from RxUdp import RxUdp
from TxUdp import TxUdp


class RemoteInterface(QtWidgets.QMainWindow, Ui_MainWindow):
    def __init__(self):
        super(RemoteInterface, self).__init__()
        self.gui = Ui_MainWindow()
        self.gui.setupUi(self)

        # Udp Socket
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.sock.bind(('192.168.90.60', 50060))

        # UPD data comm with PLC
        self.txData = TxUdp()
        self.rxData = RxUdp()

        # Udp Read/Write thread
        self.timer = QtCore.QTimer()
        self.timer.timeout.connect(self.update)
        self.timer.start(50)

        # Initial time
        self.t0 = time.time()

        # Start GUI
        self.show()

    def update(self):
        # Elapsed time
        t = self.t0 - time.time()

        # Read data from udp
        data, addr = self.sock.recvfrom(1024)
        memmove(addressof(self.rxData), data, sizeof(self.rxData))

        # Incerement counter and set Udp Key
        self.txData.iUdpKey = 46505228
        self.txData.iCounter = self.txData.iCounter + 1

        # Apply sine motion to heave for EM1500
        self.txData.em1500_surge_cmd = 0.1*np.sin(0.05*2.0*np.pi*t)
        self.txData.em1500_heave_cmd = 0.2*np.sin(0.1*2.0*np.pi*t)

        # Send data to PLC
        self.sock.sendto(self.txData, ('192.168.90.50', 50050))

    def closeEvent(self, event):
        self.timer.stop()
[ "socket.socket", "numpy.sin", "PyQt5.QtCore.QTimer", "RxUdp.RxUdp", "TxUdp.TxUdp", "MainWindow.Ui_MainWindow", "time.time" ]
[((410, 425), 'MainWindow.Ui_MainWindow', 'Ui_MainWindow', ([], {}), '()\n', (423, 425), False, 'from MainWindow import Ui_MainWindow\n'), ((499, 547), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_DGRAM'], {}), '(socket.AF_INET, socket.SOCK_DGRAM)\n', (512, 547), False, 'import socket\n'), ((653, 660), 'TxUdp.TxUdp', 'TxUdp', ([], {}), '()\n', (658, 660), False, 'from TxUdp import TxUdp\n'), ((683, 690), 'RxUdp.RxUdp', 'RxUdp', ([], {}), '()\n', (688, 690), False, 'from RxUdp import RxUdp\n'), ((745, 760), 'PyQt5.QtCore.QTimer', 'QtCore.QTimer', ([], {}), '()\n', (758, 760), False, 'from PyQt5 import QtCore\n'), ((880, 891), 'time.time', 'time.time', ([], {}), '()\n', (889, 891), False, 'import time\n'), ((1002, 1013), 'time.time', 'time.time', ([], {}), '()\n', (1011, 1013), False, 'import time\n'), ((1391, 1421), 'numpy.sin', 'np.sin', (['(0.05 * 2.0 * np.pi * t)'], {}), '(0.05 * 2.0 * np.pi * t)\n', (1397, 1421), True, 'import numpy as np\n'), ((1459, 1488), 'numpy.sin', 'np.sin', (['(0.1 * 2.0 * np.pi * t)'], {}), '(0.1 * 2.0 * np.pi * t)\n', (1465, 1488), True, 'import numpy as np\n')]
from pprint import pprint

import rhea.build as build
from rhea.build.boards import get_board
from blink import blinky


def run_nano():
    brd = get_board('de0nano')
    flow = build.flow.Quartus(brd=brd, top=blinky)
    flow.run()
    info = flow.get_utilization()
    pprint(info)
    flow.program()


if __name__ == '__main__':
    run_nano()
[ "rhea.build.boards.get_board", "rhea.build.flow.Quartus", "pprint.pprint" ]
[((148, 168), 'rhea.build.boards.get_board', 'get_board', (['"""de0nano"""'], {}), "('de0nano')\n", (157, 168), False, 'from rhea.build.boards import get_board\n'), ((180, 219), 'rhea.build.flow.Quartus', 'build.flow.Quartus', ([], {'brd': 'brd', 'top': 'blinky'}), '(brd=brd, top=blinky)\n', (198, 219), True, 'import rhea.build as build\n'), ((273, 285), 'pprint.pprint', 'pprint', (['info'], {}), '(info)\n', (279, 285), False, 'from pprint import pprint\n')]
"""Parse a crontab entry and return a dictionary.""" import calendar import itertools import re PREDEFINED_SCHEDULE = { "@yearly": "0 0 1 1 *", "@annually": "0 0 1 1 *", "@monthly": "0 0 1 * *", "@weekly": "0 0 * * 0", "@daily": "0 0 * * *", "@midnight": "0 0 * * *", "@hourly": "0 * * * *", } def convert_predefined(line): if not line.startswith("@"): return line if line not in PREDEFINED_SCHEDULE: raise ValueError("Unknown predefine: %s" % line) return PREDEFINED_SCHEDULE[line] class FieldParser: """Parse and validate a field in a crontab entry.""" name = None bounds = None range_pattern = re.compile( r""" (?P<min>\d+|\*) # Initial value (?:-(?P<max>\d+))? # Optional max upper bound (?:/(?P<step>\d+))? # Optional step increment """, re.VERBOSE, ) def normalize(self, source): return source.strip() def get_groups(self, source): return source.split(",") def parse(self, source): if source == "*": return None groups = [self.get_values(group) for group in self.get_groups(source)] groups = set(itertools.chain.from_iterable(groups)) has_last = False if "LAST" in groups: has_last = True groups.remove("LAST") groups = sorted(groups) if has_last: groups.append("LAST") return groups def get_match_groups(self, source): match = self.range_pattern.match(source) if not match: raise ValueError("Unknown expression: %s" % source) return match.groupdict() def get_values(self, source): source = self.normalize(source) match_groups = self.get_match_groups(source) step = 1 min_value, max_value = self.get_value_range(match_groups) if match_groups["step"]: step = self.validate_bounds(match_groups["step"]) return self.get_range(min_value, max_value, step) def get_value_range(self, match_groups): if match_groups["min"] == "*": return self.bounds min_value = self.validate_bounds(match_groups["min"]) if match_groups["max"]: # Cron expressions are inclusive, range is exclusive on upper bound max_value = self.validate_bounds(match_groups["max"]) + 1 return min_value, max_value return min_value, min_value + 1 def get_range(self, min_value, max_value, step): if min_value < max_value: return list(range(min_value, max_value, step)) min_bound, max_bound = self.bounds diff = (max_bound - min_value) + (max_value - min_bound) return [(min_value + i) % max_bound for i in list(range(0, diff, step))] def validate_bounds(self, value): min_value, max_value = self.bounds value = int(value) if not min_value <= value < max_value: raise ValueError(f"{self.name} value out of range: {value}") return value class MinuteFieldParser(FieldParser): name = "minutes" bounds = (0, 60) class HourFieldParser(FieldParser): name = "hours" bounds = (0, 24) class MonthdayFieldParser(FieldParser): name = "monthdays" bounds = (1, 32) def get_values(self, source): # Handle special case for last day of month source = self.normalize(source) if source == "L": return ["LAST"] return super().get_values(source) class MonthFieldParser(FieldParser): name = "months" bounds = (1, 13) month_names = calendar.month_abbr[1:] def normalize(self, month): month = super().normalize(month) month = month.lower() for month_num, month_name in enumerate(self.month_names, start=1): month = month.replace(month_name.lower(), str(month_num)) return month class WeekdayFieldParser(FieldParser): name = "weekdays" bounds = (0, 7) day_names = ["sun", "mon", "tue", "wed", "thu", "fri", "sat"] def normalize(self, day_of_week): day_of_week = super().normalize(day_of_week) day_of_week = day_of_week.lower() for dow_num, dow_name in enumerate(self.day_names): day_of_week = 
day_of_week.replace(dow_name, str(dow_num)) return day_of_week.replace("7", "0").replace("?", "*") minute_parser = MinuteFieldParser() hour_parser = HourFieldParser() monthday_parser = MonthdayFieldParser() month_parser = MonthFieldParser() weekday_parser = WeekdayFieldParser() # TODO: support L (for dow), W, # def parse_crontab(line): line = convert_predefined(line) minutes, hours, dom, months, dow = line.split(None, 4) return { "minutes": minute_parser.parse(minutes), "hours": hour_parser.parse(hours), "monthdays": monthday_parser.parse(dom), "months": month_parser.parse(months), "weekdays": weekday_parser.parse(dow), "ordinals": None, }
[ "itertools.chain.from_iterable", "re.compile" ]
[((676, 894), 're.compile', 're.compile', (['"""\n (?P<min>\\\\d+|\\\\*) # Initial value\n (?:-(?P<max>\\\\d+))? # Optional max upper bound\n (?:/(?P<step>\\\\d+))? # Optional step increment\n """', 're.VERBOSE'], {}), '(\n """\n (?P<min>\\\\d+|\\\\*) # Initial value\n (?:-(?P<max>\\\\d+))? # Optional max upper bound\n (?:/(?P<step>\\\\d+))? # Optional step increment\n """\n , re.VERBOSE)\n', (686, 894), False, 'import re\n'), ((1218, 1255), 'itertools.chain.from_iterable', 'itertools.chain.from_iterable', (['groups'], {}), '(groups)\n', (1247, 1255), False, 'import itertools\n')]
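FieldParser.get_values above turns one comma-separated group such as "1-10/2" into concrete values by matching min/max/step with range_pattern and then calling get_range. A compact standalone sketch of that expansion (simplified: no wrap-around for reversed ranges and no month/day-name normalization):

import re

# Hypothetical standalone helper mirroring the min/max/step idea of range_pattern above.
FIELD_RE = re.compile(r"(?P<min>\d+|\*)(?:-(?P<max>\d+))?(?:/(?P<step>\d+))?")

def expand_field(source, bounds):
    # Expand one group such as "1-10/2" or "*/15" into a list of concrete values.
    lo, hi = bounds
    match = FIELD_RE.match(source)
    if not match:
        raise ValueError("Unknown expression: %s" % source)
    if match.group("min") == "*":
        start, stop = lo, hi
    else:
        start = int(match.group("min"))
        stop = int(match.group("max")) + 1 if match.group("max") else start + 1
    step = int(match.group("step") or 1)
    return list(range(start, stop, step))

# expand_field("1-10/2", (0, 60)) -> [1, 3, 5, 7, 9]
# expand_field("*/15", (0, 60))   -> [0, 15, 30, 45]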
import networkx as nx import pandas as pd import os import numpy as np from time import time from utils.plot_data import PlotData import matplotlib.pyplot as plt import stellargraph as sg from stellargraph.mapper import GraphSAGENodeGenerator from stellargraph.layer import GraphSAGE # note that using "from keras" will not work in tensorflow >= 2.0 from tensorflow.keras import callbacks from tensorflow.keras.callbacks import TensorBoard from tensorflow.keras import layers, optimizers, losses, metrics, Model from sklearn import preprocessing, feature_extraction, model_selection from sklearn.metrics import confusion_matrix, mean_absolute_error, mean_squared_error, \ classification_report, accuracy_score def plot_history(history): metrics = sorted(history.history.keys()) metrics = metrics[:len(metrics) // 2] for m in metrics: plt.plot(history.history[m]) plt.plot(history.history['val_' + m]) plt.title(m) plt.ylabel(m) plt.xlabel('epoch') plt.legend(['train', 'test'], loc='upper right') plt.show() # Set the graph edgelist with the target-source contact data data_dir = os.path.expanduser("./datasets/contacts") cora_location = os.path.expanduser(os.path.join(data_dir, "cct.contacts")) edgelist = pd.read_csv(os.path.join(data_dir, "cct.contacts"), sep='\t', header=None, names=["target", "source"]) edgelist["label"] = "contacts" g_nx = nx.from_pandas_edgelist(edgelist[0:1348], 'target', 'source') # Set each node attribute as the 'subject' data in the last field of the cora feature file cct_feature_dir = os.path.expanduser("./datasets/covid_vulnerability_features") cct_features_location = os.path.expanduser(os.path.join(cct_feature_dir, "cct_features.csv")) node_attr = pd.read_csv(cct_features_location) node_attr = node_attr.drop(columns='personId') node_attr.loc[node_attr['Gender'] == 'male', 'Gender'] = 1. node_attr.loc[node_attr['Gender'] == 'female', 'Gender'] = 0. node_attr['Age'] = node_attr['Age'].div(max(node_attr['Age'])) node_attr = node_attr.replace([True, False], [1., 0.]) feature_names = node_attr.columns.values.tolist() # Let's artificially create SEI classification labels: # S = susceptible # E = exposed # I = infected case f1 = 'Diagnosis of Lung disease due to external agents in the previous 12 months' f2 = 'Diagnosis of Influenza in the previous 12 months' f3 = 'Diagnosis of Pneumothorax in the previous 12 months' f4 = 'Age' f5 = 'Gender' # Note: race would be a good feature component also! # Tweak threshold for ~50% exposed rate node_attr['subject'] = np.where(node_attr[f1] + node_attr[f2] + node_attr[f3] + node_attr[f5] + node_attr[f4] >= 1.2, 'E', 'S') node_attr.loc[node_attr[f1] + node_attr[f2] + node_attr[f3] + node_attr[f5] + node_attr[f4] >= 2, 'subject'] = 'I' print('Number infected:', len(np.where(node_attr['subject'] == 'I')[0])/1.*1.) print('Number exposed:', len(np.where(node_attr['subject'] == 'E')[0])/1.*1.) print('Number susceptible:', len(np.where(node_attr['subject'] == 'S')[0])/1.*1.) values = {str(row.tolist()[0]): row.tolist()[-1] for _, row in node_attr.iterrows()} nx.set_node_attributes(g_nx, values, 'subject') node_attr.index = [*g_nx.nodes] # Select the largest connected component. For clarity we ignore isolated # nodes and subgraphs; having these in the data does not prevent the # algorithm from running and producing valid results. 
g_nx_ccs = (g_nx.subgraph(c).copy() for c in nx.connected_components(g_nx)) g_nx = max(g_nx_ccs, key=len) print("Largest subgraph statistics: {} nodes, {} edges".format( g_nx.number_of_nodes(), g_nx.number_of_edges())) # Get node feature data for only the nodes remaining largest connected component graph node_data = node_attr node_data = node_data[node_data.index.isin(list(g_nx.nodes()))] # Train a graph-ML model that will predict the "subject" attribute on the nodes. These subjects are one of 3 categories set(node_data["subject"]) # For machine learning we want to take a subset of the nodes for training, # and use the rest for testing. We'll use scikit-learn again to do this # These are train / test dataframe splits train_data, test_data = model_selection.train_test_split(node_data, train_size=0.6, test_size=None, stratify=node_data['subject'], random_state=42) # Create dataframe to ndarray transform target_encoding = feature_extraction.DictVectorizer(sparse=False) # Create the training and test split label one-hot-encoding labels for the generators below # This means, that the DictVectorizer needs to be fitted prior to # transforming target_encoding into it's corresponding matrix format. # You need to call vec.fit(target_encoding) followed by vec.transform(target_encoding), # or more succintly X_train = vec.fit_transform(target_encoding). # DictVectorizer needs to know the keys of all the passed dictionaries, # so that the transformation of unseen data consistently yields the same number of columns and column order. train_targets = target_encoding.fit_transform(train_data[["subject"]].to_dict('records')) test_targets = target_encoding.fit_transform(test_data[["subject"]].to_dict('records')) # Get just the node feature vectors from the node dataframe node_features = node_data[feature_names] # print(node_features.head(2)) # Now create a StellarGraph object G = sg.StellarGraph.from_networkx(g_nx, node_features=node_features) # Print help for GraphSAGENodeGenerator # help(GraphSAGENodeGenerator) batch_size = 50 num_samples = [10, 20, 10] generator = GraphSAGENodeGenerator(G, batch_size, num_samples) # Create a test data generator given training set node index and it's label train_gen = generator.flow(train_data.index, train_targets) graphsage_model = GraphSAGE( layer_sizes=[64, 64, 64], generator=generator, bias=True, dropout=0.5, ) # Create the network model using the graph input and output tensors and a softmax prediction layer x_inp, x_out = graphsage_model.in_out_tensors() prediction = layers.Dense(units=train_targets.shape[1], activation="softmax")(x_out) # Cost function model_loss_function = losses.categorical_crossentropy # Training metric model_metrics = ["acc"] # define optimizers model_optimizer_rmsprop0 = 'rmsprop' model_optimizer_adam0 = 'adam' model_optimizer_adam = optimizers.Adam(lr=0.005) model_optimizer_sgd = optimizers.SGD(lr=1e-5, decay=1e-6, momentum=0.45, nesterov=True) model_optimizer_rmsprop = optimizers.RMSprop(lr=1e-5, decay=0.9, momentum=0.5, epsilon=1e-10, centered=True) model_optimizer_rmsprop1 = optimizers.RMSprop(lr=1e-5, decay=1e-6, momentum=0.49, centered=True) use_best_weights = False # Use this after getting best weights if use_best_weights: model_optimizer = model_optimizer_rmsprop1 else: # Use this as first run model_optimizer = model_optimizer_adam # Create the network model model = Model(inputs=x_inp, outputs=prediction) print(model.summary()) #Epoch 00016: val_loss improved from 0.40059 to 0.38344, saving model to 
.\trained_for_pred\cct_graphsage_node_inference\model\log\Best-weights-my_model-016-0.4879-0.7858.h5 #12/12 - 5s - loss: 0.4869 - acc: 0.7858 - val_loss: 0.3834 - val_acc: 0.8756 #Epoch 17/50 #Epoch 00008: val_loss improved from 0.38307 to 0.38293, saving model to .\trained_for_pred\cct_graphsage_node_inference\model\log\Best-weights-my_model-008-0.4605-0.8014.h5 #12/12 - 5s - loss: 0.4658 - acc: 0.8014 - val_loss: 0.3829 - val_acc: 0.8731 #Epoch 9/10 if use_best_weights: # Load the best weights and compile the model best_weights = '.\\trained_for_pred\\cct_graphsage_node_inference\\model\\log\\Best-weights-my_model-008-0.4605-0.8014.h5' model.load_weights(best_weights) model.compile( optimizer=model_optimizer, loss=model_loss_function, metrics=model_metrics, ) # Create a test data generator given test node index and it's label test_gen = generator.flow(test_data.index, test_targets) # Prepare callbacks model_type = 'cct_graphsage_node_inference' train_log_path = '.\\trained_for_pred\\' + \ model_type + '\\model\\log\\model_train.csv' train_checkpoint_path = '.\\trained_for_pred\\' + model_type + \ '\\model\\log\\Best-weights-my_model-{epoch:03d}-{loss:.4f}-{acc:.4f}.h5' model_tensorboard_log = '.\\training_log\\tensorboard\\' csv_log = callbacks.CSVLogger(train_log_path, separator=',', append=False) early_stopping = callbacks.EarlyStopping( monitor='val_loss', min_delta=0, patience=0, verbose=0, mode='auto') checkpoint = callbacks.ModelCheckpoint( train_checkpoint_path, monitor='val_loss', verbose=1, save_best_only=True, mode='min') tensorboard = TensorBoard( log_dir=model_tensorboard_log + "{}".format(time())) callbacks_list = [csv_log, tensorboard, checkpoint] # Train the network history = model.fit( train_gen, epochs=50, validation_data=test_gen, verbose=2, callbacks=callbacks_list, shuffle=True, ) plot_history(history) test_metrics = model.evaluate(test_gen) print("\nTest Set Metrics:") for name, val in zip(model.metrics_names, test_metrics): print("\t{}: {:0.4f}".format(name, val)) ###################################################################################################################### # start the evaluate and report process print("===================== starting evaluation and report generation =========================") # Create plotter object plotter = PlotData() # paths to save outputs save_plt_cm = './trained_for_pred/' + model_type + '/stats/confusion_matrix.png' save_plt_normalized_cm = './trained_for_pred/' + \ model_type + '/stats/confusion_matrix_normalized.png' save_plt_roc = './trained_for_pred/' + \ model_type + '/stats/roc_curve.png' save_eval_report = './trained_for_pred/' + \ model_type + '/stats/eval_report.txt' save_plt_accuracy = './trained_for_pred/' + \ model_type + '/stats/model_accuracy.png' save_plt_loss = './trained_for_pred/' + \ model_type + '/stats/model_loss.png' save_plt_learning = './trained_for_pred/' + \ model_type + '/stats/model_learning.eps' train_log_data_path = './trained_for_pred/' + \ model_type + '/model/log/model_train.csv' # for confusion matrix plotting classification_list = ["Exposed", "Infected", "Susceptible"] # NOTE: Do not shuffle the test data! This is an issue with Keras. 
# See: https://github.com/keras-team/keras/issues/4225 # and: https://github.com/keras-team/keras/issues/5558 Y_true = test_targets Y_pred = model.predict(test_gen) n_classes = Y_true.shape[1] # Save multiclass confusion matrices and ROCs for i in range(n_classes): # Confusion Matrix and Classification Report y_true = Y_true[:, i] y_pred = np.round(Y_pred[:, i]) # plot confusion matrix cm = confusion_matrix(y_true, y_pred) cm_plot_labels = ["Other", classification_list[i]] save_plt_cm_to = save_plt_cm[:-4] + '_' + classification_list[i] + '.png' plotter.plot_confusion_matrix( cm, cm_plot_labels, save_plt_cm_to, title='Confusion Matrix') save_plt_normalized_cm_to = save_plt_normalized_cm[:-4] + '_' + classification_list[i] + '.png' plotter.plot_confusion_matrix( cm, cm_plot_labels, save_plt_normalized_cm_to, normalize=True, title='Normalized Confusion Matrix') # Compute ROC curve and ROC area for each class save_plt_roc_to = save_plt_roc[:-4] + '_' + classification_list[i] + '.png' roc_auc = plotter.plot_roc(y_true, Y_pred[:, i], save_plt_roc_to) mae = mean_absolute_error(y_true, y_pred) mse = mean_squared_error(y_true, y_pred) accuracy = accuracy_score(y_true, y_pred) print('mean absolute error: ' + str(mae)) print('mean squared error: ' + str(mse)) print('Area Under the Curve (AUC): ' + str(roc_auc)) c_report = classification_report( y_true, y_pred, target_names=cm_plot_labels) print(c_report) save_eval_report_to = save_eval_report[:-4] + '_' + classification_list[i] + '.txt' #delete_file(save_eval_report) with open(save_eval_report_to, 'a') as f: f.write('\n\n') f.write('******************************************************\n') f.write('************** Evalaluation Report ***************\n') f.write('******************************************************\n') f.write('\n\n') f.write('- Accuracy Score: ' + str(accuracy)) f.write('\n\n') f.write('- Mean Absolute Error (MAE): ' + str(mae)) f.write('\n\n') f.write('- Mean Squared Error (MSE): ' + str(mse)) f.write('\n\n') f.write('- Area Under the Curve (AUC): ' + str(roc_auc)) f.write('\n\n') f.write('- Confusion Matrix:\n') f.write(str(cm)) f.write('\n\n') f.write('- Normalized Confusion Matrix:\n') cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] f.write(str(cm)) f.write('\n\n') f.write('- Classification report:\n') f.write(str(c_report)) f.close() train_validation = ['train', 'validation'] data = pd.read_csv(train_log_data_path) acc = data['acc'].values val_acc = data['val_acc'].values loss = data['loss'].values val_loss = data['val_loss'].values # plot metrics to the stats dir plotter.plot_2d(acc, val_acc, 'epoch', 'accuracy', 'Model Accuracy', train_validation, save_plt_accuracy) plotter.plot_2d(loss, val_loss, 'epoch', 'loss', 'Model Loss', train_validation, save_plt_loss) plotter.plot_model_bis(data, save_plt_learning) ##################################################################################################################### # To get an idea of how the prediction errors are distributed visually # load the graph in yEd Live and apply a radial layout: # get the predictions themselves for all nodes using another node iterator: all_nodes = node_data.index all_mapper = generator.flow(all_nodes) all_predictions = model.predict(all_mapper) # invert the one-hot encoding node_predictions = target_encoding.inverse_transform(all_predictions) results = pd.DataFrame(node_predictions, index=all_nodes).idxmax(axis=1) df = pd.DataFrame({"Predicted": results, "True": node_data['subject']}) df.head(10) # augment the graph with the true 
vs. predicted label for visualization purposes: for nid, pred, true in zip(df.index, df["Predicted"], df["True"]): g_nx.nodes[nid]["subject"] = true g_nx.nodes[nid]["PREDICTED_subject"] = pred.split("=")[-1] # add isTrain and isCorrect node attributes: for nid in train_data.index: g_nx.nodes[nid]["isTrain"] = True for nid in test_data.index: g_nx.nodes[nid]["isTrain"] = False for nid in g_nx.nodes(): g_nx.nodes[nid]["isCorrect"] = g_nx.nodes[nid]["subject"] == g_nx.nodes[nid]["PREDICTED_subject"] # Save the graphml object pred_fname = "pred_n={}.graphml".format(num_samples) nx.write_graphml(g_nx,'./nodepredictions.graphml') ###################################################################################################################### # Node embeddings # Evaluate node embeddings as activations of the output of graphsage layer stack, and visualise them, # coloring nodes by their subject label. # The GraphSAGE embeddings are the output of the GraphSAGE layers, namely the x_out variable. # Let’s create a new model with the same inputs as we used previously x_inp but now the output is # the embeddings rather than the predicted class. Additionally note that the weights trained previously # are kept in the new model. embedding_model = Model(inputs=x_inp, outputs=x_out) emb = embedding_model.predict(all_mapper) # This is the number of nodes in the largest connected subgraph x outputa layer size print('Embedding shape:', emb.shape) from sklearn.manifold import TSNE import pandas as pd import numpy as np import matplotlib.patches as mpatches X = emb y = np.argmax(target_encoding.transform(node_data[["subject"]].to_dict('records')), axis=1) n_components = 3 if X.shape[1] > 2: transform = TSNE trans = transform(n_components=n_components) emb_transformed = pd.DataFrame(trans.fit_transform(X), index=node_data.index) emb_transformed['label'] = y else: emb_transformed = pd.DataFrame(X, index=node_data.index) emb_transformed = emb_transformed.rename(columns={'0': 0, '1': 1}) emb_transformed['label'] = y alpha = 0.7 color_list = ['b', 'r', 'g'] colors = [color_list[i] for i in emb_transformed['label']] classification_list = ["Exposed", "Infected", "Susceptible"] classifications = [classification_list[i] for i in emb_transformed['label']] # Make sure classification list is in the right order: print('Number Infected:', classifications.count('Infected')) print('Number Susceptible:', classifications.count('Susceptible')) print('Number Exposed:', classifications.count('Exposed')) if n_components == 2: fig, ax = plt.subplots(figsize=(8, 8)) ax.scatter(emb_transformed[0], emb_transformed[1], c=colors, alpha=alpha) ax.set(aspect="equal", xlabel="$X_1$", ylabel="$X_2$") else: fig = plt.figure(figsize=(8, 8)) ax = fig.add_subplot(111, projection='3d') ax.scatter(emb_transformed[0], emb_transformed[1], emb_transformed[2], c=colors, alpha=alpha) recs = [] for i, _ in enumerate(color_list): recs.append(mpatches.Rectangle((0,0),1,1,fc=color_list[i])) plt.title('{} visualization of GraphSAGE embeddings for CCT dataset'.format(transform.__name__)) ax.legend(recs, classification_list, loc=4) ax.grid(True) plt.show()
[ "pandas.read_csv", "matplotlib.pyplot.ylabel", "sklearn.metrics.classification_report", "stellargraph.StellarGraph.from_networkx", "tensorflow.keras.callbacks.EarlyStopping", "tensorflow.keras.layers.Dense", "sklearn.feature_extraction.DictVectorizer", "numpy.where", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.plot", "tensorflow.keras.optimizers.SGD", "networkx.from_pandas_edgelist", "pandas.DataFrame", "sklearn.metrics.mean_absolute_error", "os.path.expanduser", "numpy.round", "sklearn.metrics.confusion_matrix", "sklearn.model_selection.train_test_split", "networkx.connected_components", "stellargraph.mapper.GraphSAGENodeGenerator", "sklearn.metrics.mean_squared_error", "tensorflow.keras.optimizers.RMSprop", "matplotlib.pyplot.title", "time.time", "sklearn.metrics.accuracy_score", "matplotlib.pyplot.legend", "matplotlib.pyplot.show", "matplotlib.patches.Rectangle", "tensorflow.keras.callbacks.CSVLogger", "networkx.write_graphml", "os.path.join", "tensorflow.keras.optimizers.Adam", "matplotlib.pyplot.figure", "utils.plot_data.PlotData", "networkx.set_node_attributes", "stellargraph.layer.GraphSAGE", "tensorflow.keras.callbacks.ModelCheckpoint", "tensorflow.keras.Model", "matplotlib.pyplot.subplots" ]
[((1154, 1195), 'os.path.expanduser', 'os.path.expanduser', (['"""./datasets/contacts"""'], {}), "('./datasets/contacts')\n", (1172, 1195), False, 'import os\n'), ((1423, 1484), 'networkx.from_pandas_edgelist', 'nx.from_pandas_edgelist', (['edgelist[0:1348]', '"""target"""', '"""source"""'], {}), "(edgelist[0:1348], 'target', 'source')\n", (1446, 1484), True, 'import networkx as nx\n'), ((1595, 1656), 'os.path.expanduser', 'os.path.expanduser', (['"""./datasets/covid_vulnerability_features"""'], {}), "('./datasets/covid_vulnerability_features')\n", (1613, 1656), False, 'import os\n'), ((1763, 1797), 'pandas.read_csv', 'pd.read_csv', (['cct_features_location'], {}), '(cct_features_location)\n', (1774, 1797), True, 'import pandas as pd\n'), ((2580, 2688), 'numpy.where', 'np.where', (['(node_attr[f1] + node_attr[f2] + node_attr[f3] + node_attr[f5] + node_attr[\n f4] >= 1.2)', '"""E"""', '"""S"""'], {}), "(node_attr[f1] + node_attr[f2] + node_attr[f3] + node_attr[f5] +\n node_attr[f4] >= 1.2, 'E', 'S')\n", (2588, 2688), True, 'import numpy as np\n'), ((3124, 3171), 'networkx.set_node_attributes', 'nx.set_node_attributes', (['g_nx', 'values', '"""subject"""'], {}), "(g_nx, values, 'subject')\n", (3146, 3171), True, 'import networkx as nx\n'), ((4159, 4286), 'sklearn.model_selection.train_test_split', 'model_selection.train_test_split', (['node_data'], {'train_size': '(0.6)', 'test_size': 'None', 'stratify': "node_data['subject']", 'random_state': '(42)'}), "(node_data, train_size=0.6, test_size=None,\n stratify=node_data['subject'], random_state=42)\n", (4191, 4286), False, 'from sklearn import preprocessing, feature_extraction, model_selection\n'), ((4398, 4445), 'sklearn.feature_extraction.DictVectorizer', 'feature_extraction.DictVectorizer', ([], {'sparse': '(False)'}), '(sparse=False)\n', (4431, 4445), False, 'from sklearn import preprocessing, feature_extraction, model_selection\n'), ((5358, 5422), 'stellargraph.StellarGraph.from_networkx', 'sg.StellarGraph.from_networkx', (['g_nx'], {'node_features': 'node_features'}), '(g_nx, node_features=node_features)\n', (5387, 5422), True, 'import stellargraph as sg\n'), ((5549, 5599), 'stellargraph.mapper.GraphSAGENodeGenerator', 'GraphSAGENodeGenerator', (['G', 'batch_size', 'num_samples'], {}), '(G, batch_size, num_samples)\n', (5571, 5599), False, 'from stellargraph.mapper import GraphSAGENodeGenerator\n'), ((5755, 5840), 'stellargraph.layer.GraphSAGE', 'GraphSAGE', ([], {'layer_sizes': '[64, 64, 64]', 'generator': 'generator', 'bias': '(True)', 'dropout': '(0.5)'}), '(layer_sizes=[64, 64, 64], generator=generator, bias=True, dropout=0.5\n )\n', (5764, 5840), False, 'from stellargraph.layer import GraphSAGE\n'), ((6314, 6339), 'tensorflow.keras.optimizers.Adam', 'optimizers.Adam', ([], {'lr': '(0.005)'}), '(lr=0.005)\n', (6329, 6339), False, 'from tensorflow.keras import layers, optimizers, losses, metrics, Model\n'), ((6362, 6429), 'tensorflow.keras.optimizers.SGD', 'optimizers.SGD', ([], {'lr': '(1e-05)', 'decay': '(1e-06)', 'momentum': '(0.45)', 'nesterov': '(True)'}), '(lr=1e-05, decay=1e-06, momentum=0.45, nesterov=True)\n', (6376, 6429), False, 'from tensorflow.keras import layers, optimizers, losses, metrics, Model\n'), ((6454, 6541), 'tensorflow.keras.optimizers.RMSprop', 'optimizers.RMSprop', ([], {'lr': '(1e-05)', 'decay': '(0.9)', 'momentum': '(0.5)', 'epsilon': '(1e-10)', 'centered': '(True)'}), '(lr=1e-05, decay=0.9, momentum=0.5, epsilon=1e-10,\n centered=True)\n', (6472, 6541), False, 'from tensorflow.keras import layers, 
optimizers, losses, metrics, Model\n'), ((6564, 6635), 'tensorflow.keras.optimizers.RMSprop', 'optimizers.RMSprop', ([], {'lr': '(1e-05)', 'decay': '(1e-06)', 'momentum': '(0.49)', 'centered': '(True)'}), '(lr=1e-05, decay=1e-06, momentum=0.49, centered=True)\n', (6582, 6635), False, 'from tensorflow.keras import layers, optimizers, losses, metrics, Model\n'), ((6879, 6918), 'tensorflow.keras.Model', 'Model', ([], {'inputs': 'x_inp', 'outputs': 'prediction'}), '(inputs=x_inp, outputs=prediction)\n', (6884, 6918), False, 'from tensorflow.keras import layers, optimizers, losses, metrics, Model\n'), ((8306, 8370), 'tensorflow.keras.callbacks.CSVLogger', 'callbacks.CSVLogger', (['train_log_path'], {'separator': '""","""', 'append': '(False)'}), "(train_log_path, separator=',', append=False)\n", (8325, 8370), False, 'from tensorflow.keras import callbacks\n'), ((8388, 8484), 'tensorflow.keras.callbacks.EarlyStopping', 'callbacks.EarlyStopping', ([], {'monitor': '"""val_loss"""', 'min_delta': '(0)', 'patience': '(0)', 'verbose': '(0)', 'mode': '"""auto"""'}), "(monitor='val_loss', min_delta=0, patience=0,\n verbose=0, mode='auto')\n", (8411, 8484), False, 'from tensorflow.keras import callbacks\n'), ((8499, 8615), 'tensorflow.keras.callbacks.ModelCheckpoint', 'callbacks.ModelCheckpoint', (['train_checkpoint_path'], {'monitor': '"""val_loss"""', 'verbose': '(1)', 'save_best_only': '(True)', 'mode': '"""min"""'}), "(train_checkpoint_path, monitor='val_loss',\n verbose=1, save_best_only=True, mode='min')\n", (8524, 8615), False, 'from tensorflow.keras import callbacks\n'), ((9407, 9417), 'utils.plot_data.PlotData', 'PlotData', ([], {}), '()\n', (9415, 9417), False, 'from utils.plot_data import PlotData\n'), ((13033, 13065), 'pandas.read_csv', 'pd.read_csv', (['train_log_data_path'], {}), '(train_log_data_path)\n', (13044, 13065), True, 'import pandas as pd\n'), ((14115, 14181), 'pandas.DataFrame', 'pd.DataFrame', (["{'Predicted': results, 'True': node_data['subject']}"], {}), "({'Predicted': results, 'True': node_data['subject']})\n", (14127, 14181), True, 'import pandas as pd\n'), ((14829, 14880), 'networkx.write_graphml', 'nx.write_graphml', (['g_nx', '"""./nodepredictions.graphml"""'], {}), "(g_nx, './nodepredictions.graphml')\n", (14845, 14880), True, 'import networkx as nx\n'), ((15504, 15538), 'tensorflow.keras.Model', 'Model', ([], {'inputs': 'x_inp', 'outputs': 'x_out'}), '(inputs=x_inp, outputs=x_out)\n', (15509, 15538), False, 'from tensorflow.keras import layers, optimizers, losses, metrics, Model\n'), ((17448, 17458), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (17456, 17458), True, 'import matplotlib.pyplot as plt\n'), ((1231, 1269), 'os.path.join', 'os.path.join', (['data_dir', '"""cct.contacts"""'], {}), "(data_dir, 'cct.contacts')\n", (1243, 1269), False, 'import os\n'), ((1294, 1332), 'os.path.join', 'os.path.join', (['data_dir', '"""cct.contacts"""'], {}), "(data_dir, 'cct.contacts')\n", (1306, 1332), False, 'import os\n'), ((1700, 1749), 'os.path.join', 'os.path.join', (['cct_feature_dir', '"""cct_features.csv"""'], {}), "(cct_feature_dir, 'cct_features.csv')\n", (1712, 1749), False, 'import os\n'), ((6016, 6080), 'tensorflow.keras.layers.Dense', 'layers.Dense', ([], {'units': 'train_targets.shape[1]', 'activation': '"""softmax"""'}), "(units=train_targets.shape[1], activation='softmax')\n", (6028, 6080), False, 'from tensorflow.keras import layers, optimizers, losses, metrics, Model\n'), ((10673, 10695), 'numpy.round', 'np.round', (['Y_pred[:, i]'], {}), '(Y_pred[:, 
i])\n', (10681, 10695), True, 'import numpy as np\n'), ((10733, 10765), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (10749, 10765), False, 'from sklearn.metrics import confusion_matrix, mean_absolute_error, mean_squared_error, classification_report, accuracy_score\n'), ((11459, 11494), 'sklearn.metrics.mean_absolute_error', 'mean_absolute_error', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (11478, 11494), False, 'from sklearn.metrics import confusion_matrix, mean_absolute_error, mean_squared_error, classification_report, accuracy_score\n'), ((11505, 11539), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (11523, 11539), False, 'from sklearn.metrics import confusion_matrix, mean_absolute_error, mean_squared_error, classification_report, accuracy_score\n'), ((11555, 11585), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (11569, 11585), False, 'from sklearn.metrics import confusion_matrix, mean_absolute_error, mean_squared_error, classification_report, accuracy_score\n'), ((11750, 11816), 'sklearn.metrics.classification_report', 'classification_report', (['y_true', 'y_pred'], {'target_names': 'cm_plot_labels'}), '(y_true, y_pred, target_names=cm_plot_labels)\n', (11771, 11816), False, 'from sklearn.metrics import confusion_matrix, mean_absolute_error, mean_squared_error, classification_report, accuracy_score\n'), ((16167, 16205), 'pandas.DataFrame', 'pd.DataFrame', (['X'], {'index': 'node_data.index'}), '(X, index=node_data.index)\n', (16179, 16205), True, 'import pandas as pd\n'), ((16828, 16856), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (16840, 16856), True, 'import matplotlib.pyplot as plt\n'), ((17010, 17036), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (17020, 17036), True, 'import matplotlib.pyplot as plt\n'), ((859, 887), 'matplotlib.pyplot.plot', 'plt.plot', (['history.history[m]'], {}), '(history.history[m])\n', (867, 887), True, 'import matplotlib.pyplot as plt\n'), ((896, 933), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['val_' + m]"], {}), "(history.history['val_' + m])\n", (904, 933), True, 'import matplotlib.pyplot as plt\n'), ((942, 954), 'matplotlib.pyplot.title', 'plt.title', (['m'], {}), '(m)\n', (951, 954), True, 'import matplotlib.pyplot as plt\n'), ((963, 976), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['m'], {}), '(m)\n', (973, 976), True, 'import matplotlib.pyplot as plt\n'), ((985, 1004), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (995, 1004), True, 'import matplotlib.pyplot as plt\n'), ((1013, 1061), 'matplotlib.pyplot.legend', 'plt.legend', (["['train', 'test']"], {'loc': '"""upper right"""'}), "(['train', 'test'], loc='upper right')\n", (1023, 1061), True, 'import matplotlib.pyplot as plt\n'), ((1070, 1080), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1078, 1080), True, 'import matplotlib.pyplot as plt\n'), ((3446, 3475), 'networkx.connected_components', 'nx.connected_components', (['g_nx'], {}), '(g_nx)\n', (3469, 3475), True, 'import networkx as nx\n'), ((14047, 14094), 'pandas.DataFrame', 'pd.DataFrame', (['node_predictions'], {'index': 'all_nodes'}), '(node_predictions, index=all_nodes)\n', (14059, 14094), True, 'import pandas as pd\n'), ((17244, 17294), 'matplotlib.patches.Rectangle', 'mpatches.Rectangle', (['(0, 
0)', '(1)', '(1)'], {'fc': 'color_list[i]'}), '((0, 0), 1, 1, fc=color_list[i])\n', (17262, 17294), True, 'import matplotlib.patches as mpatches\n'), ((8692, 8698), 'time.time', 'time', ([], {}), '()\n', (8696, 8698), False, 'from time import time\n'), ((2830, 2867), 'numpy.where', 'np.where', (["(node_attr['subject'] == 'I')"], {}), "(node_attr['subject'] == 'I')\n", (2838, 2867), True, 'import numpy as np\n'), ((2908, 2945), 'numpy.where', 'np.where', (["(node_attr['subject'] == 'E')"], {}), "(node_attr['subject'] == 'E')\n", (2916, 2945), True, 'import numpy as np\n'), ((2990, 3027), 'numpy.where', 'np.where', (["(node_attr['subject'] == 'S')"], {}), "(node_attr['subject'] == 'S')\n", (2998, 3027), True, 'import numpy as np\n')]
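The long comment above notes that DictVectorizer must be fitted before transforming so that unseen data yields the same columns in the same order; the script then calls fit_transform on both splits, which only lines up because the stratified split leaves every class present in both. A small sketch of the fit-on-train / transform-on-test pattern the comment describes (toy labels, not the CCT data):

from sklearn.feature_extraction import DictVectorizer

# Toy 'subject' records standing in for the train/test dataframes above.
train_records = [{"subject": "S"}, {"subject": "E"}, {"subject": "I"}]
test_records = [{"subject": "E"}, {"subject": "S"}]

encoding = DictVectorizer(sparse=False)
# Fit once on the training records so the one-hot column order is fixed...
train_targets = encoding.fit_transform(train_records)
# ...then only transform the held-out records against the already-fitted vocabulary.
test_targets = encoding.transform(test_records)

print(train_targets.shape, test_targets.shape)  # (3, 3) (2, 3)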
import unittest from hydra.core.communication.network.message import Message, MessageProcessor, MessageException # Message class is abstract therefore provide a simple implementation class SimpleMessage(Message): def __init__(self): self.data = ({"test": "test"}, "string with utf-8: łąćżęć", []) def loads_message(self, header, body, attachments): self.data = (header, body, attachments) def dumps_message(self): return self.data @staticmethod def get_message_type(): return "TestType" class SimpleMessage2(Message): def __init__(self): self.data = ({"test": "test"}, "string with utf-8: łąćżęć", []) def loads_message(self, header, body, attachments): self.data = (header, body, attachments) def dumps_message(self): return self.data @staticmethod def get_message_type(): return "OtherTestType" # Tests class TestSimpleMessage(unittest.TestCase): def setUp(self): self.message = SimpleMessage() def test_serialize_deserialize(self): result = SimpleMessage().loads(self.message.dumps()) assert result.data == self.message.data def test_process(self): data = self.message.dumps() typename, _, _, _ = Message.process(data) assert typename == self.message.get_message_type() class TestMessage(unittest.TestCase): def test_encode_decode(self): header = {"test": "test", "test2": "2"} body = "teststringąśðæśłćńþœπœę©" attachments = [] encoded = Message._encode(header, body, attachments) header2, body2, attachments2 = Message._decode(encoded) assert header == header2 assert body == body2 assert attachments == attachments2 class TestMessageProcessor(unittest.TestCase): def setUp(self): self.message1 = SimpleMessage() self.message1.loads_message({"test1": "test1"}, "a string", []) self.message2 = SimpleMessage2() self.message1.loads_message({"test2": "test2"}, "a string2", []) def test_constructor(self): mp = MessageProcessor(SimpleMessage, SimpleMessage2) msg1 = mp.loads(self.message1.dumps()) assert msg1.get_message_type() == self.message1.get_message_type() msg2 = mp.loads(self.message2.dumps()) assert msg2.get_message_type() == self.message2.get_message_type() def test_register(self): mp = MessageProcessor() mp.register(SimpleMessage) msg1 = mp.loads(self.message1.dumps()) assert msg1.get_message_type() == self.message1.get_message_type() with self.assertRaises(MessageException): msg2 = mp.loads(self.message2.dumps()) mp.register(SimpleMessage2) msg2 = mp.loads(self.message2.dumps()) assert msg2.get_message_type() == self.message2.get_message_type()
[ "hydra.core.communication.network.message.Message._encode", "hydra.core.communication.network.message.MessageProcessor", "hydra.core.communication.network.message.Message.process", "hydra.core.communication.network.message.Message._decode" ]
[((1272, 1293), 'hydra.core.communication.network.message.Message.process', 'Message.process', (['data'], {}), '(data)\n', (1287, 1293), False, 'from hydra.core.communication.network.message import Message, MessageProcessor, MessageException\n'), ((1561, 1603), 'hydra.core.communication.network.message.Message._encode', 'Message._encode', (['header', 'body', 'attachments'], {}), '(header, body, attachments)\n', (1576, 1603), False, 'from hydra.core.communication.network.message import Message, MessageProcessor, MessageException\n'), ((1643, 1667), 'hydra.core.communication.network.message.Message._decode', 'Message._decode', (['encoded'], {}), '(encoded)\n', (1658, 1667), False, 'from hydra.core.communication.network.message import Message, MessageProcessor, MessageException\n'), ((2116, 2163), 'hydra.core.communication.network.message.MessageProcessor', 'MessageProcessor', (['SimpleMessage', 'SimpleMessage2'], {}), '(SimpleMessage, SimpleMessage2)\n', (2132, 2163), False, 'from hydra.core.communication.network.message import Message, MessageProcessor, MessageException\n'), ((2452, 2470), 'hydra.core.communication.network.message.MessageProcessor', 'MessageProcessor', ([], {}), '()\n', (2468, 2470), False, 'from hydra.core.communication.network.message import Message, MessageProcessor, MessageException\n')]
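The MessageProcessor tests above rely on a registry that maps each class's get_message_type() string to the class used for deserialization and raises when an unregistered type arrives. A rough self-contained sketch of that pattern (the names below are illustrative, not the hydra API):

class UnknownTypeError(Exception):
    pass

class PingMessage:
    # Minimal stand-in message class exposing the type-name hook used for routing.
    def __init__(self):
        self.body = None

    def loads_message(self, body):
        self.body = body

    @staticmethod
    def get_message_type():
        return "Ping"

class Registry:
    # Route a (typename, payload) pair to whichever registered class claims that typename.
    def __init__(self, *classes):
        self._by_type = {cls.get_message_type(): cls for cls in classes}

    def register(self, cls):
        self._by_type[cls.get_message_type()] = cls

    def loads(self, typename, payload):
        try:
            cls = self._by_type[typename]
        except KeyError:
            raise UnknownTypeError("no class registered for %s" % typename)
        message = cls()
        message.loads_message(payload)
        return message

# Registry(PingMessage).loads("Ping", "hello").body == "hello"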
#Method 1- Recursive Search returns all subsets of {0,1,...n-1} import random n=random.randint(0,5) print(n) subset=[] def search(k): if k==n: print(subset) else: search(k+1) subset.append(k) search(k+1) subset.pop() search(0)
[ "random.randint" ]
[((80, 100), 'random.randint', 'random.randint', (['(0)', '(5)'], {}), '(0, 5)\n', (94, 100), False, 'import random\n')]
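As a cross-check on the recursive search above, the same subsets of {0, 1, ..., n-1} can be enumerated iteratively by reading the bits of every integer from 0 to 2**n - 1; a short sketch:

def subsets(n):
    # Bit i of mask decides whether element i is included in this subset.
    for mask in range(1 << n):
        yield [i for i in range(n) if mask & (1 << i)]

# list(subsets(2)) -> [[], [0], [1], [0, 1]]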
# -*- coding: utf-8 -*- from setuptools import setup, find_packages with open("README.md") as f: readme = f.read() with open("LICENSE") as f: license_text = f.read() setup( name="mvae", version="0.1.1", description="Multidimensional Variational Autoencoder", long_description=readme, author="<NAME>", author_email="<EMAIL>", url="https://github.com/NikolasMarkou/multiscale_variational_autoencoder", license=license_text, packages=find_packages(exclude=("tests", "docs")) )
[ "setuptools.find_packages" ]
[((479, 519), 'setuptools.find_packages', 'find_packages', ([], {'exclude': "('tests', 'docs')"}), "(exclude=('tests', 'docs'))\n", (492, 519), False, 'from setuptools import setup, find_packages\n')]
# Day 20: map, filter, and Conditional Comprehensions # Exercises # Use map to call the strip method on each string in the following list: # Print the lines of the nursery rhyme on different lines in the console. # Remember that you can use the operator module and the methodcaller function instead of a lambda expression if you want to. humpty_dumpty = [ " <NAME> sat on a wall, ", "<NAME> had a great fall; ", " All the king's horses and all the king's men ", " Couldn't put Humpty together again." ] from operator import methodcaller humpty_dumpty_strip = map(methodcaller("strip"), humpty_dumpty) print(*humpty_dumpty_strip, sep="\n") #Below you'll find a tuple containing several names: names = ("bob", "Christopher", "Rachel", "MICHAEL", "jessika", "francine") # Use a list comprehension with a filtering condition so that only names with fewer than 8 characters end up in the new list. Make sure that every name in the new list is in title case. names = [name.title() for name in names if len(name) < 8] print(names) # Use filter to remove all negative numbers from the following range: range(-5, 11). Print the remaining numbers to the console. def positivenum(number): return number >= 0 print(*filter(positivenum, range(-5, 11)))
[ "operator.methodcaller" ]
[((584, 605), 'operator.methodcaller', 'methodcaller', (['"""strip"""'], {}), "('strip')\n", (596, 605), False, 'from operator import methodcaller\n')]
import numpy as np from opensfm import types from opensfm import csfm def get_shot_origin(shot): """Compute the origin of a shot.""" pose = types.Pose([shot.rx, shot.ry, shot.rz], [shot.tx, shot.ty, shot.tz]) return pose.get_origin() def get_reconstruction_origin(r): """Compute the origin of a reconstruction.""" s = r.scale pose = types.Pose([r.rx, r.ry, r.rz], [r.tx / s, r.ty / s, r.tz / s]) return pose.get_origin() def test_single_shot(): """Single shot test.""" ra = csfm.ReconstructionAlignment() ra.add_shot('1', 0.5, 0, 0, 0, 0, 0, False) ra.add_absolute_position_constraint('1', 1, 0, 0, 1) ra.run() s1 = ra.get_shot('1') assert np.allclose(get_shot_origin(s1), [1, 0, 0], atol=1e-6) def test_singleton_reconstruction(): """Single shot in a single reconstruction.""" ra = csfm.ReconstructionAlignment() ra.add_shot('1', 0, 0, 0, 0, 0, 0, False) ra.add_reconstruction('a', 0, 0, 0, 0, 0, 0, 4, False) ra.add_relative_motion_constraint( csfm.RARelativeMotionConstraint('a', '1', 0, 0, 0, -1, 0, 0)) ra.add_absolute_position_constraint('1', 1, 0, 0, 1) ra.run() s1 = ra.get_shot('1') assert np.allclose(get_shot_origin(s1), [1, 0, 0], atol=1e-6) def test_pair(): """Simple single reconstruction two shots test.""" ra = csfm.ReconstructionAlignment() ra.add_shot('1', 0, 0, 0, 0, 0, 0, False) ra.add_shot('2', 0, 0, 0, 0, 0, 0, False) ra.add_reconstruction('a', 0, 0, 0, 0, 0, 0, 4, False) ra.add_relative_motion_constraint( csfm.RARelativeMotionConstraint('a', '1', 0, 0, 0, 0, 0, 0)) ra.add_relative_motion_constraint( csfm.RARelativeMotionConstraint('a', '2', 0, 0, 0, -1, 0, 0)) ra.add_absolute_position_constraint('1', 1, 0, 0, 1) ra.add_absolute_position_constraint('2', 3, 0, 0, 1) ra.run() s1 = ra.get_shot('1') s2 = ra.get_shot('2') rec_a = ra.get_reconstruction('a') assert np.allclose(get_shot_origin(s1), [1, 0, 0], atol=1e-6) assert np.allclose(get_shot_origin(s2), [3, 0, 0], atol=1e-6) assert np.allclose(get_reconstruction_origin(rec_a), [1, 0, 0], atol=1e-6) assert np.allclose(rec_a.scale, 0.5) def test_two_shots_one_fixed(): """Two shot, one reconstruction. 
One shot is fixed""" ra = csfm.ReconstructionAlignment() ra.add_shot('1', 0, 0, 0, -1, 0, 0, True) ra.add_shot('2', 0, 0, 0, 0, 0, 0, False) ra.add_reconstruction('a', 0, 0, 0, 0, 0, 0, 1, False) ra.add_relative_motion_constraint( csfm.RARelativeMotionConstraint('a', '1', 0, 0, 0, 0, 0, 0)) ra.add_relative_motion_constraint( csfm.RARelativeMotionConstraint('a', '2', 0, 0, 0, -1, 0, 0)) # Next line should be ignored because shot 1 is fixed ra.add_absolute_position_constraint('1', 100, 0, 0, 1) ra.add_absolute_position_constraint('2', 3, 0, 0, 1) ra.run() s1 = ra.get_shot('1') s2 = ra.get_shot('2') rec_a = ra.get_reconstruction('a') assert np.allclose(get_shot_origin(s1), [1, 0, 0], atol=1e-6) assert np.allclose(get_shot_origin(s2), [3, 0, 0], atol=1e-6) assert np.allclose(get_reconstruction_origin(rec_a), [1, 0, 0], atol=1e-6) assert np.allclose(rec_a.scale, 0.5) def test_two_reconstructions(): """Two reconstructions""" ra = csfm.ReconstructionAlignment() ra.add_shot('1', 0, 0, 0, 0, 0, 0, False) ra.add_shot('2', 0, 0, 0, 0, 0, 0, False) ra.add_shot('3', 0, 0, 0, 0, 0, 0, False) ra.add_shot('4', 0, 0, 0, 0, 0, 0, False) ra.add_reconstruction('a', 0, 0, 0, 0, 0, 0, 1, False) ra.add_relative_motion_constraint( csfm.RARelativeMotionConstraint('a', '1', 0, 0, 0, 0, 0, 0)) ra.add_relative_motion_constraint( csfm.RARelativeMotionConstraint('a', '2', 0, 0, 0, -1, 0, 0)) ra.add_relative_motion_constraint( csfm.RARelativeMotionConstraint('a', '3', 0, 0, 0, -2, 0, 0)) ra.add_reconstruction('b', 0, 0, 0, 0, 0, 0, 1, False) ra.add_relative_motion_constraint( csfm.RARelativeMotionConstraint('b', '2', 0, 0, 0, 0, 0, 0)) ra.add_relative_motion_constraint( csfm.RARelativeMotionConstraint('b', '3', 0, 0, 0, -1, 0, 0)) ra.add_relative_motion_constraint( csfm.RARelativeMotionConstraint('b', '4', 0, 0, 0, -2, 0, 0)) ra.add_absolute_position_constraint('1', 1, 0, 0, 1) ra.add_absolute_position_constraint('2', 2, 0, 0, 1) ra.run() s1 = ra.get_shot('1') s2 = ra.get_shot('2') s3 = ra.get_shot('3') s4 = ra.get_shot('4') rec_a = ra.get_reconstruction('a') rec_b = ra.get_reconstruction('b') assert np.allclose(get_shot_origin(s1), [1, 0, 0], atol=1e-6) assert np.allclose(get_shot_origin(s2), [2, 0, 0], atol=1e-6) assert np.allclose(get_shot_origin(s3), [3, 0, 0], atol=1e-6) assert np.allclose(get_shot_origin(s4), [4, 0, 0], atol=1e-6) assert np.allclose(get_reconstruction_origin(rec_a), [1, 0, 0], atol=1e-6) assert np.allclose(get_reconstruction_origin(rec_b), [2, 0, 0], atol=1e-6) assert np.allclose(rec_a.scale, 1) assert np.allclose(rec_b.scale, 1) def test_common_points(): """Two reconstructions, two common points""" ra = csfm.ReconstructionAlignment() ra.add_reconstruction('a', 0, 0, 0, 0, 0, 0, 1, True) ra.add_reconstruction('b', 0, 0, 0, 0, 0, 0, 1, False) ra.add_common_point_constraint('a', 0, 0, 0, 'b', -1, 0, 0, 1.0) ra.add_common_point_constraint('a', 1, 0, 0, 'b', 0, 0, 0, 1.0) ra.run() rec_b = ra.get_reconstruction('b') o_b = get_reconstruction_origin(rec_b) assert np.allclose(o_b, [1, 0, 0], atol=1e-6)
[ "opensfm.csfm.ReconstructionAlignment", "numpy.allclose", "opensfm.types.Pose", "opensfm.csfm.RARelativeMotionConstraint" ]
[((151, 219), 'opensfm.types.Pose', 'types.Pose', (['[shot.rx, shot.ry, shot.rz]', '[shot.tx, shot.ty, shot.tz]'], {}), '([shot.rx, shot.ry, shot.rz], [shot.tx, shot.ty, shot.tz])\n', (161, 219), False, 'from opensfm import types\n'), ((362, 424), 'opensfm.types.Pose', 'types.Pose', (['[r.rx, r.ry, r.rz]', '[r.tx / s, r.ty / s, r.tz / s]'], {}), '([r.rx, r.ry, r.rz], [r.tx / s, r.ty / s, r.tz / s])\n', (372, 424), False, 'from opensfm import types\n'), ((517, 547), 'opensfm.csfm.ReconstructionAlignment', 'csfm.ReconstructionAlignment', ([], {}), '()\n', (545, 547), False, 'from opensfm import csfm\n'), ((857, 887), 'opensfm.csfm.ReconstructionAlignment', 'csfm.ReconstructionAlignment', ([], {}), '()\n', (885, 887), False, 'from opensfm import csfm\n'), ((1349, 1379), 'opensfm.csfm.ReconstructionAlignment', 'csfm.ReconstructionAlignment', ([], {}), '()\n', (1377, 1379), False, 'from opensfm import csfm\n'), ((2190, 2219), 'numpy.allclose', 'np.allclose', (['rec_a.scale', '(0.5)'], {}), '(rec_a.scale, 0.5)\n', (2201, 2219), True, 'import numpy as np\n'), ((2321, 2351), 'opensfm.csfm.ReconstructionAlignment', 'csfm.ReconstructionAlignment', ([], {}), '()\n', (2349, 2351), False, 'from opensfm import csfm\n'), ((3222, 3251), 'numpy.allclose', 'np.allclose', (['rec_a.scale', '(0.5)'], {}), '(rec_a.scale, 0.5)\n', (3233, 3251), True, 'import numpy as np\n'), ((3325, 3355), 'opensfm.csfm.ReconstructionAlignment', 'csfm.ReconstructionAlignment', ([], {}), '()\n', (3353, 3355), False, 'from opensfm import csfm\n'), ((5055, 5082), 'numpy.allclose', 'np.allclose', (['rec_a.scale', '(1)'], {}), '(rec_a.scale, 1)\n', (5066, 5082), True, 'import numpy as np\n'), ((5094, 5121), 'numpy.allclose', 'np.allclose', (['rec_b.scale', '(1)'], {}), '(rec_b.scale, 1)\n', (5105, 5121), True, 'import numpy as np\n'), ((5208, 5238), 'opensfm.csfm.ReconstructionAlignment', 'csfm.ReconstructionAlignment', ([], {}), '()\n', (5236, 5238), False, 'from opensfm import csfm\n'), ((5601, 5640), 'numpy.allclose', 'np.allclose', (['o_b', '[1, 0, 0]'], {'atol': '(1e-06)'}), '(o_b, [1, 0, 0], atol=1e-06)\n', (5612, 5640), True, 'import numpy as np\n'), ((1040, 1100), 'opensfm.csfm.RARelativeMotionConstraint', 'csfm.RARelativeMotionConstraint', (['"""a"""', '"""1"""', '(0)', '(0)', '(0)', '(-1)', '(0)', '(0)'], {}), "('a', '1', 0, 0, 0, -1, 0, 0)\n", (1071, 1100), False, 'from opensfm import csfm\n'), ((1578, 1637), 'opensfm.csfm.RARelativeMotionConstraint', 'csfm.RARelativeMotionConstraint', (['"""a"""', '"""1"""', '(0)', '(0)', '(0)', '(0)', '(0)', '(0)'], {}), "('a', '1', 0, 0, 0, 0, 0, 0)\n", (1609, 1637), False, 'from opensfm import csfm\n'), ((1686, 1746), 'opensfm.csfm.RARelativeMotionConstraint', 'csfm.RARelativeMotionConstraint', (['"""a"""', '"""2"""', '(0)', '(0)', '(0)', '(-1)', '(0)', '(0)'], {}), "('a', '2', 0, 0, 0, -1, 0, 0)\n", (1717, 1746), False, 'from opensfm import csfm\n'), ((2550, 2609), 'opensfm.csfm.RARelativeMotionConstraint', 'csfm.RARelativeMotionConstraint', (['"""a"""', '"""1"""', '(0)', '(0)', '(0)', '(0)', '(0)', '(0)'], {}), "('a', '1', 0, 0, 0, 0, 0, 0)\n", (2581, 2609), False, 'from opensfm import csfm\n'), ((2658, 2718), 'opensfm.csfm.RARelativeMotionConstraint', 'csfm.RARelativeMotionConstraint', (['"""a"""', '"""2"""', '(0)', '(0)', '(0)', '(-1)', '(0)', '(0)'], {}), "('a', '2', 0, 0, 0, -1, 0, 0)\n", (2689, 2718), False, 'from opensfm import csfm\n'), ((3646, 3705), 'opensfm.csfm.RARelativeMotionConstraint', 'csfm.RARelativeMotionConstraint', (['"""a"""', '"""1"""', '(0)', '(0)', '(0)', 
'(0)', '(0)', '(0)'], {}), "('a', '1', 0, 0, 0, 0, 0, 0)\n", (3677, 3705), False, 'from opensfm import csfm\n'), ((3754, 3814), 'opensfm.csfm.RARelativeMotionConstraint', 'csfm.RARelativeMotionConstraint', (['"""a"""', '"""2"""', '(0)', '(0)', '(0)', '(-1)', '(0)', '(0)'], {}), "('a', '2', 0, 0, 0, -1, 0, 0)\n", (3785, 3814), False, 'from opensfm import csfm\n'), ((3863, 3923), 'opensfm.csfm.RARelativeMotionConstraint', 'csfm.RARelativeMotionConstraint', (['"""a"""', '"""3"""', '(0)', '(0)', '(0)', '(-2)', '(0)', '(0)'], {}), "('a', '3', 0, 0, 0, -2, 0, 0)\n", (3894, 3923), False, 'from opensfm import csfm\n'), ((4031, 4090), 'opensfm.csfm.RARelativeMotionConstraint', 'csfm.RARelativeMotionConstraint', (['"""b"""', '"""2"""', '(0)', '(0)', '(0)', '(0)', '(0)', '(0)'], {}), "('b', '2', 0, 0, 0, 0, 0, 0)\n", (4062, 4090), False, 'from opensfm import csfm\n'), ((4139, 4199), 'opensfm.csfm.RARelativeMotionConstraint', 'csfm.RARelativeMotionConstraint', (['"""b"""', '"""3"""', '(0)', '(0)', '(0)', '(-1)', '(0)', '(0)'], {}), "('b', '3', 0, 0, 0, -1, 0, 0)\n", (4170, 4199), False, 'from opensfm import csfm\n'), ((4248, 4308), 'opensfm.csfm.RARelativeMotionConstraint', 'csfm.RARelativeMotionConstraint', (['"""b"""', '"""4"""', '(0)', '(0)', '(0)', '(-2)', '(0)', '(0)'], {}), "('b', '4', 0, 0, 0, -2, 0, 0)\n", (4279, 4308), False, 'from opensfm import csfm\n')]
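get_shot_origin above delegates to types.Pose.get_origin(); for a pose written as x_cam = R @ x_world + t, the camera center in world coordinates is -R.T @ t. A small numpy sketch of that relation (this is the standard pinhole identity, not the opensfm implementation, which stores rotation as an axis-angle vector):

import numpy as np

def camera_origin(R, t):
    # Solve 0 = R @ C + t for the camera center C.
    return -R.T @ np.asarray(t, dtype=float)

# Identity rotation with t = [-1, 0, 0] places the camera at [1, 0, 0],
# matching the fixed shot used in test_two_shots_one_fixed above.
print(camera_origin(np.eye(3), [-1.0, 0.0, 0.0]))  # [1. 0. 0.]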
#!/usr/bin/env python3 # # Copyright (C) 2005-2019 Centre National d'Etudes Spatiales (CNES) # # This file is part of Orfeo Toolbox # # https://www.orfeo-toolbox.org/ # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import argparse import os import os.path from os.path import join from collections import defaultdict import re import glob from rst_utils import rst_section, RstPageHeading, examples_usage_regex def generate_examples_index(rst_dir, list_of_examples): # Compute dictionary of tag -> (list of examples) tag_files = defaultdict(list) for filename in list_of_examples: tag = filename.split("/")[1] name, _ = os.path.splitext(filename.split("/")[2]) tag_files[tag].append(join(tag, name + ".rst")) # Render index file and tag index files os.makedirs(join(rst_dir, "Examples"), exist_ok=True) index_f = open(join(rst_dir, "Examples.rst"), "w") index_f.write(RstPageHeading("C++ Examples", 3, ref="cpp-examples")) for tag, examples_filenames in tag_files.items(): tag_filename = join("Examples", tag + ".rst") index_f.write("\t" + tag_filename + "\n") with open(join(rst_dir, tag_filename), "w") as tag_f: tag_f.write(RstPageHeading(tag, 3)) for examples_filename in examples_filenames: tag_f.write("\t" + examples_filename + "\n") def indent(str): return "\n".join([" " + line for line in str.split("\n")]) def cpp_uncomment(code): # Strip '// ' return "\n".join([line[4:] for line in code.split("\n")]) def render_example(filename, otb_root): "Render a cxx example to rst" # Read the source code of the cxx example code = open(join(otb_root, filename)).read() # Don't show the license header to make it nicer, # and the cookbook is already under a CC license examples_license_header = open("templates/examples_license_header.txt").read() code = code.replace(examples_license_header, "") # Extract usages example_usage = "" usage_matches = list(re.finditer(examples_usage_regex, code, flags = re.MULTILINE | re.DOTALL)) examples_usage_template = open("templates/example_usage.rst").read() for match in usage_matches: example_usage += examples_usage_template.format(indent(match.group(1).strip())) # Don't show usage in example source code = re.sub(examples_usage_regex, "", code, flags = re.MULTILINE | re.DOTALL) # Make the link to the source code link_name = os.path.basename(filename) link_href = "https://gitlab.orfeo-toolbox.org/orfeotoolbox/otb/raw/develop/" + filename + "?inline=false" # Read the description from the example .rst file if it exists example_rst_file = join(otb_root, filename.replace(".cxx", ".rst")) if os.path.isfile(example_rst_file): rst_description = open(example_rst_file).read() else: rst_description = "" # Render the template name = os.path.basename(filename) template_example = open("templates/example.rst").read() output_rst = template_example.format( label=name, heading=rst_section(name, "="), description=rst_description, usage=example_usage, code=indent(code.strip()), link_name=link_name, link_href=link_href ) return output_rst def main(): parser = argparse.ArgumentParser(usage="Export examples to rst") 
parser.add_argument("rst_dir", help="Directory where rst files are generated") parser.add_argument("otb_root", help="OTB repository root") args = parser.parse_args() # Get list of cxx examples as relative paths from otb_root list_of_examples = [os.path.relpath(p, start=args.otb_root) for p in sorted(glob.glob(join(args.otb_root, "Examples/*/*.cxx")))] print("Generating rst for {} examples".format(len(list_of_examples))) # Generate example index and tag indexes generate_examples_index(join(args.rst_dir, "C++"), list_of_examples) # Generate examples rst for filename in list_of_examples: name = os.path.basename(filename) tag = filename.split("/")[1] root, ext = os.path.splitext(name) os.makedirs(join(args.rst_dir, "C++", "Examples", tag), exist_ok=True) with open(join(args.rst_dir, "C++", "Examples", tag, root + ".rst"), "w") as output_file: output_file.write(render_example(filename, args.otb_root)) if __name__ == "__main__": main()
[ "argparse.ArgumentParser", "rst_utils.rst_section", "os.path.join", "os.path.splitext", "os.path.isfile", "collections.defaultdict", "os.path.basename", "re.finditer", "re.sub", "rst_utils.RstPageHeading", "os.path.relpath" ]
[((1045, 1062), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (1056, 1062), False, 'from collections import defaultdict\n'), ((2872, 2942), 're.sub', 're.sub', (['examples_usage_regex', '""""""', 'code'], {'flags': '(re.MULTILINE | re.DOTALL)'}), "(examples_usage_regex, '', code, flags=re.MULTILINE | re.DOTALL)\n", (2878, 2942), False, 'import re\n'), ((3001, 3027), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (3017, 3027), False, 'import os\n'), ((3285, 3317), 'os.path.isfile', 'os.path.isfile', (['example_rst_file'], {}), '(example_rst_file)\n', (3299, 3317), False, 'import os\n'), ((3452, 3478), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (3468, 3478), False, 'import os\n'), ((3854, 3909), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'usage': '"""Export examples to rst"""'}), "(usage='Export examples to rst')\n", (3877, 3909), False, 'import argparse\n'), ((1315, 1340), 'os.path.join', 'join', (['rst_dir', '"""Examples"""'], {}), "(rst_dir, 'Examples')\n", (1319, 1340), False, 'from os.path import join\n'), ((1376, 1405), 'os.path.join', 'join', (['rst_dir', '"""Examples.rst"""'], {}), "(rst_dir, 'Examples.rst')\n", (1380, 1405), False, 'from os.path import join\n'), ((1430, 1483), 'rst_utils.RstPageHeading', 'RstPageHeading', (['"""C++ Examples"""', '(3)'], {'ref': '"""cpp-examples"""'}), "('C++ Examples', 3, ref='cpp-examples')\n", (1444, 1483), False, 'from rst_utils import rst_section, RstPageHeading, examples_usage_regex\n'), ((1563, 1593), 'os.path.join', 'join', (['"""Examples"""', "(tag + '.rst')"], {}), "('Examples', tag + '.rst')\n", (1567, 1593), False, 'from os.path import join\n'), ((2550, 2621), 're.finditer', 're.finditer', (['examples_usage_regex', 'code'], {'flags': '(re.MULTILINE | re.DOTALL)'}), '(examples_usage_regex, code, flags=re.MULTILINE | re.DOTALL)\n', (2561, 2621), False, 'import re\n'), ((4176, 4215), 'os.path.relpath', 'os.path.relpath', (['p'], {'start': 'args.otb_root'}), '(p, start=args.otb_root)\n', (4191, 4215), False, 'import os\n'), ((4433, 4458), 'os.path.join', 'join', (['args.rst_dir', '"""C++"""'], {}), "(args.rst_dir, 'C++')\n", (4437, 4458), False, 'from os.path import join\n'), ((4560, 4586), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (4576, 4586), False, 'import os\n'), ((4644, 4666), 'os.path.splitext', 'os.path.splitext', (['name'], {}), '(name)\n', (4660, 4666), False, 'import os\n'), ((1228, 1252), 'os.path.join', 'join', (['tag', "(name + '.rst')"], {}), "(tag, name + '.rst')\n", (1232, 1252), False, 'from os.path import join\n'), ((3617, 3639), 'rst_utils.rst_section', 'rst_section', (['name', '"""="""'], {}), "(name, '=')\n", (3628, 3639), False, 'from rst_utils import rst_section, RstPageHeading, examples_usage_regex\n'), ((4688, 4730), 'os.path.join', 'join', (['args.rst_dir', '"""C++"""', '"""Examples"""', 'tag'], {}), "(args.rst_dir, 'C++', 'Examples', tag)\n", (4692, 4730), False, 'from os.path import join\n'), ((1663, 1690), 'os.path.join', 'join', (['rst_dir', 'tag_filename'], {}), '(rst_dir, tag_filename)\n', (1667, 1690), False, 'from os.path import join\n'), ((1731, 1753), 'rst_utils.RstPageHeading', 'RstPageHeading', (['tag', '(3)'], {}), '(tag, 3)\n', (1745, 1753), False, 'from rst_utils import rst_section, RstPageHeading, examples_usage_regex\n'), ((2203, 2227), 'os.path.join', 'join', (['otb_root', 'filename'], {}), '(otb_root, filename)\n', (2207, 2227), False, 'from os.path import 
join\n'), ((4765, 4822), 'os.path.join', 'join', (['args.rst_dir', '"""C++"""', '"""Examples"""', 'tag', "(root + '.rst')"], {}), "(args.rst_dir, 'C++', 'Examples', tag, root + '.rst')\n", (4769, 4822), False, 'from os.path import join\n'), ((4242, 4281), 'os.path.join', 'join', (['args.otb_root', '"""Examples/*/*.cxx"""'], {}), "(args.otb_root, 'Examples/*/*.cxx')\n", (4246, 4281), False, 'from os.path import join\n')]
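render_example above collects the usage blocks with re.finditer and then strips them with re.sub, both under re.MULTILINE | re.DOTALL. The real examples_usage_regex lives in rst_utils and is not shown in this file, so the marker pattern below is only an illustrative stand-in for how that kind of block extraction works:

import re

# Hypothetical delimiter pair; the actual examples_usage_regex may differ.
usage_regex = r"/\* Usage:(.*?)\*/"

source = """/* Usage:
./Example input.tif output.tif
*/
int main() { return 0; }
"""

# Pull out every usage block for the rst "usage" section...
blocks = [m.group(1).strip() for m in
          re.finditer(usage_regex, source, flags=re.MULTILINE | re.DOTALL)]
# ...and remove them from the code that gets rendered verbatim.
code_only = re.sub(usage_regex, "", source, flags=re.MULTILINE | re.DOTALL)

print(blocks)             # ['./Example input.tif output.tif']
print(code_only.strip())  # int main() { return 0; }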
"""""" # Standard library modules. import os import pkgutil # Third party modules. from qtpy import QtGui # Local modules. # Globals and constants variables. def load_pixmap(filename): package = "pymontecarlo_gui" resource = os.path.join("icons", filename) data = pkgutil.get_data(package, resource) pixmap = QtGui.QPixmap() pixmap.loadFromData(data) return pixmap def load_icon(filename): pixmap = load_pixmap(filename) return QtGui.QIcon(pixmap)
[ "qtpy.QtGui.QPixmap", "qtpy.QtGui.QIcon", "os.path.join", "pkgutil.get_data" ]
[((239, 270), 'os.path.join', 'os.path.join', (['"""icons"""', 'filename'], {}), "('icons', filename)\n", (251, 270), False, 'import os\n'), ((282, 317), 'pkgutil.get_data', 'pkgutil.get_data', (['package', 'resource'], {}), '(package, resource)\n', (298, 317), False, 'import pkgutil\n'), ((332, 347), 'qtpy.QtGui.QPixmap', 'QtGui.QPixmap', ([], {}), '()\n', (345, 347), False, 'from qtpy import QtGui\n'), ((470, 489), 'qtpy.QtGui.QIcon', 'QtGui.QIcon', (['pixmap'], {}), '(pixmap)\n', (481, 489), False, 'from qtpy import QtGui\n')]
import numpy as np

RESIZED_SIZE = 256
IMAGE_SIZE = 227
INVERTED_H_DIM = 4096
N_CLASSES = 20
PIXEL_MEANS = np.array([[[102.9801, 115.9465, 122.7717]]])
WEIGHT_DECAY = 0.00005
[ "numpy.array" ]
[((107, 151), 'numpy.array', 'np.array', (['[[[102.9801, 115.9465, 122.7717]]]'], {}), '([[[102.9801, 115.9465, 122.7717]]])\n', (115, 151), True, 'import numpy as np\n')]
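The constants above store one mean per colour channel in a (1, 1, 3) array, which is the shape needed for broadcasting against an H x W x 3 image. The sketch below is an illustrative usage rather than part of the dataset entry: the fake 2x2 image and the helper name are invented for the demonstration.

import numpy as np

PIXEL_MEANS = np.array([[[102.9801, 115.9465, 122.7717]]])  # shape (1, 1, 3), per-channel means

def subtract_means(image):
    # Broadcasting pairs the last axis of the image with the per-channel means.
    return image.astype(np.float32) - PIXEL_MEANS

fake_image = np.full((2, 2, 3), 128.0, dtype=np.float32)  # uniform grey test image
centred = subtract_means(fake_image)
print(centred[0, 0])  # roughly [25.02 12.05  5.23]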
import os from PIL import Image import numpy as np import torch import csv import random from explore_version_03.data.base_dataset import BaseDataset # add data augumentation #from torch.utils.data import DataLoader model_list = ['densenet161','inception_v3', 'resnet152','resnext101_32x8d','vgg19_bn'] run_types = ['train','valid','test'] def parse_feature_data_dict(f_dir, cv, runtype): labels = [] all_predict_v = [] fff = True for model in model_list: predict_v = [] sub_dir = f_dir%(model, cv) filename = 'result_detail_%s_%s_%s.csv'%(model, runtype, cv) print(filename) filepath = os.path.join(sub_dir, filename) with open(filepath,'r') as f: csv_reader = csv.reader(f) for row in csv_reader: xxx = np.array([float(x) for x in row[4:]]) predict_v.append(xxx) if fff: cur_labels = np.array([float(row[0]),float(row[1]),float(row[2]),float(row[3])]) labels.append(np.argmax(cur_labels)) all_predict_v.append(np.array(predict_v)) fff = False all_matrix = [] features = np.concatenate(all_predict_v, 1) labels = np.array(labels) print (np.shape(features), np.shape(labels)) return features.astype(float), labels #cvs = ['cv1','cv2','cv3','cv4','cv5'] def parse_data_dict(f_dir, cv, runtype): labels = [] all_predict_v = [] fff = True # model = 'densenet161' # for cv in cvs: for model in model_list: predict_v = [] sub_dir = f_dir%(model, cv) filename = 'result_detail_%s_%s_%s.csv'%(model, runtype, cv) print(filename) filepath = os.path.join(sub_dir, filename) with open(filepath,'r') as f: csv_reader = csv.reader(f) for row in csv_reader: # xxx = np.array([float(row[0]),float(row[1]),float(row[2]),float(row[3])]) xxx = np.exp(np.array([float(row[0]),float(row[1]),float(row[2]),float(row[3])])) xxx = xxx/np.sum(xxx) # print (xxx) predict_v.append(xxx) if fff: cur_labels = np.array([float(row[4]),float(row[5]),float(row[6]),float(row[7])]) labels.append(np.argmax(cur_labels)) all_predict_v.append(np.array(predict_v)) fff = False # all_matrix = [] # for i in range(4): # temp_matrix = [] # for j in range(5): # temp_matrix.append(all_predict_v[j][:, i]) # temp_matrix = np.array(temp_matrix).transpose(1, 0) # print (np.shape(temp_matrix)) # all_matrix.append(temp_matrix) features_l = np.concatenate(all_predict_v, 1) # all_predict_v = [] # for model in model_list: # predict_v = [] # sub_dir = './explore_version_03/results/%s_20200407_multiclass_%s'%(model, cv) # filename = 'result_detail_%s_%s_%s.csv'%(model, runtype, cv) # print(filename) # filepath = os.path.join(sub_dir, filename) # with open(filepath,'r') as f: # csv_reader = csv.reader(f) # for row in csv_reader: # xxx = np.array([float(x) for x in row[4:]]) # predict_v.append(xxx) # all_predict_v.append(np.array(predict_v)) # features_f = np.concatenate(all_predict_v, 1) # features = np.concatenate([features_l, features_f], 1) # print (np.shape(features)) labels = np.array(labels) # print (np.shape(features), np.shape(labels)) return features_l.astype(float), labels def parse_data_dict_sampling(f_dir, cv, runtype): labels = [] all_predict_v = [] fff = True for model in model_list: predict_v = [] sub_dir = f_dir%(model, cv) filename = 'result_detail_%s_%s_%s.csv'%(model, runtype, cv) print(filename) filepath = os.path.join(sub_dir, filename) with open(filepath,'r') as f: csv_reader = csv.reader(f) for row in csv_reader: xxx = np.exp(np.array([float(row[0]),float(row[1]),float(row[2]),float(row[3])])) xxx = xxx/np.sum(xxx) predict_v.append(xxx) if fff: cur_labels = np.array([float(row[4]),float(row[5]),float(row[6]),float(row[7])]) 
labels.append(np.argmax(cur_labels)) all_predict_v.append(np.array(predict_v)) fff = False features_l = np.concatenate(all_predict_v, 1) labels = np.array(labels) new_data_normal = [] new_data_covid = [] for i in range(len(labels)): if labels[i]==0: new_data_normal.append((features_l[i], labels[i])) else: new_data_covid.append((features_l[i], labels[i])) new_data = new_data_covid * 10 + new_data_normal random.shuffle(new_data) features = [] labels = [] for i in range(len(new_data)): features.append(new_data[i][0]) labels.append(new_data[i][1]) features = np.array(features) labels = np.array(labels) return features.astype(float), labels class EnsembleDatasetSampling(BaseDataset): """A dataset class for paired image dataset. It assumes that the directory '/path/to/data/train' contains image pairs in the form of {A,B}. During test time, you need to prepare a directory '/path/to/data/test'. """ def __init__(self, opt, run_type = 'train'): """Initialize this dataset class. Parameters: opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions """ BaseDataset.__init__(self, opt) features, label_list = parse_data_dict_sampling(opt.data_dir, opt.cv, run_type) # get image paths self.features = torch.from_numpy(features) self.label_list = torch.from_numpy(label_list) def __getitem__(self, index): """Return a data point and its metadata information. Parameters: index - - a random integer for data indexing Returns a dictionary that contains A, B, A_paths and B_paths A (tensor) - - an image in the input domain B (tensor) - - its corresponding image in the target domain A_paths (str) - - image paths B_paths (str) - - image paths (same as A_paths) """ # read a image given a random integer index feature = self.features[index] label = self.label_list[index] return {'A': feature, 'B': label} def __len__(self): """Return the total number of images in the dataset.""" return len(self.features) def get_label_distri(self): counts = np.array([0.,0.,0.,0.]) for item in self.label_list: counts[item] += 1. counts = 1000./counts return torch.from_numpy(np.array([counts])) class EnsembleDataset(BaseDataset): """A dataset class for paired image dataset. It assumes that the directory '/path/to/data/train' contains image pairs in the form of {A,B}. During test time, you need to prepare a directory '/path/to/data/test'. """ def __init__(self, opt, run_type = 'train'): """Initialize this dataset class. Parameters: opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions """ BaseDataset.__init__(self, opt) features, label_list = parse_data_dict(opt.data_dir, opt.cv, run_type) # get image paths self.features = torch.from_numpy(features) self.label_list = torch.from_numpy(label_list) def __getitem__(self, index): """Return a data point and its metadata information. Parameters: index - - a random integer for data indexing Returns a dictionary that contains A, B, A_paths and B_paths A (tensor) - - an image in the input domain B (tensor) - - its corresponding image in the target domain A_paths (str) - - image paths B_paths (str) - - image paths (same as A_paths) """ # read a image given a random integer index feature = self.features[index] label = self.label_list[index] return {'A': feature, 'B': label} def __len__(self): """Return the total number of images in the dataset.""" return len(self.features) def get_label_distri(self): counts = np.array([0.,0.,0.,0.]) for item in self.label_list: counts[item] += 1. 
counts = 1000./counts return torch.from_numpy(np.array([counts])) if __name__ == "__main__": cdir = './explore_version_03/results/%s_20200407_multiclass_%s' myset = EnsembleDataset(cdir) myloader = DataLoader(dataset=myset, batch_size=2, shuffle=False) for data in myloader: print(data)
[ "random.shuffle", "os.path.join", "numpy.argmax", "torch.from_numpy", "numpy.array", "numpy.sum", "numpy.concatenate", "explore_version_03.data.base_dataset.BaseDataset.__init__", "numpy.shape", "csv.reader" ]
[((1077, 1109), 'numpy.concatenate', 'np.concatenate', (['all_predict_v', '(1)'], {}), '(all_predict_v, 1)\n', (1091, 1109), True, 'import numpy as np\n'), ((1121, 1137), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (1129, 1137), True, 'import numpy as np\n'), ((2454, 2486), 'numpy.concatenate', 'np.concatenate', (['all_predict_v', '(1)'], {}), '(all_predict_v, 1)\n', (2468, 2486), True, 'import numpy as np\n'), ((3157, 3173), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (3165, 3173), True, 'import numpy as np\n'), ((4050, 4082), 'numpy.concatenate', 'np.concatenate', (['all_predict_v', '(1)'], {}), '(all_predict_v, 1)\n', (4064, 4082), True, 'import numpy as np\n'), ((4094, 4110), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (4102, 4110), True, 'import numpy as np\n'), ((4384, 4408), 'random.shuffle', 'random.shuffle', (['new_data'], {}), '(new_data)\n', (4398, 4408), False, 'import random\n'), ((4555, 4573), 'numpy.array', 'np.array', (['features'], {}), '(features)\n', (4563, 4573), True, 'import numpy as np\n'), ((4585, 4601), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (4593, 4601), True, 'import numpy as np\n'), ((620, 651), 'os.path.join', 'os.path.join', (['sub_dir', 'filename'], {}), '(sub_dir, filename)\n', (632, 651), False, 'import os\n'), ((1147, 1165), 'numpy.shape', 'np.shape', (['features'], {}), '(features)\n', (1155, 1165), True, 'import numpy as np\n'), ((1167, 1183), 'numpy.shape', 'np.shape', (['labels'], {}), '(labels)\n', (1175, 1183), True, 'import numpy as np\n'), ((1576, 1607), 'os.path.join', 'os.path.join', (['sub_dir', 'filename'], {}), '(sub_dir, filename)\n', (1588, 1607), False, 'import os\n'), ((3541, 3572), 'os.path.join', 'os.path.join', (['sub_dir', 'filename'], {}), '(sub_dir, filename)\n', (3553, 3572), False, 'import os\n'), ((5158, 5189), 'explore_version_03.data.base_dataset.BaseDataset.__init__', 'BaseDataset.__init__', (['self', 'opt'], {}), '(self, opt)\n', (5178, 5189), False, 'from explore_version_03.data.base_dataset import BaseDataset\n'), ((5321, 5347), 'torch.from_numpy', 'torch.from_numpy', (['features'], {}), '(features)\n', (5337, 5347), False, 'import torch\n'), ((5374, 5402), 'torch.from_numpy', 'torch.from_numpy', (['label_list'], {}), '(label_list)\n', (5390, 5402), False, 'import torch\n'), ((6256, 6286), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0, 0.0])\n', (6264, 6286), True, 'import numpy as np\n'), ((6937, 6968), 'explore_version_03.data.base_dataset.BaseDataset.__init__', 'BaseDataset.__init__', (['self', 'opt'], {}), '(self, opt)\n', (6957, 6968), False, 'from explore_version_03.data.base_dataset import BaseDataset\n'), ((7091, 7117), 'torch.from_numpy', 'torch.from_numpy', (['features'], {}), '(features)\n', (7107, 7117), False, 'import torch\n'), ((7144, 7172), 'torch.from_numpy', 'torch.from_numpy', (['label_list'], {}), '(label_list)\n', (7160, 7172), False, 'import torch\n'), ((8026, 8056), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0, 0.0])\n', (8034, 8056), True, 'import numpy as np\n'), ((705, 718), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (715, 718), False, 'import csv\n'), ((1009, 1028), 'numpy.array', 'np.array', (['predict_v'], {}), '(predict_v)\n', (1017, 1028), True, 'import numpy as np\n'), ((1661, 1674), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (1671, 1674), False, 'import csv\n'), ((2137, 2156), 'numpy.array', 'np.array', (['predict_v'], {}), '(predict_v)\n', (2145, 2156), 
True, 'import numpy as np\n'), ((3626, 3639), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (3636, 3639), False, 'import csv\n'), ((3998, 4017), 'numpy.array', 'np.array', (['predict_v'], {}), '(predict_v)\n', (4006, 4017), True, 'import numpy as np\n'), ((6408, 6426), 'numpy.array', 'np.array', (['[counts]'], {}), '([counts])\n', (6416, 6426), True, 'import numpy as np\n'), ((8178, 8196), 'numpy.array', 'np.array', (['[counts]'], {}), '([counts])\n', (8186, 8196), True, 'import numpy as np\n'), ((1895, 1906), 'numpy.sum', 'np.sum', (['xxx'], {}), '(xxx)\n', (1901, 1906), True, 'import numpy as np\n'), ((3777, 3788), 'numpy.sum', 'np.sum', (['xxx'], {}), '(xxx)\n', (3783, 3788), True, 'import numpy as np\n'), ((961, 982), 'numpy.argmax', 'np.argmax', (['cur_labels'], {}), '(cur_labels)\n', (970, 982), True, 'import numpy as np\n'), ((2089, 2110), 'numpy.argmax', 'np.argmax', (['cur_labels'], {}), '(cur_labels)\n', (2098, 2110), True, 'import numpy as np\n'), ((3950, 3971), 'numpy.argmax', 'np.argmax', (['cur_labels'], {}), '(cur_labels)\n', (3959, 3971), True, 'import numpy as np\n')]
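The loader above converts each row of raw model scores to probabilities with an exp/sum normalisation and then concatenates the per-model outputs into one feature vector per sample. Below is a small self-contained sketch of just that step, using invented scores instead of the CSV files the class reads; the max-subtraction inside the softmax is a common numerical-stability tweak, not something the original code does.

import numpy as np

def softmax(scores):
    # exp/sum normalisation of one row of model scores
    e = np.exp(scores - np.max(scores))  # subtract the max for numerical stability
    return e / np.sum(e)

# Pretend two models scored the same sample over four classes.
model_a = np.array([1.0, 2.0, 0.5, 0.1])
model_b = np.array([0.2, 3.0, 1.0, 0.0])

features = np.concatenate([softmax(model_a), softmax(model_b)])
print(features.shape)   # (8,): one concatenated feature vector per sample
print(features.round(3))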
# -*- coding: utf-8 -*- # Copyright (c) 2015 Ericsson AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # encoding: utf-8 from calvin.actor.actor import Actor, manage, condition, stateguard, calvinlib class Iterate(Actor): """ Produce sequence of items by iterating over 'token', or simply pass 'token' along if not iterable (i.e. list, dict, or string) N.B. Empty iterables produces a 'null' token. FIXME: Is 'null' production the right thing? Exception? Nothing? Inputs: token: any token Outputs: item: item (or value if input is dictionary) index: index of item (or key if input is dictionary) """ @manage(['data', 'has_data', 'index']) def init(self): self.data = None self.has_data = False self.index = 0 self.setup() def setup(self): self.copy = calvinlib.use("copy") def did_migrate(self): self.setup() @stateguard(lambda self: not self.has_data) @condition(['token'], []) def consume(self, data): if not data: # Empty list => null output self.data = None else: mutable = bool(type(data) is list or type(data) is dict) self.data = self.copy.copy(data) if mutable else data self.has_data = True self.index = 0 @stateguard(lambda self: self.has_data and type(self.data) is list) @condition([], ['item', 'index']) def produce_listitem(self): res = self.data.pop(0) i = self.index self.index += 1 if not self.data: self.data = None self.has_data = False return (res, i) @stateguard(lambda self: self.has_data and type(self.data) is dict) @condition([], ['item', 'index']) def produce_dictitem(self): k, v = self.data.popitem() if not self.data: self.data = None self.has_data = False return (v, k) @stateguard(lambda self: self.has_data and isinstance(self.data, basestring)) @condition([], ['item', 'index']) def produce_stringitem(self): res = self.data[0] self.data = self.data[1:] i = self.index self.index += 1 if not self.data: self.data = None self.has_data = False return (res, i) @stateguard(lambda self: self.has_data) @condition([], ['item', 'index']) def produce_plainitem(self): res = self.data self.data = None self.has_data = False return (res, 0) action_priority = (produce_listitem, produce_dictitem, produce_stringitem, produce_plainitem, consume) requires = ['copy'] test_args = [] test_kwargs = {} test_set = [ { 'inports': {'token': [[1,2,3]]}, 'outports': {'item': [1,2,3], 'index': [0, 1, 2]}, }, { 'inports': {'token': ["abcd"]}, 'outports': {'item': ["a", "b", "c", "d"], 'index': [0, 1, 2, 3]}, }, { 'inports': {'token': [1,2,3]}, 'outports': {'item': [1, 2, 3], 'index': [0, 0, 0]}, }, { 'inports': {'token': {"a":"A", "b":1}}, 'outports': {'item': set([1, "A"]), 'index': set(["a", "b"])}, }, { 'inports': {'token': [""]}, 'outports': {'item': [None], 'index': [0]}, }, { 'inports': {'token': [{}]}, 'outports': {'item': [None], 'index': [0]}, }, { 'inports': {'token': [[]]}, 'outports': {'item': [None], 'index': [0]}, }, { 'inports': {'token': [[], []]}, 'outports': {'item': [None, None], 'index': [0, 0]}, }, { 'inports': {'token': [[1], [2]]}, 'outports': {'item': [1, 2], 'index': [0, 0]}, }, { 'inports': {'token': [[], 
[1,2,3]]}, 'outports': {'item': [None, 1,2,3], 'index': [0, 0, 1, 2]}, }, { 'inports': {'token': [[], [], [1,2]]}, 'outports': {'item': [None, None, 1,2], 'index': [0, 0, 0, 1]}, }, { 'inports': {'token': ["ab", "", "A"]}, 'outports': {'item': ["a", "b", None, "A"], 'index': [0, 1, 0, 0]}, }, ]
[ "calvin.actor.actor.condition", "calvin.actor.actor.manage", "calvin.actor.actor.stateguard", "calvin.actor.actor.calvinlib.use" ]
[((1164, 1201), 'calvin.actor.actor.manage', 'manage', (["['data', 'has_data', 'index']"], {}), "(['data', 'has_data', 'index'])\n", (1170, 1201), False, 'from calvin.actor.actor import Actor, manage, condition, stateguard, calvinlib\n'), ((1441, 1483), 'calvin.actor.actor.stateguard', 'stateguard', (['(lambda self: not self.has_data)'], {}), '(lambda self: not self.has_data)\n', (1451, 1483), False, 'from calvin.actor.actor import Actor, manage, condition, stateguard, calvinlib\n'), ((1489, 1513), 'calvin.actor.actor.condition', 'condition', (["['token']", '[]'], {}), "(['token'], [])\n", (1498, 1513), False, 'from calvin.actor.actor import Actor, manage, condition, stateguard, calvinlib\n'), ((1913, 1945), 'calvin.actor.actor.condition', 'condition', (['[]', "['item', 'index']"], {}), "([], ['item', 'index'])\n", (1922, 1945), False, 'from calvin.actor.actor import Actor, manage, condition, stateguard, calvinlib\n'), ((2247, 2279), 'calvin.actor.actor.condition', 'condition', (['[]', "['item', 'index']"], {}), "([], ['item', 'index'])\n", (2256, 2279), False, 'from calvin.actor.actor import Actor, manage, condition, stateguard, calvinlib\n'), ((2546, 2578), 'calvin.actor.actor.condition', 'condition', (['[]', "['item', 'index']"], {}), "([], ['item', 'index'])\n", (2555, 2578), False, 'from calvin.actor.actor import Actor, manage, condition, stateguard, calvinlib\n'), ((2840, 2878), 'calvin.actor.actor.stateguard', 'stateguard', (['(lambda self: self.has_data)'], {}), '(lambda self: self.has_data)\n', (2850, 2878), False, 'from calvin.actor.actor import Actor, manage, condition, stateguard, calvinlib\n'), ((2884, 2916), 'calvin.actor.actor.condition', 'condition', (['[]', "['item', 'index']"], {}), "([], ['item', 'index'])\n", (2893, 2916), False, 'from calvin.actor.actor import Actor, manage, condition, stateguard, calvinlib\n'), ((1363, 1384), 'calvin.actor.actor.calvinlib.use', 'calvinlib.use', (['"""copy"""'], {}), "('copy')\n", (1376, 1384), False, 'from calvin.actor.actor import Actor, manage, condition, stateguard, calvinlib\n')]
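The actor above yields (item, index) pairs for lists and strings, (value, key) pairs for dicts, a single null item for empty iterables, and passes any other token through unchanged. A plain-Python generator with the same observable behaviour is sketched below; the function name and the dict iteration order are my own choices, not part of the Calvin API.

def iterate_token(token):
    # Lists and strings yield (item, position); dicts yield (value, key).
    if isinstance(token, (list, str)):
        if not token:
            yield (None, 0)            # empty iterable -> single null item
        else:
            for i, item in enumerate(token):
                yield (item, i)
    elif isinstance(token, dict):
        if not token:
            yield (None, 0)
        else:
            for key, value in token.items():
                yield (value, key)
    else:
        yield (token, 0)               # non-iterable tokens pass straight through

print(list(iterate_token([1, 2, 3])))   # [(1, 0), (2, 1), (3, 2)]
print(list(iterate_token("ab")))         # [('a', 0), ('b', 1)]
print(list(iterate_token(7)))            # [(7, 0)]
print(list(iterate_token([])))           # [(None, 0)]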
# Generated by Django 3.2 on 2021-04-13 14:03

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('nyumbax', '0014_alter_business_body'),
    ]

    operations = [
        migrations.AlterField(
            model_name='business',
            name='body',
            field=models.TextField(),
        ),
    ]
[ "django.db.models.TextField" ]
[((335, 353), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (351, 353), False, 'from django.db import migrations, models\n')]
""" This module is for processing the word2vec embdding from https://code.google.com/archive/p/word2vec/ (GoogleNews-vectors-negative300.bin.gz) and the crossword clue database from http://www.otsys.com/clue/. (format of the data, preprocessing, other options) """ import struct import numpy as np import random import pickle embedding_filename = "GoogleNews-vectors-negative300.bin" clues_filename = "clues-5-stripped.txt" pickled_index_filename = "picked-index.bin" max_clue_length = 40 def read_until(fd, b): """Read bytes from open file object fd until byte b is found""" s = b'' while True: c = fd.read(1) if not c: break #no more bytes to read if c == b: break #found our byte s += c return s def build_index(fname): """Given the path to the word2vec model, build an index of {bytestring: file offset} This assumes that the file consists of a header of the form: <num words> <another number>\\n and then lines of the form: <word><space><1200 bytes of embedding>\\n which is the format of the GoogleNews 300 dimensional embedding. """ d = {} with open(fname, "rb") as f: # read dimension count = int(read_until(f, b' ')) print("count: ",count) read_until(f, b'\n') #read words for i in range(count): if i % 100000 == 0: print(i) # attempt to read next word idx = f.tell() word = read_until(f, b' ') d[word]=idx if not word: break else: f.read(1200) return d def lookup(words, index, filename, length=None, postpad=False): """Return a numpy array of the embeddings of each word in words, using the pre-made index for the given filename. If length is not none and is bigger than the number of words, the result will be padded with 0s. The default is to prepend 0s, but appending can be set by giving postpad=True. The returned shape is (n, 300) where n is either the number of words, or length. 
""" l = len(words) pad = 0 if length and length > l: pad = length - l l = length a = np.zeros( (l, 300) , dtype=np.float32) with open(filename, "rb") as f: for w,i in zip(words, range(l)): b = bytes(w, encoding="latin-1") if b not in index: continue f.seek(index[b]) check = read_until(f, b' ') if not check == b: raise RuntimeError("index incorrect for word {}".format(i)) v = struct.unpack('f'*300, f.read(1200)) if postpad: a[i] = np.array(v, dtype=np.float32) else: a[i+pad] = np.array(v, dtype=np.float32) return a def guess_to_letters(guess): """Given an (n,m,26) array, return a list of n words, m letters each Picks the highest number from each""" z = guess.argmax(axis=2).tolist() #z has shape (n, m) abc = 'abcdefghijklmnopqrstuvwxyz' return list(map(lambda l: ''.join(abc[x] for x in l), z)) def letters_to_one_hot(answer): """Turns a string of length l into an (l,26) one-hot numpy array Anything not in [a-zA-Z] goes to all 0s""" l = len(answer) a = np.zeros( (l, 26) ) abc = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' for i in range(l): try: j = abc.index(answer[i].upper()) a[i,j]=1.0 except ValueError: pass return a class Clues5Generator: def __init__(self, batch_size, clue_path=clues_filename, embedding_path=embedding_filename, length=max_clue_length, pickled_index=None, read_pickle=False, write_pickle=False, pad="post"): if (write_pickle or read_pickle) and not pickled_index: raise RuntimeError("Told to read or write pickle, but not given file name") elif read_pickle: with open(pickled_index, "rb") as f: print("Loading pickled index {}".format(pickled_index)) self.index = pickle.load(f) elif not embedding_path: raise RuntimeError("No embedding_path and (not write_pickle)") else: print("Building index from binary embedding file {}".format(embedding_path)) self.index = build_index(embedding_path) if write_pickle: print("Writing to pickle index {}".format(pickled_index)) with open(pickled_index, "rb") as f: pickle.dump(self.index, f) self.embedding_path = embedding_path self.batch_size = batch_size with open(clue_path, "r") as f: self.clues = f.readlines() self.num_clues = len(self.clues) print("num clues: ", self.num_clues) self.left_in_epoch = list(range(self.num_clues)) random.shuffle(self.left_in_epoch) self.num_left_in_epoch = self.num_clues self.vector_length = length self.pad = pad def __iter__(self): return self def __next__(self): if self.num_left_in_epoch < self.batch_size: tmp = list(range(self.num_clues)) random.shuffle(tmp) self.left_in_epoch += tmp self.num_left_in_epoch += self.num_clues x = np.zeros( (self.batch_size, self.vector_length, 300), dtype=np.float32) y = np.zeros( (self.batch_size, 5, 26), dtype=np.float32) l = np.zeros( (self.batch_size,), dtype=np.int32) for i in range(self.batch_size): answer, *clue = self.clues[self.left_in_epoch[i]].strip().split(' ') x[i] = lookup(clue, self.index, self.embedding_path, self.vector_length, True if self.pad=="post" else False) y[i] = letters_to_one_hot(answer) l[i] = len(clue) del self.left_in_epoch[:self.batch_size] self.num_left_in_epoch -= self.batch_size return (x,y,l) def next_with_english(self): if self.num_left_in_epoch < self.batch_size: tmp = list(range(self.num_clues)) random.shuffle(tmp) self.left_in_epoch += tmp self.num_left_in_epoch += self.num_clues x = np.zeros( (self.batch_size, self.vector_length, 300), dtype=np.float32) y = np.zeros( (self.batch_size, 5, 26), dtype=np.float32) l = np.zeros( (self.batch_size,), dtype=np.int32) xe = [] ye = [] for i in range(self.batch_size): answer, *clue = 
self.clues[self.left_in_epoch[i]].strip().split(' ') x[i] = lookup(clue, self.index, self.embedding_path, self.vector_length) y[i] = letters_to_one_hot(answer) l[i] = len(clue) xe.append(clue) ye.append(answer) del self.left_in_epoch[:self.batch_size] self.num_left_in_epoch -= self.batch_size return (x,y,l,xe,ye)
[ "pickle.dump", "random.shuffle", "pickle.load", "numpy.array", "numpy.zeros" ]
[((2166, 2202), 'numpy.zeros', 'np.zeros', (['(l, 300)'], {'dtype': 'np.float32'}), '((l, 300), dtype=np.float32)\n', (2174, 2202), True, 'import numpy as np\n'), ((3263, 3280), 'numpy.zeros', 'np.zeros', (['(l, 26)'], {}), '((l, 26))\n', (3271, 3280), True, 'import numpy as np\n'), ((4829, 4863), 'random.shuffle', 'random.shuffle', (['self.left_in_epoch'], {}), '(self.left_in_epoch)\n', (4843, 4863), False, 'import random\n'), ((5291, 5361), 'numpy.zeros', 'np.zeros', (['(self.batch_size, self.vector_length, 300)'], {'dtype': 'np.float32'}), '((self.batch_size, self.vector_length, 300), dtype=np.float32)\n', (5299, 5361), True, 'import numpy as np\n'), ((5375, 5427), 'numpy.zeros', 'np.zeros', (['(self.batch_size, 5, 26)'], {'dtype': 'np.float32'}), '((self.batch_size, 5, 26), dtype=np.float32)\n', (5383, 5427), True, 'import numpy as np\n'), ((5441, 5485), 'numpy.zeros', 'np.zeros', (['(self.batch_size,)'], {'dtype': 'np.int32'}), '((self.batch_size,), dtype=np.int32)\n', (5449, 5485), True, 'import numpy as np\n'), ((6238, 6308), 'numpy.zeros', 'np.zeros', (['(self.batch_size, self.vector_length, 300)'], {'dtype': 'np.float32'}), '((self.batch_size, self.vector_length, 300), dtype=np.float32)\n', (6246, 6308), True, 'import numpy as np\n'), ((6322, 6374), 'numpy.zeros', 'np.zeros', (['(self.batch_size, 5, 26)'], {'dtype': 'np.float32'}), '((self.batch_size, 5, 26), dtype=np.float32)\n', (6330, 6374), True, 'import numpy as np\n'), ((6388, 6432), 'numpy.zeros', 'np.zeros', (['(self.batch_size,)'], {'dtype': 'np.int32'}), '((self.batch_size,), dtype=np.int32)\n', (6396, 6432), True, 'import numpy as np\n'), ((5155, 5174), 'random.shuffle', 'random.shuffle', (['tmp'], {}), '(tmp)\n', (5169, 5174), False, 'import random\n'), ((6102, 6121), 'random.shuffle', 'random.shuffle', (['tmp'], {}), '(tmp)\n', (6116, 6121), False, 'import random\n'), ((2663, 2692), 'numpy.array', 'np.array', (['v'], {'dtype': 'np.float32'}), '(v, dtype=np.float32)\n', (2671, 2692), True, 'import numpy as np\n'), ((2738, 2767), 'numpy.array', 'np.array', (['v'], {'dtype': 'np.float32'}), '(v, dtype=np.float32)\n', (2746, 2767), True, 'import numpy as np\n'), ((4031, 4045), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (4042, 4045), False, 'import pickle\n'), ((4486, 4512), 'pickle.dump', 'pickle.dump', (['self.index', 'f'], {}), '(self.index, f)\n', (4497, 4512), False, 'import pickle\n')]
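The module above works by remembering a byte offset per word and later seeking to it and unpacking 300 float32 values (1200 bytes) with struct. The sketch below rebuilds that fixed-record layout on a tiny temporary file so the seek-and-unpack logic can be tried without the multi-gigabyte GoogleNews file; the 4-dimensional vectors and the file name are invented for the demonstration.

import os
import struct
import tempfile

DIM = 4  # stand-in for the 300-dimensional embeddings

# Write a miniature "<word><space><packed floats>" file in the same layout.
path = os.path.join(tempfile.mkdtemp(), "tiny.bin")
words = {"cat": [1, 2, 3, 4], "dog": [5, 6, 7, 8]}
index = {}
with open(path, "wb") as f:
    f.write(b"2 %d\n" % DIM)               # header: word count and dimension
    for word, vector in words.items():
        index[word] = f.tell()             # byte offset of the word, as in build_index
        f.write(word.encode() + b" ")
        f.write(struct.pack("f" * DIM, *vector))

# Look a word up by seeking straight to its recorded offset.
with open(path, "rb") as f:
    f.seek(index["dog"])
    assert f.read(len(b"dog ")) == b"dog "
    values = struct.unpack("f" * DIM, f.read(4 * DIM))
print(values)  # (5.0, 6.0, 7.0, 8.0)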
"""Async and await example using subprocesses Note: Requires Python 3.6. """ import os import sys import time import platform import asyncio async def run_command(*args): """Run command in subprocess Example from: http://asyncio.readthedocs.io/en/latest/subprocess.html """ # Create subprocess process = await asyncio.create_subprocess_exec( *args, # stdout must a pipe to be accessible as process.stdout stdout=asyncio.subprocess.PIPE) # Create subprocess process2 = await asyncio.create_subprocess_exec( *args, # stdout must a pipe to be accessible as process.stdout stdout=asyncio.subprocess.PIPE) # Status print('Started:', args, '(pid = ' + str(process.pid) + ')') # Status print('Started:', args, '(pid = ' + str(process2.pid) + ')') # Wait for the subprocess to finish stdout, stderr = await process.communicate() # Progress if process.returncode == 0: print('Done:', args, '(pid = ' + str(process.pid) + ')') else: print('Failed:', args, '(pid = ' + str(process.pid) + ')') # Result result = stdout.decode().strip() # Return stdout return result async def run_command_shell(command): """Run command in subprocess (shell) Note: This can be used if you wish to execute e.g. "copy" on Windows, which can only be executed in the shell. """ # Create subprocess process = await asyncio.create_subprocess_shell( command, stdout=asyncio.subprocess.PIPE) # Status print('Started:', command, '(pid = ' + str(process.pid) + ')') process2 = await asyncio.create_subprocess_shell( command, stdout=asyncio.subprocess.PIPE) # Status print('Started:', command, '(pid = ' + str(process2.pid) + ')') # Wait for the subprocess to finish stdout, stderr = await process.communicate() # Progress if process.returncode == 0: print('Done:', command, '(pid = ' + str(process.pid) + ')') else: print('Failed:', command, '(pid = ' + str(process.pid) + ')') # Result result = stdout.decode().strip() #kill print('Started:', command, '(pid = ' + str(process.pid) + ')') # Return stdout return result def make_chunks(l, n): """Yield successive n-sized chunks from l. Note: Taken from https://stackoverflow.com/a/312464 """ if sys.version_info.major == 2: for i in range(0, len(l), n): yield l[i:i + n] else: # Assume Python 3 for i in range(0, len(l), n): yield l[i:i + n] def run_asyncio_commands(tasks, max_concurrent_tasks=0): """Run tasks asynchronously using asyncio and return results If max_concurrent_tasks are set to 0, no limit is applied. Note: By default, Windows uses SelectorEventLoop, which does not support subprocesses. Therefore ProactorEventLoop is used on Windows. 
https://docs.python.org/3/library/asyncio-eventloops.html#windows """ all_results = [] if max_concurrent_tasks == 0: chunks = [tasks] else: chunks = make_chunks(l=tasks, n=max_concurrent_tasks) for tasks_in_chunk in chunks: if platform.system() == 'Windows': loop = asyncio.ProactorEventLoop() asyncio.set_event_loop(loop) else: loop = asyncio.get_event_loop() commands = asyncio.gather(*tasks_in_chunk) # Unpack list using * results = loop.run_until_complete(commands) all_results += results loop.close() return all_results if __name__ == '__main__': start = time.time() if platform.system() == 'Windows': # Commands to be executed on Windows commands = [ ['hostname'] ] else: # Commands to be executed on Unix commands = [ ['du', '-sh', '/var/tmp'], ['hostname'], ] tasks = [] for command in commands: tasks.append(run_command(*command)) # # Shell execution socket_Process # tasks = [run_command_shell('copy c:/somefile d:/new_file')] # # List comprehension socket_Process # tasks = [ # run_command(*command, get_project_path(project)) # for project in accessible_projects(all_projects) # ] results = run_asyncio_commands(tasks, max_concurrent_tasks=20) # At most 20 parallel tasks print('Results:', results) end = time.time() rounded_end = ('{0:.4f}'.format(round(end - start, 4))) print('Script ran in about', str(rounded_end), 'seconds')
[ "asyncio.get_event_loop", "asyncio.create_subprocess_exec", "platform.system", "asyncio.gather", "asyncio.set_event_loop", "asyncio.create_subprocess_shell", "time.time", "asyncio.ProactorEventLoop" ]
[((3706, 3717), 'time.time', 'time.time', ([], {}), '()\n', (3715, 3717), False, 'import time\n'), ((4527, 4538), 'time.time', 'time.time', ([], {}), '()\n', (4536, 4538), False, 'import time\n'), ((348, 417), 'asyncio.create_subprocess_exec', 'asyncio.create_subprocess_exec', (['*args'], {'stdout': 'asyncio.subprocess.PIPE'}), '(*args, stdout=asyncio.subprocess.PIPE)\n', (378, 417), False, 'import asyncio\n'), ((544, 613), 'asyncio.create_subprocess_exec', 'asyncio.create_subprocess_exec', (['*args'], {'stdout': 'asyncio.subprocess.PIPE'}), '(*args, stdout=asyncio.subprocess.PIPE)\n', (574, 613), False, 'import asyncio\n'), ((1488, 1560), 'asyncio.create_subprocess_shell', 'asyncio.create_subprocess_shell', (['command'], {'stdout': 'asyncio.subprocess.PIPE'}), '(command, stdout=asyncio.subprocess.PIPE)\n', (1519, 1560), False, 'import asyncio\n'), ((1681, 1753), 'asyncio.create_subprocess_shell', 'asyncio.create_subprocess_shell', (['command'], {'stdout': 'asyncio.subprocess.PIPE'}), '(command, stdout=asyncio.subprocess.PIPE)\n', (1712, 1753), False, 'import asyncio\n'), ((3482, 3513), 'asyncio.gather', 'asyncio.gather', (['*tasks_in_chunk'], {}), '(*tasks_in_chunk)\n', (3496, 3513), False, 'import asyncio\n'), ((3727, 3744), 'platform.system', 'platform.system', ([], {}), '()\n', (3742, 3744), False, 'import platform\n'), ((3284, 3301), 'platform.system', 'platform.system', ([], {}), '()\n', (3299, 3301), False, 'import platform\n'), ((3335, 3362), 'asyncio.ProactorEventLoop', 'asyncio.ProactorEventLoop', ([], {}), '()\n', (3360, 3362), False, 'import asyncio\n'), ((3375, 3403), 'asyncio.set_event_loop', 'asyncio.set_event_loop', (['loop'], {}), '(loop)\n', (3397, 3403), False, 'import asyncio\n'), ((3437, 3461), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (3459, 3461), False, 'import asyncio\n')]
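The script above starts commands with asyncio.create_subprocess_exec, awaits communicate() for their output, and gathers a chunk of tasks at a time to cap concurrency. A compact variant is sketched below; it launches the current Python interpreter twice so it runs anywhere, and it uses asyncio.run (Python 3.7+) instead of the manual event-loop handling in the original.

import asyncio
import sys

async def run(tag):
    # Start a short-lived child process and capture its stdout.
    proc = await asyncio.create_subprocess_exec(
        sys.executable, "-c", f"print('hello from task {tag}')",
        stdout=asyncio.subprocess.PIPE)
    stdout, _ = await proc.communicate()
    return tag, proc.returncode, stdout.decode().strip()

async def main():
    # gather() runs both subprocesses concurrently, like the chunked version above.
    return await asyncio.gather(run(1), run(2))

print(asyncio.run(main()))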
import numpy as np class PropertyMap: """Map points in the simulation domain to properties of parts containing the points. Parameters ---------- part_map : PartMap Mapper from spatial location to part identifier. prop_map : callable Map from part identifier to a property value. Returns ------- """ def __init__(self, part_map, prop_map): self.partMap = part_map self.propMap = prop_map def get_part(self, x): """Find the part(s) containing one or more points. Parameters ---------- x : Coordinate vector or array of coordinate vectors. Returns ------- """ return self.partMap(x) def __call__(self, x): """Do the mapping. Parameters ---------- x : Coordinate vector or array of coordinate vectors. Returns ------- Property of the part(s) containing `x`, of the same shape as `x` except for the last axis corresponding to coordinate vector extent. """ parts = self.get_part(x) if np.isscalar(parts): return self.propMap(parts) unique_parts = set(np.asanyarray(parts).flat) unique_props = [self.propMap(p) for p in unique_parts] obj_types = [type(p) for p in unique_props] if obj_types[0] is str: assert all(t is str for t in obj_types) obj_type = object else: obj_type = np.result_type(*obj_types) result = np.empty(np.shape(parts), dtype=obj_type) for part, prop in zip(unique_parts, unique_props): result[parts == part] = prop return result class MaterialPropertyMap(PropertyMap): """Map points in the simulation domain to material properties of parts containing the points. Parameters ---------- part_map : PartMap Function that takes a spatial location and maps it to a part identifier. part_materials : dict Dict mapping from part identifier to a material name. mat_lib : qmt.Materials Materials library used to look up the material properties. str : prop_name: Name of the material property to be retrieved for each part. eunit : Energy unit, passed to `mat_lib.find()`. fill_value : Value to be filled in places where there is no part or the part does not have a material or the material does not have the property `prop_name`. The default behavior `fill_value='raise'` is to raise a KeyError in these cases. Returns ------- """ def __init__( self, part_map, part_materials, mat_lib, prop_name, eunit=None, fill_value="raise", ): self.fillValue = fill_value self.materialsDict = { p: mat_lib.find(m, eunit) for p, m in part_materials.items() } self.partProps = {} for p, mat in self.materialsDict.items(): try: if prop_name == "conductionBandMinimum": self.partProps[p] = mat_lib.conduction_band_minimum(mat) elif prop_name == "valenceBandMaximum": self.partProps[p] = mat_lib.valence_band_maximum(mat) elif prop_name == "lightHoleMass": self.partProps[p] = mat.hole_mass("light", "dos") elif prop_name == "heavyHoleMass": self.partProps[p] = mat.hole_mass("heavy", "dos") elif prop_name == "dosHoleMass": self.partProps[p] = mat.hole_mass("dos", "dos") else: self.partProps[p] = mat[prop_name] except KeyError: pass def prop_map(part): try: return self.partProps[part] except KeyError: if self.fillValue == "raise": raise return self.fillValue super().__init__(part_map, prop_map)
[ "numpy.shape", "numpy.result_type", "numpy.isscalar", "numpy.asanyarray" ]
[((1146, 1164), 'numpy.isscalar', 'np.isscalar', (['parts'], {}), '(parts)\n', (1157, 1164), True, 'import numpy as np\n'), ((1526, 1552), 'numpy.result_type', 'np.result_type', (['*obj_types'], {}), '(*obj_types)\n', (1540, 1552), True, 'import numpy as np\n'), ((1579, 1594), 'numpy.shape', 'np.shape', (['parts'], {}), '(parts)\n', (1587, 1594), True, 'import numpy as np\n'), ((1233, 1253), 'numpy.asanyarray', 'np.asanyarray', (['parts'], {}), '(parts)\n', (1246, 1253), True, 'import numpy as np\n')]
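PropertyMap above resolves every point to a part identifier and then fills an output array part by part with a boolean mask, choosing the output dtype with np.result_type. The stripped-down sketch below follows the same pattern; the two-part geometry, part names, and property values are invented purely for illustration.

import numpy as np

def part_map(x):
    # Hypothetical geometry: part "metal" for x < 0, part "oxide" otherwise.
    return np.where(np.asarray(x) < 0, "metal", "oxide")

properties = {"metal": 3.5, "oxide": 9.0}  # e.g. relative permittivity per part

def property_map(x):
    parts = part_map(x)
    unique_parts = set(parts.flat)
    values = [properties[p] for p in unique_parts]
    out = np.empty(np.shape(parts), dtype=np.result_type(*values))
    for part, value in zip(unique_parts, values):
        out[parts == part] = value        # boolean-mask fill, as in PropertyMap
    return out

print(property_map([-1.0, -0.5, 0.2, 2.0]))  # [3.5 3.5 9.  9. ]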
from django import forms from hc.api.models import Channel from hc.front.models import Post from hc.front.models import FaqItem, FaqCategory from ckeditor.widgets import CKEditorWidget class NameTagsForm(forms.Form): name = forms.CharField(max_length=100, required=False) tags = forms.CharField(max_length=500, required=False) def clean_tags(self): l = [] for part in self.cleaned_data["tags"].split(" "): part = part.strip() if part != "": l.append(part) return " ".join(l) class TimeoutForm(forms.Form): timeout = forms.IntegerField(min_value=60, max_value=2592000) grace = forms.IntegerField(min_value=60, max_value=2592000) class NagTimeForm(forms.Form): nag_time = forms.IntegerField(min_value=60, max_value=2592000) class AddChannelForm(forms.ModelForm): class Meta: model = Channel fields = ['kind', 'value'] def clean_value(self): value = self.cleaned_data["value"] return value.strip() class AddWebhookForm(forms.Form): error_css_class = "has-error" value_down = forms.URLField(max_length=1000, required=False) value_up = forms.URLField(max_length=1000, required=False) def get_value(self): return "{value_down}\n{value_up}".format(**self.cleaned_data) class PostForm(forms.ModelForm): title = forms.CharField( widget=forms.TextInput( attrs={ "class": "form-control", "placeholder": "Title", "style": 'width: 98%' }), strip=True, label="" ) body = forms.CharField(widget=CKEditorWidget(), label="") class Meta: model = Post fields = ("title", "body",) class AddFaqForm(forms.ModelForm): body = forms.CharField(widget=CKEditorWidget(), label="") class Meta: model = FaqItem fields = ['category', 'title', 'body'] class AddFaqCategoryForm(forms.ModelForm): class Meta: model = FaqCategory fields = ['category']
[ "ckeditor.widgets.CKEditorWidget", "django.forms.CharField", "django.forms.URLField", "django.forms.IntegerField", "django.forms.TextInput" ]
[((230, 277), 'django.forms.CharField', 'forms.CharField', ([], {'max_length': '(100)', 'required': '(False)'}), '(max_length=100, required=False)\n', (245, 277), False, 'from django import forms\n'), ((289, 336), 'django.forms.CharField', 'forms.CharField', ([], {'max_length': '(500)', 'required': '(False)'}), '(max_length=500, required=False)\n', (304, 336), False, 'from django import forms\n'), ((603, 654), 'django.forms.IntegerField', 'forms.IntegerField', ([], {'min_value': '(60)', 'max_value': '(2592000)'}), '(min_value=60, max_value=2592000)\n', (621, 654), False, 'from django import forms\n'), ((667, 718), 'django.forms.IntegerField', 'forms.IntegerField', ([], {'min_value': '(60)', 'max_value': '(2592000)'}), '(min_value=60, max_value=2592000)\n', (685, 718), False, 'from django import forms\n'), ((767, 818), 'django.forms.IntegerField', 'forms.IntegerField', ([], {'min_value': '(60)', 'max_value': '(2592000)'}), '(min_value=60, max_value=2592000)\n', (785, 818), False, 'from django import forms\n'), ((1124, 1171), 'django.forms.URLField', 'forms.URLField', ([], {'max_length': '(1000)', 'required': '(False)'}), '(max_length=1000, required=False)\n', (1138, 1171), False, 'from django import forms\n'), ((1187, 1234), 'django.forms.URLField', 'forms.URLField', ([], {'max_length': '(1000)', 'required': '(False)'}), '(max_length=1000, required=False)\n', (1201, 1234), False, 'from django import forms\n'), ((1410, 1509), 'django.forms.TextInput', 'forms.TextInput', ([], {'attrs': "{'class': 'form-control', 'placeholder': 'Title', 'style': 'width: 98%'}"}), "(attrs={'class': 'form-control', 'placeholder': 'Title',\n 'style': 'width: 98%'})\n", (1425, 1509), False, 'from django import forms\n'), ((1659, 1675), 'ckeditor.widgets.CKEditorWidget', 'CKEditorWidget', ([], {}), '()\n', (1673, 1675), False, 'from ckeditor.widgets import CKEditorWidget\n'), ((1832, 1848), 'ckeditor.widgets.CKEditorWidget', 'CKEditorWidget', ([], {}), '()\n', (1846, 1848), False, 'from ckeditor.widgets import CKEditorWidget\n')]
import os
import os.path
import tempfile


def make_tree(dirs, files):
    base = tempfile.mkdtemp()
    for path in dirs:
        os.makedirs(os.path.join(base, path))
    for path in files:
        with open(os.path.join(base, path), 'w') as f:
            f.write("content")
    return base
[ "os.path.join", "tempfile.mkdtemp" ]
[((82, 100), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (98, 100), False, 'import tempfile\n'), ((144, 168), 'os.path.join', 'os.path.join', (['base', 'path'], {}), '(base, path)\n', (156, 168), False, 'import os\n'), ((212, 236), 'os.path.join', 'os.path.join', (['base', 'path'], {}), '(base, path)\n', (224, 236), False, 'import os\n')]
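make_tree above builds a throwaway directory tree for tests: it creates the requested sub-directories first, then writes a small file at each file path. An illustrative call follows (the helper is repeated so the snippet is self-contained, and the pkg/... paths are made up); os.walk then confirms what was created.

import os
import tempfile

def make_tree(dirs, files):
    # Same idea as the helper above: build a scratch tree under a fresh temp dir.
    base = tempfile.mkdtemp()
    for path in dirs:
        os.makedirs(os.path.join(base, path))
    for path in files:
        with open(os.path.join(base, path), "w") as f:
            f.write("content")
    return base

base = make_tree(["pkg/sub"], ["pkg/a.txt", "pkg/sub/b.txt"])
for root, _, names in os.walk(base):
    for name in names:
        print(os.path.relpath(os.path.join(root, name), base))
# prints pkg/a.txt and pkg/sub/b.txt (path separators vary by platform)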
import requests class WebShareIo: def __init__(self,key=None): if key == None: raise ValueError("Key is not provided.") self.host = "https://proxy.webshare.io/api/" self.headers = {"Authorization":f"Token {key}"} def request_handler(self,method=None,path=None,body=None): if method.lower() == "get": response = requests.get(self.host+path, headers=self.headers).json() elif method.lower() == "post" and body == None: response = requests.get(self.host+path, headers=self.headers,json=body).json() else: response = {"error":"Invalid request method."} return response def user_profile(self): return self.request_handler("GET","profile") def my_subscription(self): return self.request_handler("GET","subscription") def proxy_config(self): return self.request_handler("GET","proxy/config") def set_authorized_ip(self,ip=None): if key == None: raise ValueError("authorized ip missing.") return self.request_handler("POST","proxy/config",{"authorized_ips":[ip]}) def reset_proxy_password(self): return self.request_handler("POST","proxy/config/reset_password") def proxy_list(self,page=None,countries=None): if page != None and countries != None: return self.request_handler("GET",f"proxy/list/?page={page}&countries={countries}") elif page != None and countries == None: return self.request_handler("GET",f"proxy/list/?page={page}") elif page == None and countries != None: return self.request_handler("GET",f"proxy/list/?countries={countries}") else: return self.request_handler("GET","proxy/list") def download_proxies(self,page=None,countries=None): response = self.proxy_list(page,countries) proxies = [f"{i['username']}:{i['password']}@{i['proxy_address']}:80" for i in response['results']] open("proxies.txt","w+").write("\n".join(proxies)) print("Downloaded!\nFile name: proxies.txt.") def proxy_stats(self): return self.request_handler("GET","proxy/stats") def get_location(self,ip): return requests.get('http://lumtest.com/myip.json',proxies = {'http': "http://"+ip,'https': "http://"+ip}).json()
[ "requests.get" ]
[((2323, 2430), 'requests.get', 'requests.get', (['"""http://lumtest.com/myip.json"""'], {'proxies': "{'http': 'http://' + ip, 'https': 'http://' + ip}"}), "('http://lumtest.com/myip.json', proxies={'http': 'http://' +\n ip, 'https': 'http://' + ip})\n", (2335, 2430), False, 'import requests\n'), ((390, 442), 'requests.get', 'requests.get', (['(self.host + path)'], {'headers': 'self.headers'}), '(self.host + path, headers=self.headers)\n', (402, 442), False, 'import requests\n'), ((529, 592), 'requests.get', 'requests.get', (['(self.host + path)'], {'headers': 'self.headers', 'json': 'body'}), '(self.host + path, headers=self.headers, json=body)\n', (541, 592), False, 'import requests\n')]
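download_proxies above turns each record returned by the API into a user:pass@host:port string before writing proxies.txt. The snippet below isolates just that formatting step on hard-coded sample records, so it needs no Web Share token and makes no network calls; the usernames, passwords, and addresses are invented.

# Invented sample records shaped like the API's "results" entries.
results = [
    {"username": "user1", "password": "pw1", "proxy_address": "192.0.2.10"},
    {"username": "user2", "password": "pw2", "proxy_address": "192.0.2.11"},
]

proxies = [
    f"{r['username']}:{r['password']}@{r['proxy_address']}:80" for r in results
]
print("\n".join(proxies))
# user1:pw1@192.0.2.10:80
# user2:pw2@192.0.2.11:80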
import numpy as np import tensorflow as tf from tensorflow import keras from environ import game as environment class agent(): num_actions=3 epsilon = 0.8 decay_epsilon = 0.9995 min_epsilon = 0.1 gamma = 0.5 alpha = 0.01 def __init__(self): self.history = np.zeros(3) self.Q = av_function(input_size=3, alpha=self.alpha) def train(self, queue, v=True, num_iters=5000): for i in range(num_iters+1): if v and i % 10 == 0: print("[*]Thread 1: Iteration: {}".format(i)) if i % 100 == 0: if v: print("[*]Thread 1: Updating agent queue {}".format([self, i, False])) while queue.full(): queue.get_nowait() #Empties queue queue.put([self, i, False]) #Provides the display game function with the latest agent env = environment() state, _, done = env.step(0) #Sets initial state #print(self.history) self.history = np.zeros(3) while not done: action = self.choose_action(state) #Doesn"t train for every step reward = 0 for _ in range(20): if action == 1: state2, r, done = env.step(1) action = 0 else: state2, r, done = env.step(action) reward += r if done: break #print(self.gamma * self.max_val(state2)) self.Q.train(state, (action - 1.0), reward + self.gamma * (self.max_val(state2) - self.max_val(state))) state = state2 if v: print("[*]Thread 1 agent: reward: {}, epsilon: {}".format(round(reward, 3), round(self.epsilon, 3))) if self.epsilon <= self.min_epsilon: self.epsilon = self.min_epsilon else: self.epsilon *= self.decay_epsilon input("\nPress ENTER to continue\n") if v: print("[+]Thread 1: Training finished, signaling display {}".format([self, num_iters, True])) while queue.full(): queue.get_nowait() queue.put([self, num_iters, True]) def choose_action(self, state): if (self.epsilon - np.random.uniform()) > 0: return np.random.randint(0, self.num_actions) else: for a in range(self.num_actions): l = self.Q.predict(state, a) #self.history[np.argmax(l)] += 1 #print("[*]Thread 1 agent: stdev: {}, epsilon: {}".format(round(np.std(l), 4), round(self.epsilon, 3))) return np.argmax(l) def max_val(self, state): l = [] for a in range(self.num_actions): l.append(self.Q.predict(state, a)) return max(l) class av_function(): def build_nn(self, input_size): model = keras.Sequential() model.add(keras.layers.Dense(25, activation=tf.nn.tanh, kernel_initializer="random_uniform", input_dim=input_size)) model.add(keras.layers.Dense(1, activation=tf.nn.relu, kernel_initializer="random_uniform")) optimizer = keras.optimizers.SGD(lr = self.alpha) model.compile(loss="mean_squared_error", optimizer=optimizer, metrics=["mean_squared_error"]) model._make_predict_function() self.graph = tf.get_default_graph() return model def __init__(self, input_size, alpha=0.1): self.alpha = alpha self.model = self.build_nn(input_size+1) def train(self, state, action, reward): with self.graph.as_default(): self.model.fit([[state + [action]]], [reward], epochs=1, verbose=0) def predict(self, state, action): with self.graph.as_default(): return self.model.predict([[state + [action]]]) def model_summary(self): print("\nValue function approximator:") self.model.summary() if __name__ == "__main__": from queue import Queue q = Queue(1) a = agent() a.train(q)
[ "environ.game", "tensorflow.keras.Sequential", "numpy.argmax", "tensorflow.keras.optimizers.SGD", "numpy.zeros", "numpy.random.randint", "tensorflow.keras.layers.Dense", "numpy.random.uniform", "queue.Queue", "tensorflow.get_default_graph" ]
[((4016, 4024), 'queue.Queue', 'Queue', (['(1)'], {}), '(1)\n', (4021, 4024), False, 'from queue import Queue\n'), ((297, 308), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (305, 308), True, 'import numpy as np\n'), ((2909, 2927), 'tensorflow.keras.Sequential', 'keras.Sequential', ([], {}), '()\n', (2925, 2927), False, 'from tensorflow import keras\n'), ((3174, 3209), 'tensorflow.keras.optimizers.SGD', 'keras.optimizers.SGD', ([], {'lr': 'self.alpha'}), '(lr=self.alpha)\n', (3194, 3209), False, 'from tensorflow import keras\n'), ((3375, 3397), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (3395, 3397), True, 'import tensorflow as tf\n'), ((872, 885), 'environ.game', 'environment', ([], {}), '()\n', (883, 885), True, 'from environ import game as environment\n'), ((1007, 1018), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (1015, 1018), True, 'import numpy as np\n'), ((2332, 2370), 'numpy.random.randint', 'np.random.randint', (['(0)', 'self.num_actions'], {}), '(0, self.num_actions)\n', (2349, 2370), True, 'import numpy as np\n'), ((2659, 2671), 'numpy.argmax', 'np.argmax', (['l'], {}), '(l)\n', (2668, 2671), True, 'import numpy as np\n'), ((2946, 3055), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(25)'], {'activation': 'tf.nn.tanh', 'kernel_initializer': '"""random_uniform"""', 'input_dim': 'input_size'}), "(25, activation=tf.nn.tanh, kernel_initializer=\n 'random_uniform', input_dim=input_size)\n", (2964, 3055), False, 'from tensorflow import keras\n'), ((3070, 3156), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(1)'], {'activation': 'tf.nn.relu', 'kernel_initializer': '"""random_uniform"""'}), "(1, activation=tf.nn.relu, kernel_initializer=\n 'random_uniform')\n", (3088, 3156), False, 'from tensorflow import keras\n'), ((2287, 2306), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (2304, 2306), True, 'import numpy as np\n')]
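choose_action above is an epsilon-greedy policy: with probability epsilon it picks a random action, otherwise it takes the action with the highest predicted value, and epsilon decays toward a floor after each episode. A framework-free sketch of that policy follows, with a small dictionary standing in for the Keras value model; the numbers are arbitrary.

import numpy as np

rng = np.random.default_rng(0)
q_values = {0: 0.2, 1: 0.9, 2: 0.4}   # toy stand-in for Q.predict(state, a)

epsilon, min_epsilon, decay = 0.8, 0.1, 0.9995

def choose_action(eps):
    if rng.uniform() < eps:
        return int(rng.integers(0, len(q_values)))   # explore
    return max(q_values, key=q_values.get)           # exploit: argmax over actions

# Decay epsilon over episodes, never dropping below the floor.
for episode in range(3):
    action = choose_action(epsilon)
    epsilon = max(min_epsilon, epsilon * decay)
    print(episode, action, round(epsilon, 4))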
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
###
# Project Name: chapter04 - Created Date: Tuesday October 6th 2020
# Author: loitd - Email: <EMAIL>
# Description: This is an example from the Python For Desktop book
# Copyright (c) 2020 loitd. WWW: https://github.com/loitd
# -----
# Last Modified: Tuesday October 6th 2020 2:38:40 pm By: loitd
# -----
# HISTORY:
# Date       By     Comments
# ---------- ------ ----------------------------------------------------------
# 2020-10-06 loitd  Initialized
###

# First things, first. Import the wxPython package.
import wx

# Next, create an application object.
app = wx.App()

# Then a frame.
frm = wx.Frame(None, title="Hello World")

# Show it.
frm.Show()

# Start the event loop.
app.MainLoop()
[ "wx.Frame", "wx.App" ]
[((609, 617), 'wx.App', 'wx.App', ([], {}), '()\n', (615, 617), False, 'import wx\n'), ((641, 676), 'wx.Frame', 'wx.Frame', (['None'], {'title': '"""Hello World"""'}), "(None, title='Hello World')\n", (649, 676), False, 'import wx\n')]
""" Main configuration file for the application On deployment, desired configuration can be overridden by the provision of a local.cfg file """ ################################################## # overrides for the webapp deployment DEBUG = True """is the web server in debug mode""" PORT = 5027 """port to start the webserver on""" SSL = False """support SSL requests""" THREADED = True """is the web server in threaded mode""" ############################################ # important overrides for the ES module # elasticsearch back-end connection settings ELASTIC_SEARCH_HOST = "http://gateway:9200" """base url to access elasticsearch""" ELASTIC_SEARCH_INDEX = "jper" """index name in elasticsearch where our types are stored""" ELASTIC_SEARCH_VERSION = "1.5.2" """version of elasticsearch which we're using - matters for certain semantics of requests""" # Classes from which to retrieve ES mappings to be used in this application # (note that if ELASTIC_SEARCH_DEFAULT_MAPPINGS is sufficient, you don't need to # add anything here ELASTIC_SEARCH_MAPPINGS = [ # "service.dao.MyDAO" ] """type-specific mappings to be used when initialising - currently there are none""" # initialise the index with example documents from each of the types # this will initialise each type and auto-create the relevant mappings where # example data is provided ELASTIC_SEARCH_EXAMPLE_DOCS = [ # "service.dao.MyDAO" ] """types which have their mappings initialised by example when initialising - currently there are none""" ############################################ # important overrides for account module ACCOUNT_ENABLE = False """Disable user accounts""" SECRET_KEY = "super-secret-key" """secret key for session management - only used when accounts are enabled""" ############################################# # important overrides for storage module STORE_IMPL = "octopus.modules.store.store.StoreLocal" """implementation class of the main fielstore""" STORE_TMP_IMPL = "octopus.modules.store.store.TempStore" """implementation class of the temporary local filestore""" from octopus.lib import paths STORE_LOCAL_DIR = paths.rel2abs(__file__, "..", "service", "tests", "local_store", "live") """path to local directory for local file store - specified relative to this file""" STORE_TMP_DIR = paths.rel2abs(__file__, "..", "service", "tests", "local_store", "tmp") """path to local directory for temp file store - specified relative to this file""" ############################################# # Re-try/back-off settings # from the http layer # specific to this app # Minimum amount to leave between attempts to deposit, in the event that there was a semi-permanent error # default to 1 hour LONG_CYCLE_RETRY_DELAY = 3600 """Delay in between attempts to communicate with a repository that is failing, in seconds""" # Maximum number of times to try and deposit before giving up and turning off repository sword submission # for a given account LONG_CYCLE_RETRY_LIMIT = 24 """Number of re-try attempts against a failing repository before we give up""" ############################################### ## Other app-specific settings DEFAULT_SINCE_DELTA_DAYS = 100 """Number to substract from 'last_deposit_date' (safety margin) to get the date from which the first request against the JPER API will be made, in days""" DEFAULT_SINCE_DATE = "1970-01-01T00:00:00Z" """The date from which the first request against the JPER API will be made when listing a repository's notifications""" # how many seconds in between each run of the script RUN_THROTTLE = 2 """delay 
between executions of the deposit script, in seconds""" # whether to store sword response data (receipt, etc). Recommend only to store during testing operation STORE_RESPONSE_DATA = False """Whether to store response data or not - set to True if testing"""
[ "octopus.lib.paths.rel2abs" ]
[((2137, 2209), 'octopus.lib.paths.rel2abs', 'paths.rel2abs', (['__file__', '""".."""', '"""service"""', '"""tests"""', '"""local_store"""', '"""live"""'], {}), "(__file__, '..', 'service', 'tests', 'local_store', 'live')\n", (2150, 2209), False, 'from octopus.lib import paths\n'), ((2312, 2383), 'octopus.lib.paths.rel2abs', 'paths.rel2abs', (['__file__', '""".."""', '"""service"""', '"""tests"""', '"""local_store"""', '"""tmp"""'], {}), "(__file__, '..', 'service', 'tests', 'local_store', 'tmp')\n", (2325, 2383), False, 'from octopus.lib import paths\n')]
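The configuration above encodes a retry policy: wait LONG_CYCLE_RETRY_DELAY seconds between attempts against a failing repository and stop after LONG_CYCLE_RETRY_LIMIT attempts. The loop below is one way such settings could be honoured; the deposit function is a stand-in and the delay is shrunk so the example finishes quickly, so treat it as a sketch rather than the application's actual deposit code.

import time

LONG_CYCLE_RETRY_DELAY = 0.01   # seconds between attempts (3600 in the real config)
LONG_CYCLE_RETRY_LIMIT = 5      # attempts before giving up (24 in the real config)

def fragile_deposit(attempt):
    # Stand-in for a sword deposit that only succeeds on the third try.
    return attempt >= 3

def deposit_with_retries():
    for attempt in range(1, LONG_CYCLE_RETRY_LIMIT + 1):
        if fragile_deposit(attempt):
            return f"succeeded on attempt {attempt}"
        time.sleep(LONG_CYCLE_RETRY_DELAY)   # back off before the next cycle
    return "giving up; disabling sword submission for this account"

print(deposit_with_retries())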
#! /usr/bin/env python """Toolbox for TensorFlow 2.1 CTGAN implementation.""" import os from setuptools import find_packages, setup # get __version__ from _version.py ver_file = os.path.join('ctgan', '_version.py') with open(ver_file) as f: exec(f.read()) DISTNAME = 'ctgan-tf' DESCRIPTION = 'TensorFlow 2.1 implementation of Conditional Tabular GAN.' with open('README.md') as readme_file: LONG_DESCRIPTION = readme_file.read() MAINTAINER = '<NAME>' MAINTAINER_EMAIL = '<EMAIL>' URL = 'https://github.com/pbmartins/ctgan-tf' LICENSE = 'MIT' DOWNLOAD_URL = 'https://github.com/pbmartins/ctgan-tf' VERSION = __version__ CLASSIFIERS = ['Intended Audience :: Science/Research', 'Intended Audience :: Developers', 'License :: OSI Approved', 'Programming Language :: Python', 'Topic :: Software Development', 'Topic :: Scientific/Engineering', 'Operating System :: Microsoft :: Windows', 'Operating System :: POSIX', 'Operating System :: Unix', 'Operating System :: MacOS', 'Programming Language :: Python :: 3.7'] INSTALL_REQUIRES = [ 'tensorflow<3.0,>=2.1.0', 'tensorflow-probability<1.0,>=0.9.0', 'scikit-learn<0.23,>=0.21', 'numpy<2,>=1.17.4', 'pandas<1.0.2,>=1.0', 'tqdm<4.44,>=4.43' ] EXTRAS_REQUIRE = { 'tests': [ 'pytest>=5.4.0', 'pytest-cov>=2.8.0'], 'dev': [ # general 'bump2version>=1.0.0', 'pip>=20.0.0', # style check 'flake8>=3.7.9', 'pylint-fail-under>=0.3.0', # tests 'pytest>=5.4.0', 'pytest-cov>=2.8.0', # distribute on PyPI 'twine>=3.1.1', 'wheel>=0.30.0', # Advanced testing 'coverage>=5.1', ], 'docs': [ 'm2r>=0.2.0', 'Sphinx<3.0.0,>=2.4.4', 'sphinx_rtd_theme>=0.4.3', 'autodocsumm>=0.1.10', 'numpydoc<1.0.0,>=0.9.2', 'sphinxcontrib-bibtex==1.0.0' ] } ENTRY_POINTS = { 'console_scripts': ['ctgan-tf=ctgan.__main__:cli.cli'] } setup( name=DISTNAME, author=MAINTAINER, author_email=MAINTAINER_EMAIL, description=DESCRIPTION, license=LICENSE, url=URL, version=VERSION, download_url=DOWNLOAD_URL, long_description=LONG_DESCRIPTION, long_description_content_type='text/markdown', zip_safe=False, # the package can run out of an .egg file classifiers=CLASSIFIERS, entry_points=ENTRY_POINTS, packages=find_packages(), install_requires=INSTALL_REQUIRES, tests_requires=EXTRAS_REQUIRE['tests'], extras_require=EXTRAS_REQUIRE, python_requires='>=3.7', )
[ "setuptools.find_packages", "os.path.join" ]
[((181, 217), 'os.path.join', 'os.path.join', (['"""ctgan"""', '"""_version.py"""'], {}), "('ctgan', '_version.py')\n", (193, 217), False, 'import os\n'), ((2558, 2573), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (2571, 2573), False, 'from setuptools import find_packages, setup\n')]
""" Copyright (c) 2009 <NAME> <<EMAIL>>, <NAME> <<EMAIL>>, <NAME> <<EMAIL>>. See LICENSE.txt for licensing details (MIT License). """ from pycsp_import import * import time @io def wait(seconds): time.sleep(seconds) @process def delay_output(msg, seconds): wait(seconds) print((str(msg))) Parallel( [ delay_output('%d second delay' % (i),i) for i in range(10)] ) shutdown()
[ "time.sleep" ]
[((209, 228), 'time.sleep', 'time.sleep', (['seconds'], {}), '(seconds)\n', (219, 228), False, 'import time\n')]
import aquests def test_upload (): formdata = { 'submit-name': '<NAME>', "file1": open ("./README.txt", "rb") } aquests.configure (10) for i in range (100): aquests.upload ("http://127.0.0.1:5000/upload", formdata) aquests.fetchall ()
[ "aquests.configure", "aquests.fetchall", "aquests.upload" ]
[((124, 145), 'aquests.configure', 'aquests.configure', (['(10)'], {}), '(10)\n', (141, 145), False, 'import aquests\n'), ((231, 249), 'aquests.fetchall', 'aquests.fetchall', ([], {}), '()\n', (247, 249), False, 'import aquests\n'), ((172, 228), 'aquests.upload', 'aquests.upload', (['"""http://127.0.0.1:5000/upload"""', 'formdata'], {}), "('http://127.0.0.1:5000/upload', formdata)\n", (186, 228), False, 'import aquests\n')]
from dash.dependencies import Input, Output import plotly.express as px from dash_app.app import app, districts, dff # @app.callback( # Output('temp_div', "children"), # [Input('table_districts', "derived_virtual_selected_rows"), # Input('table_districts', "derived_virtual_selected_row_ids"), # Input('table_districts', "selected_rows"), # Input('table_districts', "selected_row_ids")]) # def update_temp_div(derived_virtual_selected_rows, derived_virtual_selected_row_ids, selected_rows, selected_row_ids): # # When the table is first rendered, `derived_virtual_data` and # # `derived_virtual_selected_rows` will be `None`. This is due to an # # idiosyncracy in Dash (unsupplied properties are always None and Dash # # calls the dependent callbacks when the component is first rendered). # # So, if `rows` is `None`, then the component was just rendered # # and its value will be the same as the component's dataframe. # # Instead of setting `None` in here, you could also set # # `derived_virtual_data=df.to_rows('dict')` when you initialize # # the component. # if derived_virtual_selected_rows is None: # derived_virtual_selected_rows = ['asdaaaaaaaaaaaaaaaaa'] # # text_to_return = f""" # derived_virtual_selected_rows = {str(derived_virtual_selected_rows)} # derived_virtual_selected_row_ids = {str(derived_virtual_selected_row_ids)} # selected_rows = {str(selected_rows)} # selected_row_ids = {str(selected_row_ids)} # Okresy = {str(districts[selected_rows])} # {str(type(districts[selected_rows]))} # """ # return text_to_return @app.callback( Output('plot_districts', 'figure'), [Input('table_districts', 'selected_rows')]) def update_plot(selected_rows): # if selected rows is empty list if len(selected_rows) == 0: fig = {} else: fig = px.line(dff[dff['Okres'].isin(districts[selected_rows])], x='Datum', y='Nakažení', line_group='Okres', color='Okres', hover_name='Okres') for trace in fig.data: fig.add_annotation( x=trace.x[-1], y=trace.y[-1], text=trace.hovertext[-1], visible=True, showarrow=True, xanchor='left', # shifts text to the right from the end of the arrow ax=20) for trace in fig.data: trace.hoverinfo = 'x+y' trace.hovertemplate = '<b>%{hovertext}</b><br>Datum: %{x}<br>Nakažení: %{y}<extra></extra>' fig.update_layout(showlegend=False) fig.update_layout(margin={'l': 50, 'r': 30, 't': 0, 'b': 20}) fig.update_layout(modebar={'orientation': 'v'}) fig.update_layout(yaxis_title='Počet nakažených (kumulativně)') # fig.update_layout(xaxis_tickformat='%-d. %-m. %Y') return fig
[ "dash.dependencies.Output", "dash.dependencies.Input" ]
[((1693, 1727), 'dash.dependencies.Output', 'Output', (['"""plot_districts"""', '"""figure"""'], {}), "('plot_districts', 'figure')\n", (1699, 1727), False, 'from dash.dependencies import Input, Output\n'), ((1734, 1775), 'dash.dependencies.Input', 'Input', (['"""table_districts"""', '"""selected_rows"""'], {}), "('table_districts', 'selected_rows')\n", (1739, 1775), False, 'from dash.dependencies import Input, Output\n')]
import numpy as np def blinking(numspots, numframes=2000, avg_on_time = 20, on_fraction=0.1, subframe_blink=4): p_off = 1-on_fraction k_off = 1/(avg_on_time * subframe_blink) k_on =( k_off - p_off*k_off)/p_off print(f"p_off={p_off:.3f}, k_on={k_on:.3f}, k_off={k_off:.3f}",flush=True) blinkstate = np.random.binomial(1, on_fraction, size=numspots) for f in range(numframes): spot_ontimes = np.zeros((numspots),dtype=np.float32) for i in range(subframe_blink): turning_on = (1 - blinkstate) * np.random.binomial(1, k_on, size=numspots) remain_on = blinkstate * np.random.binomial(1, 1 - k_off, size=numspots) blinkstate = remain_on + turning_on spot_ontimes += blinkstate / subframe_blink yield spot_ontimes
[ "numpy.zeros", "numpy.random.binomial" ]
[((331, 380), 'numpy.random.binomial', 'np.random.binomial', (['(1)', 'on_fraction'], {'size': 'numspots'}), '(1, on_fraction, size=numspots)\n', (349, 380), True, 'import numpy as np\n'), ((440, 476), 'numpy.zeros', 'np.zeros', (['numspots'], {'dtype': 'np.float32'}), '(numspots, dtype=np.float32)\n', (448, 476), True, 'import numpy as np\n'), ((562, 604), 'numpy.random.binomial', 'np.random.binomial', (['(1)', 'k_on'], {'size': 'numspots'}), '(1, k_on, size=numspots)\n', (580, 604), True, 'import numpy as np\n'), ((642, 689), 'numpy.random.binomial', 'np.random.binomial', (['(1)', '(1 - k_off)'], {'size': 'numspots'}), '(1, 1 - k_off, size=numspots)\n', (660, 689), True, 'import numpy as np\n')]
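The rate algebra in blinking() can be checked directly: under the two-state (on/off) chain it simulates, the long-run probability of being on is k_on / (k_on + k_off), and the k_on chosen above makes that equal to the requested on_fraction. A small self-contained check:

avg_on_time, on_fraction, subframe_blink = 20, 0.1, 4

p_off = 1 - on_fraction
k_off = 1 / (avg_on_time * subframe_blink)
k_on = (k_off - p_off * k_off) / p_off        # same expression as in blinking()

stationary_on = k_on / (k_on + k_off)         # long-run P(on) for the two-state chain
print(f"k_on={k_on:.5f}, k_off={k_off:.5f}, stationary P(on)={stationary_on:.3f}")
assert abs(stationary_on - on_fraction) < 1e-9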
from typing import Generic, Optional, TypeVar from pydantic import BaseModel, validator from pydantic.generics import GenericModel DataT = TypeVar("DataT") class Error(BaseModel): code: int message: str class Envelope(GenericModel, Generic[DataT]): data: Optional[DataT] error: Optional[Error] @validator("error", always=True) @classmethod def check_consistency(cls, v, values): if v is not None and values["data"] is not None: raise ValueError("must not provide both data and error") if v is None and values.get("data") is None: raise ValueError("must provide data or error") return v
[ "pydantic.validator", "typing.TypeVar" ]
[((141, 157), 'typing.TypeVar', 'TypeVar', (['"""DataT"""'], {}), "('DataT')\n", (148, 157), False, 'from typing import Generic, Optional, TypeVar\n'), ((322, 353), 'pydantic.validator', 'validator', (['"""error"""'], {'always': '(True)'}), "('error', always=True)\n", (331, 353), False, 'from pydantic import BaseModel, validator\n')]
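A short usage sketch of the envelope pattern above, assuming pydantic v1 (which provides pydantic.generics) and that the Envelope and Error classes defined above are in scope; the validator enforces that exactly one of data/error is supplied.

from pydantic import ValidationError

ok = Envelope[int](data=42)
print(ok.data, ok.error)                        # 42 None

failed = Envelope[int](error=Error(code=404, message="not found"))
print(failed.error.code)                        # 404

try:
    Envelope[int]()                             # neither data nor error
except ValidationError as exc:
    print("rejected:", exc.errors()[0]["msg"])  # "must provide data or error"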
''' Written by <NAME> (<EMAIL>) Github repository: https://github.com/tmunzer/Mist_library/ ''' #### IMPORTS #### import mlib as mist_lib from mlib import cli from mlib.__debug import Console import csv import datetime console = Console(7) hours_to_report = 96 csv_delimiter = "," csv_file = "report_app_usage.csv" def _get_clients_list(mist_session, site_id): clients = mist_lib.sites.stats.clients(mist_session, site_id)["result"] return clients def _get_site_name(mist_session, site_id): site_info = mist_lib.sites.info.get(mist_session, site_id)["result"] return site_info["name"] def _convert_numbers(size): # 2**10 = 1024 power = 2**10 n = 0 power_labels = {0 : '', 1: 'K', 2: 'M', 3: 'G', 4: 'T'} while size > power: size /= power n += 1 size = round(size, 2) return "%s %sB" %(size, power_labels[n]) def _generate_site_report(mist_session, site_name, site_id, start, stop, interval): app_usage = [] clients = _get_clients_list(mist_session, site_id) console.info("%s clients to process... Please wait..." %(len(clients))) for client in clients: client_mac = client["mac"] if "username" in client: client_username = client["username"] else: client_username = "" if "hostname" in client: client_hostname = client["hostname"] else: client_hostname = "" client_app = mist_lib.sites.insights.client(mist_session, site_id, client_mac, start, stop, interval, "top-app-by-bytes")["result"] tmp={"site name": site_name, "site id": site_id, "client mac": client_mac, "username": client_username, "hostname": client_hostname} for app in client_app["top-app-by-bytes"]: usage = _convert_numbers(app["total_bytes"]) tmp[app["app"]] = usage app_usage.append(tmp) return app_usage def _save_report(app_usage): console.notice("Saving to file %s..." %(csv_file)) fields = [] for row in app_usage: for key in row: if not key in fields: fields.append(key) with open(csv_file, 'w') as output_file: dict_writer = csv.DictWriter(output_file, restval="-", fieldnames=fields, delimiter=csv_delimiter) dict_writer.writeheader() dict_writer.writerows(app_usage) console.notice("File %s saved!" %(csv_file)) def generate_report(mist_session, site_ids, time): app_usage = [] if type(site_ids) == str: site_ids = [ site_ids] for site_id in site_ids: site_name = _get_site_name(mist_session, site_id) console.info("Processing site %s (id %s)" %(site_name, site_id)) app_usage += _generate_site_report(mist_session, site_name, site_id, time["start"], time["stop"], time["interval"]) cli.show(app_usage) _save_report(app_usage) def _ask_period(hours): now = datetime.datetime.now() start = round((datetime.datetime.now() - datetime.timedelta(hours=hours)).timestamp(), 0) stop = round(now.timestamp(), 0) interval = 3600 return {"start": start, "stop": stop, "interval": interval} if __name__ == "__main__": mist_session = mist_lib.Mist_Session() site_id = cli.select_site(mist_session, allow_many=True) time = _ask_period(hours_to_report) generate_report(mist_session, site_id, time)
[ "csv.DictWriter", "mlib.Mist_Session", "mlib.__debug.Console", "mlib.sites.info.get", "mlib.sites.insights.client", "datetime.datetime.now", "mlib.cli.select_site", "mlib.cli.show", "mlib.sites.stats.clients", "datetime.timedelta" ]
[((231, 241), 'mlib.__debug.Console', 'Console', (['(7)'], {}), '(7)\n', (238, 241), False, 'from mlib.__debug import Console\n'), ((2767, 2786), 'mlib.cli.show', 'cli.show', (['app_usage'], {}), '(app_usage)\n', (2775, 2786), False, 'from mlib import cli\n'), ((2850, 2873), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2871, 2873), False, 'import datetime\n'), ((3138, 3161), 'mlib.Mist_Session', 'mist_lib.Mist_Session', ([], {}), '()\n', (3159, 3161), True, 'import mlib as mist_lib\n'), ((3176, 3222), 'mlib.cli.select_site', 'cli.select_site', (['mist_session'], {'allow_many': '(True)'}), '(mist_session, allow_many=True)\n', (3191, 3222), False, 'from mlib import cli\n'), ((379, 430), 'mlib.sites.stats.clients', 'mist_lib.sites.stats.clients', (['mist_session', 'site_id'], {}), '(mist_session, site_id)\n', (407, 430), True, 'import mlib as mist_lib\n'), ((521, 567), 'mlib.sites.info.get', 'mist_lib.sites.info.get', (['mist_session', 'site_id'], {}), '(mist_session, site_id)\n', (544, 567), True, 'import mlib as mist_lib\n'), ((2138, 2227), 'csv.DictWriter', 'csv.DictWriter', (['output_file'], {'restval': '"""-"""', 'fieldnames': 'fields', 'delimiter': 'csv_delimiter'}), "(output_file, restval='-', fieldnames=fields, delimiter=\n csv_delimiter)\n", (2152, 2227), False, 'import csv\n'), ((1411, 1523), 'mlib.sites.insights.client', 'mist_lib.sites.insights.client', (['mist_session', 'site_id', 'client_mac', 'start', 'stop', 'interval', '"""top-app-by-bytes"""'], {}), "(mist_session, site_id, client_mac, start,\n stop, interval, 'top-app-by-bytes')\n", (1441, 1523), True, 'import mlib as mist_lib\n'), ((2893, 2916), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2914, 2916), False, 'import datetime\n'), ((2919, 2950), 'datetime.timedelta', 'datetime.timedelta', ([], {'hours': 'hours'}), '(hours=hours)\n', (2937, 2950), False, 'import datetime\n')]
# Generated by Django 2.0.6 on 2018-06-16 12:47 from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('issueTracker', '0001_initial'), ] operations = [ migrations.AlterField( model_name='issue', name='assigned_to', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='assigned to'), ), migrations.AlterField( model_name='issue', name='priority', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='priority', to='issueTracker.Priority', verbose_name='priority'), ), migrations.AlterField( model_name='issue', name='status', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='status', to='issueTracker.Status', verbose_name='status'), ), migrations.AlterField( model_name='issue', name='submitter', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='submitter', to=settings.AUTH_USER_MODEL, verbose_name='submitter'), ), ]
[ "django.db.models.ForeignKey" ]
[((400, 524), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': 'settings.AUTH_USER_MODEL', 'verbose_name': '"""assigned to"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=settings.\n AUTH_USER_MODEL, verbose_name='assigned to')\n", (417, 524), False, 'from django.db import migrations, models\n'), ((642, 787), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""priority"""', 'to': '"""issueTracker.Priority"""', 'verbose_name': '"""priority"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='priority', to='issueTracker.Priority', verbose_name='priority')\n", (659, 787), False, 'from django.db import migrations, models\n'), ((903, 1042), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""status"""', 'to': '"""issueTracker.Status"""', 'verbose_name': '"""status"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='status', to='issueTracker.Status', verbose_name='status')\n", (920, 1042), False, 'from django.db import migrations, models\n'), ((1161, 1309), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""submitter"""', 'to': 'settings.AUTH_USER_MODEL', 'verbose_name': '"""submitter"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='submitter', to=settings.AUTH_USER_MODEL, verbose_name='submitter')\n", (1178, 1309), False, 'from django.db import migrations, models\n')]
from typing import Tuple from eth_typing import NodeID from eth_utils import big_endian_to_int, int_to_big_endian from eth_utils.toolz import reduce import factory def bytes_to_bits(input_bytes: bytes) -> Tuple[bool, ...]: num_bits = len(input_bytes) * 8 as_int = big_endian_to_int(input_bytes) as_bits = tuple(bool(as_int & (1 << index)) for index in range(num_bits))[::-1] return as_bits def bits_to_bytes(input_bits: Tuple[bool, ...]) -> bytes: if len(input_bits) % 8 != 0: raise ValueError("Number of input bits must be a multiple of 8") num_bytes = len(input_bits) // 8 as_int = reduce(lambda rest, bit: rest * 2 + bit, input_bits) as_bytes_unpadded = int_to_big_endian(as_int) padding = b"\x00" * (num_bytes - len(as_bytes_unpadded)) return padding + as_bytes_unpadded class NodeIDFactory(factory.Factory): # type: ignore class Meta: model = NodeID inline_args = ("node_id",) node_id = factory.Faker("binary", length=32) @classmethod def at_log_distance(cls, reference: NodeID, log_distance: int) -> NodeID: from ddht.kademlia import at_log_distance as _at_log_distance return _at_log_distance(reference, log_distance)
[ "ddht.kademlia.at_log_distance", "factory.Faker", "eth_utils.big_endian_to_int", "eth_utils.toolz.reduce", "eth_utils.int_to_big_endian" ]
[((275, 305), 'eth_utils.big_endian_to_int', 'big_endian_to_int', (['input_bytes'], {}), '(input_bytes)\n', (292, 305), False, 'from eth_utils import big_endian_to_int, int_to_big_endian\n'), ((626, 678), 'eth_utils.toolz.reduce', 'reduce', (['(lambda rest, bit: rest * 2 + bit)', 'input_bits'], {}), '(lambda rest, bit: rest * 2 + bit, input_bits)\n', (632, 678), False, 'from eth_utils.toolz import reduce\n'), ((703, 728), 'eth_utils.int_to_big_endian', 'int_to_big_endian', (['as_int'], {}), '(as_int)\n', (720, 728), False, 'from eth_utils import big_endian_to_int, int_to_big_endian\n'), ((974, 1008), 'factory.Faker', 'factory.Faker', (['"""binary"""'], {'length': '(32)'}), "('binary', length=32)\n", (987, 1008), False, 'import factory\n'), ((1191, 1232), 'ddht.kademlia.at_log_distance', '_at_log_distance', (['reference', 'log_distance'], {}), '(reference, log_distance)\n', (1207, 1232), True, 'from ddht.kademlia import at_log_distance as _at_log_distance\n')]
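A round-trip sketch for the bit helpers above, assuming the two functions are in scope and eth-utils is installed; bits come out most-significant-bit first.

data = b"\x0f\x80"

bits = bytes_to_bits(data)
print(bits[:8])    # (False, False, False, False, True, True, True, True)
print(bits[8:])    # (True, False, False, False, False, False, False, False)

assert len(bits) == len(data) * 8
assert bits_to_bytes(bits) == data              # lossless round trip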
import pickle from pathlib import Path from ez_address_parser import AddressParser ap = AddressParser(use_pretrained=False) here = Path(__file__).parent.absolute() with open(here / "ez_address_annotator/data/data.pkl", "rb") as f: data = pickle.load(f) print(f"{len(data)} labelled addresses are used to create this pretrained model") ap.train(data) address = input("Address: ") result = ap.parse(address) for token, label in result: print(f"{token:20s} -> {label}")
[ "ez_address_parser.AddressParser", "pickle.load", "pathlib.Path" ]
[((90, 125), 'ez_address_parser.AddressParser', 'AddressParser', ([], {'use_pretrained': '(False)'}), '(use_pretrained=False)\n', (103, 125), False, 'from ez_address_parser import AddressParser\n'), ((246, 260), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (257, 260), False, 'import pickle\n'), ((134, 148), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (138, 148), False, 'from pathlib import Path\n')]
from django.contrib.auth import get_user_model from rest_framework import viewsets from .permissions import IsAuthor from .models import Post from .serializers import AuthorSerializer, PostSerializer User = get_user_model() class PostViewset(viewsets.ModelViewSet): queryset = Post.objects.all() serializer_class = PostSerializer permission_classes = [IsAuthor] filter_fields = ('views', 'author__name', 'title', 'tags', 'created_at') search_fields = ('title', 'body', 'tags__name') ordering_fields = ('title', 'body', 'created_at', 'views') def get_object(self): instance = super().get_object() instance.increment_views() return instance class AuthorViewset(viewsets.ModelViewSet): queryset = User.objects.all() serializer_class = AuthorSerializer
[ "django.contrib.auth.get_user_model" ]
[((209, 225), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (223, 225), False, 'from django.contrib.auth import get_user_model\n')]
from jmetal.algorithm.multiobjective.moead import MOEAD from jmetal.core.quality_indicator import HyperVolume, InvertedGenerationalDistance from jmetal.operator import PolynomialMutation, DifferentialEvolutionCrossover from jmetal.problem import DTLZ2 from jmetal.util.aggregative_function import Tschebycheff from jmetal.util.solution import read_solutions, print_function_values_to_file, print_variables_to_file from jmetal.util.termination_criterion import StoppingByEvaluations from jmetal.problem import ZDT1, DTLZ1, UF8 from jmetal.util.observer import ProgressBarObserver, VisualizerObserver from jmetal.lab.visualization import Plot, InteractivePlot if __name__ == '__main__': import os os.chdir(os.path.dirname(os.path.dirname(os.path.dirname(os.getcwd())))) print('当前运行路径更改为:', os.getcwd()) problem = ZDT1() max_evaluations = 2000 algorithm = MOEAD( problem=problem, population_size=100, crossover=DifferentialEvolutionCrossover(CR=1.0, F=0.5), mutation=PolynomialMutation(probability=1.0 / problem.number_of_variables, distribution_index=20), aggregative_function=Tschebycheff(dimension=problem.number_of_objectives), neighbor_size=20, neighbourhood_selection_probability=0.9, max_number_of_replaced_solutions=20, weight_files_path='resources/MOEAD_weights', termination_criterion=StoppingByEvaluations(max_evaluations=max_evaluations) ) algorithm.observable.register(observer=ProgressBarObserver(max=max_evaluations)) algorithm.observable.register(observer=VisualizerObserver(reference_front=problem.reference_front)) algorithm.run() front = algorithm.get_result() hypervolume = HyperVolume([1.0, 1.0, 1.0]) print("Hypervolume: " + str(hypervolume.compute([front[i].objectives for i in range(len(front))]))) # 2.静态图:static plot_front = Plot(title='Pareto front approximation. Problem: ' + problem.get_name(), reference_front=problem.reference_front, axis_labels=problem.obj_labels) plot_front.plot(front, label=algorithm.label, filename=algorithm.get_name())
[ "jmetal.util.aggregative_function.Tschebycheff", "jmetal.util.observer.ProgressBarObserver", "jmetal.operator.PolynomialMutation", "jmetal.util.observer.VisualizerObserver", "os.getcwd", "jmetal.operator.DifferentialEvolutionCrossover", "jmetal.util.termination_criterion.StoppingByEvaluations", "jmetal.core.quality_indicator.HyperVolume", "jmetal.problem.ZDT1" ]
[((830, 836), 'jmetal.problem.ZDT1', 'ZDT1', ([], {}), '()\n', (834, 836), False, 'from jmetal.problem import ZDT1, DTLZ1, UF8\n'), ((1727, 1755), 'jmetal.core.quality_indicator.HyperVolume', 'HyperVolume', (['[1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0])\n', (1738, 1755), False, 'from jmetal.core.quality_indicator import HyperVolume, InvertedGenerationalDistance\n'), ((814, 825), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (823, 825), False, 'import os\n'), ((961, 1006), 'jmetal.operator.DifferentialEvolutionCrossover', 'DifferentialEvolutionCrossover', ([], {'CR': '(1.0)', 'F': '(0.5)'}), '(CR=1.0, F=0.5)\n', (991, 1006), False, 'from jmetal.operator import PolynomialMutation, DifferentialEvolutionCrossover\n'), ((1025, 1117), 'jmetal.operator.PolynomialMutation', 'PolynomialMutation', ([], {'probability': '(1.0 / problem.number_of_variables)', 'distribution_index': '(20)'}), '(probability=1.0 / problem.number_of_variables,\n distribution_index=20)\n', (1043, 1117), False, 'from jmetal.operator import PolynomialMutation, DifferentialEvolutionCrossover\n'), ((1144, 1196), 'jmetal.util.aggregative_function.Tschebycheff', 'Tschebycheff', ([], {'dimension': 'problem.number_of_objectives'}), '(dimension=problem.number_of_objectives)\n', (1156, 1196), False, 'from jmetal.util.aggregative_function import Tschebycheff\n'), ((1401, 1455), 'jmetal.util.termination_criterion.StoppingByEvaluations', 'StoppingByEvaluations', ([], {'max_evaluations': 'max_evaluations'}), '(max_evaluations=max_evaluations)\n', (1422, 1455), False, 'from jmetal.util.termination_criterion import StoppingByEvaluations\n'), ((1506, 1546), 'jmetal.util.observer.ProgressBarObserver', 'ProgressBarObserver', ([], {'max': 'max_evaluations'}), '(max=max_evaluations)\n', (1525, 1546), False, 'from jmetal.util.observer import ProgressBarObserver, VisualizerObserver\n'), ((1591, 1650), 'jmetal.util.observer.VisualizerObserver', 'VisualizerObserver', ([], {'reference_front': 'problem.reference_front'}), '(reference_front=problem.reference_front)\n', (1609, 1650), False, 'from jmetal.util.observer import ProgressBarObserver, VisualizerObserver\n'), ((762, 773), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (771, 773), False, 'import os\n')]
import graphene class Person(graphene.ObjectType): first_name = graphene.String( name=graphene.Argument( graphene.String, default_value='name' ) ) last_name = graphene.String() full_name = graphene.String() def resolve_full_name(self, args, context, info): return '{} {}'.format(self.first_name, self.last_name) schema = graphene.Schema(query=Person) result = schema.execute('{ full_name }') if isinstance(result.data, (dict,)): print(result.data)
[ "graphene.String", "graphene.Schema", "graphene.Argument" ]
[((394, 423), 'graphene.Schema', 'graphene.Schema', ([], {'query': 'Person'}), '(query=Person)\n', (409, 423), False, 'import graphene\n'), ((213, 230), 'graphene.String', 'graphene.String', ([], {}), '()\n', (228, 230), False, 'import graphene\n'), ((247, 264), 'graphene.String', 'graphene.String', ([], {}), '()\n', (262, 264), False, 'import graphene\n'), ((100, 156), 'graphene.Argument', 'graphene.Argument', (['graphene.String'], {'default_value': '"""name"""'}), "(graphene.String, default_value='name')\n", (117, 156), False, 'import graphene\n')]
#!/usr/bin/env python # -*- coding:utf-8 -*- # author:owefsad # software: PyCharm # project: lingzhi-webapi from dongtai.endpoint import R from dongtai.endpoint import TalentAdminEndPoint from dongtai.models import User from django.utils.translation import gettext_lazy as _ class UserDetailEndPoint(TalentAdminEndPoint): def get(self, request, user_id): try: user = User.objects.filter(id=user_id).first() talent = user.get_talent() if talent: current_talent = request.user.get_talent() if current_talent == talent: department = user.get_department() return R.success(data={ 'username': user.get_username(), 'department': department.get_department_name(), 'talent': talent.get_talent_name() }) except: pass return R.failure(status=203, msg=_('no permission'))
[ "dongtai.models.User.objects.filter", "django.utils.translation.gettext_lazy" ]
[((1007, 1025), 'django.utils.translation.gettext_lazy', '_', (['"""no permission"""'], {}), "('no permission')\n", (1008, 1025), True, 'from django.utils.translation import gettext_lazy as _\n'), ((395, 426), 'dongtai.models.User.objects.filter', 'User.objects.filter', ([], {'id': 'user_id'}), '(id=user_id)\n', (414, 426), False, 'from dongtai.models import User\n')]
# # Advent of Code 2015 # Day 04 # # By PatchesTheDipstick # import hashlib if __name__ == '__main__': input = "yzbqklnj" index = 0 part_one = 0 part_two = 0 part_one_done = False part_two_done = False complete = False while complete == False: key = bytes(input + str(index), encoding="utf-8") hash = hashlib.md5(key).hexdigest() if (hash[:5] == "00000" and part_one == 0): part_one = index part_one_done = True if (hash[:6] == "000000" and part_two == 0): part_two = index part_two_done = True index += 1 if part_one_done and part_two_done: complete = True #print(index, hash[:5]) print("Advent of Code 2015 Day 04") print(" Part one:", part_one) print(" Part two:", part_two)
[ "hashlib.md5" ]
[((325, 341), 'hashlib.md5', 'hashlib.md5', (['key'], {}), '(key)\n', (336, 341), False, 'import hashlib\n')]
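The search above is a small proof-of-work-style loop: find the smallest integer suffix whose MD5 digest starts with a run of zeros. A compact, self-contained variant, shown on the example key used in the 2015 Day 4 puzzle statement:

import hashlib
from itertools import count

def mine(secret: str, zeros: int) -> int:
    prefix = "0" * zeros
    for i in count():
        digest = hashlib.md5(f"{secret}{i}".encode("utf-8")).hexdigest()
        if digest.startswith(prefix):
            return i

print(mine("abcdef", 5))    # 609043, per the worked example in the puzzle text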
from distribute_setup import use_setuptools use_setuptools() from setuptools import setup, find_packages PACKAGE = 'mybic' VERSION = __import__(PACKAGE).get_version() kwargs = { 'name': PACKAGE, 'version': VERSION, 'packages': find_packages(exclude=[ 'tests', '*.tests', 'tests.*', '*.tests.*', ]), 'install_requires': [], 'test_suite': 'test_suite', 'tests_require': [], 'author': '', 'author_email': '', 'description': '', 'license': '', 'keywords': '', 'url': '', 'classifiers': [], } setup(**kwargs)
[ "distribute_setup.use_setuptools", "setuptools.find_packages", "setuptools.setup" ]
[((44, 60), 'distribute_setup.use_setuptools', 'use_setuptools', ([], {}), '()\n', (58, 60), False, 'from distribute_setup import use_setuptools\n'), ((583, 598), 'setuptools.setup', 'setup', ([], {}), '(**kwargs)\n', (588, 598), False, 'from setuptools import setup, find_packages\n'), ((242, 309), 'setuptools.find_packages', 'find_packages', ([], {'exclude': "['tests', '*.tests', 'tests.*', '*.tests.*']"}), "(exclude=['tests', '*.tests', 'tests.*', '*.tests.*'])\n", (255, 309), False, 'from setuptools import setup, find_packages\n')]
#! /usr/bin/python3 from abc import ABCMeta, abstractmethod import time from typing import Type from design import Design from estimator import Estimator from evaluator import Evaluator import numpy as np import pandas as pd class Plan(metaclass=ABCMeta): def __init__(self): self.evaluators = {} self.designs = {} def add_design(self, design_name, design_class: Type[Design], estimator_class: Type[Estimator], design_kwargs = None): self.designs[design_name] = (design_class, estimator_class, design_kwargs) def add_evaluator(self, evaluator_name: str, evaluator: Evaluator): self.evaluators[evaluator_name] = evaluator() def execute(self, dgp_factory, seed): np.random.seed(seed) dgp = dgp_factory.create_dgp() self.dgp = dgp X = dgp.X Y0 = dgp.Y([0] * dgp.n) Y1 = dgp.Y([1] * dgp.n) ITE = dgp.ITE() ATE = dgp.ATE() results = [] for design_name, (design_class, estimator_class, design_kwargs) in self.designs.items(): def make_row(name, value): return pd.DataFrame({"design": [design_name], "metric": [name], "value": [value]}) time_start = time.time() if design_kwargs is None: design_kwargs = {} design = design_class(**design_kwargs) design.fit(X) A = design.assign(X) time_end = time.time() time_elapsed = time_end - time_start results.append(make_row("time_design", time_elapsed)) YA = np.where(A==1, Y1, Y0) time_start = time.time() estimator = estimator_class(design) ITEhat = estimator.ITE(X, A, YA) ATEhat = estimator.ATE(X, A, YA) time_end = time.time() time_elapsed = time_end - time_start results.append(make_row("time_estimation", time_elapsed)) for name, evaluator in self.evaluators.items(): val = evaluator.evaluate(X, Y0, Y1, ATE, ITE, A, YA, ATEhat, ITEhat) results.append(make_row(name, val)) return pd.concat(results)
[ "numpy.where", "numpy.random.seed", "time.time", "pandas.DataFrame", "pandas.concat" ]
[((724, 744), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (738, 744), True, 'import numpy as np\n'), ((2144, 2162), 'pandas.concat', 'pd.concat', (['results'], {}), '(results)\n', (2153, 2162), True, 'import pandas as pd\n'), ((1218, 1229), 'time.time', 'time.time', ([], {}), '()\n', (1227, 1229), False, 'import time\n'), ((1436, 1447), 'time.time', 'time.time', ([], {}), '()\n', (1445, 1447), False, 'import time\n'), ((1580, 1604), 'numpy.where', 'np.where', (['(A == 1)', 'Y1', 'Y0'], {}), '(A == 1, Y1, Y0)\n', (1588, 1604), True, 'import numpy as np\n'), ((1628, 1639), 'time.time', 'time.time', ([], {}), '()\n', (1637, 1639), False, 'import time\n'), ((1801, 1812), 'time.time', 'time.time', ([], {}), '()\n', (1810, 1812), False, 'import time\n'), ((1117, 1192), 'pandas.DataFrame', 'pd.DataFrame', (["{'design': [design_name], 'metric': [name], 'value': [value]}"], {}), "({'design': [design_name], 'metric': [name], 'value': [value]})\n", (1129, 1192), True, 'import pandas as pd\n')]
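The execute() loop above follows standard potential-outcomes bookkeeping: both Y0 and Y1 exist in the simulation, but the estimator only ever sees the outcome selected by the assignment A. A self-contained sketch with hypothetical numbers and a simple difference-in-means estimate:

import numpy as np

rng = np.random.default_rng(0)
n = 10_000
Y0 = rng.normal(0.0, 1.0, size=n)          # potential outcome under control
Y1 = Y0 + 2.0                               # potential outcome under treatment (true ATE = 2)
A = rng.integers(0, 2, size=n)              # random assignment
YA = np.where(A == 1, Y1, Y0)                # only the assigned outcome is observed

ate_hat = YA[A == 1].mean() - YA[A == 0].mean()
print(f"true ATE = {(Y1 - Y0).mean():.3f}, difference-in-means estimate = {ate_hat:.3f}")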
#!/usr/bin/env python2 def main(): import sys import cbor import json data = sys.stdin.read() print('LEN: %d' % len(data)) sys.stdout.flush() print('HEX: ' + data.encode('hex')) sys.stdout.flush() doc = cbor.loads(data) print('REPR: ' + repr(doc)) sys.stdout.flush() try: print('JSON: ' + json.dumps(doc)) sys.stdout.flush() except: print('JSON: cannot encode') sys.stdout.flush() if __name__ == '__main__': main()
[ "sys.stdout.flush", "cbor.loads", "sys.stdin.read", "json.dumps" ]
[((95, 111), 'sys.stdin.read', 'sys.stdin.read', ([], {}), '()\n', (109, 111), False, 'import sys\n'), ((149, 167), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (165, 167), False, 'import sys\n'), ((212, 230), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (228, 230), False, 'import sys\n'), ((241, 257), 'cbor.loads', 'cbor.loads', (['data'], {}), '(data)\n', (251, 257), False, 'import cbor\n'), ((294, 312), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (310, 312), False, 'import sys\n'), ((372, 390), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (388, 390), False, 'import sys\n'), ((448, 466), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (464, 466), False, 'import sys\n'), ((347, 362), 'json.dumps', 'json.dumps', (['doc'], {}), '(doc)\n', (357, 362), False, 'import json\n')]
"""Test show command output parsing.""" from ttp_sros_parser.srosparser import SrosParser def test_file_dir_output(parsed_show_file_dir): """ Test parsing file dir output """ data = "tests/fixtures/show_output/file_dir.txt" parser = SrosParser(data) result = parser.show_file_dir() assert result == parsed_show_file_dir def test_get_show_bof(parsed_show_bof): """Test parsing show bof output.""" data = "tests/fixtures/show_output/show_bof.txt" sros_parser = SrosParser(data) result = sros_parser.show_bof() assert result == parsed_show_bof def test_show_router_interface(parsed_show_router_int): """ Test extracting router interfaces from show command """ data = "tests/fixtures/show_output/show_router_interface.txt" sros_parser = SrosParser(data) result = sros_parser.show_router_interface() assert result == parsed_show_router_int def test_show_service_service_using(parsed_show_service_using): """ Test parsing show service service using show command """ example_output = "tests/fixtures/show_output/show_service_service_using.txt" parser = SrosParser(example_output) result = parser.show_service_service_using() assert result, parsed_show_service_using def test_get_show_route_table(parsed_show_router_table): """Test general show router route table.""" data = "tests/fixtures/show_output/show_route_table.txt" sros_parser = SrosParser(data) result = sros_parser.show_router_route_table() assert result == parsed_show_router_table def test_get_show_router_static_route_v4(parsed_static_route_v4): """Test general show router static route.""" data = "tests/fixtures/show_output/show_router_static_route_tag.txt" sros_parser = SrosParser(data) result = sros_parser.show_router_static_route(protocol="ipv4") assert result == parsed_static_route_v4 def test_get_show_router_static_route_v4_default(parsed_static_route_v4): """Test general show router static route, default v4 without protocol specified.""" data = "tests/fixtures/show_output/show_router_static_route_tag.txt" sros_parser = SrosParser(data) result = sros_parser.show_router_static_route() assert result == parsed_static_route_v4 def test_get_show_router_static_route_v6(parsed_static_route_v6): """Test general show router static route, ipv6 specified.""" data = "tests/fixtures/show_output/show_router_static_route_tag_v6.txt" sros_parser = SrosParser(data) result = sros_parser.show_router_static_route(protocol="IPV6") assert result == parsed_static_route_v6
[ "ttp_sros_parser.srosparser.SrosParser" ]
[((255, 271), 'ttp_sros_parser.srosparser.SrosParser', 'SrosParser', (['data'], {}), '(data)\n', (265, 271), False, 'from ttp_sros_parser.srosparser import SrosParser\n'), ((504, 520), 'ttp_sros_parser.srosparser.SrosParser', 'SrosParser', (['data'], {}), '(data)\n', (514, 520), False, 'from ttp_sros_parser.srosparser import SrosParser\n'), ((809, 825), 'ttp_sros_parser.srosparser.SrosParser', 'SrosParser', (['data'], {}), '(data)\n', (819, 825), False, 'from ttp_sros_parser.srosparser import SrosParser\n'), ((1153, 1179), 'ttp_sros_parser.srosparser.SrosParser', 'SrosParser', (['example_output'], {}), '(example_output)\n', (1163, 1179), False, 'from ttp_sros_parser.srosparser import SrosParser\n'), ((1461, 1477), 'ttp_sros_parser.srosparser.SrosParser', 'SrosParser', (['data'], {}), '(data)\n', (1471, 1477), False, 'from ttp_sros_parser.srosparser import SrosParser\n'), ((1783, 1799), 'ttp_sros_parser.srosparser.SrosParser', 'SrosParser', (['data'], {}), '(data)\n', (1793, 1799), False, 'from ttp_sros_parser.srosparser import SrosParser\n'), ((2166, 2182), 'ttp_sros_parser.srosparser.SrosParser', 'SrosParser', (['data'], {}), '(data)\n', (2176, 2182), False, 'from ttp_sros_parser.srosparser import SrosParser\n'), ((2506, 2522), 'ttp_sros_parser.srosparser.SrosParser', 'SrosParser', (['data'], {}), '(data)\n', (2516, 2522), False, 'from ttp_sros_parser.srosparser import SrosParser\n')]
import hashlib import datetime import copy class Block(): def __init__(self, index, timestamp, date, previous_hash): self.index = index self.timestamp = timestamp self.date = date self.previous_hash = previous_hash self.hash = self.hashing() def hashing(self): key = hashlib.sha256() key.update(str(self.index).encode('utf-8')) key.update(str(self.timestamp).encode('utf-8')) key.update(str(self.date).encode('utf-8')) key.update(str(self.previous_hash).encode('utf-8')) return key.hexdigest() class Chain(): def __init__(self): self.blocks = [self.__get_genesis_block()] def __get_genesis_block(self): return Block(0, datetime.datetime.utcnow(), 'Genesis', 'arbitrary') def add_block(self, date): return self.blocks.append(Block(len(self.blocks), datetime.datetime.utcnow(), date, self.blocks[len(self.blocks) - 1].hash)) def get_chain_size(self): return len(self.blocks) - 1 def verify(self, verbose = True): flag = True for i in range(1, len(self.blocks)): if self.blocks[i].index != i: flag = False if verbose: print(f'Wrong block index at block {i}') if self.blocks[i - 1].hash != self.blocks[i].previous_hash: flag = False if verbose: print(f'Wrong previous hash at block {i}') if self.blocks[i].hash != self.blocks[i].hashing(): flag = False if verbose: print(f'Wrong hash at block {i}') if self.blocks[i - 1].timestamp >= self.blocks[i].timestamp: flag = False if verbose: print(f'Backdating at block {i}') return flag def fork(self, head = 'latest'): if head in ['latest', 'whole', 'all']: return copy.deepcopy(self) else: c = copy.deepcopy(self) c.blocks = c.blocks[0: head + 1] return c def get_root(self, chain_2): min_chain_size = min(self.get_chain_size(), chain_2.get_chain_size()) for i in range(1, min_chain_size): if self.blocks[i] != chain_2.blocks[i]: return self.fork(i - 1) return self.fork(min_chain_size)
[ "hashlib.sha256", "copy.deepcopy", "datetime.datetime.utcnow" ]
[((282, 298), 'hashlib.sha256', 'hashlib.sha256', ([], {}), '()\n', (296, 298), False, 'import hashlib\n'), ((651, 677), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (675, 677), False, 'import datetime\n'), ((1621, 1640), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (1634, 1640), False, 'import copy\n'), ((1656, 1675), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (1669, 1675), False, 'import copy\n'), ((783, 809), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (807, 809), False, 'import datetime\n')]
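A brief usage sketch, assuming the Block and Chain classes above are in scope: grow a chain, verify it, then tamper with a stored block and watch verification fail because the stored hash no longer matches a recomputed one.

import time

chain = Chain()
for entry in ("first entry", "second entry"):
    time.sleep(0.01)                 # keep timestamps strictly increasing
    chain.add_block(entry)

print(chain.get_chain_size())        # 2 (the genesis block is not counted)
print(chain.verify(verbose=False))   # True

chain.blocks[1].date = "tampered"    # alter stored data without re-hashing
print(chain.verify())                # reports 'Wrong hash at block 1' and returns False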
from enum import Enum from misc import dimensions class element_type(Enum): enemy = 0 trap = 1 treasure = 2 other = 3 class room_element: def __init__(self): self.dims = dimensions() self.type = element_type.other self.xp_value = 0 self.name = "NONAME" def space(self): return self.dims.w * self.dims.h class enemy(room_element): def __init__(self): super().__init__() self.type = element_type.enemy class trap(room_element): def __init__(self): super().__init__() self.type = element_type.trap class treasure(room_element): def __init__(self): super().__init__() self.type = element_type.treasure self.gp_value = 0
[ "misc.dimensions" ]
[((217, 229), 'misc.dimensions', 'dimensions', ([], {}), '()\n', (227, 229), False, 'from misc import dimensions\n')]
from werkzeug.security import safe_str_cmp from user import User def authenticate(username, password): user = User.find_by_usermane(username) if user and safe_str_cmp(user.password, password): return user def identity(payload): user_id = payload['identity'] return User.find_by_id(user_id)
[ "werkzeug.security.safe_str_cmp", "user.User.find_by_usermane", "user.User.find_by_id" ]
[((115, 146), 'user.User.find_by_usermane', 'User.find_by_usermane', (['username'], {}), '(username)\n', (136, 146), False, 'from user import User\n'), ((292, 316), 'user.User.find_by_id', 'User.find_by_id', (['user_id'], {}), '(user_id)\n', (307, 316), False, 'from user import User\n'), ((163, 200), 'werkzeug.security.safe_str_cmp', 'safe_str_cmp', (['user.password', 'password'], {}), '(user.password, password)\n', (175, 200), False, 'from werkzeug.security import safe_str_cmp\n')]
import pytest from unittest.mock import patch from .test_utils import * from tlbx import Script, Arg @Script( Arg("name", help="A name"), Arg("--flag", help="A flag", action="store_true", required=False, default=True), ) def script(name, flag): print("Name:", name) print("Flag:", flag) def test_run_script(): with patch("argparse._sys.argv", ["test_script_decorator.py", "test"]): script() def test_help(): with patch("argparse._sys.argv", ["test_script_decorator.py", "--help"]): with pytest.raises(SystemExit) as pytest_wrapped_e: script()
[ "tlbx.Arg", "unittest.mock.patch", "pytest.raises" ]
[((117, 143), 'tlbx.Arg', 'Arg', (['"""name"""'], {'help': '"""A name"""'}), "('name', help='A name')\n", (120, 143), False, 'from tlbx import Script, Arg\n'), ((149, 228), 'tlbx.Arg', 'Arg', (['"""--flag"""'], {'help': '"""A flag"""', 'action': '"""store_true"""', 'required': '(False)', 'default': '(True)'}), "('--flag', help='A flag', action='store_true', required=False, default=True)\n", (152, 228), False, 'from tlbx import Script, Arg\n'), ((340, 405), 'unittest.mock.patch', 'patch', (['"""argparse._sys.argv"""', "['test_script_decorator.py', 'test']"], {}), "('argparse._sys.argv', ['test_script_decorator.py', 'test'])\n", (345, 405), False, 'from unittest.mock import patch\n'), ((452, 519), 'unittest.mock.patch', 'patch', (['"""argparse._sys.argv"""', "['test_script_decorator.py', '--help']"], {}), "('argparse._sys.argv', ['test_script_decorator.py', '--help'])\n", (457, 519), False, 'from unittest.mock import patch\n'), ((534, 559), 'pytest.raises', 'pytest.raises', (['SystemExit'], {}), '(SystemExit)\n', (547, 559), False, 'import pytest\n')]
import requests from automationutils.UserAgentUtility import UserAgentUtility from automationutils.WebDriverManager import WebDriverManager driver = WebDriverManager() class ProxyUtility: @staticmethod def get_proxies(): driver.get_webdriver().get("https://free-proxy-list.net/") proxies = set() driver.get_webdriver().find_element_by_xpath('//th[7]//select').send_keys('yes') for element in driver.get_webdriver().find_elements_by_xpath('//tbody/tr')[:20]: if element.find_elements_by_xpath('.//td[7][contains(text(),"yes")]'): ip = element.find_element_by_xpath('.//td[1]').text port = element.find_element_by_xpath('.//td[2]').text proxy = ":".join([ip, port]) proxies.add(proxy) driver.shutdown_webdriver() return proxies @staticmethod def get_response_with_user_agent_and_proxy(url): url = url json_response = '' for proxy in ProxyUtility().get_proxies(): user_agent = UserAgentUtility().get_user_agent() driver.shutdown_webdriver() print("Requesting proxy: " + proxy) print( "Using User Agent: " + user_agent) # TODO: Store user agent list to file and read file to avoid all the browser session try: response = requests.get(url, headers={'User-Agent': user_agent}, proxies={"http": proxy, "https": proxy}) json_response = response.json() print("Using HTTP proxy %s" + json_response) break except: json_response = 'Skipping. Proxy Connection error!' print(json_response) if 'Skipping. Proxy Connection error!' in json_response: ProxyUtility().get_response_with_user_agent_and_proxy(url) return response # Remove comment # from below to test base implementation # RandomProxy().get_response_with_user_agent_and_proxy('https://httpbin.org/user-agent')
[ "requests.get", "automationutils.UserAgentUtility.UserAgentUtility", "automationutils.WebDriverManager.WebDriverManager" ]
[((151, 169), 'automationutils.WebDriverManager.WebDriverManager', 'WebDriverManager', ([], {}), '()\n', (167, 169), False, 'from automationutils.WebDriverManager import WebDriverManager\n'), ((1381, 1479), 'requests.get', 'requests.get', (['url'], {'headers': "{'User-Agent': user_agent}", 'proxies': "{'http': proxy, 'https': proxy}"}), "(url, headers={'User-Agent': user_agent}, proxies={'http':\n proxy, 'https': proxy})\n", (1393, 1479), False, 'import requests\n'), ((1057, 1075), 'automationutils.UserAgentUtility.UserAgentUtility', 'UserAgentUtility', ([], {}), '()\n', (1073, 1075), False, 'from automationutils.UserAgentUtility import UserAgentUtility\n')]
import os,warnings import string from igf_data.utils.dbutils import read_dbconf_json from igf_data.igfdb.baseadaptor import BaseAdaptor from igf_data.igfdb.igfTables import Base,Experiment,Project,Sample,Sample_attribute from igf_data.igfdb.projectadaptor import ProjectAdaptor from igf_data.igfdb.sampleadaptor import SampleAdaptor from igf_data.igfdb.experimentadaptor import ExperimentAdaptor from igf_data.task_tracking.igf_slack import IGF_slack class Experiment_metadata_updator: ''' A class for updating metadata for experiment table in database ''' def __init__(self,dbconfig_file,log_slack=True,slack_config=None): ''' :param dbconfig_file: A database configuration file path :param log_slack: A boolean flag for toggling Slack messages, default True :param slack_config: A file containing Slack tokens, default None ''' try: dbparams = read_dbconf_json(dbconfig_file) self.base_adaptor=BaseAdaptor(**dbparams) self.log_slack=log_slack if log_slack and slack_config is None: raise ValueError('Missing slack config file') elif log_slack and slack_config: self.igf_slack = IGF_slack(slack_config) # add slack object except: raise @staticmethod def _text_sum(a=None): if isinstance(a,list): return ';'.join(a) else: return a def update_metadta_from_sample_attribute(self,experiment_igf_id=None, sample_attribute_names=('library_source', 'library_strategy', 'experiment_type')): ''' A method for fetching experiment metadata from sample_attribute tables :param experiment_igf_id: An experiment igf id for updating only a selected experiment, default None for all experiments :param sample_attribute_names: A list of sample attribute names to look for experiment metadata, default: library_source, library_strategy, experiment_type ''' try: sample_attribute_names = list(sample_attribute_names) db_connected=False base=self.base_adaptor base.start_session() db_connected=True query=base.session.\ query(Experiment.experiment_igf_id).\ distinct(Experiment.experiment_id).\ join(Sample).\ join(Sample_attribute).\ filter(Sample.sample_id==Experiment.sample_id).\ filter(Sample.sample_id==Sample_attribute.sample_id).\ filter(Experiment.library_source=='UNKNOWN').\ filter(Experiment.library_strategy=='UNKNOWN').\ filter(Experiment.experiment_type=='UNKNOWN').\ filter(Sample_attribute.attribute_value.notin_('UNKNOWN')).\ filter(Sample_attribute.attribute_name.in_(sample_attribute_names)) # base query for db lookup if experiment_igf_id is not None: query=query.filter(Experiment.experiment_igf_id==experiment_igf_id) # look for specific experiment_igf_id exp_update_count=0 exps=base.fetch_records(query, output_mode='object') # fetch exp records as generator expression for row in exps: experiment_id=row[0] ea=ExperimentAdaptor(**{'session':base.session}) attributes=ea.fetch_sample_attribute_records_for_experiment_igf_id(experiment_igf_id=experiment_id, output_mode='object', attribute_list=sample_attribute_names) exp_update_data=dict() for attribute_row in attributes: exp_update_data.update({attribute_row.attribute_name:attribute_row.attribute_value}) if len(exp_update_data.keys())>0: exp_update_count+=1 ea.update_experiment_records_by_igf_id(experiment_igf_id=experiment_id, update_data=exp_update_data, autosave=False) # update experiment entry if attribute records are found base.commit_session() base.close_session() db_connected=False if self.log_slack: message='Update {0} experiments from sample attribute records'.\ format(exp_update_count) self.igf_slack.post_message_to_channel(message=message, reaction='pass') except Exception 
as e: if db_connected: base.rollback_session() base.close_session() message='Error while updating experiment records: {0}'.format(e) warnings.warn(message) if self.log_slack: self.igf_slack.post_message_to_channel(message=message, reaction='fail') raise if __name__ == '__main__': from sqlalchemy import create_engine dbparams = read_dbconf_json('data/dbconfig.json') dbname=dbparams['dbname'] if os.path.exists(dbname): os.remove(dbname) base=BaseAdaptor(**dbparams) Base.metadata.create_all(base.engine) base.start_session() project_data=[{'project_igf_id':'IGFP0001_test_22-8-2017_rna_sc', 'project_name':'test_22-8-2017_rna', 'description':'Its project 1', 'project_deadline':'Before August 2017', 'comments':'Some samples are treated with drug X', }] pa=ProjectAdaptor(**{'session':base.session}) pa.store_project_and_attribute_data(data=project_data) sample_data=[{'sample_igf_id':'IGF00001', 'project_igf_id':'IGFP0001_test_22-8-2017_rna_sc', 'library_source':'TRANSCRIPTOMIC_SINGLE_CELL', 'library_strategy':'RNA-SEQ', 'experiment_type':'POLYA-RNA'}, {'sample_igf_id':'IGF00003', 'project_igf_id':'IGFP0001_test_22-8-2017_rna_sc', 'library_source':'TRANSCRIPTOMIC_SINGLE_CELL', 'experiment_type':'POLYA-RNA'}, {'sample_igf_id':'IGF00002', 'project_igf_id':'IGFP0001_test_22-8-2017_rna_sc',}, ] sa=SampleAdaptor(**{'session':base.session}) sa.store_sample_and_attribute_data(data=sample_data) experiment_data=[{'project_igf_id':'IGFP0001_test_22-8-2017_rna_sc', 'sample_igf_id':'IGF00001', 'experiment_igf_id':'IGF00001_HISEQ4000', 'library_name':'IGF00001'}, {'project_igf_id':'IGFP0001_test_22-8-2017_rna_sc', 'sample_igf_id':'IGF00003', 'experiment_igf_id':'IGF00003_HISEQ4000', 'library_name':'IGF00001'}, {'project_igf_id':'IGFP0001_test_22-8-2017_rna_sc', 'sample_igf_id':'IGF00002', 'experiment_igf_id':'IGF00002_HISEQ4000', 'library_name':'IGF00002'}, ] ea=ExperimentAdaptor(**{'session':base.session}) ea.store_project_and_attribute_data(data=experiment_data) base.close_session() emu=Experiment_metadata_updator(dbconfig_file='data/dbconfig.json', log_slack=False) emu.update_metadta_from_sample_attribute() if os.path.exists(dbname): os.remove(dbname)
[ "os.path.exists", "igf_data.igfdb.baseadaptor.BaseAdaptor", "igf_data.igfdb.igfTables.Sample_attribute.attribute_name.in_", "igf_data.igfdb.igfTables.Base.metadata.create_all", "igf_data.igfdb.projectadaptor.ProjectAdaptor", "igf_data.igfdb.sampleadaptor.SampleAdaptor", "igf_data.task_tracking.igf_slack.IGF_slack", "warnings.warn", "igf_data.igfdb.igfTables.Sample_attribute.attribute_value.notin_", "igf_data.utils.dbutils.read_dbconf_json", "igf_data.igfdb.experimentadaptor.ExperimentAdaptor", "os.remove" ]
[((5040, 5078), 'igf_data.utils.dbutils.read_dbconf_json', 'read_dbconf_json', (['"""data/dbconfig.json"""'], {}), "('data/dbconfig.json')\n", (5056, 5078), False, 'from igf_data.utils.dbutils import read_dbconf_json\n'), ((5112, 5134), 'os.path.exists', 'os.path.exists', (['dbname'], {}), '(dbname)\n', (5126, 5134), False, 'import os, warnings\n'), ((5166, 5189), 'igf_data.igfdb.baseadaptor.BaseAdaptor', 'BaseAdaptor', ([], {}), '(**dbparams)\n', (5177, 5189), False, 'from igf_data.igfdb.baseadaptor import BaseAdaptor\n'), ((5192, 5229), 'igf_data.igfdb.igfTables.Base.metadata.create_all', 'Base.metadata.create_all', (['base.engine'], {}), '(base.engine)\n', (5216, 5229), False, 'from igf_data.igfdb.igfTables import Base, Experiment, Project, Sample, Sample_attribute\n'), ((5573, 5616), 'igf_data.igfdb.projectadaptor.ProjectAdaptor', 'ProjectAdaptor', ([], {}), "(**{'session': base.session})\n", (5587, 5616), False, 'from igf_data.igfdb.projectadaptor import ProjectAdaptor\n'), ((6297, 6339), 'igf_data.igfdb.sampleadaptor.SampleAdaptor', 'SampleAdaptor', ([], {}), "(**{'session': base.session})\n", (6310, 6339), False, 'from igf_data.igfdb.sampleadaptor import SampleAdaptor\n'), ((7106, 7152), 'igf_data.igfdb.experimentadaptor.ExperimentAdaptor', 'ExperimentAdaptor', ([], {}), "(**{'session': base.session})\n", (7123, 7152), False, 'from igf_data.igfdb.experimentadaptor import ExperimentAdaptor\n'), ((7407, 7429), 'os.path.exists', 'os.path.exists', (['dbname'], {}), '(dbname)\n', (7421, 7429), False, 'import os, warnings\n'), ((5140, 5157), 'os.remove', 'os.remove', (['dbname'], {}), '(dbname)\n', (5149, 5157), False, 'import os, warnings\n'), ((7435, 7452), 'os.remove', 'os.remove', (['dbname'], {}), '(dbname)\n', (7444, 7452), False, 'import os, warnings\n'), ((885, 916), 'igf_data.utils.dbutils.read_dbconf_json', 'read_dbconf_json', (['dbconfig_file'], {}), '(dbconfig_file)\n', (901, 916), False, 'from igf_data.utils.dbutils import read_dbconf_json\n'), ((941, 964), 'igf_data.igfdb.baseadaptor.BaseAdaptor', 'BaseAdaptor', ([], {}), '(**dbparams)\n', (952, 964), False, 'from igf_data.igfdb.baseadaptor import BaseAdaptor\n'), ((2885, 2944), 'igf_data.igfdb.igfTables.Sample_attribute.attribute_name.in_', 'Sample_attribute.attribute_name.in_', (['sample_attribute_names'], {}), '(sample_attribute_names)\n', (2920, 2944), False, 'from igf_data.igfdb.igfTables import Base, Experiment, Project, Sample, Sample_attribute\n'), ((3344, 3390), 'igf_data.igfdb.experimentadaptor.ExperimentAdaptor', 'ExperimentAdaptor', ([], {}), "(**{'session': base.session})\n", (3361, 3390), False, 'from igf_data.igfdb.experimentadaptor import ExperimentAdaptor\n'), ((1159, 1182), 'igf_data.task_tracking.igf_slack.IGF_slack', 'IGF_slack', (['slack_config'], {}), '(slack_config)\n', (1168, 1182), False, 'from igf_data.task_tracking.igf_slack import IGF_slack\n'), ((4765, 4787), 'warnings.warn', 'warnings.warn', (['message'], {}), '(message)\n', (4778, 4787), False, 'import os, warnings\n'), ((2812, 2862), 'igf_data.igfdb.igfTables.Sample_attribute.attribute_value.notin_', 'Sample_attribute.attribute_value.notin_', (['"""UNKNOWN"""'], {}), "('UNKNOWN')\n", (2851, 2862), False, 'from igf_data.igfdb.igfTables import Base, Experiment, Project, Sample, Sample_attribute\n')]
from typing import TYPE_CHECKING from sqlalchemy import Boolean, Column, Integer, String from sqlalchemy.orm import relationship from app.db.base_class import Base if TYPE_CHECKING: from .twitter_account import TwitterAccount from .hashtag import Hashtag from .hashtag_collection import HashtagCollection from .location import Location class User(Base): id = Column(Integer, primary_key=True, index=True) full_name = Column(String, index=True) email = Column(String, unique=True, index=True, nullable=False) hashed_password = Column(String, nullable=False) is_active = Column(Boolean(), default=True) is_superuser = Column(Boolean(), default=False)
[ "sqlalchemy.Boolean", "sqlalchemy.Column" ]
[((374, 419), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)', 'index': '(True)'}), '(Integer, primary_key=True, index=True)\n', (380, 419), False, 'from sqlalchemy import Boolean, Column, Integer, String\n'), ((434, 460), 'sqlalchemy.Column', 'Column', (['String'], {'index': '(True)'}), '(String, index=True)\n', (440, 460), False, 'from sqlalchemy import Boolean, Column, Integer, String\n'), ((471, 526), 'sqlalchemy.Column', 'Column', (['String'], {'unique': '(True)', 'index': '(True)', 'nullable': '(False)'}), '(String, unique=True, index=True, nullable=False)\n', (477, 526), False, 'from sqlalchemy import Boolean, Column, Integer, String\n'), ((547, 577), 'sqlalchemy.Column', 'Column', (['String'], {'nullable': '(False)'}), '(String, nullable=False)\n', (553, 577), False, 'from sqlalchemy import Boolean, Column, Integer, String\n'), ((599, 608), 'sqlalchemy.Boolean', 'Boolean', ([], {}), '()\n', (606, 608), False, 'from sqlalchemy import Boolean, Column, Integer, String\n'), ((648, 657), 'sqlalchemy.Boolean', 'Boolean', ([], {}), '()\n', (655, 657), False, 'from sqlalchemy import Boolean, Column, Integer, String\n')]
import os import numpy as np from utils.database import COLMAPDatabase,blob_to_array DATABASE_PATH = 'sqlite/putin_68kpts.db' DIRECTORY = 'keypoints' POSTFIX = '_kpt.txt' IMAGE_EXTENSION = '.jpg' if os.path.exists(DATABASE_PATH): os.remove(DATABASE_PATH) db = COLMAPDatabase.connect(DATABASE_PATH) db.create_tables() data_files = [f for f in os.listdir(DIRECTORY) if f[-len(POSTFIX):] == POSTFIX] #add camera cam_param = np.asarray([307.2, 128.0, 128.0, 0.0]) for i in range(len(data_files)): cam_id = db.add_camera(model=2, width=256, height=256, params=cam_param) file_name = data_files[i].split(POSTFIX)[0] + IMAGE_EXTENSION db.add_image(file_name,cam_id) # build keypoint (feature extraction) kpt_len = 0 for image_id, data_file in enumerate(data_files,start=1): keypoints = np.loadtxt(os.path.join(DIRECTORY,data_file)) keypoints = keypoints[:,:2] kpt_len = keypoints.shape[0] db.add_keypoints(image_id, np.asarray(keypoints)) # feature mathcing image_matchs = list() for from_id in range(1,len(data_files)+1): for to_id in range(from_id+1,len(data_files)+1): nrange = np.arange(0,kpt_len) pairs = np.c_[nrange,nrange] # add match to database #db.add_matches(from_id, to_id, np.asarray(pairs)) db.add_two_view_geometry(from_id, to_id, pairs) # commit (save) to db db.commit() db.close()
[ "os.path.exists", "os.listdir", "numpy.asarray", "os.path.join", "utils.database.COLMAPDatabase.connect", "numpy.arange", "os.remove" ]
[((200, 229), 'os.path.exists', 'os.path.exists', (['DATABASE_PATH'], {}), '(DATABASE_PATH)\n', (214, 229), False, 'import os\n'), ((265, 302), 'utils.database.COLMAPDatabase.connect', 'COLMAPDatabase.connect', (['DATABASE_PATH'], {}), '(DATABASE_PATH)\n', (287, 302), False, 'from utils.database import COLMAPDatabase, blob_to_array\n'), ((428, 466), 'numpy.asarray', 'np.asarray', (['[307.2, 128.0, 128.0, 0.0]'], {}), '([307.2, 128.0, 128.0, 0.0])\n', (438, 466), True, 'import numpy as np\n'), ((235, 259), 'os.remove', 'os.remove', (['DATABASE_PATH'], {}), '(DATABASE_PATH)\n', (244, 259), False, 'import os\n'), ((348, 369), 'os.listdir', 'os.listdir', (['DIRECTORY'], {}), '(DIRECTORY)\n', (358, 369), False, 'import os\n'), ((814, 848), 'os.path.join', 'os.path.join', (['DIRECTORY', 'data_file'], {}), '(DIRECTORY, data_file)\n', (826, 848), False, 'import os\n'), ((945, 966), 'numpy.asarray', 'np.asarray', (['keypoints'], {}), '(keypoints)\n', (955, 966), True, 'import numpy as np\n'), ((1123, 1144), 'numpy.arange', 'np.arange', (['(0)', 'kpt_len'], {}), '(0, kpt_len)\n', (1132, 1144), True, 'import numpy as np\n')]
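The matching section above pairs keypoint k in one image with keypoint k in every other image, for all image pairs (i, j) with i < j. A tiny numpy illustration of the two structures involved:

import numpy as np

kpt_len = 4
nrange = np.arange(kpt_len)
pairs = np.c_[nrange, nrange]        # identity correspondences: keypoint k <-> keypoint k
print(pairs.tolist())                 # [[0, 0], [1, 1], [2, 2], [3, 3]]

num_images = 3
image_pairs = [(i, j) for i in range(1, num_images + 1)
               for j in range(i + 1, num_images + 1)]
print(image_pairs)                    # [(1, 2), (1, 3), (2, 3)]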
from pathlib import Path import logging import subprocess from astropy.io import fits import os import re import numpy as np from astropy.table import Table, vstack from shutil import rmtree import tempfile as tf import sys import warnings def make_galfit_directory(): newdir = tf.mkdtemp(prefix='galfit') return Path(newdir) def make_galfit_files(feedme, image, psf, constraints, directory): with open(directory / 'galfit.feedme', 'w') as outconf: outconf.write(feedme) fits.PrimaryHDU(image, header=image.properties.to_header()).writeto( directory/'inimg.fits') if image.uncertainty is not None: fits.PrimaryHDU(image.uncertainty.array).writeto( directory/'sigma.fits') if psf is not None: fits.PrimaryHDU(psf.data).writeto(directory/'psf.fits') if image.mask is not None: fits.PrimaryHDU(image.mask.astype(int)).writeto(directory/'mask.fits') if constraints is not None: with open(directory/'constraints.txt', 'w') as outconst: outconst.write(constraints) return directory def fit(feedme, image, psf, constraints, **kwargs): # add verbose verbose = kwargs.pop('verbose', False) deletefiles = kwargs.pop('deletefiles', True) directory = kwargs.pop('directory', '/tmp') directory = make_galfit_directory() make_galfit_files(feedme, image, psf, constraints, directory) cmd = [galfitcmd, 'galfit.feedme'] popen = subprocess.Popen(cmd, stdout=subprocess.PIPE, cwd=directory, universal_newlines=True) print_galfit_output(popen, verbose) return_code = popen.wait() # if return_code: # raise subprocess.CalledProcessError(return_code, cmd) # maybe just read everything if fit is done and just load mode if sources # are made results = read_results(directory/'imgblock.fits') if deletefiles: rmtree(directory) if results is None: raise FitFailedError return results def print_galfit_output(process, verbose): with process.stdout as pstd: for line in pstd: if verbose: print(line, end='') class FitFailedError(Exception): def __init__(self, msg=None): if msg is None: msg = (''' __/~*##$%@@@******~\-__ /f=r/~_-~ _-_ --_.^-~--\=b\ 4fF / */ .o ._-__.__/~-. \*R\ /fF./ . /- /' /|/| \_ * *\ *\R\ (iC.I+ '| - *-/00 |- \ ) ) )|RB (I| ( [ / -|/^^\ | ) /_/ | *)B (I(. \ `` \ \m_m_|~__/ )_ .-~ F/ \b\\=_.\_b`-+-~x-_/ .. ,._/ , F/ ~\_\= = =-*###%#x==-# *=- =/ ~\**U/~ | i i | ~~~\===~ | I I \\ / // i\ \\ ( [ (( I@) ))) ) \_\_VYVU_/ || * | /* /I\ *~~\ /~-/* / \ \ ~~M~\ ____----=~ // /WVW\* \|\ ***===--___ Doh! GALFIT crashed because at least one of the model parameters is bad. The most common causes are: effective radius too small/big, component is too far outside of fitting region (also check fitting region), model mag too faint, axis ratio too small, Sersic index too small/big, Nuker powerlaw too small/big. 
If frustrated or problem should persist, email for help or report problem to: <EMAIL> ''') super().__init__(msg) def read_results(filename): if not Path(filename).exists(): return None out = {} with fits.open(filename) as hdul: header = fits.header.Header(hdul[2].header) out['image'] = hdul[1].data out['model'] = hdul[2].data out['residuals'] = hdul[3].data out['fitstats'] = read_fitstats_from_header(header) out['components'] = read_components_from_header(header) return out def read_fitstats_from_header(header): stats = {} stats['magzpt'] = header["MAGZPT"] fitreg = header["FITSECT"] fitreg = re.findall(r"[\w']+", fitreg) stats['box_x0'] = int(fitreg[0]) - 1 stats['box_x1'] = int(fitreg[1]) - 1 stats['box_y0'] = int(fitreg[2]) - 1 stats['box_y1'] = int(fitreg[3]) - 1 # Convolution box convbox = header["CONVBOX"] convbox = convbox.split(",") stats['convbox_x'] = convbox[0] stats['convbox_y'] = convbox[1] # Read in the chi-square value stats['chisq'] = header["CHISQ"] stats['ndof'] = header["NDOF"] stats['nfree'] = header["NFREE"] stats['reduced_chisq'] = header["CHI2NU"] return stats def read_components_from_header(header): components_packs = make_component_packs(header) components = Table() for i, componentheader in enumerate(components_packs): components = vstack( [components, make_component_from_cleaned_header(componentheader, i)]) return components def get_number_of_component(header): ncomps = 1 while True: if "COMP_" + str(ncomps + 1) in header: ncomps = ncomps + 1 else: break return ncomps def make_component_packs(header): ''' takes whole header and appends all lines that belong so one single componend as a new item to a list ''' components = [] compidx = 1 incomponent = False for i, (key, value) in enumerate(header.items()): _incomponent = is_part_of_component(key, compidx) if is_newcomponent(incomponent, _incomponent): startidx = i elif is_end_of_component(incomponent, _incomponent): components.append(header[startidx:i]) compidx += 1 incomponent = _incomponent return components def make_component_from_cleaned_header(header, idx): translator = keywordtranslator() compname = header.pop('COMP_{}'.format(idx+1)).rstrip() t = Table() t['comp'] = [compname] for key, value in header.items(): name = translator.to_python(key.replace('{}_'.format(idx+1), '')) add_parameter_to_table(t, name, *read_parameter(value)) return t def add_parameter_to_table(table, name, value, uncertainty, flag): ''' have to subtract 1 from coordinates due to indexing ''' if name == 'x' or name == 'y': value -= 1 table[name] = [value] table['{}_unc'.format(name)] = [uncertainty] table['{}_flag'.format(name)] = [flag] def test_if_all_symbols_in_string(string, *symbols): isin = True for symbol in symbols: isin = isin and symbol in string return isin def isconstrained(headerentry): return test_if_all_symbols_in_string(headerentry, '{', '}') def isfixed(headerentry): return test_if_all_symbols_in_string(headerentry, '[', ']') def isproblematic(headerentry): isproblematic = False if '*' in headerentry: isproblematic = True return isproblematic def remove_and_split_string(string, *removechars): for char in removechars: string = string.replace(char, '') return string.split() def read_parameter(headerentry): flag = '' uncertainty = -1 if isconstrained(headerentry): flag = 'constrained' strval = remove_and_split_string(headerentry, '{', '}') elif isfixed(headerentry): flag = 'fixed' strval = remove_and_split_string(headerentry, '[', ']') elif isproblematic(headerentry): flag = 'problematic' strval = 
remove_and_split_string(headerentry, '*') uncertainty = np.nan warnings.warn('One parameter is problematic') else: strval = remove_and_split_string(headerentry) uncertainty = float(strval[2]) value = float(strval[0]) return value, uncertainty, flag class keywordtranslator(object): def __init__(self): self.python = ['r', 'x', 'y'] self.galfit = ['RE', 'XC', 'YC'] @classmethod def to_python(cls, key, inverse=False): trans = cls() a = trans.galfit b = trans.python if inverse: a, b = b, a if key in a: key = b[a.index(key)] return key.lower() @classmethod def to_galfit(cls, key): return cls.to_python(key, inverse=True).upper() def is_part_of_component(key, idx): ispart = False if key.startswith('{}_'.format(idx)) or (key == 'COMP_{}'.format(idx)): ispart = True return ispart def is_newcomponent(incomponent_before, incomponent_now): isnew = False if (not incomponent_before) and incomponent_now: isnew = True return isnew def is_end_of_component(incomponent_before, incomponent_now): isend = False if incomponent_before and (not incomponent_now): isend = True return isend def find_galfit_executable(): galfit = '' try: galfit = os.environ['galfit'] except KeyError: logging.warn(('Galfit not found in environment. Set path manually by ' 'modifying the galfitcmd variable in core module.')) return galfit galfitcmd = find_galfit_executable()
[ "logging.warn", "astropy.table.Table", "pathlib.Path", "astropy.io.fits.header.Header", "subprocess.Popen", "astropy.io.fits.PrimaryHDU", "warnings.warn", "tempfile.mkdtemp", "shutil.rmtree", "astropy.io.fits.open", "re.findall" ]
[((284, 311), 'tempfile.mkdtemp', 'tf.mkdtemp', ([], {'prefix': '"""galfit"""'}), "(prefix='galfit')\n", (294, 311), True, 'import tempfile as tf\n'), ((323, 335), 'pathlib.Path', 'Path', (['newdir'], {}), '(newdir)\n', (327, 335), False, 'from pathlib import Path\n'), ((1551, 1640), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {'stdout': 'subprocess.PIPE', 'cwd': 'directory', 'universal_newlines': '(True)'}), '(cmd, stdout=subprocess.PIPE, cwd=directory,\n universal_newlines=True)\n', (1567, 1640), False, 'import subprocess\n'), ((4177, 4206), 're.findall', 're.findall', (['"""[\\\\w\']+"""', 'fitreg'], {}), '("[\\\\w\']+", fitreg)\n', (4187, 4206), False, 'import re\n'), ((4852, 4859), 'astropy.table.Table', 'Table', ([], {}), '()\n', (4857, 4859), False, 'from astropy.table import Table, vstack\n'), ((6018, 6025), 'astropy.table.Table', 'Table', ([], {}), '()\n', (6023, 6025), False, 'from astropy.table import Table, vstack\n'), ((2002, 2019), 'shutil.rmtree', 'rmtree', (['directory'], {}), '(directory)\n', (2008, 2019), False, 'from shutil import rmtree\n'), ((3711, 3730), 'astropy.io.fits.open', 'fits.open', (['filename'], {}), '(filename)\n', (3720, 3730), False, 'from astropy.io import fits\n'), ((3757, 3791), 'astropy.io.fits.header.Header', 'fits.header.Header', (['hdul[2].header'], {}), '(hdul[2].header)\n', (3775, 3791), False, 'from astropy.io import fits\n'), ((9028, 9156), 'logging.warn', 'logging.warn', (['"""Galfit not found in environment. Set path manually by modifying the galfitcmd variable in core module."""'], {}), "(\n 'Galfit not found in environment. Set path manually by modifying the galfitcmd variable in core module.'\n )\n", (9040, 9156), False, 'import logging\n'), ((696, 736), 'astropy.io.fits.PrimaryHDU', 'fits.PrimaryHDU', (['image.uncertainty.array'], {}), '(image.uncertainty.array)\n', (711, 736), False, 'from astropy.io import fits\n'), ((851, 876), 'astropy.io.fits.PrimaryHDU', 'fits.PrimaryHDU', (['psf.data'], {}), '(psf.data)\n', (866, 876), False, 'from astropy.io import fits\n'), ((3642, 3656), 'pathlib.Path', 'Path', (['filename'], {}), '(filename)\n', (3646, 3656), False, 'from pathlib import Path\n'), ((7667, 7712), 'warnings.warn', 'warnings.warn', (['"""One parameter is problematic"""'], {}), "('One parameter is problematic')\n", (7680, 7712), False, 'import warnings\n')]
""" Advent of Code Day 6 challenge: Part 1: Given a list of coordinates of the form (x, y) where x is the number of units from the left y is the number of units from the top Determine the area around each coordinate by finding the number of (x, y) locations that are closest to that coordinate, by using the Manhattan Distance The goal is to find the largest area that is fully contained by other points """ from ast import literal_eval import numpy as np import argparse import math class Coordinate: """ Coordinate class input tuple containing x, y value of coordinate ie. (1,2) x -> position from left of matrix, positive integers only, columns y -> position from the top of the matrix, positive integers only, rows """ def __init__(self, position): self.x = position[0] self.y = position[1] def compute_distance(self, new_coordinate): """ Compute Manhattan Distance between two coordinates This is evaluated by taking difference in x values + the difference in y values (1, 3) and (3, 5) has a Manhattan Distance of 4 because: |1 - 3| + |3 - 5| = 4 """ return abs(self.x - new_coordinate.x) + abs(self.y - new_coordinate.y) def is_edge_point(self, num_rows, num_cols): """ determine if the coordinate is an edge point num_rows -> bottom most row in matrix num_cols -> right most column in matrix """ return self.x == 0 or self.x == num_cols - 1 or self.y == 0 or self.y == num_rows - 1 def __repr__(self): return "({}, {})".format(self.x, self.y) def parse_args(): parser = argparse.ArgumentParser() parser.add_argument("file_name", help="input file listing coordinates for analysis") args = parser.parse_args() return args.file_name def read_input(file): """ read in input text file, which is a string of coordinates in the following format: 1, 1 1, 6 8, 3 file -> input text file returns dict of coordinate objects """ # if file is None: # raise Exception("File Not Found") with open(file, "r") as input_file: return {id: Coordinate(literal_eval(position.strip())) for id, position in enumerate(input_file, 1)} def find_matrix_size(coordinates): """ This finds the maximum x and y values from the list of coordinates coordinates -> dict of coordinate objects returns max rows and columns values, plus 1 added space because of 0 indexing """ max_x = 0 max_y = 0 for key, coordinate in coordinates.items(): if coordinate.x > max_x: max_x = coordinate.x if coordinate.y > max_y: max_y = coordinate.y return max_y+1, max_x+1 def populate_matrix(matrix, coordinate_dict): """ loops through each position on matrix, and calculates the closest coordinate from input coordinates modifies the matrix in place with the ID of the closest input coordinate. 
if two points are of equal distance, 0 is recorded for that position the function also determines the ids of points which are located on the edge of the matrix these are considered to have infinite areas and need to be discarded from future analysis matrix -> NxM np.array being modified with the closest points coordinate_dict -> dict of input coordinates returns set of edge points """ # get matrix size rows, columns = matrix.shape # create set of edge points edge_points = set() for row in range(rows): for column in range(columns): # generate new coordinate, x=column, y=row my_coordinate = Coordinate((column, row)) # create variable to keep track of closest point, and if another point is of equal distance closest_distance = math.inf unique_point = 0 # compute Manhattan Distance between each input point, record closest point for key, coordinate in coordinate_dict.items(): distance = my_coordinate.compute_distance(coordinate) if distance < closest_distance: closest_distance = distance unique_point = key elif distance == closest_distance: unique_point = 0 matrix[row][column] = unique_point if unique_point != 0 and my_coordinate.is_edge_point(rows, columns): edge_points.add(unique_point) return edge_points def calculate_area(matrix, coordinate_dict, edge_list): """ Finds the largest area of a fully enclosed coordinate matrix -> NxM np.array which lists the ids of coordinates closest to the input coordinates coordinate_dict -> dict of input coordinates edge_list -> set containing elements with infinite areas returns the maximum area as an int and the id as an int of input coordinates not listed in the edge_list """ max_area, max_key = 0, 0 for key in coordinate_dict: if key not in edge_list: area = np.count_nonzero(matrix == key) if area > max_area: max_area = area max_key = key return max_area, max_key def main(file): """ The magic happens here """ # read in input and calculate the size of matrix needed coordinates = read_input(file) rows, columns = find_matrix_size(coordinates) # create 2 dimensional matrix for coordinates matrix = np.zeros((rows, columns), dtype=int) # loop through each position on matrix, and record id of nearest point # returns a set of ids which have "infinite areas" edges = populate_matrix(matrix, coordinates) print(matrix) print(edges) # calculate the area of each fully enclosed coordinate max_area, max_key = calculate_area(matrix, coordinates, edges) print("the largest area which is not infinite is {} and belongs to point {}.".format(max_area, max_key)) if __name__ == "__main__": arg_file_name = parse_args() try: main(arg_file_name) except FileNotFoundError: print("the file does not exist")
[ "numpy.count_nonzero", "numpy.zeros", "argparse.ArgumentParser" ]
[((1786, 1811), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1809, 1811), False, 'import argparse\n'), ((5715, 5751), 'numpy.zeros', 'np.zeros', (['(rows, columns)'], {'dtype': 'int'}), '((rows, columns), dtype=int)\n', (5723, 5751), True, 'import numpy as np\n'), ((5285, 5316), 'numpy.count_nonzero', 'np.count_nonzero', (['(matrix == key)'], {}), '(matrix == key)\n', (5301, 5316), True, 'import numpy as np\n')]
from setuptools import setup setup( name='rmgradient', version='1.0', description='Substract a background gradient from a TIFF file image', url='https://github.com/drnc/rmgradient', author='<NAME>', author_email='<EMAIL>', license='Apache License 2.0', classifiers=[ 'Development Status :: 3 - Alpha', 'Intended Audience :: End Users/Desktop', 'License :: OSI Approved :: Apache Software License', 'Programming Language :: Python :: 3', ], keywords='remove background gradient TIFF', py_modules=['rmgradient/rmgradient'], install_requires=[ 'numpy>=1.14', 'tifffile>=2018.11.6', 'imagecodecs>=2018.11.6', 'scipy>=1.0'], python_requires='>=3', entry_points={ 'console_scripts': [ 'rmgradient=rmgradient.rmgradient:main' ], } )
[ "setuptools.setup" ]
[((30, 769), 'setuptools.setup', 'setup', ([], {'name': '"""rmgradient"""', 'version': '"""1.0"""', 'description': '"""Substract a background gradient from a TIFF file image"""', 'url': '"""https://github.com/drnc/rmgradient"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'license': '"""Apache License 2.0"""', 'classifiers': "['Development Status :: 3 - Alpha',\n 'Intended Audience :: End Users/Desktop',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python :: 3']", 'keywords': '"""remove background gradient TIFF"""', 'py_modules': "['rmgradient/rmgradient']", 'install_requires': "['numpy>=1.14', 'tifffile>=2018.11.6', 'imagecodecs>=2018.11.6', 'scipy>=1.0']", 'python_requires': '""">=3"""', 'entry_points': "{'console_scripts': ['rmgradient=rmgradient.rmgradient:main']}"}), "(name='rmgradient', version='1.0', description=\n 'Substract a background gradient from a TIFF file image', url=\n 'https://github.com/drnc/rmgradient', author='<NAME>', author_email=\n '<EMAIL>', license='Apache License 2.0', classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: End Users/Desktop',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python :: 3'], keywords=\n 'remove background gradient TIFF', py_modules=['rmgradient/rmgradient'],\n install_requires=['numpy>=1.14', 'tifffile>=2018.11.6',\n 'imagecodecs>=2018.11.6', 'scipy>=1.0'], python_requires='>=3',\n entry_points={'console_scripts': ['rmgradient=rmgradient.rmgradient:main']}\n )\n", (35, 769), False, 'from setuptools import setup\n')]
from os.path import join import json import torch import numpy as np from rlpyt.envs.dm_control_env import DMControlEnv def cloth_corner_random(obs): idx = np.random.randint(0, 4) one_hot = np.zeros(4) one_hot[idx] = 1 delta = np.random.rand(3) * 2 - 1 return np.concatenate((one_hot, delta)).astype(np.float32) def rope_v2_random(obs): return np.random.rand(3) * 2 - 1 def cloth_point_random(obs): return np.random.rand(4) * 2 - 1 def simulate_policy(): policy = cloth_point_random env = DMControlEnv(domain='cloth_point', task='easy', max_path_length=120, task_kwargs=dict(random_location=False)) n_episodes = 40 returns = [] for i in range(n_episodes): o = env.reset() done = False reward = 0 while not done: o, r, done, info = env.step(policy(o)) reward += r if done or info.traj_done: break print(reward) returns.append(reward) print('Finished episode', i) print('Rewards', returns) print('Average Reward', np.mean(returns)) if __name__ == '__main__': simulate_policy()
[ "numpy.mean", "numpy.random.rand", "numpy.random.randint", "numpy.zeros", "numpy.concatenate" ]
[((164, 187), 'numpy.random.randint', 'np.random.randint', (['(0)', '(4)'], {}), '(0, 4)\n', (181, 187), True, 'import numpy as np\n'), ((202, 213), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (210, 213), True, 'import numpy as np\n'), ((1115, 1131), 'numpy.mean', 'np.mean', (['returns'], {}), '(returns)\n', (1122, 1131), True, 'import numpy as np\n'), ((248, 265), 'numpy.random.rand', 'np.random.rand', (['(3)'], {}), '(3)\n', (262, 265), True, 'import numpy as np\n'), ((285, 317), 'numpy.concatenate', 'np.concatenate', (['(one_hot, delta)'], {}), '((one_hot, delta))\n', (299, 317), True, 'import numpy as np\n'), ((375, 392), 'numpy.random.rand', 'np.random.rand', (['(3)'], {}), '(3)\n', (389, 392), True, 'import numpy as np\n'), ((443, 460), 'numpy.random.rand', 'np.random.rand', (['(4)'], {}), '(4)\n', (457, 460), True, 'import numpy as np\n')]
# -*- coding: utf-8 -*- import random SUIT_SYMBOLS = { 'spades': u'♠', 'diamonds': u'♦', 'clubs': u'♣', 'hearts': u'♥', } class InvalidMove(Exception): """Raised to indicate an invalid move""" class Card(object): def __init__(self, rank, suit, face_up=False): self.rank = rank self.suit = suit self.face_up = face_up def __repr__(self): return 'Card(rank={0.rank!r}, suit={0.suit!r}, face_up={0.face_up!r})'.format(self) @property def suit_symbol(self): return SUIT_SYMBOLS[self.suit] class Deck(object): ranks = ['A'] + [str(n) for n in range(2, 11)] + list('JQK') suits = 'spades diamonds clubs hearts'.split() def __init__(self): self._cards = [Card(rank, suit) for suit in self.suits for rank in self.ranks] def __len__(self): return len(self._cards) def __getitem__(self, position): return self._cards[position] def __iter__(self): return iter(self._cards) def shuffle(self): random.shuffle(self._cards) def suit_color(suit): return 'red' if suit in ('diamonds', 'hearts') else 'black' def rank_diff(first, second): """Return the relative difference between the given ranks""" assert first in Deck.ranks and second in Deck.ranks return Deck.ranks.index(second) - Deck.ranks.index(first) class Game(object): def __init__(self): deck = Deck() deck.shuffle() cards = list(deck) self.waste = [] self.tableau = [] for n in range(1, 8): self.tableau.append([cards.pop() for _ in range(n)]) for pile in self.tableau: pile[-1].face_up = True self.stock = list(cards) self.foundations = [[], [], [], []] def deal_from_stock(self): if not self.stock: raise InvalidMove("No cards in stock") self.waste.append(self.stock.pop()) self.waste[-1].face_up = True def restore_stock(self): self.stock = list(reversed(self.waste)) for card in self.stock: card.face_up = False self.waste[:] = [] def _is_valid_move_to_tableau(self, source_card, target_card): if target_card is None: return source_card.rank == 'K' if not source_card.face_up or not target_card.face_up: return False diff = rank_diff(source_card.rank, target_card.rank) return diff == 1 and suit_color(source_card.suit) != suit_color(target_card.suit) def move_from_waste_to_tableau(self, target_index): assert target_index in range(7) target_pile = self.tableau[target_index] target_card = target_pile[-1] if target_pile else None if self.waste and self._is_valid_move_to_tableau(self.waste[-1], target_card): target_pile.append(self.waste.pop()) else: raise InvalidMove() def move_tableau_pile(self, src_index, target_index): """Move pile, assuming that cards facing up are in the proper order""" assert src_index in range(7), "Invalid index: %r" % src_index assert target_index in range(7), "Invalid index: %r" % target_index if src_index == target_index: raise InvalidMove('Source is same as destination') source_pile, target_pile = self.tableau[src_index], self.tableau[target_index] target_card = target_pile[-1] if target_pile else None for index, card in list(enumerate(source_pile))[::-1]: if self._is_valid_move_to_tableau(card, target_card): to_move = source_pile[index:] target_pile.extend(to_move) for _ in range(len(to_move)): source_pile.pop() return raise InvalidMove() def _find_foundation_pile(self, card_to_move): for pile in self.foundations: if any([ not pile and card_to_move.rank == 'A', pile and card_to_move.suit == pile[-1].suit and rank_diff(card_to_move.rank, pile[-1].rank) == -1 ]): return pile def move_to_foundation_from_waste(self): if not self.waste: raise InvalidMove() foundation_pile = self._find_foundation_pile(self.waste[-1]) if foundation_pile is None: raise 
InvalidMove() foundation_pile.append(self.waste.pop()) def move_to_foundation_from_tableau(self, index): assert index in range(7), "Invalid index: %r" % index pile = self.tableau[index] if not pile: raise InvalidMove() card_to_move = pile[-1] if not card_to_move.face_up: raise InvalidMove() foundation_pile = self._find_foundation_pile(card_to_move) if foundation_pile is None: raise InvalidMove() foundation_pile.append(pile.pop())
[ "random.shuffle" ]
[((1087, 1114), 'random.shuffle', 'random.shuffle', (['self._cards'], {}), '(self._cards)\n', (1101, 1114), False, 'import random\n')]
from typing import List class Sentiment(): """Contains data for sentiment analysis report""" def __init__( self, # id: str, symbols: List[str], source: str, date: str, positive: float, neutral: float, negative: float, compound: float, ): # self.id = id self.symbols = symbols self.source = source self.date = date self.positive = positive self.neutral = neutral self.negative = negative self.compound = compound def __del__(self): pass if __name__ == "__main__": import doctest doctest.testmod()
[ "doctest.testmod" ]
[((650, 667), 'doctest.testmod', 'doctest.testmod', ([], {}), '()\n', (665, 667), False, 'import doctest\n')]
import pytest from poll.models import db from poll.models import Poll, Answer, Vote def test_create_poll(client, app): # Delete first with app.app_context(): Poll.query.filter_by(question='What do you want').delete() assert client.get('/').status_code == 200 response = client.post( '/poll', data={'questionTitle': 'What do you want', 'answer': ['money', 'gold', 'power'], 'maxSelectionLimit': 1} ) assert '/share' in response.headers['Location'] with app.app_context(): assert Poll.query.filter_by(question='What do you want').first() is not None @pytest.mark.parametrize(('questionTitle', 'answer', 'maxSelectionLimit', 'status_code'), ( ('', '', '', 422), (None, None, None, 422), ('a', '', 'a', 422), ('', ['test'], '1', 422), ('', [None, None], '1', 422), )) def test_register_validate_input(client, questionTitle, answer, maxSelectionLimit, status_code): response = client.post( '/poll', data={'questionTitle': questionTitle, 'answer': answer, 'maxSelectionLimit': maxSelectionLimit} ) assert response.status_code == status_code
[ "pytest.mark.parametrize", "poll.models.Poll.query.filter_by" ]
[((629, 848), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["('questionTitle', 'answer', 'maxSelectionLimit', 'status_code')", "(('', '', '', 422), (None, None, None, 422), ('a', '', 'a', 422), ('', [\n 'test'], '1', 422), ('', [None, None], '1', 422))"], {}), "(('questionTitle', 'answer', 'maxSelectionLimit',\n 'status_code'), (('', '', '', 422), (None, None, None, 422), ('a', '',\n 'a', 422), ('', ['test'], '1', 422), ('', [None, None], '1', 422)))\n", (652, 848), False, 'import pytest\n'), ((175, 224), 'poll.models.Poll.query.filter_by', 'Poll.query.filter_by', ([], {'question': '"""What do you want"""'}), "(question='What do you want')\n", (195, 224), False, 'from poll.models import Poll, Answer, Vote\n'), ((557, 606), 'poll.models.Poll.query.filter_by', 'Poll.query.filter_by', ([], {'question': '"""What do you want"""'}), "(question='What do you want')\n", (577, 606), False, 'from poll.models import Poll, Answer, Vote\n')]
from django.contrib import admin from .models import Rating,Project,Profile # Register your models here. admin.site.register(Rating) admin.site.register(Profile) admin.site.register(Project)
[ "django.contrib.admin.site.register" ]
[((106, 133), 'django.contrib.admin.site.register', 'admin.site.register', (['Rating'], {}), '(Rating)\n', (125, 133), False, 'from django.contrib import admin\n'), ((134, 162), 'django.contrib.admin.site.register', 'admin.site.register', (['Profile'], {}), '(Profile)\n', (153, 162), False, 'from django.contrib import admin\n'), ((163, 191), 'django.contrib.admin.site.register', 'admin.site.register', (['Project'], {}), '(Project)\n', (182, 191), False, 'from django.contrib import admin\n')]
import collections from typing import Counter, List class Solution: def subarraySum(self, nums: List[int], k: int) -> int: result = total = 0 sum_counts = collections.Counter() # type: Counter[int] sum_counts[0] = 1 for num in nums: total += num complement = total - k if complement in sum_counts: result += sum_counts[complement] sum_counts[total] += 1 return result
[ "collections.Counter" ]
[((178, 199), 'collections.Counter', 'collections.Counter', ([], {}), '()\n', (197, 199), False, 'import collections\n')]
import sys import math import scipy.special as spec def H(p): q = 1-p return p*math.log2(1/p) + q*math.log2(1/q) def s_r_UB_q_approx(n, r, nD, q, Sigma): delta_term = ( spec.binom(n-r+nD,q) + spec.binom(n-r+nD,q-1) ) return delta_term * spec.binom(q , int(math.floor(q/2)))*spec.binom(2*nD-1, q-2) def s_r_UB_q(n, r, nD, q, Sigma): return s_r_UB_q_approx(n, r, nD, q, Sigma) def s_r_UB_nD(n, r, nD, Sigma): sigma_factor = (pow(Sigma-1, nD) + pow(Sigma, nD)) / pow(Sigma-1, 2*nD) q_sum = 0 q_max = min(2*nD, n - r + nD + 1) # bar{q} = min{2nD, nM+1} for q in range(2,q_max+1): q_sum += s_r_UB_q(n, r, nD, q, Sigma) return sigma_factor* spec.binom(n-nD, r-2*nD) * (n / (n-nD)) * q_sum def s_r_UB(n, r, Sigma): nD_sum = 0 if (r == 0): nD_sum = spec.binom(n,r) else: nD_max = int(math.floor(r/2)) for nD in range(nD_max+1): nD_sum += s_r_UB_nD(n, r, nD, Sigma) return nD_sum*pow(Sigma-1, r) def s_UB(n, Sigma): r_max = n shell_bounds = [] for r in range(r_max+1): shell_bounds.append(s_r_UB(n, r, Sigma)) return shell_bounds def bound(n, Sigma=4): # OK hulls_bound = [(i,0) for i in range(n+1)] remaining_strings = pow(Sigma,n) - 1 lb = 0 r = 1 while (remaining_strings > 0 and r <= n): lb += remaining_strings hull_count = s_r_UB(n,r, Sigma) remaining_strings = remaining_strings - hull_count hulls_bound[r] = (r,lb) r += 1 return (lb, hulls_bound, r-1) def prototype(n_max=32): import math import scipy.special as spec print("TEST") n = 24 r = 16 Sigma=4 rho = 8 / (Sigma-1) x = [] y = [] for nD in range(1,int(math.floor(r/2))+1): s_power = pow(rho, nD) s_binom_1 = spec.binom(n-nD, n-r+nD) s_binom_2 = spec.binom(n-r+3*nD, n-r+nD) #s = 1*s_binom_1 * 1*s_binom_2 #s = 1*s_power * 1*s_binom_2 # s = 1*s_power * 1*s_binom_1 #s = 1*s_power * 1*s_binom_1 * 1*s_binom_2 #s = 1*s_power #s = 1*s_binom_1 s = 1*s_binom_2 i = n - r + nD a = pow(rho,nD) * pow(math.e*math.e,i)*pow((n-r+3*nD)*(n-nD),i) / pow(i,i) x.append(s) y.append(a) print(x) print(y) print(sum(x)) print(sum(y)) if (__name__ == "__main__"): if (sys.argv.count("--test") > 0): prototype() sys.exit(0) n_max = int(sys.argv[1]) n_step = int(sys.argv[2]) Sigma=4 print("n_max: {0}\nn_step: {1}".format(n_max, n_step)) ns = list(range(50,n_max+1,n_step)) for n in ns: UB = 0 shell_bounds = s_UB(n, Sigma) (lb, hulls_bound, r_sat) = bound(n) print("{0}\t{1}".format(n,lb / (n*pow(Sigma,n))))
[ "sys.argv.count", "math.floor", "scipy.special.binom", "math.log2", "sys.exit" ]
[((183, 208), 'scipy.special.binom', 'spec.binom', (['(n - r + nD)', 'q'], {}), '(n - r + nD, q)\n', (193, 208), True, 'import scipy.special as spec\n'), ((206, 235), 'scipy.special.binom', 'spec.binom', (['(n - r + nD)', '(q - 1)'], {}), '(n - r + nD, q - 1)\n', (216, 235), True, 'import scipy.special as spec\n'), ((293, 322), 'scipy.special.binom', 'spec.binom', (['(2 * nD - 1)', '(q - 2)'], {}), '(2 * nD - 1, q - 2)\n', (303, 322), True, 'import scipy.special as spec\n'), ((811, 827), 'scipy.special.binom', 'spec.binom', (['n', 'r'], {}), '(n, r)\n', (821, 827), True, 'import scipy.special as spec\n'), ((1844, 1874), 'scipy.special.binom', 'spec.binom', (['(n - nD)', '(n - r + nD)'], {}), '(n - nD, n - r + nD)\n', (1854, 1874), True, 'import scipy.special as spec\n'), ((1889, 1927), 'scipy.special.binom', 'spec.binom', (['(n - r + 3 * nD)', '(n - r + nD)'], {}), '(n - r + 3 * nD, n - r + nD)\n', (1899, 1927), True, 'import scipy.special as spec\n'), ((2403, 2427), 'sys.argv.count', 'sys.argv.count', (['"""--test"""'], {}), "('--test')\n", (2417, 2427), False, 'import sys\n'), ((2462, 2473), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (2470, 2473), False, 'import sys\n'), ((88, 104), 'math.log2', 'math.log2', (['(1 / p)'], {}), '(1 / p)\n', (97, 104), False, 'import math\n'), ((107, 123), 'math.log2', 'math.log2', (['(1 / q)'], {}), '(1 / q)\n', (116, 123), False, 'import math\n'), ((858, 875), 'math.floor', 'math.floor', (['(r / 2)'], {}), '(r / 2)\n', (868, 875), False, 'import math\n'), ((688, 718), 'scipy.special.binom', 'spec.binom', (['(n - nD)', '(r - 2 * nD)'], {}), '(n - nD, r - 2 * nD)\n', (698, 718), True, 'import scipy.special as spec\n'), ((1772, 1789), 'math.floor', 'math.floor', (['(r / 2)'], {}), '(r / 2)\n', (1782, 1789), False, 'import math\n'), ((275, 292), 'math.floor', 'math.floor', (['(q / 2)'], {}), '(q / 2)\n', (285, 292), False, 'import math\n')]
# This script requires the Meraki SDK, sys, copy, json import meraki import sys import copy import json # Treat your API key like a password. Store it in your environment variables as 'MERAKI_DASHBOARD_API_KEY' and let the SDK call it for you. # Or, call it manually after importing Python's os module: # API_KEY = os.getenv('MERAKI_DASHBOARD_API_KEY') # CUSTOMIZABLE STRINGS SETTINGS_FILENAME = 'settings.json' MAPPINGS_FILENAME = 'mappings.json' CONFIRM = 'Would you like to proceed?' CHOICES = '(y/n)' STATIC_SETTINGS_REVIEW = 'Please review these static settings before proceeding.' APPLIANCE_SETTINGS_REVIEW = 'Please review these appliance settings before proceeding.' VLAN_ERROR = 'The appliance has a VLAN for which settings.json does not have interface information. Add the interface information to continue.' GOODBYE = 'Now go check your L3 switch\'s routing settings.' # DEFINE a method that will ingest settings and param mappings def ingest(settings_filename: str, mappings_filename: str): # INGEST SETTINGS, AND PARAM MAPPINGS # READ the settings and mappings with open(settings_filename, 'r') as settings_json: settings = json.load(settings_json) with open(mappings_filename, 'r') as mappings_json: mappings = json.load(mappings_json) # CREATE a list of all the tagged VLAN IDs in settings.json tagged_vlan_ids = [vlan['id'] for vlan in settings['vlans']['others']] # CREATE a list of all the interface IPs in settings.json interface_ips = [vlan['interfaceIp'] for vlan in settings['vlans']['others']] interface_ips.append(settings['vlans']['native']['interfaceIp']) return(settings, mappings, tagged_vlan_ids, interface_ips) # DEFINE a function to build working configs that we can manipulate. In this example, it will be useful to separate the interfaces with # DHCP enabled vice disabled, because interfaces with DHCP enabled have options that need to be PUT to two different endpoints. def build_working_configs( starting_configs: list, feature_old_toggle_param: str = 'dhcpHandling', feature_old_toggle_disabled_mode: str = 'Do not respond to DHCP requests' ): # DEEPCOPY the starting config. We'll manipulate a separate working config. DEEPCOPY is important because direct assignment creates a # reference to the original object, whereas we want to modify this one without modifying the original, in case we want to debug and # compare old with new. working_configs = copy.deepcopy(starting_configs) # CREATE a reference to the working config that only contains items with the feature enabled. # Anything we modify here will also be reflected in the working_config array. working_configs_with_feature = [config for config in working_configs if config[feature_old_toggle_param] != feature_old_toggle_disabled_mode] return(working_configs, working_configs_with_feature) # DEFINE a pretty print function. def printj(ugly_json_object: list): # The json.dumps() method converts a JSON object into human-friendly formatted text pretty_json_string = json.dumps(ugly_json_object, indent = 2, sort_keys = False) print(pretty_json_string) # DEFINE a method that will check each param against the knownParams in mappings.json and choose the appropriate action for that param, # and then return a list of tasks per config item. def build_task_list(*, old_configs: list, mappings: list): # Make an empty to-do list. to_do = [] # Search every item (in this case, a VLAN) for old_config in old_configs: # Search every parameter in the old config for a matching one in the mappings file, # and make a list out of it. 
old_config_matched_params = [param for param in mappings['knownParams'] if param['names']['old'] in old_config] # Make an empty dict() for each old_config where we'll store the lists of params needing action params_to = dict() # REMOVE # Check each matched param's mapping status and assign to an appropriate array based on the required action. params_to['remove'] = [param for param in old_config_matched_params if param['status'] == 'deprecated'] remaining_params = [param for param in old_config_matched_params if param not in params_to['remove']] # REUSE params_to['reuse'] = [param for param in old_config_matched_params if param['status'] == 'reused'] remaining_params = [param for param in remaining_params if param not in params_to['reuse']] # RENAME params_to['rename'] = [param for param in old_config_matched_params if param['status'] == 'renamed'] remaining_params = [param for param in remaining_params if param not in params_to['rename']] # TRANSFORM params_to['transform'] = [param for param in old_config_matched_params if param['status'] == 'transformed'] remaining_params = [param for param in remaining_params if param not in params_to['transform']] if(len(remaining_params) != 0): print(f"I found params in the source that aren't mapped in the mappings file. These are: {remaining_params}") print("I will now quit so you can map those params.") sys.exit() # Add this compiled dict to our to-do list. to_do.append(params_to) return(to_do) # DEFINE a method that will remove params def remove_params(task_list: list, old_configs: list): # Operate on each task in the list modified_params = set() modified_configs = set() past_tense_verb = 'Removed' # Iterate through the tasks and configs at the same time. for task, config in zip(task_list, old_configs): # Iterate through each param in the task for param in task: modified_params.add(param['names']['old']) modified_configs.add(config['name']) # Remove the param config.pop(param['names']['old']) print(f"{past_tense_verb} {modified_params} from {modified_configs}.\n") return(old_configs) # DEFINE a method that will rename params def rename_params(task_list: list, old_configs: list): # Operate on each task in the list modified_params = set() modified_configs = set() past_tense_verb = 'Renamed' # Iterate through the tasks and configs at the same time. 
for task, config in zip(task_list, old_configs): # Iterate through each param in the task for param in task: modified_params.add(param['names']['old']) modified_configs.add(config['name']) # Add a param with the new name, and assign it the old value while removing it from the list config[param['names']['new']] = config.pop(param['names']['old']) print(f"{past_tense_verb} {modified_params} from {modified_configs}.\n") return(old_configs) # DEFINE a method that will replace null values in subkeys with blank strings def transform_replace_none(*, param, config, transform): modified_params = set() modified_configs = set() # Check each parameter for null values for subparam in config[param['names']['new']]: if None in subparam.values(): subparam[transform['action']] = '' return(modified_params, modified_configs) # DEFINE a method that will rename a param's mode def transform_rename_mode(*, param, config): modified_params = set() modified_configs = set() # Check each mode for the current param for mode in param['modes']: # If the current mode corresponds to a new one, replace it with the new one if config[param['names']['new']] == mode['old']: config[param['names']['new']] = mode['new'] modified_params.add(param['names']['new']) modified_configs.add(config['name']) return(modified_params, modified_configs) # DEFINE a method that will split a param's mode def transform_split_mode(*, param, config, transform): modified_params = set() modified_configs = set() # Split the given param's mode by the delimiter specified in the mappings if the delimter is there # if transform['action'] in config[param['names']['new']]: config[param['names']['new']] = config[param['names']['new']].split(transform['action']) modified_params.add(param['names']['new']) modified_configs.add(config['name']) return(modified_params, modified_configs) # DEFINE a method that will split a param's mode def transform_add_param(*, param, config, transform): modified_params = set() modified_configs = set() # Add to the config a param with the new name from the transform, and assign it the old value config[transform['action']] = config[param['names']['new']] # Assign the original param with the fallback mode config[param['names']['new']] = transform['fallback'] modified_params.add(param['names']['new']) modified_params.add(transform['action']) modified_configs.add(config['name']) return(modified_params, modified_configs) # DEFINE a method that will demote a dynamic key to a key-value pair def transform_demote_dynamic_key(*, param, config, **kwargs: dict): modified_params = set() modified_configs = set() interface_ips = kwargs['interface_ips'] # Make a new list that will contain all the new dicts new_param_list = [] # For each instance of a dynamic key in this particular vlan config for dynamic_key in config[param['names']['new']]: # Make a new dict that lists the key as a value, and its nested key/value pairs as additional top-level key/value pairs new_param = dict() # Make the dynamic key the value of the new key name per mappings new_param[param['names']['newSubParam']] = dynamic_key # Now add all the subkeys new_param.update(config[param['names']['new']][dynamic_key]) # LET'S TALK INTERFACE ADDRESSES # If the IP address in the reservation matches a new interface IP for the switch, then the two settings will # interfere. We'll simply drop the DHCP reservation for that IP address if it's the same as the IP address for # the interface. 
if new_param['ip'] not in interface_ips: # Add the new param to the new param list new_param_list.append(new_param) # Assign the new param list to the original key config[param['names']['new']] = new_param_list modified_params.add(param['names']['new']) modified_configs.add(config['name']) return(modified_params, modified_configs) # DEFINE a method that coordinates the transforms def transform_coordinate(*, param, config, transform, **kwargs: dict): modified_params = set() modified_configs = set() interface_ips = kwargs['interface_ips'] # Perform each transform if called for in mappings if transform['type'] == 'rename None': # Hand off the transform to a single-purpose method transformed_params, transformed_configs = transform_replace_none(param = param, config = config, transform = transform) # Merge the modification sets from that method modified_params |= (transformed_params) modified_configs |= (transformed_configs) if transform['type'] == 'rename mode': # Hand off the transform to a single-purpose method transformed_params, transformed_configs = transform_rename_mode(param = param, config = config) # Merge the modification sets from that method modified_params |= (transformed_params) modified_configs |= (transformed_configs) if transform['type'] == 'split delimited strings' and transform['action'] in config[param['names']['new']]: # Hand off the transform to a single-purpose method transformed_params, transformed_configs = transform_split_mode(param = param, config = config, transform = transform) # Merge the modification sets from that method modified_params |= (transformed_params) modified_configs |= (transformed_configs) if transform['type'] == 'add param' and isinstance(config[param['names']['new']], list): # Hand off the transform to a single-purpose method transformed_params, transformed_configs = transform_add_param(param = param, config = config, transform = transform) # Merge the modification sets from that method modified_params |= (transformed_params) modified_configs |= (transformed_configs) if transform['type'] == 'demote dynamic key' and isinstance(config[param['names']['new']], dict): # Hand off the transform to a single-purpose method transformed_params, transformed_configs = transform_demote_dynamic_key(param = param, config = config, transform = transform, interface_ips = interface_ips) # Merge the modification sets from that method modified_params |= (transformed_params) modified_configs |= (transformed_configs) return(modified_params, modified_configs) # DEFINE a method that will transform params. def transform_params(task_list: list, old_configs: list, **kwargs: dict): # Operate on each task in the list modified_params = set() modified_configs = set() past_tense_verb = 'Transformed' interface_ips = kwargs['interface_ips'] # Iterate through the tasks and configs at the same time. 
for task, config in zip(task_list, old_configs): # Iterate through each param in the task for param in task: modified_params.add(param['names']['old']) modified_configs.add(config['name']) # Add a param with the new name, and assign it the old value while removing it from the list config[param['names']['new']] = config.pop(param['names']['old']) # Iterate through each transformation for transform in param['transforms']: transformed_params, transformed_configs = transform_coordinate(param = param, config = config, transform = transform, interface_ips = interface_ips) # Merge the modification sets from that method modified_params |= (transformed_params) modified_configs |= (transformed_configs) print(f"{past_tense_verb} {modified_params} from {modified_configs}.\n") return(old_configs) # DEFINE a method that adds the static configuration information from settings.json to each interface def assign_statics(tagged_vlan_ids: list, settings, old_configs: list): for interface in old_configs: # ASSIGN the static information that isn't derived from the appliance config # Set the native VLAN info if interface['vlanId'] == settings['vlans']['native']['id']: interface['defaultGateway'] = settings['vlans']['native']['defaultGateway'] interface['interfaceIp'] = settings['vlans']['native']['interfaceIp'] # Set the tagged VLAN info elif interface['vlanId'] in tagged_vlan_ids: # Use a list comprehension, then pop it, to get the interface IP interface['interfaceIp'] = [tagged_vlan['interfaceIp'] for tagged_vlan \ in settings['vlans']['others'] if tagged_vlan['id'] == interface['vlanId']].pop() # We need to have this static information for each VLAN in the appliance config. If we don't find it, then we'll quit so you can fix settings.json. else: print(VLAN_ERROR) sys.exit() return(old_configs) # DEFINE a function to create the interfaces def create_interfaces(dashboard, settings, switch_interfaces: list): # Start a list to collect responses. They will be handy because they'll have the created inteface IDs. responses = [] # Create each interface. The native VLAN will need special params from settings.json. 
for interface in switch_interfaces: if interface['vlanId'] != settings['vlans']['native']['id']: response = dashboard.switch.createDeviceSwitchRoutingInterface( settings['switchSerial'], interface['name'], interface['interfaceIp'], interface['vlanId'], subnet = interface['subnet'] ) else: response = dashboard.switch.createDeviceSwitchRoutingInterface( settings['switchSerial'], interface['name'], interface['interfaceIp'], interface['vlanId'], subnet = interface['subnet'], defaultGateway = interface['defaultGateway'] ) responses.append(response) return responses # DEFINE a function to update the DHCP config for each interface def configure_interface_dhcp(dashboard, serial, switch_interfaces_with_dhcp, created_interfaces): responses = [] # Iterate through all the for interface in switch_interfaces_with_dhcp: created_dhcp_interface = [created_interface for created_interface in created_interfaces if created_interface['vlanId'] == interface['vlanId']].pop() if 'dnsCustomNameservers' in interface: response = dashboard.switch.updateDeviceSwitchRoutingInterfaceDhcp( serial, created_dhcp_interface['interfaceId'], dhcpMode = interface['dhcpMode'], dhcpLeaseTime = interface['dhcpLeaseTime'], dnsNameserversOption = interface['dnsNameserversOption'], dnsCustomNameservers = interface['dnsCustomNameservers'], dhcpOptions = interface['dhcpOptions'], reservedIpRanges = interface['reservedIpRanges'], fixedIpAssignments = interface['fixedIpAssignments'] ) else: response = dashboard.switch.updateDeviceSwitchRoutingInterfaceDhcp( serial, created_dhcp_interface['interfaceId'], dhcpMode = interface['dhcpMode'], dhcpLeaseTime = interface['dhcpLeaseTime'], dnsNameserversOption = interface['dnsNameserversOption'], dhcpOptions = interface['dhcpOptions'], reservedIpRanges = interface['reservedIpRanges'], fixedIpAssignments = interface['fixedIpAssignments'] ) responses.append(response) return responses # DEFINE a main method that will drive the config through all necessary param and mode changes, and push the change to Dashboard. def main(): # INGEST settings and mappings--we need some of these to start the connection settings, mappings, tagged_vlan_ids, interface_ips = ingest(SETTINGS_FILENAME, MAPPINGS_FILENAME) # START A MERAKI DASHBOARD API SESSION # Initialize the Dashboard connection. dashboard = meraki.DashboardAPI(suppress_logging=True) # SHOW the user the ingested settings print(STATIC_SETTINGS_REVIEW) for key, value in settings.items(): print(f'The setting {key} has the following value(s):') printj(value) # CONFIRM the operation if(input(f'{CONFIRM} {CHOICES}') != 'y'): sys.exit() # GET appliance VLANs from Meraki Dashboard appliance_vlans = dashboard.appliance.getNetworkApplianceVlans( networkId=settings['networkId'] ) # BUILD working configs that we can manipulate. switch_interfaces, switch_interfaces_with_dhcp = build_working_configs(appliance_vlans) # REMOVE, RENAME, REUSE, or TRANSFORM each param task_list = build_task_list(old_configs = switch_interfaces, mappings = mappings) # MIGRATE the settings task_list_types = dict() task_list_types['remove'] = [task['remove'] for task in task_list] task_list_types['rename'] = [task['rename'] for task in task_list] task_list_types['transform'] = [task['transform'] for task in task_list] # REMOVE params switch_interfaces = remove_params( task_list_types['remove'], switch_interfaces ) # RENAME params switch_interfaces = rename_params( task_list_types['rename'], switch_interfaces ) # TRANSFORM params. One of these transformations requires the list of interface IPs. 
switch_interfaces = transform_params( task_list_types['transform'], switch_interfaces, interface_ips = interface_ips ) # ASSIGN statics (see settings.json) switch_interfaces = assign_statics( tagged_vlan_ids, settings, switch_interfaces ) # We've now replaced all the params and modes necessary. However, unlike appliances, switches # offer different endpoints for interface settings and interface DHCP settings. Therefore we # will push some settings to the interface endpoint, and the DHCP settings to the DHCP endpoint. # Let's review what we've done: print('I created these new configs:') for interface in switch_interfaces: printj(interface) # CREATE the interfaces created_interfaces = create_interfaces( dashboard, settings, switch_interfaces ) # CONFIGURE DHCP on the DHCP interfaces configured_dhcp = configure_interface_dhcp( dashboard, settings['switchSerial'], switch_interfaces_with_dhcp, created_interfaces ) # CONFIRM printj(created_interfaces) printj(configured_dhcp) print(GOODBYE) if __name__ == "__main__": main()
[ "copy.deepcopy", "json.dumps", "sys.exit", "json.load", "meraki.DashboardAPI" ]
[((2424, 2455), 'copy.deepcopy', 'copy.deepcopy', (['starting_configs'], {}), '(starting_configs)\n', (2437, 2455), False, 'import copy\n'), ((3009, 3064), 'json.dumps', 'json.dumps', (['ugly_json_object'], {'indent': '(2)', 'sort_keys': '(False)'}), '(ugly_json_object, indent=2, sort_keys=False)\n', (3019, 3064), False, 'import json\n'), ((17196, 17238), 'meraki.DashboardAPI', 'meraki.DashboardAPI', ([], {'suppress_logging': '(True)'}), '(suppress_logging=True)\n', (17215, 17238), False, 'import meraki\n'), ((1145, 1169), 'json.load', 'json.load', (['settings_json'], {}), '(settings_json)\n', (1154, 1169), False, 'import json\n'), ((1237, 1261), 'json.load', 'json.load', (['mappings_json'], {}), '(mappings_json)\n', (1246, 1261), False, 'import json\n'), ((17493, 17503), 'sys.exit', 'sys.exit', ([], {}), '()\n', (17501, 17503), False, 'import sys\n'), ((4993, 5003), 'sys.exit', 'sys.exit', ([], {}), '()\n', (5001, 5003), False, 'import sys\n'), ((14390, 14400), 'sys.exit', 'sys.exit', ([], {}), '()\n', (14398, 14400), False, 'import sys\n')]
from bs4 import BeautifulSoup as bs import requests import hashlib import datetime from datetime import timedelta from database.dbExecutor import dbExecutor import sys from tqdm import tqdm ''' firstRunBool used - working(res dolgo bo trajalo) created by markzakelj ''' SOURCE = 'REGIONAL-OBALA' firstRunBool = False num_pages_to_check = 1 num_errors = 0 base_url = 'http://www.regionalobala.si' full_urls = ['http://www.regionalobala.si/obalne-zgodbe/', 'http://www.regionalobala.si/vesti/', 'http://www.regionalobala.si/trendi/', 'http://www.regionalobala.si/zanimivo'] #kasneje dodas se stevilo strani (1, 2, ...) headers = {'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36'} date_correct = {'danes': datetime.date.today().strftime('%d.%m.%Y'), 'včeraj': (datetime.date.today() - timedelta(1)).strftime('%d.%m.%Y') } def makeHash(title): return hashlib.sha1((title).encode('utf-8')).hexdigest() def find_last_page(url, session): r = get_connection(url, session) soup = bs(r.text, 'html.parser') num = soup.find('div', class_='t1 fright').find_all('a')[-1].get('href').split('/')[-1] return int(num) def log_error(text): global num_errors num_errors += 1 log_file = open('error_log_zakelj.log', 'a+') log_file.write(str(datetime.datetime.today()) + '\n') log_file.write(sys.argv[0] + '\n') log_file.write(text + '\n\n') log_file.close() def get_connection(url, session): #time.sleep(3) try: r = session.get(url, timeout=10) return r except requests.exceptions.MissingSchema: log_error('invalid url: ' + url) return session.get(url) except requests.exceptions.ConnectionError as e: log_error('connection error: '+url+'\n'+str(e)) def is_article_new(hash_str): if dbExecutor.getByHash(hash_str): return False return True def getLink(soup): link = soup.find('a') if link: return base_url + link.get('href') log_error('link not found, update select() method') return base_url def find_real_date(date): if date in date_correct: return date_correct[date] return date def getDate(soup): raw_date = soup.find('div', class_='newsDate') if raw_date: date = raw_date.text return find_real_date(date[:date.find(' ')]) #odrezi pri prvem presledku - .text je oblike 'dd.mm.yyyy ob hh:mm' raw_date = soup.find('div', class_='time') if raw_date: date = raw_date.text return find_real_date(date[:date.find(' ')]) log_error('date not found, update select() method') return '1.1.1111' def getTitle(soup): title = soup.find('a') if title: return title.text log_error('title not found, update select() method') return 'title not found' def getContent(soup): #odstrani vse 'script' elemente, da se ne pojavijo v 'content' for script in soup(['script']): script.decompose() content = soup.find('div', class_='p1 block_news no-min-height') if content: return ' '.join(content.text.split()) log_error('content not found, update select() method') return 'content not found' def formatDate(raw_date): #format date for consistent database try: date = raw_date.split('.') for i in range(2): if len(date[i]) == 1: date[i] = '0'+date[i] return '-'.join(reversed(date)) except IndexError: print('cant format date:'+ str(raw_date)) def getArticlesOn_n_pages(num_pages_to_check, session): articles = [] print('\tgathering articles ...') for url in full_urls: if firstRunBool: num_pages_to_check = find_last_page(url + '1', session) for n in tqdm(range(num_pages_to_check)): r = get_connection(url + str(n+1), session) soup = bs(r.text, 'html.parser') articles += soup.find_all('div', class_='w2 h2 
x1 y1 block ') articles += soup.find_all('div', class_='grid_4 block_news_small block_news_style') return articles def main(): print('=========================') print(sys.argv[0]) print('=========================') num_new_articles = 0 with requests.Session() as session: session.headers.update(headers) articles = getArticlesOn_n_pages(num_pages_to_check, session) articles_checked = len(articles) print('\tgathering article info ...') for x in tqdm(articles): title = getTitle(x) date = getDate(x) hash_str = makeHash(title) if is_article_new(hash_str): link = getLink(x) r = get_connection(link, session) soup = bs(r.text, 'html.parser') content = getContent(soup) tup = (str(datetime.date.today()), title, content, formatDate(date), hash_str, link, SOURCE) dbExecutor.insertOne(tup) num_new_articles += 1 print(num_new_articles, 'new articles found,', articles_checked, 'articles checked', num_errors, 'errors found\n') if __name__ == '__main__': if len(sys.argv) == 2 and sys.argv[1] == "-F": firstRunBool = True main()
[ "requests.Session", "database.dbExecutor.dbExecutor.getByHash", "tqdm.tqdm", "datetime.timedelta", "bs4.BeautifulSoup", "database.dbExecutor.dbExecutor.insertOne", "datetime.datetime.today", "datetime.date.today" ]
[((1154, 1179), 'bs4.BeautifulSoup', 'bs', (['r.text', '"""html.parser"""'], {}), "(r.text, 'html.parser')\n", (1156, 1179), True, 'from bs4 import BeautifulSoup as bs\n'), ((1946, 1976), 'database.dbExecutor.dbExecutor.getByHash', 'dbExecutor.getByHash', (['hash_str'], {}), '(hash_str)\n', (1966, 1976), False, 'from database.dbExecutor import dbExecutor\n'), ((4368, 4386), 'requests.Session', 'requests.Session', ([], {}), '()\n', (4384, 4386), False, 'import requests\n'), ((4614, 4628), 'tqdm.tqdm', 'tqdm', (['articles'], {}), '(articles)\n', (4618, 4628), False, 'from tqdm import tqdm\n'), ((856, 877), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (875, 877), False, 'import datetime\n'), ((3997, 4022), 'bs4.BeautifulSoup', 'bs', (['r.text', '"""html.parser"""'], {}), "(r.text, 'html.parser')\n", (3999, 4022), True, 'from bs4 import BeautifulSoup as bs\n'), ((928, 949), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (947, 949), False, 'import datetime\n'), ((952, 964), 'datetime.timedelta', 'timedelta', (['(1)'], {}), '(1)\n', (961, 964), False, 'from datetime import timedelta\n'), ((1429, 1454), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (1452, 1454), False, 'import datetime\n'), ((4880, 4905), 'bs4.BeautifulSoup', 'bs', (['r.text', '"""html.parser"""'], {}), "(r.text, 'html.parser')\n", (4882, 4905), True, 'from bs4 import BeautifulSoup as bs\n'), ((5074, 5099), 'database.dbExecutor.dbExecutor.insertOne', 'dbExecutor.insertOne', (['tup'], {}), '(tup)\n', (5094, 5099), False, 'from database.dbExecutor import dbExecutor\n'), ((4976, 4997), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (4995, 4997), False, 'import datetime\n')]
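A minimal sketch of the title-hash deduplication idea the scraper above relies on; the real code stores hashes through the project's dbExecutor helper, so a plain set stands in for the database here.

import hashlib

seen_hashes = set()  # stands in for dbExecutor.getByHash / dbExecutor.insertOne

def make_hash(title):
    return hashlib.sha1(title.encode('utf-8')).hexdigest()

for title in ['First article', 'Second article', 'First article']:
    h = make_hash(title)
    if h in seen_hashes:
        continue  # already scraped, skip it
    seen_hashes.add(h)
    print('new article:', title)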
from collections import Counter

class Solution(object):
    def topKFrequent(self, nums, k):
        occurences = Counter()
        for n in nums:
            occurences[n] += 1
        return [count[0] for count in occurences.most_common()[:k]]
[ "collections.Counter" ]
[((117, 126), 'collections.Counter', 'Counter', ([], {}), '()\n', (124, 126), False, 'from collections import Counter\n')]
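The same result can be obtained more directly, since Counter accepts an iterable and builds the frequency table in one pass; a small equivalent sketch:

from collections import Counter

def top_k_frequent(nums, k):
    # Counter(nums) replaces the manual counting loop above
    return [value for value, _ in Counter(nums).most_common()[:k]]

print(top_k_frequent([1, 1, 1, 2, 2, 3], 2))  # [1, 2]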
# path: lib/elements/ # filename: field.py # description: WSGI application html form fields ''' # make python2 strings and dictionaries behave like python3 from __future__ import unicode_literals try: from builtins import dict, str except ImportError: from __builtin__ import dict, str Copyright 2017 <NAME> Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ''' ''' external imports ''' import web import traceback ''' internal imports ''' import classes.element import field_dropdown import field_radio import field_html5 ''' Base Form Field Class ''' class Input(classes.element.Element): def __init__(self,content,definition="web.form.Textbox(name,*validators,**attrs)"): #print('lib.elements.field.Input') # super class Element super(Input, self).__init__(content) # vars self.definition = definition # name self.name = self.conf.setdefault('name') if not self.name: print('ERROR - field name not given') # args (values) self.args = self.conf.setdefault('values',[]) # args (data) if self.conf.get('data'): self.conf['data'].setdefault('format','list') self.conf['data'].setdefault('value',[]) # debug #print('loading data') if not self.content.load_data(self.conf['data']): print('data failed to load') else: # debug #print(self.content.data) if not isinstance(self.content.data,list): print('warning - data is not a list') else: self.args.extend(self.content.data) ''' Needed by dropdown, hope to find a better solution ''' # args (valuesObj) values_obj = self.conf.get('valuesObj') if values_obj: try: self.args.extend(eval('self.top.%s' %values_obj)) except: pass # validators self.validators = self.conf.get('validators', []) if 'required' in self.conf: self.conf.setdefault('required_indicator', '*') self.validators.append("web.form.notnull") else: # unset required indicator self.conf['required_indicator'] = '' # convert (eval) validators for web.py validators = [] for validator in self.validators: validators.append(eval(self.content.fnr(validator))) #validators.append(eval(validator)) self.validators = validators # attrs (attributes) self.attrs = self.conf.get('attributes', {}) # attrs hacks if 'class' in self.attrs: self.attrs['class_'] = self.attrs['class'] del self.attrs['class'] if 'class' in self.conf: self.attrs['class_'] = self.conf['class'] if not self.attrs.get('class_'): self.attrs['class_'] = "" if 'baseclass' in self.conf: self.attrs['class_'] = "%s %s" %(self.conf['baseclass'], self.attrs['class_']) # value hacks if 'value' in self.conf: self.attrs['value'] = self.conf['value'] # markup value if 'value' in self.attrs: self.attrs['value'] = self.content.fnr(self.attrs['value']) return None def fieldObj(self): #debug #print('lib.elements.field.Input fieldObj()') # vars name = self.name args = self.args validators = self.validators attrs = self.attrs # make fieldObj self.content.fieldObj = eval(self.definition) self.content.fieldObj.content = self.content return self.content.fieldObj def render(self): #debug #print('lib.elements.field.Input render()') ''' This is an interesting use of fnr_types. This concept could be used a lot more. 
''' # add {{field:$attr}} to fnr_types self.view.marker_map.update({'field': 'self.fieldObj.attrs'}) # is this field in an error state? if self.content.fieldObj.note: # this field has an error push note self.conf['note'] = self.content.fieldObj.note # set error class default bootstrap has-error self.conf.setdefault('error_class', 'has-error') else: # clear values for non error state self.conf['error_class'] = '' self.conf['note'] = '' # render field using webpy method return self.content.fieldObj.render() ''' Form Field Classes ''' class Button(Input): def __init__(self,content): #debug #print('lib.elements.field.Button') # super class Input super(Button, self).__init__(content,"web.form.Button(name,*validators,**attrs)") # instanciate fieldObj try: self.fieldObj() except: traceback.print_exc() return None class Hidden(Input): def __init__(self,content): #debug #print('lib.elements.field.Hidden') # super class Input super(Hidden, self).__init__(content,"web.form.Hidden(name,**attrs)") # instanciate fieldObj try: self.fieldObj() except: traceback.print_exc() return None class Textbox(Input): def __init__(self,content): #debug #print('lib.elements.field.Textbox') # super class Input super(Textbox, self).__init__(content,"web.form.Textbox(name,*validators,**attrs)") # instanciate fieldObj try: self.fieldObj() except: traceback.print_exc() return None class Textarea(Input): def __init__(self,content): #debug #print('lib.elements.field.Textarea') # super class Input super(Textarea, self).__init__(content,"web.form.Textarea(name,*validators,**attrs)") # instanciate fieldObj try: self.fieldObj() except: traceback.print_exc() return None class Password(Input): def __init__(self,content): #debug #print('lib.elements.field.Password') # super class Input super(Password, self).__init__(content,"web.form.Password(name,*validators,**attrs)") # instanciate fieldObj try: self.fieldObj() except: traceback.print_exc() return None class Dropdown(Input): def __init__(self,content): #debug #print('lib.elements.field.Dropdown') # super class Input super(Dropdown, self).__init__(content,"field_dropdown.Dropdown(name,args,*validators,**attrs)") if 'innerclass' in self.conf: self.attrs['innerclass'] = self.conf['innerclass'] # instanciate fieldObj try: self.fieldObj() except: traceback.print_exc() return None class Radio(Input): def __init__(self,content): #debug #print('lib.elements.field.Radio') # super class Input super(Radio, self).__init__(content,"field_radio.Radio(name,args,*validators,**attrs)") if 'innerwrap' in self.conf: self.attrs['wrap'] = self.conf['innerwrap'] # instanciate fieldObj try: self.fieldObj() except: traceback.print_exc() return None class Radio4(Input): def __init__(self,content): #debug #print('lib.elements.field.Radio') # super class Input super(Radio4, self).__init__(content,"field_radio.Radio4(name,args,*validators,**attrs)") if 'innerwrap' in self.conf: self.attrs['wrap'] = self.conf['innerwrap'] # instanciate fieldObj try: self.fieldObj() except: traceback.print_exc() return None class Checkbox(Input): def __init__(self,content): #debug #print('lib.elements.field.Checkbox') # var self.conf = content.attributes # set value to True if not set self.conf.setdefault('attributes',{}) self.conf['attributes'].setdefault('value', 'True') # super class Input super(Checkbox, self).__init__(content,"web.form.Checkbox(name,*validators,**attrs)") # instanciate fieldObj try: self.fieldObj() except: traceback.print_exc() return None class File(Input): def __init__(self,content): #debug 
#print("lib.elements.field.File") # super class Input super(File, self).__init__(content,"web.form.File(name,*validators,**attrs)") # instanciate fieldObj try: self.fieldObj() except: traceback.print_exc() # upload? if self.top.post_vars and self.name in self.top.post_vars: if 'multiple' in self.attrs: # single or many? ''' # single FieldStorage('files', 'file1.txt', 'file1content') ''' ''' # many [ FieldStorage('files', 'file1.txt', 'file1content'), FieldStorage('files', 'file2.txt', 'file2content') ] ''' # Convert single to many if not isinstance(self.top.post_vars[self.name],list): self.top.post_vars[self.name] = [self.top.post_vars[self.name]] data = [] filenames = [] for item in self.top.post_vars[self.name]: data.append({ "name": item.filename, "value": item.value, }) filenames.append(item.filename) # save filename and file in session self.top.session.vars[self.name] = data # replace value from postvars with just filename self.top.post_vars[self.name] = ",".join(filenames) # add filename to attributes self.conf.setdefault('filename','<label>Uploaded Filename:</label> %s'%self.top.post_vars[self.name]) else: #print(self.top.post_vars[self.name]) if self.top.post_vars[self.name].filename and self.top.post_vars[self.name].value: #if 'filename' in dir(self.top.post_vars[self.name]) and 'value' in dir(self.top.post_vars[self.name]): #debug #print('found filename and value') # store filename and file data = { "name": self.top.post_vars[self.name].filename, "value": str(self.top.post_vars[self.name].value), } # save filename and file in session self.top.session.vars[self.name] = data # replace value from postvars with just filename self.top.post_vars[self.name] = self.top.post_vars[self.name].filename # add filename to attributes self.conf.setdefault('filename','<label>Uploaded Filename:</label> %s'%self.top.post_vars[self.name]) '''#debug else: print('filename and value not found') ''' return None ''' HTML5 types ''' class Number(Input): def __init__(self,content): #debug #print('lib.elements.field.Number') # super class Input super(Number, self).__init__(content,"field_html5.Number(name,*validators,**attrs)") # instanciate fieldObj try: self.fieldObj() except: traceback.print_exc() return None class Range(Input): def __init__(self,content): # super class super(Range, self).__init__(content,"field_html5.Range(name,*validators,**attrs)") # instanciate fieldObj try: self.fieldObj() except: traceback.print_exc() return None class Color(Input): def __init__(self,content): # super class super(Color, self).__init__(content,"field_html5.Color(name,*validators,**attrs)") self.fieldObj() class Date(Input): def __init__(self,content): # super class super(Date, self).__init__(content,"field_html5.Date(name,*validators,**attrs)") self.fieldObj() class DateLocal(Input): def __init__(self,content): # super class super(DateLocal, self).__init__(content,"field_html5.DateLocal(name,*validators,**attrs)") self.fieldObj() class Time(Input): def __init__(self,content): # super class super(Time, self).__init__(content,"field_html5.Time(name,*validators,**attrs)") self.fieldObj() class Week(Input): def __init__(self,content): # super class super(Week, self).__init__(content,"field_html5.Week(name,*validators,**attrs)") self.fieldObj() class Month(Input): def __init__(self,content): # super class super(Month, self).__init__(content,"field_html5.Month(name,*validators,**attrs)") self.fieldObj() class Email(Input): def __init__(self,content): # super class super(Email, 
self).__init__(content,"field_html5.Email(name,*validators,**attrs)") self.fieldObj() class Url(Input): def __init__(self,content): # super class super(Url, self).__init__(content,"field_html5.Url(name,*validators,**attrs)") self.fieldObj() class Search(Input): def __init__(self,content): # super class super(Search, self).__init__(content,"field_html5.Search(name,*validators,**attrs)") self.fieldObj() ''' this must have been an expirement. should really go ''' class Generic(Input): def __init__(self,content): #debug #print("lib.elements.field.File") # super class Input super(Generic, self).__init__(content,"web.form.File(name,*validators,**attrs)") # instanciate fieldObj try: self.fieldObj() except: traceback.print_exc() # upload? if self.top.post_vars and self.name in self.top.post_vars: self.conf['attributes'].setdefault('value', self.top.post_vars[self.name]) return None
[ "traceback.print_exc" ]
[((4786, 4807), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (4805, 4807), False, 'import traceback\n'), ((5096, 5117), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (5115, 5117), False, 'import traceback\n'), ((5421, 5442), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (5440, 5442), False, 'import traceback\n'), ((5750, 5771), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (5769, 5771), False, 'import traceback\n'), ((6078, 6099), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (6097, 6099), False, 'import traceback\n'), ((6511, 6532), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (6530, 6532), False, 'import traceback\n'), ((6920, 6941), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (6939, 6941), False, 'import traceback\n'), ((7331, 7352), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (7350, 7352), False, 'import traceback\n'), ((7835, 7856), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (7854, 7856), False, 'import traceback\n'), ((8148, 8169), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (8167, 8169), False, 'import traceback\n'), ((10527, 10548), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (10546, 10548), False, 'import traceback\n'), ((10794, 10815), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (10813, 10815), False, 'import traceback\n'), ((12770, 12791), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (12789, 12791), False, 'import traceback\n')]
# -*- coding: utf-8 -*-
"""
    proxy.py
    ~~~~~~~~
    ⚡⚡⚡ Fast, Lightweight, Pluggable, TLS interception capable proxy server focused on
    Network monitoring, controls & Application development, testing, debugging.

    :copyright: (c) 2013-present by <NAME> and contributors.
    :license: BSD, see LICENSE for more details.
"""
from solana.publickey import PublicKey
from .proxy import entry_point
import os
from .indexer.airdropper import run_airdropper
from solana.rpc.api import Client


if __name__ == '__main__':
    airdropper_mode = os.environ.get('AIRDROPPER_MODE', 'False').lower() in [1, 'true', 'True']
    if airdropper_mode:
        print("Will run in airdropper mode")
        solana_url = os.environ['SOLANA_URL']
        evm_loader_id = os.environ['EVM_LOADER']
        pyth_mapping_account = PublicKey(os.environ['PYTH_MAPPING_ACCOUNT'])
        faucet_url = os.environ['FAUCET_URL']
        wrapper_whitelist = os.environ['INDEXER_ERC20_WRAPPER_WHITELIST']
        if wrapper_whitelist != 'ANY':
            wrapper_whitelist = wrapper_whitelist.split(',')
        log_level = os.environ['LOG_LEVEL']
        neon_decimals = int(os.environ.get('NEON_DECIMALS', '9'))
        start_slot = os.environ.get('START_SLOT', 0)
        pp_solana_url = os.environ.get('PP_SOLANA_URL', None)
        max_conf = float(os.environ.get('MAX_CONFIDENCE_INTERVAL', 0.02))
        run_airdropper(solana_url, evm_loader_id, pyth_mapping_account, faucet_url,
                       wrapper_whitelist, log_level, neon_decimals, start_slot,
                       pp_solana_url, max_conf)
    else:
        entry_point()
[ "solana.publickey.PublicKey", "os.environ.get" ]
[((817, 862), 'solana.publickey.PublicKey', 'PublicKey', (["os.environ['PYTH_MAPPING_ACCOUNT']"], {}), "(os.environ['PYTH_MAPPING_ACCOUNT'])\n", (826, 862), False, 'from solana.publickey import PublicKey\n'), ((1215, 1246), 'os.environ.get', 'os.environ.get', (['"""START_SLOT"""', '(0)'], {}), "('START_SLOT', 0)\n", (1229, 1246), False, 'import os\n'), ((1271, 1308), 'os.environ.get', 'os.environ.get', (['"""PP_SOLANA_URL"""', 'None'], {}), "('PP_SOLANA_URL', None)\n", (1285, 1308), False, 'import os\n'), ((1155, 1191), 'os.environ.get', 'os.environ.get', (['"""NEON_DECIMALS"""', '"""9"""'], {}), "('NEON_DECIMALS', '9')\n", (1169, 1191), False, 'import os\n'), ((1334, 1381), 'os.environ.get', 'os.environ.get', (['"""MAX_CONFIDENCE_INTERVAL"""', '(0.02)'], {}), "('MAX_CONFIDENCE_INTERVAL', 0.02)\n", (1348, 1381), False, 'import os\n'), ((548, 590), 'os.environ.get', 'os.environ.get', (['"""AIRDROPPER_MODE"""', '"""False"""'], {}), "('AIRDROPPER_MODE', 'False')\n", (562, 590), False, 'import os\n')]
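A sketch of the environment an airdropper-mode run of the script above expects; every value below is a made-up placeholder, only the variable names come from the code.

import os

os.environ.update({
    'AIRDROPPER_MODE': 'true',
    'SOLANA_URL': 'http://localhost:8899',
    'EVM_LOADER': '<evm-loader-program-id>',
    'PYTH_MAPPING_ACCOUNT': '<pyth-mapping-account>',
    'FAUCET_URL': 'http://localhost:3333',
    'INDEXER_ERC20_WRAPPER_WHITELIST': 'ANY',
    'LOG_LEVEL': 'INFO',
})
# NEON_DECIMALS, START_SLOT, PP_SOLANA_URL and MAX_CONFIDENCE_INTERVAL
# fall back to their defaults when left unset.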
from urllib.request import urlopen
import json

REPOSITORY = "jupyterlab"
ORGANIZATION = "jupyterlab"


def find_latest_stable(owner, repository):
    """Find latest stable release on GitHub for given repository."""
    endpoint = f"https://api.github.com/repos/{owner}/{repository}/releases"
    releases = json.loads(urlopen(endpoint).read())
    for release in releases:
        # skip drafts and pre-releases
        if release['prerelease'] or release['draft']:
            continue
        name = release['tag_name']
        if not name.startswith('v'):
            raise ValueError('Unexpected release tag name format: does not start with v')
        return name[1:]


if __name__ == '__main__':
    print(find_latest_stable(owner=ORGANIZATION, repository=REPOSITORY))
[ "urllib.request.urlopen" ]
[((320, 337), 'urllib.request.urlopen', 'urlopen', (['endpoint'], {}), '(endpoint)\n', (327, 337), False, 'from urllib.request import urlopen\n')]
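A short usage sketch; the owner and repository below are only an example, and any public GitHub project whose release tags start with 'v' would work the same way.

# prints the newest non-draft, non-prerelease tag with the leading 'v' stripped
print(find_latest_stable(owner='jupyterlab', repository='jupyterlab'))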
# MIT License
# Copyright (c) 2017 MassChallenge, Inc.

from __future__ import unicode_literals

import swapper

from accelerator_abstract.models.base_bucket_state import BaseBucketState


class BucketState(BaseBucketState):
    class Meta(BaseBucketState.Meta):
        swappable = swapper.swappable_setting(
            BaseBucketState.Meta.app_label, "BucketState")
[ "swapper.swappable_setting" ]
[((283, 355), 'swapper.swappable_setting', 'swapper.swappable_setting', (['BaseBucketState.Meta.app_label', '"""BucketState"""'], {}), "(BaseBucketState.Meta.app_label, 'BucketState')\n", (308, 355), False, 'import swapper\n')]
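Code elsewhere in such a project would normally resolve the concrete model through swapper instead of importing this class directly; a short sketch, reusing the app label from the Meta class above:

import swapper

# resolves to BucketState unless a swapped-in model is configured in settings
BucketState = swapper.load_model(BaseBucketState.Meta.app_label, 'BucketState')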
__author__ = 'Xsank'
import inspect
from collections import defaultdict


def add_event(valid_type):
    def decorate(func):
        setattr(func, 'event', valid_type.id())
        return func
    return decorate


class Listener(object):
    '''
    This is the base class of all listeners.
    Your listener has to inherit from it; otherwise the eventbus will throw
    the ListenertypeError exception when you register it.
    '''
    def __init__(self):
        self.event_handlers = defaultdict(list)
        self.init_event_handlers()

    def init_event_handlers(self):
        for name, func in self.get_handlers():
            self.event_handlers[func.event].append(func)

    def get_handlers(self):
        funcs = inspect.getmembers(self, predicate=inspect.ismethod)
        return [(name, func) for name, func in funcs if hasattr(func, 'event')]
[ "inspect.getmembers", "collections.defaultdict" ]
[((497, 514), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (508, 514), False, 'from collections import defaultdict\n'), ((732, 784), 'inspect.getmembers', 'inspect.getmembers', (['self'], {'predicate': 'inspect.ismethod'}), '(self, predicate=inspect.ismethod)\n', (750, 784), False, 'import inspect\n')]
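A sketch of how a concrete listener would bind methods to events with the add_event decorator; the DownloadEvent class is hypothetical and only needs the id() classmethod that the decorator calls.

class DownloadEvent(object):
    @classmethod
    def id(cls):
        return 'download'  # any hashable event id works

class DownloadListener(Listener):
    @add_event(DownloadEvent)
    def on_download(self, event):
        print('handling', event)

listener = DownloadListener()
print(listener.event_handlers)  # maps 'download' to [on_download]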
################################################################################
import stapi
import types
import json
from pprint import pprint
from operator import itemgetter
import json


def getPerformer(performer_name="<NAME>"):
    criteria = stapi.search_criteria.PerformerSearchCriteria(0, 1, "", name=performer_name)
    response = stapi.RestClient().performer.search(criteria)  # returns dictionary
    performers = response["performers"]
    if len(performers) == 1:
        performer = performers[0]
    else:
        raise NameError
    rest_client = stapi.RestClient()
    return rest_client.performer.get(performer["uid"])


def getEpisode(episode_name="The Storyteller"):
    criteria = stapi.search_criteria.EpisodeSearchCriteria(0, 1, "", title=episode_name)
    response = stapi.RestClient().episode.search(criteria)
    episodes = response["episodes"]
    episode = episodes[0]
    rest_client = stapi.RestClient()
    return rest_client.episode.get(episode["uid"])


def getCharacter(character_name="<NAME>"):
    criteria = stapi.search_criteria.CharacterSearchCriteria(0, 1, "", name=character_name)
    response = stapi.RestClient().character.search(criteria)
    characters = response["characters"]
    if len(characters) == 1:
        character = characters[0]
    else:
        character = characters[0]  # change so it includes entries besides the first
    rest_client = stapi.RestClient()
    return rest_client.character.get(character["uid"])
[ "stapi.search_criteria.EpisodeSearchCriteria", "stapi.RestClient", "stapi.search_criteria.PerformerSearchCriteria", "stapi.search_criteria.CharacterSearchCriteria" ]
[((250, 326), 'stapi.search_criteria.PerformerSearchCriteria', 'stapi.search_criteria.PerformerSearchCriteria', (['(0)', '(1)', '""""""'], {'name': 'performer_name'}), "(0, 1, '', name=performer_name)\n", (295, 326), False, 'import stapi\n'), ((579, 597), 'stapi.RestClient', 'stapi.RestClient', ([], {}), '()\n', (595, 597), False, 'import stapi\n'), ((717, 790), 'stapi.search_criteria.EpisodeSearchCriteria', 'stapi.search_criteria.EpisodeSearchCriteria', (['(0)', '(1)', '""""""'], {'title': 'episode_name'}), "(0, 1, '', title=episode_name)\n", (760, 790), False, 'import stapi\n'), ((939, 957), 'stapi.RestClient', 'stapi.RestClient', ([], {}), '()\n', (955, 957), False, 'import stapi\n'), ((1069, 1145), 'stapi.search_criteria.CharacterSearchCriteria', 'stapi.search_criteria.CharacterSearchCriteria', (['(0)', '(1)', '""""""'], {'name': 'character_name'}), "(0, 1, '', name=character_name)\n", (1114, 1145), False, 'import stapi\n'), ((1433, 1451), 'stapi.RestClient', 'stapi.RestClient', ([], {}), '()\n', (1449, 1451), False, 'import stapi\n'), ((351, 369), 'stapi.RestClient', 'stapi.RestClient', ([], {}), '()\n', (367, 369), False, 'import stapi\n'), ((814, 832), 'stapi.RestClient', 'stapi.RestClient', ([], {}), '()\n', (830, 832), False, 'import stapi\n'), ((1169, 1187), 'stapi.RestClient', 'stapi.RestClient', ([], {}), '()\n', (1185, 1187), False, 'import stapi\n')]
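A brief usage sketch, assuming the stapi client is installed and the public STAPI endpoint is reachable; the names passed in are only examples.

print(getEpisode('The Storyteller'))   # full episode record looked up by its uid
print(getCharacter('Elim Garak'))      # works the same way for characters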
# ===== DOC ===== # The class that houses methods to run all wheels import random from extract import * import time loselist = ["+1500pts", "Delete up to 1000pts from enemy", "Reset, pick two factions, +500pts", "+1500pts", "Wheel Of Big", "3x Wheel Of Mid", "5x Wheel Of Chumps, times two", "Reset, use secret faction, +500pts"] factions = ["Tribal", "Farmer", "Ancient", "Medieval", "Viking", "Dynasty", "Reneissance", "Pirate", "Spooky", "Wild West", "Good", "Evil", "Legacy", "Wheel Of Secret", "250pts or less", "500pts or more", "Ranged units", "Melee units", "Opponent's choice", "Your choice", "Wheel of Wacky", "Wheel of Wacky"] specials = ["Rotate the line permanently", "Players can place units wherever", "Place each other's units", "Wheel of Huge, add to both sides", "Both players get 3000pts", "Both players use the secret faction", "Change win condition", "Both players can reset as much as they want", "Both players spin wheel of big twice"] def Wheel_Of_Units(players, rolls, type, number): time.sleep(3) if type == "small": units = chumplist elif type == "mid": units = midlist elif type == "big": units = biglist elif type == "huge": units = hugelist total = 0 if len(players) == 1: print(str(players[0].getName()) + " gets " + str(number) + " of:") else: print("Both players get" + str(number) + " of:") for i in range(0, rolls): rnumber = random.randrange(0, len(units)) unit = units[rnumber] total += unit[2] print(unit[0]) for i in range(0, len(players)): players[i].setUnits(unit[0]) total = total*number print("\nTotal cost: ", total, "\n") for i in range(0, len(players)): players[i].addPoints(total) def Wheel_Of_Wacky(player, enemy): rule = "nothing" rnumber = random.randrange(0, len(specials)) wacky = specials[rnumber] rule = None print("The wheel of wacky decrees '" + str(wacky) + "'") time.sleep(2) if rnumber == 0 or rnumber == 1 or rnumber == 2 or rnumber == 6: rule = wacky elif rnumber == 3: Wheel_Of_Units([player, enemy], 1, "huge", 1) elif rnumber == 4: player.addPoints(3000) elif rnumber == 5: player.giveFaction("Secret") enemy.giveFaction("Secret") elif rnumber == 7: player.setReset(True) elif rnumber == 8: Wheel_Of_Units([player], 2, "big", 1) Wheel_Of_Units([enemy], 2, "big", 1) return rule def Wheel_Of_Losers(player, enemy): print(str(player.getName()) + " gets to spin the wheel of losers! \n") print("...") rnumber = random.randrange(0, len(loselist)) time.sleep(3) print(str(player.getName()) + " rolls '" + str(loselist[rnumber]) + "'") if (rnumber == 0) or (rnumber == 3): player.addPoints(1500) elif rnumber == 1: enemy.addPoints(-1000) elif (rnumber == 2): player.addPoints(500) player.giveFaction("Pick two") player.setReset(True) elif rnumber == 4: Wheel_Of_Units([player], 1, "big", 1) elif rnumber == 5: Wheel_Of_Units([player], 3, "mid", 1) elif rnumber == 6: Wheel_Of_Units([player], 5, "small", 2) elif rnumber == 7: player.addPoints(500) player.giveFaction("Secret") player.setReset(True) time.sleep(3) def Wheel_Of_Warriors(player, enemy): rule = "nothing" playerName = player.getName() rnumber = random.randrange(0, len(factions)) print(str(playerName) + " spins the wheel of warriors") time.sleep(2) if rnumber < 20: playerFaction = factions[rnumber] print(str(playerName) + " gets to choose units from '" + str(playerFaction) + "'") player.giveFaction(playerFaction) else: print(str(playerName) + " lands on the wheel of wacky") rule = Wheel_Of_Wacky(player, enemy) time.sleep(3) return rule
[ "time.sleep" ]
[((1082, 1095), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (1092, 1095), False, 'import time\n'), ((2083, 2096), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (2093, 2096), False, 'import time\n'), ((2783, 2796), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (2793, 2796), False, 'import time\n'), ((3458, 3471), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (3468, 3471), False, 'import time\n'), ((3680, 3693), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (3690, 3693), False, 'import time\n'), ((4015, 4028), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (4025, 4028), False, 'import time\n')]
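The wheel functions above only need player objects exposing getName, addPoints, setUnits, giveFaction and setReset; a minimal hypothetical stub that would satisfy a Wheel_Of_Warriors call looks like this.

class Player(object):
    def __init__(self, name):
        self.name, self.points, self.factions = name, 0, []
    def getName(self):
        return self.name
    def addPoints(self, pts):
        self.points += pts
    def setUnits(self, unit):
        pass
    def giveFaction(self, faction):
        self.factions.append(faction)
    def setReset(self, flag):
        pass

# Wheel_Of_Warriors(Player('Alice'), Player('Bob')) would then spin for Alice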
import logging from django.conf import settings from django.core.management.base import BaseCommand from django.core.mail import EmailMessage from tickets.models import Order logger = logging.getLogger('kompassi') FROM = "<EMAIL>" SUBJECT = "Nekocon 2020: Tapahtuma on peruttu" MESSAGE = """NEKOCON ON PERUTTU! Nekoconin conitea on aktiivisesti seurannut tilanteen kehittymistä ja keskustellut tapahtuman tulevaisuudesta. Keskiviikkona 22.4. tulleessa hallituksen tiedotustilaisuudessa yli 500 hengen yleisötapahtumat perutaan heinäkuun loppuun saakka. Nekocon noudattaa hallituksen linjauksia koronaepidemiassa ja peruu tapahtumansa 11.-12.7.2020 Kuopion Musiikkikeskuksella. Kiitämme kaikkia, jotka olisivat olleet osa tapahtumaamme, sillä ilman teitä conia ei olisi voitu alkaa rakentamaan juuri teitä varten! Alla ohjeita sinulle, joka olit tulossa osaksi tapahtumaamme, olitpa sitten kävijä, ohjelmapitäjä tai muu taho. Tapahtumaan lipun ostaneille palautetaan lippukuluista 24 € ja majoituksen ostaneille 14 €. Lippua ja majoitusta kohden pidätetään 1€ maksuliikennekuluja. Palautukset hoidetaan pikimmiten pankkitilille, josta tapahtuma lippu ja/tai majoitus on maksettu. Nekoconiin hakeneille ohjelmapitäjille, vapaaehtoisille, sidosryhmille ja sponsoreille lähetetään viestiä pikimmiten tapahtuman peruuntumista koskien. Jos olet ilmoittautunut osaksi tapahtumaan, seuraathan siis sähköpostiasi. Olemme kaikki varmasti pahoillamme, ettei tapahtumaa päässyt tänä kesänä syntymään, mutta conitea istuu alas pohtimaan tapahtuman tulevaisuutta ja suuntaa katseensa ensi kesään. Nekoconin conitea toivottaa kaikille vaikeista ajoista huolimatta aurinkoista kesää! Nekoconin Discord myös jatkaa pyörimistä, jossa saa viettää kissojen tapaan letkeää vapaa-aikaa! Terveisin Nekoconin conitea """ class Command(BaseCommand): args = '' help = 'Send nekocon2020 cancellation message' def handle(self, *args, **opts): for order in Order.objects.filter( event__slug="nekocon2020", confirm_time__isnull=False, payment_date__isnull=False, cancellation_time__isnull=True, ): print(order.formatted_order_number, order.customer.name_and_email) EmailMessage( subject=SUBJECT, body=MESSAGE, from_email=FROM, to=(order.customer.name_and_email,), ).send(fail_silently=True)
[ "logging.getLogger", "django.core.mail.EmailMessage", "tickets.models.Order.objects.filter" ]
[((187, 216), 'logging.getLogger', 'logging.getLogger', (['"""kompassi"""'], {}), "('kompassi')\n", (204, 216), False, 'import logging\n'), ((1961, 2100), 'tickets.models.Order.objects.filter', 'Order.objects.filter', ([], {'event__slug': '"""nekocon2020"""', 'confirm_time__isnull': '(False)', 'payment_date__isnull': '(False)', 'cancellation_time__isnull': '(True)'}), "(event__slug='nekocon2020', confirm_time__isnull=False,\n payment_date__isnull=False, cancellation_time__isnull=True)\n", (1981, 2100), False, 'from tickets.models import Order\n'), ((2249, 2351), 'django.core.mail.EmailMessage', 'EmailMessage', ([], {'subject': 'SUBJECT', 'body': 'MESSAGE', 'from_email': 'FROM', 'to': '(order.customer.name_and_email,)'}), '(subject=SUBJECT, body=MESSAGE, from_email=FROM, to=(order.\n customer.name_and_email,))\n', (2261, 2351), False, 'from django.core.mail import EmailMessage\n')]
import importlib import inspect from typing import Any, Collection, Mapping, Tuple, Union from ..logger import get_logger from .module_tree import ( ModuleTree, NodeBase, ClassNode, ModuleNode, AttributeNode, FunctionNode, ) log = get_logger(__name__) def create_tree_from_module( module_name: Union[str,'Module'], tree: ModuleTree = None, ignore: Collection[str] = [], skip_private: bool = True, max_recursive: int = 2, num_recursive: int = 0, visited: Collection[str] = [], ) -> ModuleTree: """_summary_ Args: module_name (str): _description_ tree (ModuleTree, optional): _description_. Defaults to None. ignore (Collection[str], optional): _description_. Defaults to []. skip_private (bool, optional): _description_. Defaults to True. max_recursive (int, optional): _description_. Defaults to 2. num_recursive (int, optional): _description_. Defaults to 0. visited (Collection[str], optional): DO NOT INITIATE. Defaults to []. Returns: ModuleTree: _description_ """ if tree == None: tree = ModuleTree(module_name) if inspect.ismodule(module_name): _module = module_name else: _module = importlib.import_module(module_name) if num_recursive == 0: log.info(f"Creating tree from module {module_name}") else: log.debug(f"Creating tree from module {module_name}") num_recursive += 1 _nodes, _submodules = get_members_from_module( _module, ignore=ignore, skip_private=skip_private, ) log.debug(f"""In {_module.__name__} found: {len(_nodes)} Nodes {len(_submodules)} Submodules: {",".join([s.__name__ for s in _submodules])}""") tree.add_nodes(_nodes) if num_recursive == max_recursive or not _submodules: log.debug(f"Returning Tree: num_recursive={num_recursive}, _submodules={_submodules}") log.debug(f" TREE: {tree._nodes_str()}") return tree for submodule in _submodules: if submodule in visited: continue visited.append(submodule) log.debug(f"Recursing into: {submodule.__name__}") branch = create_tree_from_module( submodule, tree=tree, ignore=ignore, skip_private=skip_private, max_recursive=max_recursive, num_recursive=num_recursive, visited = visited, ) #tree.add_branch(branch,branch_name=submodule.__name__) log.debug(f"Exiting recursion from: {submodule.__name__}") return tree def _is_private( obj_name:str )->bool: private = obj_name.startswith("_") return private def _is_outside_module( member, module_name:str, ) -> bool: """_summary_ Args: member (_type_): _description_ module_name (str): _description_ Returns: bool: _description_ """ if inspect.isbuiltin(member) or member.__name__ == "builtin": return True if inspect.ismodule(member): _path = member.__package__ else: _path = member.__module__ return module_name not in _path def get_members_from_module( module, ignore: Collection[str] = [], skip_private: bool = True, skip_outside_module: bool = True, ) -> Tuple[Collection, Collection]: """_summary_ Args: module (_type_): _description_ ignore (Collection[str], optional): _description_. Defaults to []. skip_private (bool, optional): _description_. Defaults to True. 
Returns: Tuple[Collection[NodeBase],Collection[ModuleNode]]: _description_ """ _submodule_members = inspect.getmembers(module, predicate=inspect.ismodule) _class_members = inspect.getmembers(module, predicate=inspect.isclass) _function_members = inspect.getmembers(module, predicate=inspect.isfunction) _all_members = _submodule_members+_class_members+_function_members if skip_private: ignore += [n for n,m in _all_members if _is_private(n)] log.debug(f"Skipping private members.") if skip_outside_module: ignore += [n for n,m in _all_members if _is_outside_module(m, module.__name__)] log.debug(f"Skipping members outside {module.__name__}.") _module_members = [m for n, m in inspect.getmembers(module) if n not in ignore] #log.debug(f"Module Members: {_module_members}") submodules = [m for n,m in _submodule_members if n not in ignore] classes = [m for n,m in _class_members if n not in ignore] # should hopefully only be getting lone functions (notclass methods) bc in module, not class functions = [m for n,m in _function_members if n not in ignore] log.debug(f"""Found members to document in module {module.__name__}: - Submodules: {len(submodules)} - Classes: {len(classes)} - Functions: {len(functions)}""" ) nodes = submodules+classes+functions return nodes, submodules
[ "inspect.isbuiltin", "inspect.ismodule", "inspect.getmembers", "importlib.import_module" ]
[((1180, 1209), 'inspect.ismodule', 'inspect.ismodule', (['module_name'], {}), '(module_name)\n', (1196, 1209), False, 'import inspect\n'), ((3069, 3093), 'inspect.ismodule', 'inspect.ismodule', (['member'], {}), '(member)\n', (3085, 3093), False, 'import inspect\n'), ((3729, 3783), 'inspect.getmembers', 'inspect.getmembers', (['module'], {'predicate': 'inspect.ismodule'}), '(module, predicate=inspect.ismodule)\n', (3747, 3783), False, 'import inspect\n'), ((3805, 3858), 'inspect.getmembers', 'inspect.getmembers', (['module'], {'predicate': 'inspect.isclass'}), '(module, predicate=inspect.isclass)\n', (3823, 3858), False, 'import inspect\n'), ((3883, 3939), 'inspect.getmembers', 'inspect.getmembers', (['module'], {'predicate': 'inspect.isfunction'}), '(module, predicate=inspect.isfunction)\n', (3901, 3939), False, 'import inspect\n'), ((1269, 1305), 'importlib.import_module', 'importlib.import_module', (['module_name'], {}), '(module_name)\n', (1292, 1305), False, 'import importlib\n'), ((2982, 3007), 'inspect.isbuiltin', 'inspect.isbuiltin', (['member'], {}), '(member)\n', (2999, 3007), False, 'import inspect\n'), ((4371, 4397), 'inspect.getmembers', 'inspect.getmembers', (['module'], {}), '(module)\n', (4389, 4397), False, 'import inspect\n')]
from setuptools import setup

setup(
    name="pingcap-interview",
    version="0.0.1",
    python_requires=">=3.8",
    entry_points={
        'console_scripts': [
            'fuzz = packages.cli:fuzz',
            'seed = packages.cli:seed'
        ]
    }
)
[ "setuptools.setup" ]
[((30, 203), 'setuptools.setup', 'setup', ([], {'name': '"""pingcap-interview"""', 'version': '"""0.0.1"""', 'python_requires': '""">=3.8"""', 'entry_points': "{'console_scripts': ['fuzz = packages.cli:fuzz', 'seed = packages.cli:seed']}"}), "(name='pingcap-interview', version='0.0.1', python_requires='>=3.8',\n entry_points={'console_scripts': ['fuzz = packages.cli:fuzz',\n 'seed = packages.cli:seed']})\n", (35, 203), False, 'from setuptools import setup\n')]
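The two console_scripts entries expect a packages/cli.py module exposing fuzz and seed callables; a hypothetical minimal counterpart (the bodies are placeholders) could look like this, and after pip install . the fuzz and seed commands become available on the PATH.

# packages/cli.py
def fuzz():
    print('running the fuzzer')

def seed():
    print('seeding the store')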
from flask import Flask, render_template, request
import pickle
#import numpy as np
import pandas as pd

app = Flask(__name__)
model = pickle.load(open('model.pkl', 'rb'))


@app.route('/')
def home():
    return render_template('home.html')


@app.route('/predict', methods=['POST'])
def predict():
    City = int(request.values['Citydrop'])
    PM25 = float(request.values['PM2.5'])
    PM10 = float(request.values['PM10'])
    NH3 = float(request.values['NH3'])
    CO = float(request.values['CO'])
    SO2 = float(request.values['SO2'])
    O3 = float(request.values['O3'])
    Nitrites = float(request.values['Nitrites'])
    month = int(request.values['monthdrop'])
    Day = int(request.values['Day'])
    features = [City, PM25, PM10, NH3, CO, SO2, O3, Nitrites, month, Day]
    test2 = pd.DataFrame([features], columns=['City', 'PM2.5', 'PM10', 'NH3', 'CO', 'SO2', 'O3', 'Nitrites', 'month', 'Day'], dtype=float)
    #p=model.predict(test2)
    #arr=[np.array(test2)]
    output = model.predict(test2)
    output = output.item()
    output = round(output)
    #print(output)

    def get_AQI_bucket(x):
        if x <= 50:
            return "Good"
        elif x <= 100:
            return "Satisfactory"
        elif x <= 200:
            return "Moderate"
        elif x <= 300:
            return "Poor"
        elif x <= 400:
            return "Very Poor"
        elif x > 400:
            return "Severe"
        else:
            return 0

    AQI_status = get_AQI_bucket(output)
    return render_template('result.html', prediction_text="The AQI index is :{}".format(output), prediction_status="The AQI Status is :{}".format(AQI_status))


if __name__ == '__main__':
    app.run(port=5000)
[ "flask.render_template", "pandas.DataFrame", "flask.Flask" ]
[((106, 121), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (111, 121), False, 'from flask import Flask, render_template, request\n'), ((203, 231), 'flask.render_template', 'render_template', (['"""home.html"""'], {}), "('home.html')\n", (218, 231), False, 'from flask import Flask, render_template, request\n'), ((752, 882), 'pandas.DataFrame', 'pd.DataFrame', (['[features]'], {'columns': "['City', 'PM2.5', 'PM10', 'NH3', 'CO', 'SO2', 'O3', 'Nitrites', 'month', 'Day']", 'dtype': 'float'}), "([features], columns=['City', 'PM2.5', 'PM10', 'NH3', 'CO',\n 'SO2', 'O3', 'Nitrites', 'month', 'Day'], dtype=float)\n", (764, 882), True, 'import pandas as pd\n')]
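A sketch of the form fields the /predict endpoint expects, posted with requests; the numeric values are arbitrary examples, only the field names come from the view above.

import requests

form = {
    'Citydrop': 1, 'PM2.5': 55.0, 'PM10': 80.0, 'NH3': 12.0,
    'CO': 0.8, 'SO2': 9.0, 'O3': 30.0, 'Nitrites': 20.0,
    'monthdrop': 6, 'Day': 15,
}
r = requests.post('http://localhost:5000/predict', data=form)
print(r.text)  # rendered result.html with the AQI index and status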
""" This code demonstrates a real-world drag and drop. """ #Set Verbosity to control the display of information messages: # 2 Displays all messages # 1 Displays all but dnd_accept and dnd_motion messages # 0 Displays no messages Verbosity = 3 #When you drag an existing object on a canvas, we normally make the original # label into an invisible phantom, and what you are ACTUALLY dragging is # a clone of the objects label. If you set "LeavePhantomVisible" then you # will be able to see the phantom which persists until the object is # dropped. In real life you don't want the user to see the phantom, but # for demonstrating what is going on it is useful to see it. This topic # beaten to death in the comment string for Dragged.Press, below. LeavePhantomVisible = 0 try: from Tkinter import * except: from tkinter import * try: import Tkdnd except: from tkinter import dnd as Tkdnd def MouseInWidget(Widget,Event): """ Figure out where the cursor is with respect to a widget. Both "Widget" and the widget which precipitated "Event" must be in the same root window for this routine to work. We call this routine as part of drawing a DraggedObject inside a TargetWidget, eg our Canvas. Since all the routines which need to draw a DraggedObject (dnd_motion and it's friends) receive an Event, and since an event object contain e.x and e.y values which say where the cursor is with respect to the widget you might wonder what all the fuss is about; why not just use e.x and e.y? Well it's never that simple. The event that gets passed to dnd_motion et al was an event against the InitiatingObject and hence what e.x and e.y say is where the mouse is WITH RESPECT TO THE INITIATINGOBJECT. Since we want to know where the mouse is with respect to some other object, like the Canvas, e.x and e.y do us little good. You can find out where the cursor is with respect to the screen (w.winfo_pointerxy) and you can find out where it is with respect to an event's root window (e.*_root). So we have three locations for the cursor, none of which are what we want. Great. We solve this by using w.winfo_root* to find the upper left corner of "Widget" with respect to it's root window. Thus we now know where both "Widget" and the cursor (e.*_root) are with respect to their common root window (hence the restriction that they MUST share a root window). Subtracting the two gives us the position of the cursor within the widget. Yes, yes, we could have said: return (Event.X_root-Widget.winfo_rootx(),Event.y_root-Widget.winfo_rooty()) and done it all on one line, but this is DEMO code and the three line version below makes it rather more obvious what's going on. """ x = Event.x_root - Widget.winfo_rootx() y = Event.y_root - Widget.winfo_rooty() return (x,y) def Blab(Level,Message): """ Display Message if Verbosity says to. """ if Verbosity >= Level: print(Message) class Dragged: """ This is a prototype thing to be dragged and dropped. Derive from (or mixin) this class to creat real draggable objects. """ #We use this to assign a unique number to each instance of Dragged. # This isn't a necessity; we do it so that during the demo you can # tell one instance from another. 
NextNumber = 0 def __init__(self): Blab(1, "An instance of Dragged has been created") #When created we are not on any canvas self.Canvas = None self.OriginalCanvas = None #This sets where the mouse cursor will be with respect to our label self.OffsetX = 20 self.OffsetY = 10 #Assign ourselves a unique number self.Number = Dragged.NextNumber Dragged.NextNumber += 1 #Use the number to build our name self.Name = 'DragObj-%s'%self.Number def dnd_end(self,Target,Event): #this gets called when we are dropped Blab(1, self.Name + "has been dropped; Target=" + str(Target)) if self.Canvas==None and self.OriginalCanvas==None: #We were created and then dropped in the middle of nowhere, or # we have been told to self destruct. In either case # nothing needs to be done and we will evaporate shortly. return if self.Canvas is None and self.OriginalCanvas is not None: #We previously lived on OriginalCanvas and the user has # dragged and dropped us in the middle of nowhere. What you do # here rather depends on your own personal taste. There are 2 choices: # 1) Do nothing. The dragged object will simply evaporate. In effect # you are saying "dropping an existing object in the middle # of nowhere deletes it". Personally I don't like this option because # it means that if the user, while dragging an important object, # twitches their mouse finger as the object is in the middle of # nowhere then the object gets immediately deleted. Oops. # 2) Resurrect the original label (which has been there but invisible) # thus saying "dropping an existing dragged object in the middle of # nowhere is as if no drag had taken place". Thats what the code that # follows does. self.Canvas = self.OriginalCanvas self.ID = self.OriginalID self.Label = self.OriginalLabel self.Label['text'] = self.OriginalText self.Label['relief'] = RAISED #We call the canvases "dnd_enter" method here to keep its ObjectDict up # to date. We know that we had been dragged off the canvas, so before # we call "dnd_enter" the cansases ObjectDict says we are not on the # canvas. The call to "dnd_enter" will till the canvas that we are, # in effect, entering the canvas. Note that "dnd_enter" will in turn # call our "Appear" method, but "Appear" is smart enough to realize # that we already have a label on self.Canvas, so it quietly does # does nothing, self.Canvas.dnd_enter(self,Event) return #At this point we know that self.Canvas is not None, which means we have an # label of ourself on that canvas. Bind <ButtonPress> to that label so the # the user can pick us up again if and when desired. self.Label.bind('<ButtonPress>',self.Press) #If self.OriginalCanvas exists then we were an existing object and our # original label is still around although hidden. We no longer need # it so we delete it. if self.OriginalCanvas: self.OriginalCanvas.delete(self.OriginalID) self.OriginalCanvas = None self.OriginalID = None self.OriginalLabel = None def Appear(self, Canvas, XY): """ Put an label representing this Dragged instance on Canvas. XY says where the mouse pointer is. We don't, however, necessarily want to draw our upper left corner at XY. Why not? Because if the user pressed over an existing label AND the mouse wasn't exactly over the upper left of the label (which is pretty likely) then we would like to keep the mouse pointer at the same relative position inside the label. We therefore adjust X and Y by self.OffsetX and self.OffseY thus moving our upper left corner up and/or left by the specified amounts. 
These offsets are set to a nominal value when an instance of Dragged is created (where it matters rather less), and to a useful value by our "Press" routine when the user clicks on an existing instance of us. """ if self.Canvas: #we are already on a canvas; do nothing return self.X, self.Y = XY #Create a label which identifies us, including our unique number self.Label = Label(Canvas,text=self.Name,borderwidth=2, relief=RAISED) #Display the label on a window on the canvas. We need the ID returned by # the canvas so we can move the label around as the mouse moves. self.ID = Canvas.create_window(self.X-self.OffsetX, self.Y-self.OffsetY, window=self.Label, anchor="nw") #Note the canvas on which we drew the label. self.Canvas = Canvas def Vanish(self,All=0): """ If there is a label representing us on a canvas, make it go away. if self.Canvas is not None, that implies that "Appear" had prevously put a label representing us on the canvas and we delete it. if "All" is true then we check self.OriginalCanvas and if it not None we delete from it the label which represents us. """ if self.Canvas: #we have a label on a canvas; delete it self.Canvas.delete(self.ID) #flag that we are not represented on the canvas self.Canvas = None #Since ID and Label are no longer meaningful, get rid of them lest they #confuse the situation later on. Not necessary, but tidy. del self.ID del self.Label if All and self.OriginalCanvas: #Delete label representing us from self.OriginalCanvas self.OriginalCanvas.delete(self.OriginalID) self.OriginalCanvas = None del self.OriginalID del self.OriginalLabel def Move(self,XY): """ If we have a label a canvas, then move it to the specified location. XY is with respect to the upper left corner of the canvas """ assert self.Canvas, "Can't move because we are not on a canvas" self.X, self.Y = XY self.Canvas.coords(self.ID,self.X-self.OffsetX,self.Y-self.OffsetY) def Press(self,Event): """ User has clicked on a label representing us. Initiate drag and drop. There is a problem, er, opportunity here. In this case we would like to act as both the InitiationObject (because the user clicked on us and it't up to us to start the drag and drop) but we also want to act as the dragged object (because it's us the user wants to drag around). If we simply pass ourself to "Tkdnd" as the dragged object it won't work because the entire drag and drop process is moved along by <motion> events as a result of a binding by the widget on which the user clicked. That widget is the label which represents us and it get moved around by our "move" method. It also gets DELETED by our "vanish" method if the user moves it off the current canvas, which is a perfectly legal thing from them to do. If the widget which is driving the process gets deleted, the whole drag and drop grinds to a real quick halt. We use a little sleight of hand to get around this: 1) From the label which is currently representing us (self.Label) we take the text and save it in self.OriginalText. This will allow us to resurrect the label at a later time if so desired. (It turns out we so desire if the user tries to drop us in the middle of nowhere, but that's a different story; see "dnd_end", above). 2) We take the label which is currently representing us (self.Label) and we make it into an invisible phantom by setting its text to '' and settings its relief to FLAT. It is now, so to speak, a polar bear in a snowstorm. It's still there, but it blends in with the rest of then canvas on which it sits. 
3) We move all the information about the phantom label (Canvas, ID and Label) into variables which store information about the previous label (PreviousCanvas, PreviousID and PreviousLabel) 4) We set self.Canvas and friends to None, which indicates that we don't have a label representing us on the canvas. This is a bit of a lie (the phantom is technically on the canvas) but it does no harm. 5) We call "self.Appear" which, noting that don't have a label representing us on the canvas, promptly draws one for us, which gets saved as self.Canvas etc. We went to all this trouble so that: a) The original widget on which the user clicked (now the phantom) could hang around driving the drag and drop until it is done, and b) The user has a label (the one just created by Appear) which they can drag around, from canvas to canvas as desired, until they drop it. THIS one can get deleted from the current canvas and redrawn on another canvas without Anything Bad happening. From the users viewpoint the whole thing is seamless: they think the ARE dragging around the original label, but they are not. To make it really clear what is happening, go to the top of the code and set "LeavePhantomVisible" to 1. Then when you drag an existing object, you will see the phantom. The phantom is resolved by routine "dnd_end" above. If the user drops us on a canvas, then we take up residence on the canvas and the phantom label, no longer needed, is deleted. If the user tries to drop us in the middle of nowhere, then there will be no 'current' label for us (because we are in the middle of nowhere) and thus we resurrect the phantom label which in this case continues to represent us. Note that this whole deal happens ONLY when the user clicks on an EXISTING instance of us. In the case where the user clicks over the button marked "InitiationObject" then it it that button that IS the initiation object, it creates a copy of us and the whole opportunity never happens, since the "InitiationObject" button is never in any danger of being deleted. """ Blab(1, "Dragged.press") #Save our current label as the Original label self.OriginalID = self.ID self.OriginalLabel = self.Label self.OriginalText = self.OriginalLabel['text'] self.OriginalCanvas = self.Canvas #Made the phantom invisible (unless the user asked to see it) if LeavePhantomVisible: self.OriginalLabel['text'] = '<phantom>' self.OriginalLabel['relief']=RAISED else: self.OriginalLabel['text'] = '' self.OriginalLabel['relief']=FLAT #Say we have no current label self.ID = None self.Canvas = None self.Label = None #Ask Tkdnd to start the drag operation if Tkdnd.dnd_start(self,Event): #Save where the mouse pointer was in the label so it stays in the # same relative position as we drag it around self.OffsetX, self.OffsetY = MouseInWidget(self.OriginalLabel,Event) #Draw a label of ourself for the user to drag around XY = MouseInWidget(self.OriginalCanvas,Event) self.Appear(self.OriginalCanvas,XY) class CanvasDnd(Canvas): """ A canvas to which we have added those methods necessary so it can act as both a TargetWidget and a TargetObject. Use (or derive from) this drag-and-drop enabled canvas to create anything that needs to be able to receive a dragged object. """ def __init__(self, Master, cnf={}, **kw): if cnf: kw.update(cnf) Canvas.__init__(self, Master, kw) #ObjectDict is a dictionary of dragable object which are currently on # this canvas, either because they have been dropped there or because # they are in mid-drag and are over this canvas. 
self.ObjectDict = {} #----- TargetWidget functionality ----- def dnd_accept(self,Source,Event): #Tkdnd is asking us (the TargetWidget) if we want to tell it about a # TargetObject. Since CanvasDnd is also acting as TargetObject we # return 'self', saying that we are willing to be the TargetObject. Blab(2, "Canvas: dnd_accept") return self #----- TargetObject functionality ----- def dnd_enter(self,Source,Event): #This is called when the mouse pointer goes from outside the # Target Widget to inside the Target Widget. Blab(1, "Receptor: dnd_enter") #Figure out where the mouse is with respect to this widget XY = MouseInWidget(self,Event) #Since the mouse pointer is just now moving over us (the TargetWidget), # we ask the DraggedObject to represent itself on us. # "Source" is the DraggedObject. # "self" is us, the CanvasDnd on which we want the DraggedObject to draw itself. # "XY" is where (on CanvasDnd) that we want the DraggedObject to draw itself. Source.Appear(self,XY) #Add the DraggedObject to the dictionary of objects which are on this # canvas. self.ObjectDict[Source.Name] = Source def dnd_leave(self,Source,Event): #This is called when the mouse pointer goes from inside the # Target Widget to outside the Target Widget. Blab(1, "Receptor: dnd_leave") #Since the mouse pointer is just now leaving us (the TargetWidget), we # ask the DraggedObject to remove the representation of itself that it # had previously drawn on us. Source.Vanish() #Remove the DraggedObject from the dictionary of objects which are on # this canvas del self.ObjectDict[Source.Name] def dnd_motion(self,Source,Event): #This is called when the mouse pointer moves withing the TargetWidget. Blab(2, "Receptor: dnd_motion") #Figure out where the mouse is with respect to this widget XY = MouseInWidget(self,Event) #Ask the DraggedObject to move it's representation of itself to the # new mouse pointer location. Source.Move(XY) def dnd_commit(self,Source,Event): #This is called if the DraggedObject is being dropped on us. #This demo doesn't need to do anything here (the DraggedObject is # already in self.ObjectDict) but a real application would # likely want to do stuff here. Blab(1, "Receptor: dnd_commit; Object received= " + str(Source)) #----- code added for demo purposes ----- def ShowObjectDict(self,Comment): """ Print Comment and then print the present content of our ObjectDict. """ print(Comment) if len(self.ObjectDict) > 0: for Name,Object in self.ObjectDict.items(): print(' ' + Name + ", " + str(Object)) else: print(" <empty>" ) class TrashBin(CanvasDnd): """ A canvas specifically for deleting dragged objects. """ def __init__(self,Master,**kw): #Set default height/width if user didn't specify. if 'width' not in kw: kw['width'] =150 if 'height' not in kw: kw['height'] = 25 CanvasDnd.__init__(self, Master, kw) #Put the text "trash" in the middle of the canvas X = kw['width'] / 2 Y = kw['height'] /2 self.create_text(X,Y,text='TRASH') def dnd_commit(self,Source,Event): """ Accept an object dropped in the trash. Note that the dragged object's 'dnd_end' method is called AFTER this routine has returned. We call the dragged objects "Vanish(All=1)" routine to get rid of any labels it has on any canvas. Having done so, it will, at 'dnd_end' time, allow itself to evaporate. If you DON'T call "Vanish(All=1)" AND there is a phantom label of the dragged object on an OriginalCanvas then the dragged object will think it has been erroniously dropped in the middle of nowhere and it will resurrect itself from the OriginalCanvas label. 
Since we are trying to trash it, we don't want this to happen. """ Blab(1, "TrashBin: dnd_commit") #tell the dropped object to remove ALL labels of itself. Source.Vanish(All=1) #were a trash bin; don't keep objects dropped on us. self.ObjectDict.clear() if __name__ == "__main__": def on_dnd_start(Event): """ This is invoked by InitiationObject to start the drag and drop process """ #Create an object to be dragged ThingToDrag = Dragged() #Pass the object to be dragged and the event to Tkdnd Tkdnd.dnd_start(ThingToDrag,Event) def ShowObjectDicts(): """ Some demo code to let the user see what ojects we think are on each of the three canvases. """ TargetWidget_TargetObject.ShowObjectDict('UpperCanvas') TargetWidget_TargetObject2.ShowObjectDict('LowerCanvas') Trash.ShowObjectDict('Trash bin') print('----------') Root = Tk() Root.title('Drag-and-drop "real-world" demo') #Create a button to act as the InitiationObject and bind it to <ButtonPress> so # we start drag and drop when the user clicks on it. #The only reason we display the content of the trash bin is to show that it # has no objects, even after some have been dropped on it. InitiationObject = Button(Root,text='InitiationObject') InitiationObject.pack(side=TOP) InitiationObject.bind('<ButtonPress>',on_dnd_start) InitiationObject2 = Button(Root,text='Lots and lots of writing...') InitiationObject2.pack(side=TOP) InitiationObject2.bind('<ButtonPress>',on_dnd_start) #Create two canvases to act as the Target Widgets for the drag and drop. Note that # these canvases will act as both the TargetWidget AND the TargetObject. TargetWidget_TargetObject = CanvasDnd(Root,relief=RAISED,bd=2) TargetWidget_TargetObject.pack(expand=YES,fill=BOTH) TargetWidget_TargetObject2 = CanvasDnd(Root,relief=RAISED,bd=2) TargetWidget_TargetObject2.pack(expand=YES,fill=BOTH) #Create an instance of a trash can so we can get rid of dragged objects # if so desired. Trash = TrashBin(Root, relief=RAISED,bd=2) Trash.pack(expand=NO) #Create a button we can press to display the current content of the # canvases ObjectDictionaries. Button(text='Show canvas ObjectDicts',command=ShowObjectDicts).pack() Root.mainloop()
[ "tkinter.dnd.dnd_start" ]
[((15562, 15590), 'tkinter.dnd.dnd_start', 'Tkdnd.dnd_start', (['self', 'Event'], {}), '(self, Event)\n', (15577, 15590), True, 'from tkinter import dnd as Tkdnd\n'), ((21618, 21653), 'tkinter.dnd.dnd_start', 'Tkdnd.dnd_start', (['ThingToDrag', 'Event'], {}), '(ThingToDrag, Event)\n', (21633, 21653), True, 'from tkinter import dnd as Tkdnd\n')]
import logging from logging import handlers import pickle import time import uuid import os import requests import json from bson.binary import Binary from bson.json_util import dumps from flask import request from flask_classful import FlaskView, route import pymongo from katana.shared_utils.mongoUtils import mongoUtils from katana.shared_utils.policyUtils import neatUtils, test_policyUtils # Logging Parameters logger = logging.getLogger(__name__) file_handler = handlers.RotatingFileHandler("katana.log", maxBytes=10000, backupCount=5) stream_handler = logging.StreamHandler() formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s %(message)s") stream_formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s %(message)s") file_handler.setFormatter(formatter) stream_handler.setFormatter(stream_formatter) logger.setLevel(logging.DEBUG) logger.addHandler(file_handler) logger.addHandler(stream_handler) class PolicyView(FlaskView): route_prefix = "/api/" req_fields = ["id", "url", "type"] def index(self): """ Returns a list of policy management system and their details, used by: `katana policy ls` """ policy_data = mongoUtils.index("policy") return_data = [] for item in policy_data: return_data.append( dict( _id=item["_id"], component_id=item["id"], created_at=item["created_at"], type=item["type"], ) ) return dumps(return_data) def get(self, uuid): """ Returns the details of specific policy management system, used by: `katana policy inspect [uuid]` """ data = mongoUtils.get("policy", uuid) if data: return dumps(data), 200 else: return "Not Found", 404 def post(self): """ Add a new policy management system. The request must provide the system details. used by: `katana policy add -f [file]` """ # Create the object and store it in the object collection try: if request.json["type"] == "test-policy": policy = test_policyUtils.Policy(id=request.json["id"], url=request.json["url"]) elif request.json["type"] == "neat": policy = neatUtils.Policy(id=request.json["id"], url=request.json["url"]) else: return "Error: Not supported Policy system type", 400 except KeyError: return f"Error: Required fields: {self.req_fields}", 400 new_uuid = str(uuid.uuid4()) request.json["_id"] = new_uuid request.json["created_at"] = time.time() # unix epoch try: new_uuid = mongoUtils.add("policy", request.json) except pymongo.errors.DuplicateKeyError: return ( "Policy management system with id {0} already exists".format(request.json["id"]), 400, ) # Store the policy object to the mongo db thebytes = pickle.dumps(policy) obj_json = {"_id": new_uuid, "id": request.json["id"], "obj": Binary(thebytes)} mongoUtils.add("policy_obj", obj_json) return new_uuid, 201 def delete(self, uuid): """ Delete a specific policy management system. used by: `katana policy rm [uuid]` """ del_policy = mongoUtils.delete("policy", uuid) if del_policy: return "Deleted policy management system {}".format(uuid), 200 else: # if uuid is not found, return error return "Error: No such policy management system: {}".format(uuid), 404 def put(self, uuid): """ Update the details of a specific policy engine system. 
used by: `katana policy update [uuid] -f [file]` """ data = request.json data["_id"] = uuid old_data = mongoUtils.get("policy", uuid) if old_data: data["created_at"] = old_data["created_at"] try: for entry in self.req_fields: if data[entry] != old_data[entry]: return "Cannot update field: " + entry, 400 except KeyError: return f"Error: Required fields: {self.req_fields}", 400 else: mongoUtils.update("policy", uuid, data) return f"Modified {uuid}", 200 else: # Create the object and store it in the object collection try: if request.json["type"] == "test-policy": policy = test_policyUtils.Policy(id=request.json["id"], url=request.json["url"]) elif request.json["type"] == "neat": policy = neatUtils.Policy(id=request.json["id"], url=request.json["url"]) else: return "Error: Not supported Policy system type", 400 except KeyError: return f"Error: Required fields: {self.req_fields}", 400 new_uuid = uuid request.json["_id"] = new_uuid request.json["created_at"] = time.time() # unix epoch try: new_uuid = mongoUtils.add("policy", request.json) except pymongo.errors.DuplicateKeyError: return ( "Policy management system with id {0} already exists".format( request.json["id"] ), 400, ) # Store the policy object to the mongo db thebytes = pickle.dumps(policy) obj_json = {"_id": new_uuid, "id": request.json["id"], "obj": Binary(thebytes)} mongoUtils.add("policy_obj", obj_json) return new_uuid, 201 # NEAT Policy Engine specific Endpoints @route("/neat/<slice_id>", methods=["GET"]) def neat(self, slice_id): """ Send the slice parameters to the neat UE Policy System """ slice_parameters = mongoUtils.get("slice", slice_id) if slice_parameters: return slice_parameters, 200 else: return f"Slice with id {slice_id} was not found", 404 # APEX Policy Engine specific Endpoints @route("/apex/action", methods=["POST"]) def apex_action(self): """ Receive feedback from the APEX policy engine """ # Check if APEX is configured in katana isapex = os.getenv("APEX", None) if not isapex: return "APEX Policy Engine is not configured", 400 apexpolicy = request.json logger.info(f"Received new APEX action: {apexpolicy}") try: # Check the Policy Type if apexpolicy["policyType"] == "FailingNS": failing_ns_action = apexpolicy["policy"]["action"] slice_id = apexpolicy["policy"]["slice_id"] ns_id = apexpolicy["policy"]["ns_id"] nsd_id = apexpolicy["policy"]["nsd_id"] # Notify NEAT if needed notify_neat = apexpolicy["policy"]["extra_actions"].get("notify_neat", False) if not notify_neat: notify_neat = apexpolicy["policy"]["extra_actions"].get("notify_NEAT", False) if notify_neat: neat_list = mongoUtils.find_all("policy", {"type": "neat"}) for ineat in neat_list: # Get the NEAT object neat_obj = pickle.loads(mongoUtils.get("policy_obj", ineat["_id"])["obj"]) neat_obj.notify(alert_type="FailingNS", slice_id=slice_id, status=True) if failing_ns_action == "restart_ns": # Scenario 1 restart_ns_message = { "domain": "NFV", "action": "RestartNS", "details": {"ns_id": ns_id, "location": nsd_id, "change_vim": False}, } r_url = f"http://katana-nbi:8000/api/slice/{slice_id}/modify" r = requests.post( f"http://katana-nbi:8000/api/slice/{slice_id}/modify", json=json.loads(json.dumps(restart_ns_message)), ) return "Restaring the NS", 200 elif failing_ns_action == "restart_slice": # Scenario 2 restart_ns_message = { "domain": "NFV", "action": "RestartNS", "details": {"ns_id": ns_id, "location": nsd_id, "change_vim": True}, } r_url = f"http://katana-nbi:8000/api/slice/{slice_id}/modify" r = requests.post( 
f"http://katana-nbi:8000/api/slice/{slice_id}/modify", json=json.loads(json.dumps(restart_ns_message)), ) return "Restaring the NS", 200 elif failing_ns_action == "stop_slice": # Scenario 3 r_url = f"http://katana-nbi:8000/api/slice/{slice_id}" r = requests.delete(r_url) return "Stopping Slice", 200 else: return f"Action {failing_ns_action} is not supported", 400 else: return f"Policy type {apexpolicy['policyType']} is not supported", 400 except KeyError as err: return f"Key {err} is missing", 400
[ "logging.getLogger", "logging.StreamHandler", "bson.binary.Binary", "pickle.dumps", "katana.shared_utils.mongoUtils.mongoUtils.get", "katana.shared_utils.mongoUtils.mongoUtils.delete", "katana.shared_utils.mongoUtils.mongoUtils.update", "json.dumps", "katana.shared_utils.mongoUtils.mongoUtils.find_all", "katana.shared_utils.policyUtils.neatUtils.Policy", "katana.shared_utils.mongoUtils.mongoUtils.add", "logging.handlers.RotatingFileHandler", "uuid.uuid4", "katana.shared_utils.mongoUtils.mongoUtils.index", "time.time", "bson.json_util.dumps", "flask_classful.route", "os.getenv", "logging.Formatter", "katana.shared_utils.policyUtils.test_policyUtils.Policy", "requests.delete" ]
[((428, 455), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (445, 455), False, 'import logging\n'), ((471, 544), 'logging.handlers.RotatingFileHandler', 'handlers.RotatingFileHandler', (['"""katana.log"""'], {'maxBytes': '(10000)', 'backupCount': '(5)'}), "('katana.log', maxBytes=10000, backupCount=5)\n", (499, 544), False, 'from logging import handlers\n'), ((562, 585), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (583, 585), False, 'import logging\n'), ((598, 665), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s %(name)s %(levelname)s %(message)s"""'], {}), "('%(asctime)s %(name)s %(levelname)s %(message)s')\n", (615, 665), False, 'import logging\n'), ((685, 752), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s %(name)s %(levelname)s %(message)s"""'], {}), "('%(asctime)s %(name)s %(levelname)s %(message)s')\n", (702, 752), False, 'import logging\n'), ((5922, 5964), 'flask_classful.route', 'route', (['"""/neat/<slice_id>"""'], {'methods': "['GET']"}), "('/neat/<slice_id>', methods=['GET'])\n", (5927, 5964), False, 'from flask_classful import FlaskView, route\n'), ((6343, 6382), 'flask_classful.route', 'route', (['"""/apex/action"""'], {'methods': "['POST']"}), "('/apex/action', methods=['POST'])\n", (6348, 6382), False, 'from flask_classful import FlaskView, route\n'), ((1204, 1230), 'katana.shared_utils.mongoUtils.mongoUtils.index', 'mongoUtils.index', (['"""policy"""'], {}), "('policy')\n", (1220, 1230), False, 'from katana.shared_utils.mongoUtils import mongoUtils\n'), ((1562, 1580), 'bson.json_util.dumps', 'dumps', (['return_data'], {}), '(return_data)\n', (1567, 1580), False, 'from bson.json_util import dumps\n'), ((1760, 1790), 'katana.shared_utils.mongoUtils.mongoUtils.get', 'mongoUtils.get', (['"""policy"""', 'uuid'], {}), "('policy', uuid)\n", (1774, 1790), False, 'from katana.shared_utils.mongoUtils import mongoUtils\n'), ((2740, 2751), 'time.time', 'time.time', ([], {}), '()\n', (2749, 2751), False, 'import time\n'), ((3113, 3133), 'pickle.dumps', 'pickle.dumps', (['policy'], {}), '(policy)\n', (3125, 3133), False, 'import pickle\n'), ((3230, 3268), 'katana.shared_utils.mongoUtils.mongoUtils.add', 'mongoUtils.add', (['"""policy_obj"""', 'obj_json'], {}), "('policy_obj', obj_json)\n", (3244, 3268), False, 'from katana.shared_utils.mongoUtils import mongoUtils\n'), ((3467, 3500), 'katana.shared_utils.mongoUtils.mongoUtils.delete', 'mongoUtils.delete', (['"""policy"""', 'uuid'], {}), "('policy', uuid)\n", (3484, 3500), False, 'from katana.shared_utils.mongoUtils import mongoUtils\n'), ((3989, 4019), 'katana.shared_utils.mongoUtils.mongoUtils.get', 'mongoUtils.get', (['"""policy"""', 'uuid'], {}), "('policy', uuid)\n", (4003, 4019), False, 'from katana.shared_utils.mongoUtils import mongoUtils\n'), ((6109, 6142), 'katana.shared_utils.mongoUtils.mongoUtils.get', 'mongoUtils.get', (['"""slice"""', 'slice_id'], {}), "('slice', slice_id)\n", (6123, 6142), False, 'from katana.shared_utils.mongoUtils import mongoUtils\n'), ((6552, 6575), 'os.getenv', 'os.getenv', (['"""APEX"""', 'None'], {}), "('APEX', None)\n", (6561, 6575), False, 'import os\n'), ((2650, 2662), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (2660, 2662), False, 'import uuid\n'), ((2802, 2840), 'katana.shared_utils.mongoUtils.mongoUtils.add', 'mongoUtils.add', (['"""policy"""', 'request.json'], {}), "('policy', request.json)\n", (2816, 2840), False, 'from katana.shared_utils.mongoUtils import mongoUtils\n'), ((3204, 3220), 
'bson.binary.Binary', 'Binary', (['thebytes'], {}), '(thebytes)\n', (3210, 3220), False, 'from bson.binary import Binary\n'), ((5220, 5231), 'time.time', 'time.time', ([], {}), '()\n', (5229, 5231), False, 'import time\n'), ((5675, 5695), 'pickle.dumps', 'pickle.dumps', (['policy'], {}), '(policy)\n', (5687, 5695), False, 'import pickle\n'), ((5800, 5838), 'katana.shared_utils.mongoUtils.mongoUtils.add', 'mongoUtils.add', (['"""policy_obj"""', 'obj_json'], {}), "('policy_obj', obj_json)\n", (5814, 5838), False, 'from katana.shared_utils.mongoUtils import mongoUtils\n'), ((1827, 1838), 'bson.json_util.dumps', 'dumps', (['data'], {}), '(data)\n', (1832, 1838), False, 'from bson.json_util import dumps\n'), ((2234, 2305), 'katana.shared_utils.policyUtils.test_policyUtils.Policy', 'test_policyUtils.Policy', ([], {'id': "request.json['id']", 'url': "request.json['url']"}), "(id=request.json['id'], url=request.json['url'])\n", (2257, 2305), False, 'from katana.shared_utils.policyUtils import neatUtils, test_policyUtils\n'), ((4420, 4459), 'katana.shared_utils.mongoUtils.mongoUtils.update', 'mongoUtils.update', (['"""policy"""', 'uuid', 'data'], {}), "('policy', uuid, data)\n", (4437, 4459), False, 'from katana.shared_utils.mongoUtils import mongoUtils\n'), ((5290, 5328), 'katana.shared_utils.mongoUtils.mongoUtils.add', 'mongoUtils.add', (['"""policy"""', 'request.json'], {}), "('policy', request.json)\n", (5304, 5328), False, 'from katana.shared_utils.mongoUtils import mongoUtils\n'), ((5770, 5786), 'bson.binary.Binary', 'Binary', (['thebytes'], {}), '(thebytes)\n', (5776, 5786), False, 'from bson.binary import Binary\n'), ((2380, 2444), 'katana.shared_utils.policyUtils.neatUtils.Policy', 'neatUtils.Policy', ([], {'id': "request.json['id']", 'url': "request.json['url']"}), "(id=request.json['id'], url=request.json['url'])\n", (2396, 2444), False, 'from katana.shared_utils.policyUtils import neatUtils, test_policyUtils\n'), ((4691, 4762), 'katana.shared_utils.policyUtils.test_policyUtils.Policy', 'test_policyUtils.Policy', ([], {'id': "request.json['id']", 'url': "request.json['url']"}), "(id=request.json['id'], url=request.json['url'])\n", (4714, 4762), False, 'from katana.shared_utils.policyUtils import neatUtils, test_policyUtils\n'), ((7433, 7480), 'katana.shared_utils.mongoUtils.mongoUtils.find_all', 'mongoUtils.find_all', (['"""policy"""', "{'type': 'neat'}"], {}), "('policy', {'type': 'neat'})\n", (7452, 7480), False, 'from katana.shared_utils.mongoUtils import mongoUtils\n'), ((4845, 4909), 'katana.shared_utils.policyUtils.neatUtils.Policy', 'neatUtils.Policy', ([], {'id': "request.json['id']", 'url': "request.json['url']"}), "(id=request.json['id'], url=request.json['url'])\n", (4861, 4909), False, 'from katana.shared_utils.policyUtils import neatUtils, test_policyUtils\n'), ((9318, 9340), 'requests.delete', 'requests.delete', (['r_url'], {}), '(r_url)\n', (9333, 9340), False, 'import requests\n'), ((7619, 7661), 'katana.shared_utils.mongoUtils.mongoUtils.get', 'mongoUtils.get', (['"""policy_obj"""', "ineat['_id']"], {}), "('policy_obj', ineat['_id'])\n", (7633, 7661), False, 'from katana.shared_utils.mongoUtils import mongoUtils\n'), ((8340, 8370), 'json.dumps', 'json.dumps', (['restart_ns_message'], {}), '(restart_ns_message)\n', (8350, 8370), False, 'import json\n'), ((9024, 9054), 'json.dumps', 'json.dumps', (['restart_ns_message'], {}), '(restart_ns_message)\n', (9034, 9054), False, 'import json\n')]
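A rough usage sketch for the PolicyView above (the host, the port 8000 and the example payload values are assumptions, not taken from the source): Flask-Classful by default exposes a FlaskView under its lower-cased class name with the "View" suffix stripped, so with route_prefix "/api/" the post() handler answers on /api/policy/. The payload must carry the req_fields id, url and type, with type either "test-policy" or "neat".

import requests

# hypothetical endpoint of a running Katana NBI instance
KATANA_URL = "http://localhost:8000/api/policy/"

payload = {"id": "neat-policy-1", "url": "http://neat-engine:5000", "type": "neat"}
r = requests.post(KATANA_URL, json=payload)
print(r.status_code, r.text)  # expect 201 and the new uuid on success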
from django.urls import path
from .views import delete_obat, index, ajson, add, add_forms, edit_obat, add_from_flutter

urlpatterns = [
    path('', index, name='index'),
    path('add/', add, name='add'),
    path('forms/', add_forms, name='add_forms'),
    path('delete', delete_obat, name='delete_obat'),
    path('edit_obat', edit_obat, name='edit_obat'),
    path('json/', ajson, name='json'),
    path('add-from-flutter', add_from_flutter, name='add-from-flutter'),
]
[ "django.urls.path" ]
[((141, 170), 'django.urls.path', 'path', (['""""""', 'index'], {'name': '"""index"""'}), "('', index, name='index')\n", (145, 170), False, 'from django.urls import path\n'), ((176, 205), 'django.urls.path', 'path', (['"""add/"""', 'add'], {'name': '"""add"""'}), "('add/', add, name='add')\n", (180, 205), False, 'from django.urls import path\n'), ((211, 254), 'django.urls.path', 'path', (['"""forms/"""', 'add_forms'], {'name': '"""add_forms"""'}), "('forms/', add_forms, name='add_forms')\n", (215, 254), False, 'from django.urls import path\n'), ((260, 307), 'django.urls.path', 'path', (['"""delete"""', 'delete_obat'], {'name': '"""delete_obat"""'}), "('delete', delete_obat, name='delete_obat')\n", (264, 307), False, 'from django.urls import path\n'), ((313, 359), 'django.urls.path', 'path', (['"""edit_obat"""', 'edit_obat'], {'name': '"""edit_obat"""'}), "('edit_obat', edit_obat, name='edit_obat')\n", (317, 359), False, 'from django.urls import path\n'), ((365, 398), 'django.urls.path', 'path', (['"""json/"""', 'ajson'], {'name': '"""json"""'}), "('json/', ajson, name='json')\n", (369, 398), False, 'from django.urls import path\n'), ((406, 473), 'django.urls.path', 'path', (['"""add-from-flutter"""', 'add_from_flutter'], {'name': '"""add-from-flutter"""'}), "('add-from-flutter', add_from_flutter, name='add-from-flutter')\n", (410, 473), False, 'from django.urls import path\n')]
from PIL.ImageGrab import grab
from tkinter.filedialog import asksaveasfilename
import speech_recognition as speech

typesallowed = [("PNG Image", "*.png"), ("JPEG Image", "*.jpeg"), ("GIF Image", "*.gif")]

words = [
    "screenshot",
    "take picture",
    "take screenshot",
    "picture",
    "print screen"
]

while True:
    recogniser = speech.Recognizer()
    with speech.Microphone() as src:
        print("Please input:")
        audio = recogniser.listen(src)
    try:
        said = recogniser.recognize_google(audio)
        if said in words:
            print("Screenshotting...")
            content = grab()
            filename = asksaveasfilename(filetypes=typesallowed, defaultextension=typesallowed,
                                           initialdir="Downloads", title="Save screenshot")
            if filename:
                try:
                    content.save(filename)
                    print("Saved as: {0}".format(filename))
                except ValueError:
                    print("Unable to save with that file extension.")
            else:
                print("Chosen not to save file.")
        elif said == "exit":
            print("Exiting...")
            break
        else:
            print("You said '{0}' which I don't understand".format(said))
    except speech.UnknownValueError:
        print("Couldn't understand what you said.")
    except speech.RequestError as e:
        print("Couldn't request results from Google Speech Recognition service; {0}".format(e))
[ "speech_recognition.Recognizer", "PIL.ImageGrab.grab", "speech_recognition.Microphone", "tkinter.filedialog.asksaveasfilename" ]
[((348, 367), 'speech_recognition.Recognizer', 'speech.Recognizer', ([], {}), '()\n', (365, 367), True, 'import speech_recognition as speech\n'), ((378, 397), 'speech_recognition.Microphone', 'speech.Microphone', ([], {}), '()\n', (395, 397), True, 'import speech_recognition as speech\n'), ((626, 632), 'PIL.ImageGrab.grab', 'grab', ([], {}), '()\n', (630, 632), False, 'from PIL.ImageGrab import grab\n'), ((656, 781), 'tkinter.filedialog.asksaveasfilename', 'asksaveasfilename', ([], {'filetypes': 'typesallowed', 'defaultextension': 'typesallowed', 'initialdir': '"""Downloads"""', 'title': '"""Save screenshot"""'}), "(filetypes=typesallowed, defaultextension=typesallowed,\n initialdir='Downloads', title='Save screenshot')\n", (673, 781), False, 'from tkinter.filedialog import asksaveasfilename\n')]
""" Trim a tree at a given phylogenetic leaf e.g. phylum and write a new leaf list """ import os import sys import argparse from ete3 import Tree global tag def load_jplacer(jpf): """ lo ad the jplacer file and return the tree :param jpf: The jplacer file :return: the data structure of the tree """ with open(jpf, 'r') as f: data = json.load(f) tree = Tree(data['tree'], quoted_node_names=True, format=1) return tree def is_leaf_node(node): global tag if tag in node.name: return True else: return False def write_leaves(tree, outputf): """ Write a list of all the leaves, one line per leaf. :param tree: the tree :param outputf: the file to write :return: """ with open(outputf, 'w') as out: for n in tree.get_leaves(): out.write("{}\n".format(n.name)) def write_tree(tree, outputf): """ Write the tree to a file. :param tree: The tree to write :param outputf: The output filename :return: """ tree.write(outfile=outputf, format=1) if __name__ == '__main__': parser = argparse.ArgumentParser(description="trim tree") parser.add_argument('-t', help='tree file to trim', required=True) parser.add_argument('-p', help='phylogenetic level to trim at', required=True) parser.add_argument('-o', help='output tree to write') parser.add_argument('-l', help='list of leaves to write') parser.add_argument('-j', help='tree is a jplacer file. Default is to assume tree will be a newick file', action='store_true', default=False) parser.add_argument('-v', help='verbose output', action="store_true") args = parser.parse_args() if args.j: tree = load_jplacer(args.t) else: tree = Tree(args.t, quoted_node_names=True, format=1) tag = "r_{}".format(args.p) trimmed = Tree( tree.write(is_leaf_fn=is_leaf_node) ) if args.o: write_tree(trimmed, args.o) if args.l: write_leaves(trimmed, args.l)
[ "ete3.Tree", "argparse.ArgumentParser" ]
[((396, 448), 'ete3.Tree', 'Tree', (["data['tree']"], {'quoted_node_names': '(True)', 'format': '(1)'}), "(data['tree'], quoted_node_names=True, format=1)\n", (400, 448), False, 'from ete3 import Tree\n'), ((1134, 1182), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""trim tree"""'}), "(description='trim tree')\n", (1157, 1182), False, 'import argparse\n'), ((1786, 1832), 'ete3.Tree', 'Tree', (['args.t'], {'quoted_node_names': '(True)', 'format': '(1)'}), '(args.t, quoted_node_names=True, format=1)\n', (1790, 1832), False, 'from ete3 import Tree\n')]
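The trimming in the script above hinges on ete3's is_leaf_fn callback: Tree.write() stops descending wherever the callback returns True, so re-parsing the emitted newick yields a tree cut at the tagged rank. A minimal self-contained sketch of that mechanism (the toy newick string and the r_phylum tag are illustrative assumptions, not data from the script):

from ete3 import Tree

toy = Tree("((a,b)r_phylum_1,(c,d)r_phylum_2)root;", format=1)

def stop_at_phylum(node):
    # treat any node whose name carries the rank tag as if it were a leaf
    return "r_phylum" in node.name

trimmed = Tree(toy.write(is_leaf_fn=stop_at_phylum, format=1), format=1)
print(trimmed.get_leaf_names())  # expected: ['r_phylum_1', 'r_phylum_2']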
# Generated by Django 2.2.4 on 2019-08-07 08:10

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('restaurant', '0002_remove_restaurnat_has_private_room'),
    ]

    operations = [
        migrations.CreateModel(
            name='Tags',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('short_description_text', models.CharField(max_length=150)),
            ],
        ),
        migrations.AddField(
            model_name='restaurnat',
            name='has_private_room',
            field=models.BooleanField(default=False),
        ),
    ]
[ "django.db.models.AutoField", "django.db.models.CharField", "django.db.models.BooleanField" ]
[((664, 698), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (683, 698), False, 'from django.db import migrations, models\n'), ((347, 440), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (363, 440), False, 'from django.db import migrations, models\n'), ((482, 514), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(150)'}), '(max_length=150)\n', (498, 514), False, 'from django.db import migrations, models\n')]
# Generated by Django 2.2 on 2019-05-18 14:35

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('calc', '0010_add_price_field_to_service_model'),
    ]

    operations = [
        migrations.AlterField(
            model_name='service',
            name='price',
            field=models.DecimalField(decimal_places=2, default=0, editable=False, max_digits=9, verbose_name='Total'),
        ),
    ]
[ "django.db.models.DecimalField" ]
[((345, 450), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'decimal_places': '(2)', 'default': '(0)', 'editable': '(False)', 'max_digits': '(9)', 'verbose_name': '"""Total"""'}), "(decimal_places=2, default=0, editable=False, max_digits\n =9, verbose_name='Total')\n", (364, 450), False, 'from django.db import migrations, models\n')]
from math import cos, sqrt, sin

'''
g: continuous function with a single fixed point
p0: initial approximation of the fixed point
TOL: desired tolerance
nMax: maximum number of iterations
'''


# The derivative g2 is supplied to compute K
def fixedPointD(g, p0, TOL, nMax, g2):
    print("i\t p0\t\t\t p\t\t\t |p-p0| \t K")
    for i in range(0, nMax):
        p = g(p0)
        print("{0:d} \t {1:.15f} \t {2:.15f} \t {3:.5e} \t {4:.5e}".format(i, p0, p, abs(p - p0), g2(p)))
        if abs(p - p0) < TOL:
            break
        p0 = p


# Computing K without the derivative
def fixedPoint(g, p0, TOL, nMax):
    print("i\t p0\t\t\t p\t\t\t |p-p0| \t K")
    error = 0.00001
    for i in range(0, nMax):
        p = g(p0)
        lastError = error
        error = abs(p - p0)
        print("{0:d} \t {1:.15f} \t {2:.15f} \t {3:.5e} \t {4:.15f}".format(i, p0, p, error, abs(error / lastError)))
        if abs(p - p0) < TOL:
            break
        p0 = p


# fixedPointD(lambda x: sqrt(x+1), 0, 10**-10, 1000, lambda x: (1/2)*(x+1)**(-1/2))
fixedPoint(lambda x: 1 + pow(sin(x), 2), 1.5, 10**-9, 1000)
[ "math.sin" ]
[((1006, 1012), 'math.sin', 'sin', (['x'], {}), '(x)\n', (1009, 1012), False, 'from math import cos, sqrt, sin\n')]
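In the fixedPoint function above, abs(error/lastError) estimates the contraction constant K: for a linearly convergent fixed-point iteration the errors satisfy e_{n+1} ≈ g'(p)·e_n, so the ratio tends to |g'(p)|. A tiny illustrative check with a different, well-known iteration function (g(x) = cos(x) is an assumption for the example, not the g used in the script):

from math import cos, sin

p = 0.7390851332151607   # fixed point of cos(x), where cos(p) = p
print(abs(-sin(p)))      # ≈ 0.6736, the value the ratio-of-errors column should approach near p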
import os import sys #sys.path.append('/Users/carlomazzaferro/Documents/Code/variantannotation-master') from variantannotation import annotate_batch from variantannotation import myvariant_parsing_utils from variantannotation import mongo_DB_export from variantannotation import create_output_files from variantannotation import utilities from variantannotation import MongoDB_querying #set paths collection_name = 'Test_Normal_Targeted' db_name = 'My_Variant_Database' #set paths filepath = "/Volumes/Seagate Backup Plus Drive/vcf_files/" csv_file = "normal_targeted_seq.hg19_multianno.csv" vcf_file = "normal_targeted_seq.vcf" os.chdir(filepath) #ANNOVAR_PATH = '/database/annovar/' #IN_PATH = '/data/Nof1/file.vcf' #OUT_PATH = '/data/ccbb_internal/interns/Carlo/annovar_results' #1. Get csv file: run annovar #utilities.run_annovar(ANNOVAR_PATH, IN_PATH, OUT_PATH) #METHOD 1: by chunks, iteratively. chunksize = 10000 step = 0 collection_name = 'ANNOVAR_MyVariant_chunks' db_name = 'My_Variant_Database' #Get variant list. Should always be the first step after running ANNOVAR open_file = myvariant_parsing_utils.VariantParsing() list_file = open_file.get_variants_from_vcf(vcf_file) #Run process, export to MongoDB in-built as_batch = annotate_batch.AnnotationMethods() as_batch.by_chunks(list_file, chunksize, step, csv_file, collection_name, db_name) #Apply filter(s). filter_collection = MongoDB_querying.Filters(db_name, collection_name) rare_cancer_variants = filter_collection.rare_cancer_variant() rare_disease_variants = filter_collection.rare_disease_variant() cadd_phred_high_impact_variants = filter_collection.rare_high_impact_variants() #Create 4 output files: annotated vcf, annotated csv, filtered vcf, filtered csv #Annotated vcf and csv, unfiltered. Will contain all info coming from annovar and myvariant out_unfiltered_vcf_file = filepath + "/normal_targ_unfilterd_vcf_annotated.vcf" out_unfiltered_csv_file = filepath + "/normal_targ_unfiltered_csv_annotated.csv" rare_cancer_variants_csv = filepath + "/normal_targ_rare_cancer_vars.csv" rare_cancer_variants_vcf = filepath + "/normal_targ_rare_cancer_vars.vcf" rare_disease_variants_csv = filepath + "/normal_targ_rare_disease_vars.csv" rare_diseasw_variants_vcf = filepath + "/normal_targ_rare_disease_vars.vcf" cadd_phred_high_impact_variants_csv = filepath + "/normal_targ_cadd_phred_high_impact_variants.csv" cadd_phred_high_impact_variants_vcf = filepath + "/normal_targ_cadd_phred_high_impact_variants.vcf" in_vcf_file = filepath + "/normal_targeted_seq.vcf.gz" #Create writer object my_writer_1 = create_output_files.FileWriter(db_name, collection_name) #Write collection to csv and vcf my_writer_1.generate_unfiltered_annotated_csv(out_unfiltered_csv_file) my_writer_1.generate_unfiltered_annotated_vcf(in_vcf_file, out_unfiltered_vcf_file) #Crete writer object for filtered lists: my_writer_2 = create_output_files.FileWriter(db_name, collection_name) #cancer variants filtered files my_writer_2.generate_annotated_csv(rare_cancer_variants, rare_cancer_variants_csv) my_writer_2.generate_annotated_vcf(rare_cancer_variants, in_vcf_file, rare_cancer_variants_vcf) #disease variants filtered files my_writer_2.generate_annotated_csv(rare_disease_variants, rare_disease_variants) my_writer_2.generate_annotated_vcf(rare_disease_variants, rare_disease_variants) #high impact cadd_phredd filtered files my_writer_2.generate_annotated_csv(cadd_phred_high_impact_variants, cadd_phred_high_impact_variants_csv) my_writer_2.generate_annotated_vcf(cadd_phred_high_impact_variants, 
cadd_phred_high_impact_variants_vcf) #---------------#--------------#---------------#--------------#---------------#--------------#---------------# #METHOD 2: usign full file, and holding it in memory (OK for smaller files) ##TEST THIS## #get variant list. Should always be the first step after running ANNOVAR open_file = myvariant_parsing_utils.VariantParsing() list_file = open_file.get_variants_from_vcf(vcf_file) #Run process, data saved to joint_list as_one_file = annotate_batch.AnnotationMethods() joint_list = as_one_file.full_file(list_file, csv_file) #Name Collection & DB collection_name = 'ANNOVAR_MyVariant_full' db_name = 'My_Variant_Database' #Export exporting_function = mongo_DB_export.export exporting_function(joint_list, collection_name, db_name) #Generate output files out_vcf_file = filepath + "/Tumor_RNAseq_rare_variants_ANNOTATED_FULL.vcf" out_csv_file = filepath + "/Tumor_RNAseq_rare_variants_ANNOTATED_FULL.csv" in_vcf_file = filepath + "/Tumor_RNAseq_rare_variants_VCF.vcf" create_output_files.generate_annotated_vcf(joint_list, in_vcf_file, out_vcf_file) create_output_files.generate_annotated_csv(joint_list, out_csv_file) #Filtering #---------------#--------------#---------------#--------------#---------------#--------------#---------------# #METHOD 3: ignore annovar, get data solely from myvariant (much faster, requires nothing but a VCF file. #will however be incomplete (some variants will have no information). #Get variant list form vcf file open_file = myvariant_parsing_utils.VariantParsing() list_file = open_file.get_variants_from_vcf(vcf_file) #Run process my_variants = annotate_batch.AnnotationMethods() myvariant_data = my_variants.my_variant_at_once(list_file) #Name Collection & DB collection_name = 'My_Variant_Info_Collection_Full' db_name = 'My_Variant_Database' #Export exporting_function = mongo_DB_export.export exporting_function(myvariant_data, collection_name, db_name) #---------------#--------------#---------------#--------------#---------------#--------------#---------------# #METHOD 4: ignore annovar, Get data solely from myvariant (much faster, requires nothing but a VCF file. #will however be incomplete (some variants will have no information). #Do so BY CHUNKS. 
Export function is built in the methods myvariant_chunks import myvariant filepath = "/Users/carlomazzaferro/Desktop/" vcf_file = "ShortSample.vcf" os.chdir(filepath) chunksize = 1000 step = 0 #Get variant list from vcf file open_file = myvariant_parsing_utils.VariantParsing() list_ids = list(myvariant.get_hgvs_from_vcf(vcf_file)) list_file = open_file.get_variants_from_vcf(vcf_file) #Name Collection & DB collection_name = 'My_Variant_Info_Collection_Chunks' db_name = 'My_Variant_Database' #Run process, export to MongoDB in-built my_variants = annotate_batch.AnnotationMethods() myvariant_data = my_variants.myvariant_chunks(list_file, chunksize, step, collection_name, db_name) out_vcf_file = filepath + "/Tumor_RNAseq_rare_variants_ANNOTATED_MYV_FULL.vcf" out_csv_file = filepath + "/Tumor_RNAseq_rare_variants_ANNOTATED_MyV_FULL.csv" in_vcf_file = filepath + "/Tumor_RNAseq_rare_variants_VCF.vcf" create_output_files.generate_annotated_vcf(myvariant_data, in_vcf_file, out_vcf_file) create_output_files.generate_annotated_csv(myvariant_data, out_csv_file) ########DEBUG######### import os collection_name = 'Test_Normal_Targeted' db_name = 'My_Variant_Database' #set paths filepath = "/Volumes/Seagate Backup Plus Drive/vcf_files" csv_file = "normal_targeted_seq.hg19_multianno.csv" vcf_file = "normal_targeted_seq.vcf" os.chdir(filepath) from variantannotation import myvariant_parsing_utils from variantannotation import csv_to_df from variantannotation import annovar_processing from variantannotation import utilities open_file = myvariant_parsing_utils.VariantParsing() list_file = open_file.get_variants_from_vcf(vcf_file) df = csv_to_df.parse_to_df(csv_to_df.open_and_parse(csv_file)) list1 = annovar_processing.get_list_from_annovar_csv(df, list_file[0:5000]) open_file = myvariant_parsing_utils.VariantParsing() from_myvariant = open_file.get_dict_myvariant(list_file[0:5000]) utilities.final_joint(list1, from_myvariant) joined_list = list1 from pymongo import MongoClient client = MongoClient() db = client.My_Variant_Database collection = db.Test_Normal_Targeted all_my_data = list(collection.find({})) chr_vars = [] location_vars_ant = [] location_vars_pos = [] for i in range(0, len(all_my_data)): if all_my_data[i]['Chr'] == 'chrMT': chr_vars.append('chrM') else: chr_vars.append(all_my_data[i]['Chr'].encode('ascii','ignore')) location_vars_ant.append(all_my_data[i]['Start'] + 1) location_vars_pos.append(all_my_data[i]['Start'] - 1) import vcf in_vcf_file = filepath + "/somatic_mutect_old.vcf" vcf_output_path = "/Users/carlomazzaferro/Desktop/test.vcf" vcf_reader = vcf.Reader(filename=in_vcf_file) vcf_writer = vcf.Writer(open(vcf_output_path, 'w'), vcf_reader) for i in range(0, len(chr_vars)): for record in vcf_reader.fetch(chr_vars[i], location_vars_pos[i], location_vars_ant[i]): record.INFO.update(joined_list[i]) vcf_writer.write_record(record)
[ "variantannotation.utilities.final_joint", "variantannotation.myvariant_parsing_utils.VariantParsing", "variantannotation.csv_to_df.open_and_parse", "myvariant.get_hgvs_from_vcf", "variantannotation.create_output_files.generate_annotated_vcf", "variantannotation.create_output_files.generate_annotated_csv", "variantannotation.create_output_files.FileWriter", "variantannotation.annotate_batch.AnnotationMethods", "vcf.Reader", "os.chdir", "pymongo.MongoClient", "variantannotation.MongoDB_querying.Filters", "variantannotation.annovar_processing.get_list_from_annovar_csv" ]
[((633, 651), 'os.chdir', 'os.chdir', (['filepath'], {}), '(filepath)\n', (641, 651), False, 'import os\n'), ((1103, 1143), 'variantannotation.myvariant_parsing_utils.VariantParsing', 'myvariant_parsing_utils.VariantParsing', ([], {}), '()\n', (1141, 1143), False, 'from variantannotation import myvariant_parsing_utils\n'), ((1252, 1286), 'variantannotation.annotate_batch.AnnotationMethods', 'annotate_batch.AnnotationMethods', ([], {}), '()\n', (1284, 1286), False, 'from variantannotation import annotate_batch\n'), ((1409, 1459), 'variantannotation.MongoDB_querying.Filters', 'MongoDB_querying.Filters', (['db_name', 'collection_name'], {}), '(db_name, collection_name)\n', (1433, 1459), False, 'from variantannotation import MongoDB_querying\n'), ((2601, 2657), 'variantannotation.create_output_files.FileWriter', 'create_output_files.FileWriter', (['db_name', 'collection_name'], {}), '(db_name, collection_name)\n', (2631, 2657), False, 'from variantannotation import create_output_files\n'), ((2903, 2959), 'variantannotation.create_output_files.FileWriter', 'create_output_files.FileWriter', (['db_name', 'collection_name'], {}), '(db_name, collection_name)\n', (2933, 2959), False, 'from variantannotation import create_output_files\n'), ((3912, 3952), 'variantannotation.myvariant_parsing_utils.VariantParsing', 'myvariant_parsing_utils.VariantParsing', ([], {}), '()\n', (3950, 3952), False, 'from variantannotation import myvariant_parsing_utils\n'), ((4061, 4095), 'variantannotation.annotate_batch.AnnotationMethods', 'annotate_batch.AnnotationMethods', ([], {}), '()\n', (4093, 4095), False, 'from variantannotation import annotate_batch\n'), ((4597, 4682), 'variantannotation.create_output_files.generate_annotated_vcf', 'create_output_files.generate_annotated_vcf', (['joint_list', 'in_vcf_file', 'out_vcf_file'], {}), '(joint_list, in_vcf_file,\n out_vcf_file)\n', (4639, 4682), False, 'from variantannotation import create_output_files\n'), ((4679, 4747), 'variantannotation.create_output_files.generate_annotated_csv', 'create_output_files.generate_annotated_csv', (['joint_list', 'out_csv_file'], {}), '(joint_list, out_csv_file)\n', (4721, 4747), False, 'from variantannotation import create_output_files\n'), ((5093, 5133), 'variantannotation.myvariant_parsing_utils.VariantParsing', 'myvariant_parsing_utils.VariantParsing', ([], {}), '()\n', (5131, 5133), False, 'from variantannotation import myvariant_parsing_utils\n'), ((5216, 5250), 'variantannotation.annotate_batch.AnnotationMethods', 'annotate_batch.AnnotationMethods', ([], {}), '()\n', (5248, 5250), False, 'from variantannotation import annotate_batch\n'), ((5985, 6003), 'os.chdir', 'os.chdir', (['filepath'], {}), '(filepath)\n', (5993, 6003), False, 'import os\n'), ((6076, 6116), 'variantannotation.myvariant_parsing_utils.VariantParsing', 'myvariant_parsing_utils.VariantParsing', ([], {}), '()\n', (6114, 6116), False, 'from variantannotation import myvariant_parsing_utils\n'), ((6392, 6426), 'variantannotation.annotate_batch.AnnotationMethods', 'annotate_batch.AnnotationMethods', ([], {}), '()\n', (6424, 6426), False, 'from variantannotation import annotate_batch\n'), ((6750, 6839), 'variantannotation.create_output_files.generate_annotated_vcf', 'create_output_files.generate_annotated_vcf', (['myvariant_data', 'in_vcf_file', 'out_vcf_file'], {}), '(myvariant_data, in_vcf_file,\n out_vcf_file)\n', (6792, 6839), False, 'from variantannotation import create_output_files\n'), ((6836, 6908), 
'variantannotation.create_output_files.generate_annotated_csv', 'create_output_files.generate_annotated_csv', (['myvariant_data', 'out_csv_file'], {}), '(myvariant_data, out_csv_file)\n', (6878, 6908), False, 'from variantannotation import create_output_files\n'), ((7180, 7198), 'os.chdir', 'os.chdir', (['filepath'], {}), '(filepath)\n', (7188, 7198), False, 'import os\n'), ((7397, 7437), 'variantannotation.myvariant_parsing_utils.VariantParsing', 'myvariant_parsing_utils.VariantParsing', ([], {}), '()\n', (7435, 7437), False, 'from variantannotation import myvariant_parsing_utils\n'), ((7565, 7632), 'variantannotation.annovar_processing.get_list_from_annovar_csv', 'annovar_processing.get_list_from_annovar_csv', (['df', 'list_file[0:5000]'], {}), '(df, list_file[0:5000])\n', (7609, 7632), False, 'from variantannotation import annovar_processing\n'), ((7645, 7685), 'variantannotation.myvariant_parsing_utils.VariantParsing', 'myvariant_parsing_utils.VariantParsing', ([], {}), '()\n', (7683, 7685), False, 'from variantannotation import myvariant_parsing_utils\n'), ((7751, 7795), 'variantannotation.utilities.final_joint', 'utilities.final_joint', (['list1', 'from_myvariant'], {}), '(list1, from_myvariant)\n', (7772, 7795), False, 'from variantannotation import utilities\n'), ((7858, 7871), 'pymongo.MongoClient', 'MongoClient', ([], {}), '()\n', (7869, 7871), False, 'from pymongo import MongoClient\n'), ((8490, 8522), 'vcf.Reader', 'vcf.Reader', ([], {'filename': 'in_vcf_file'}), '(filename=in_vcf_file)\n', (8500, 8522), False, 'import vcf\n'), ((6133, 6170), 'myvariant.get_hgvs_from_vcf', 'myvariant.get_hgvs_from_vcf', (['vcf_file'], {}), '(vcf_file)\n', (6160, 6170), False, 'import myvariant\n'), ((7521, 7555), 'variantannotation.csv_to_df.open_and_parse', 'csv_to_df.open_and_parse', (['csv_file'], {}), '(csv_file)\n', (7545, 7555), False, 'from variantannotation import csv_to_df\n')]
# # GroupManager.py # # (c) 2020 by <NAME> # License: BSD 3-Clause License. See the LICENSE file for further details. # # Managing entity for resource groups # from Logging import Logging from typing import Union, List from Constants import Constants as C from Types import ResourceTypes as T, Result, ConsistencyStrategy, Permission, Operation, ResponseCode as RC, CSERequest, JSON import CSE, Utils from resources import FCNT, MgmtObj from resources.Resource import Resource from resources.GRP_FOPT import GRP_FOPT import resources.Factory as Factory class GroupManager(object): def __init__(self) -> None: # Add delete event handler because we like to monitor the resources in mid CSE.event.addHandler(CSE.event.deleteResource, self.handleDeleteEvent) # type: ignore Logging.log('GroupManager initialized') def shutdown(self) -> bool: Logging.log('GroupManager shut down') return True ######################################################################### def validateGroup(self, group:Resource, originator:str) -> Result: # Get consistencyStrategy csy = group.csy # Check member types and group set type # Recursive for sub groups, if .../fopt. Check privileges of originator if not (res := self._checkMembersAndPrivileges(group, group.mt, group.csy, group.spty, originator)).status: return res # Check for max members if group.hasAttribute('mnm'): # only if mnm attribute is set try: # mnm may not be a number if len(group.mid) > int(group.mnm): return Result(status=False, rsc=RC.maxNumberOfMemberExceeded, dbg='max number of members exceeded') except ValueError: return Result(status=False, rsc=RC.invalidArguments, dbg='invalid arguments') group.dbUpdate() # TODO: check virtual resources return Result(status=True) def _checkMembersAndPrivileges(self, group:Resource, mt:int, csy:int, spty:Union[int, str], originator:str) -> Result: # check for duplicates and remove them midsList = [] # contains the real mi remoteResource:JSON = None rsc = 0 for mid in group['mid']: isLocalResource = True; #Check whether it is a local resource or not if Utils.isSPRelative(mid): targetCSE = f'/{mid.split("/")[0]}' if targetCSE != CSE.cseCsi: """ RETRIEVE member from a remote CSE """ isLocalResource = False if (url := CSE.request._getForwardURL(mid)) is None: return Result(status=False, rsc=RC.notFound, dbg=f'forwarding URL not found for group member: {mid}') Logging.log(f'Retrieve request to: {url}') remoteResult = CSE.request.sendRetrieveRequest(url, CSE.cseCsi) # get the resource and check it hasFopt = False if isLocalResource: hasFopt = mid.endswith('/fopt') id = mid[:-5] if len(mid) > 5 and hasFopt else mid # remove /fopt to retrieve the resource if (res := CSE.dispatcher.retrieveResource(id)).resource is None: return Result(status=False, rsc=RC.notFound, dbg=res.dbg) resource = res.resource else: if remoteResult.dict is None or len(remoteResult.dict) == 0: if remoteResult.rsc == RC.originatorHasNoPrivilege: # CSE has no privileges for retrieving the member return Result(status=False, rsc=RC.receiverHasNoPrivileges, dbg='wrong privileges for CSE to retrieve remote resource') else: # Member not found return Result(status=False, rsc=RC.notFound, dbg=f'remote resource not found: {mid}') else: resource = Factory.resourceFromDict(remoteResult.dict).resource # skip if ri is already in th if isLocalResource: if (ri := resource.ri) in midsList: continue else: if mid in midsList: continue # check privileges if isLocalResource: if not CSE.security.hasAccess(originator, resource, Permission.RETRIEVE): return Result(status=False, 
rsc=RC.receiverHasNoPrivileges, dbg=f'wrong privileges for originator to retrieve local resource: {mid}') # if it is a group + fopt, then recursively check members if (ty := resource.ty) == T.GRP and hasFopt: if isLocalResource: if not (res := self._checkMembersAndPrivileges(resource, mt, csy, spty, originator)).status: return res ty = resource.mt # set the member type to the group's member type # check specializationType spty if spty is not None: if isinstance(spty, int): # mgmtobj type if isinstance(resource, MgmtObj.MgmtObj) and ty != spty: return Result(status=False, rsc=RC.groupMemberTypeInconsistent, dbg=f'resource and group member types mismatch: {ty:d} != {spty:d} for: {mid}') elif isinstance(spty, str): # fcnt specialization if isinstance(resource, FCNT.FCNT) and resource.cnd != spty: return Result(status=False, rsc=RC.groupMemberTypeInconsistent, dbg=f'resource and group member specialization types mismatch: {resource.cnd} != {spty} for: {mid}') # check type of resource and member type of group if not (mt == T.MIXED or ty == mt): # types don't match if csy == ConsistencyStrategy.abandonMember: # abandon member continue elif csy == ConsistencyStrategy.setMixed: # change group's member type mt = T.MIXED group['mt'] = T.MIXED else: # abandon group return Result(status=False, rsc=RC.groupMemberTypeInconsistent, dbg='group consistency strategy and type "mixed" mismatch') # member seems to be ok, so add ri to th if isLocalResource: midsList.append(ri if not hasFopt else ri + '/fopt') # restore fopt for ri else: midsList.append(mid) # remote resource appended with original memberID # ^^^ for end group['mid'] = midsList # replace with a cleaned up mid group['cnm'] = len(midsList) group['mtv'] = True return Result(status=True) def foptRequest(self, operation:Operation, fopt:GRP_FOPT, request:CSERequest, id:str, originator:str) -> Result: """ Handle requests to a fanOutPoint. This method might be called recursivly, when there are groups in groups.""" # get parent / group and check permissions group = fopt.retrieveParentResource() if group is None: return Result(rsc=RC.notFound, dbg='group resource not found') # get the permission flags for the request operation permission = operation.permission() #check access rights for the originator through memberAccessControlPolicies if CSE.security.hasAccess(originator, group, requestedPermission=permission, ty=request.headers.resourceType, isCreateRequest=True if operation == Operation.CREATE else False) == False: return Result(rsc=RC.originatorHasNoPrivilege, dbg='access denied') # check whether there is something after the /fopt ... 
_, _, tail = id.partition('/fopt/') if '/fopt/' in id else (None, None, '') Logging.logDebug(f'Adding additional path elements: {tail}') # walk through all members resultList:list[Result] = [] tail = '/' + tail if len(tail) > 0 else '' # add remaining path, if any for mid in group.mid.copy(): # copy mi because it is changed in the loop # Try to get the SRN and add the tail if (srn := Utils.structuredPathFromRI(mid)) is not None: mid = srn + tail else: mid = mid + tail # Invoke the request if operation == Operation.RETRIEVE: if (res := CSE.dispatcher.processRetrieveRequest(request, originator, mid)).resource is None: return res elif operation == Operation.CREATE: if (res := CSE.dispatcher.processCreateRequest(request, originator, mid)).resource is None: return res elif operation == Operation.UPDATE: if (res := CSE.dispatcher.processUpdateRequest(request, originator, mid)).resource is None: return res elif operation == Operation.DELETE: if (res := CSE.dispatcher.processDeleteRequest(request, originator, mid)).rsc != RC.deleted: return res else: return Result(rsc=RC.operationNotAllowed, dbg='operation not allowed') resultList.append(res) # construct aggregated response if len(resultList) > 0: items = [] for result in resultList: if result.resource is not None and isinstance(result.resource, Resource): item = { 'rsc' : result.rsc, 'rqi' : request.headers.requestIdentifier, 'pc' : result.resource.asDict() if isinstance(result.resource, Resource) else result.resource, # in case 'resource' is a dict 'to' : result.resource[Resource._srn], 'rvi' : '3' # TODO constant? from conifguration } else: # e.g. when deleting item = { 'rsc' : result.rsc, 'rqi' : request.headers.requestIdentifier, 'rvi' : '3' # TODO constant? from configuration } items.append(item) rsp = { 'm2m:rsp' : items} agr = { 'm2m:agr' : rsp } else: agr = {} return Result(resource=agr) # Response Status Code is OK regardless of the requested fanout operation ######################################################################### def handleDeleteEvent(self, deletedResource:Resource) -> None: """Handle a delete event. Check whether the deleted resource is a member of group. If yes, remove the member. This method is called by the event manager. """ ri = deletedResource.ri groups = CSE.storage.searchByTypeFieldValue(T.GRP, 'mid', ri) for group in groups: group['mid'].remove(ri) group['cnm'] = group.cnm - 1 group.dbUpdate()
[ "Utils.structuredPathFromRI", "CSE.storage.searchByTypeFieldValue", "CSE.request.sendRetrieveRequest", "CSE.dispatcher.processRetrieveRequest", "CSE.security.hasAccess", "CSE.dispatcher.retrieveResource", "CSE.dispatcher.processDeleteRequest", "Utils.isSPRelative", "CSE.dispatcher.processCreateRequest", "Types.Result", "Logging.Logging.log", "CSE.event.addHandler", "resources.Factory.resourceFromDict", "Logging.Logging.logDebug", "CSE.request._getForwardURL", "CSE.dispatcher.processUpdateRequest" ]
[((693, 763), 'CSE.event.addHandler', 'CSE.event.addHandler', (['CSE.event.deleteResource', 'self.handleDeleteEvent'], {}), '(CSE.event.deleteResource, self.handleDeleteEvent)\n', (713, 763), False, 'import CSE, Utils\n'), ((783, 822), 'Logging.Logging.log', 'Logging.log', (['"""GroupManager initialized"""'], {}), "('GroupManager initialized')\n", (794, 822), False, 'from Logging import Logging\n'), ((856, 893), 'Logging.Logging.log', 'Logging.log', (['"""GroupManager shut down"""'], {}), "('GroupManager shut down')\n", (867, 893), False, 'from Logging import Logging\n'), ((1786, 1805), 'Types.Result', 'Result', ([], {'status': '(True)'}), '(status=True)\n', (1792, 1805), False, 'from Types import ResourceTypes as T, Result, ConsistencyStrategy, Permission, Operation, ResponseCode as RC, CSERequest, JSON\n'), ((5710, 5729), 'Types.Result', 'Result', ([], {'status': '(True)'}), '(status=True)\n', (5716, 5729), False, 'from Types import ResourceTypes as T, Result, ConsistencyStrategy, Permission, Operation, ResponseCode as RC, CSERequest, JSON\n'), ((6709, 6769), 'Logging.Logging.logDebug', 'Logging.logDebug', (['f"""Adding additional path elements: {tail}"""'], {}), "(f'Adding additional path elements: {tail}')\n", (6725, 6769), False, 'from Logging import Logging\n'), ((8688, 8708), 'Types.Result', 'Result', ([], {'resource': 'agr'}), '(resource=agr)\n', (8694, 8708), False, 'from Types import ResourceTypes as T, Result, ConsistencyStrategy, Permission, Operation, ResponseCode as RC, CSERequest, JSON\n'), ((9128, 9180), 'CSE.storage.searchByTypeFieldValue', 'CSE.storage.searchByTypeFieldValue', (['T.GRP', '"""mid"""', 'ri'], {}), "(T.GRP, 'mid', ri)\n", (9162, 9180), False, 'import CSE, Utils\n'), ((2165, 2188), 'Utils.isSPRelative', 'Utils.isSPRelative', (['mid'], {}), '(mid)\n', (2183, 2188), False, 'import CSE, Utils\n'), ((6083, 6138), 'Types.Result', 'Result', ([], {'rsc': 'RC.notFound', 'dbg': '"""group resource not found"""'}), "(rsc=RC.notFound, dbg='group resource not found')\n", (6089, 6138), False, 'from Types import ResourceTypes as T, Result, ConsistencyStrategy, Permission, Operation, ResponseCode as RC, CSERequest, JSON\n'), ((6317, 6497), 'CSE.security.hasAccess', 'CSE.security.hasAccess', (['originator', 'group'], {'requestedPermission': 'permission', 'ty': 'request.headers.resourceType', 'isCreateRequest': '(True if operation == Operation.CREATE else False)'}), '(originator, group, requestedPermission=permission,\n ty=request.headers.resourceType, isCreateRequest=True if operation ==\n Operation.CREATE else False)\n', (6339, 6497), False, 'import CSE, Utils\n'), ((6510, 6570), 'Types.Result', 'Result', ([], {'rsc': 'RC.originatorHasNoPrivilege', 'dbg': '"""access denied"""'}), "(rsc=RC.originatorHasNoPrivilege, dbg='access denied')\n", (6516, 6570), False, 'from Types import ResourceTypes as T, Result, ConsistencyStrategy, Permission, Operation, ResponseCode as RC, CSERequest, JSON\n'), ((1526, 1623), 'Types.Result', 'Result', ([], {'status': '(False)', 'rsc': 'RC.maxNumberOfMemberExceeded', 'dbg': '"""max number of members exceeded"""'}), "(status=False, rsc=RC.maxNumberOfMemberExceeded, dbg=\n 'max number of members exceeded')\n", (1532, 1623), False, 'from Types import ResourceTypes as T, Result, ConsistencyStrategy, Permission, Operation, ResponseCode as RC, CSERequest, JSON\n'), ((1652, 1722), 'Types.Result', 'Result', ([], {'status': '(False)', 'rsc': 'RC.invalidArguments', 'dbg': '"""invalid arguments"""'}), "(status=False, rsc=RC.invalidArguments, dbg='invalid 
arguments')\n", (1658, 1722), False, 'from Types import ResourceTypes as T, Result, ConsistencyStrategy, Permission, Operation, ResponseCode as RC, CSERequest, JSON\n'), ((2509, 2551), 'Logging.Logging.log', 'Logging.log', (['f"""Retrieve request to: {url}"""'], {}), "(f'Retrieve request to: {url}')\n", (2520, 2551), False, 'from Logging import Logging\n'), ((2572, 2620), 'CSE.request.sendRetrieveRequest', 'CSE.request.sendRetrieveRequest', (['url', 'CSE.cseCsi'], {}), '(url, CSE.cseCsi)\n', (2603, 2620), False, 'import CSE, Utils\n'), ((2913, 2963), 'Types.Result', 'Result', ([], {'status': '(False)', 'rsc': 'RC.notFound', 'dbg': 'res.dbg'}), '(status=False, rsc=RC.notFound, dbg=res.dbg)\n', (2919, 2963), False, 'from Types import ResourceTypes as T, Result, ConsistencyStrategy, Permission, Operation, ResponseCode as RC, CSERequest, JSON\n'), ((3717, 3782), 'CSE.security.hasAccess', 'CSE.security.hasAccess', (['originator', 'resource', 'Permission.RETRIEVE'], {}), '(originator, resource, Permission.RETRIEVE)\n', (3739, 3782), False, 'import CSE, Utils\n'), ((3796, 3927), 'Types.Result', 'Result', ([], {'status': '(False)', 'rsc': 'RC.receiverHasNoPrivileges', 'dbg': 'f"""wrong privileges for originator to retrieve local resource: {mid}"""'}), "(status=False, rsc=RC.receiverHasNoPrivileges, dbg=\n f'wrong privileges for originator to retrieve local resource: {mid}')\n", (3802, 3927), False, 'from Types import ResourceTypes as T, Result, ConsistencyStrategy, Permission, Operation, ResponseCode as RC, CSERequest, JSON\n'), ((7036, 7067), 'Utils.structuredPathFromRI', 'Utils.structuredPathFromRI', (['mid'], {}), '(mid)\n', (7062, 7067), False, 'import CSE, Utils\n'), ((2409, 2508), 'Types.Result', 'Result', ([], {'status': '(False)', 'rsc': 'RC.notFound', 'dbg': 'f"""forwarding URL not found for group member: {mid}"""'}), "(status=False, rsc=RC.notFound, dbg=\n f'forwarding URL not found for group member: {mid}')\n", (2415, 2508), False, 'from Types import ResourceTypes as T, Result, ConsistencyStrategy, Permission, Operation, ResponseCode as RC, CSERequest, JSON\n'), ((3187, 3304), 'Types.Result', 'Result', ([], {'status': '(False)', 'rsc': 'RC.receiverHasNoPrivileges', 'dbg': '"""wrong privileges for CSE to retrieve remote resource"""'}), "(status=False, rsc=RC.receiverHasNoPrivileges, dbg=\n 'wrong privileges for CSE to retrieve remote resource')\n", (3193, 3304), False, 'from Types import ResourceTypes as T, Result, ConsistencyStrategy, Permission, Operation, ResponseCode as RC, CSERequest, JSON\n'), ((3344, 3422), 'Types.Result', 'Result', ([], {'status': '(False)', 'rsc': 'RC.notFound', 'dbg': 'f"""remote resource not found: {mid}"""'}), "(status=False, rsc=RC.notFound, dbg=f'remote resource not found: {mid}')\n", (3350, 3422), False, 'from Types import ResourceTypes as T, Result, ConsistencyStrategy, Permission, Operation, ResponseCode as RC, CSERequest, JSON\n'), ((3449, 3492), 'resources.Factory.resourceFromDict', 'Factory.resourceFromDict', (['remoteResult.dict'], {}), '(remoteResult.dict)\n', (3473, 3492), True, 'import resources.Factory as Factory\n'), ((4425, 4566), 'Types.Result', 'Result', ([], {'status': '(False)', 'rsc': 'RC.groupMemberTypeInconsistent', 'dbg': 'f"""resource and group member types mismatch: {ty:d} != {spty:d} for: {mid}"""'}), "(status=False, rsc=RC.groupMemberTypeInconsistent, dbg=\n f'resource and group member types mismatch: {ty:d} != {spty:d} for: {mid}')\n", (4431, 4566), False, 'from Types import ResourceTypes as T, Result, ConsistencyStrategy, 
Permission, Operation, ResponseCode as RC, CSERequest, JSON\n'), ((5217, 5338), 'Types.Result', 'Result', ([], {'status': '(False)', 'rsc': 'RC.groupMemberTypeInconsistent', 'dbg': '"""group consistency strategy and type "mixed" mismatch"""'}), '(status=False, rsc=RC.groupMemberTypeInconsistent, dbg=\n \'group consistency strategy and type "mixed" mismatch\')\n', (5223, 5338), False, 'from Types import ResourceTypes as T, Result, ConsistencyStrategy, Permission, Operation, ResponseCode as RC, CSERequest, JSON\n'), ((2354, 2385), 'CSE.request._getForwardURL', 'CSE.request._getForwardURL', (['mid'], {}), '(mid)\n', (2380, 2385), False, 'import CSE, Utils\n'), ((2846, 2881), 'CSE.dispatcher.retrieveResource', 'CSE.dispatcher.retrieveResource', (['id'], {}), '(id)\n', (2877, 2881), False, 'import CSE, Utils\n'), ((4698, 4865), 'Types.Result', 'Result', ([], {'status': '(False)', 'rsc': 'RC.groupMemberTypeInconsistent', 'dbg': 'f"""resource and group member specialization types mismatch: {resource.cnd} != {spty} for: {mid}"""'}), "(status=False, rsc=RC.groupMemberTypeInconsistent, dbg=\n f'resource and group member specialization types mismatch: {resource.cnd} != {spty} for: {mid}'\n )\n", (4704, 4865), False, 'from Types import ResourceTypes as T, Result, ConsistencyStrategy, Permission, Operation, ResponseCode as RC, CSERequest, JSON\n'), ((7211, 7274), 'CSE.dispatcher.processRetrieveRequest', 'CSE.dispatcher.processRetrieveRequest', (['request', 'originator', 'mid'], {}), '(request, originator, mid)\n', (7248, 7274), False, 'import CSE, Utils\n'), ((7786, 7849), 'Types.Result', 'Result', ([], {'rsc': 'RC.operationNotAllowed', 'dbg': '"""operation not allowed"""'}), "(rsc=RC.operationNotAllowed, dbg='operation not allowed')\n", (7792, 7849), False, 'from Types import ResourceTypes as T, Result, ConsistencyStrategy, Permission, Operation, ResponseCode as RC, CSERequest, JSON\n'), ((7364, 7425), 'CSE.dispatcher.processCreateRequest', 'CSE.dispatcher.processCreateRequest', (['request', 'originator', 'mid'], {}), '(request, originator, mid)\n', (7399, 7425), False, 'import CSE, Utils\n'), ((7515, 7576), 'CSE.dispatcher.processUpdateRequest', 'CSE.dispatcher.processUpdateRequest', (['request', 'originator', 'mid'], {}), '(request, originator, mid)\n', (7550, 7576), False, 'import CSE, Utils\n'), ((7667, 7728), 'CSE.dispatcher.processDeleteRequest', 'CSE.dispatcher.processDeleteRequest', (['request', 'originator', 'mid'], {}), '(request, originator, mid)\n', (7702, 7728), False, 'import CSE, Utils\n')]
from flask import Blueprint, jsonify

errors = Blueprint('errors', __name__)


@errors.app_errorhandler(404)
def url_not_found(e):
    """error for non-existent request"""
    return jsonify({"error": "Oops! wrong url"}), 404


@errors.app_errorhandler(500)
def internal_server_error(e):
    """error for server-side error"""
    return jsonify({"error": "Oops! Internal Server error"}), 500
[ "flask.Blueprint", "flask.jsonify" ]
[((46, 75), 'flask.Blueprint', 'Blueprint', (['"""errors"""', '__name__'], {}), "('errors', __name__)\n", (55, 75), False, 'from flask import Blueprint, jsonify\n'), ((182, 219), 'flask.jsonify', 'jsonify', (["{'error': 'Oops! wrong url'}"], {}), "({'error': 'Oops! wrong url'})\n", (189, 219), False, 'from flask import Blueprint, jsonify\n'), ((335, 384), 'flask.jsonify', 'jsonify', (["{'error': 'Oops! Internal Server error'}"], {}), "({'error': 'Oops! Internal Server error'})\n", (342, 384), False, 'from flask import Blueprint, jsonify\n')]
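A minimal usage sketch for the blueprint above (the import path and app layout are assumptions): the app_errorhandler decorators only take effect application-wide once the blueprint is registered on a Flask app.

from flask import Flask
from errors import errors  # hypothetical module path for the blueprint defined above

app = Flask(__name__)
app.register_blueprint(errors)

# any unknown URL now returns the JSON 404 payload defined above:
#   GET /does-not-exist  ->  {"error": "Oops! wrong url"} with status 404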