id (string, 1–8 chars) | text (string, 6 chars–1.05M chars) | dataset_id (1 distinct value)
---|---|---
9795579
|
import os
import unittest2 as unittest
from collections import defaultdict
from tincmmgr.tincmm import PythonFileHandler, SQLFileHandler
from mpp.models import MPPTestCase, SQLTestCase
class PythonFileHandlerTests(unittest.TestCase):
def test_parser(self):
test_file_handle = PythonFileHandler(os.path.join(os.path.dirname(__file__), 'sample_tests.py'))
        # Check that the docstring list is not empty
self.assertTrue(test_file_handle.docstring_tuples)
# sample_tests.py has many classes and methods:
# (wd) means with docstring ; (nd) means no docstring ; (ntm) means not test method ; (ntc) means not test class
# SampleMPPTests (wd): test_1 (nd), test_2 (wd), test_3 (wd)
# SampleSQLTests (wd): test_explicit_1 (nd), test_explicit_2 (wd)
# SampleTestsNoDocstring (nd): test_no_1 (nd), test_no_2 (nd), test_no_3 (ntm)
# stray_def: Stray definition that is not test method and that is not part of any test class.
# StrayClass (ntc): test_stray_1 (ntm), test_stray_2 (ntm)
# SampleTestsAfterStray (wd): test_last_1 (wd), test_last_2 (wd)
        # So there should be 5 method docstrings and 3 class docstrings = 8 valid docstrings
        # And there are 4 test methods without a docstring and 1 test class without a docstring = 5 valid test methods/classes with no docstring
self.assertEqual(len(test_file_handle.docstring_tuples), 13)
docstring_present_count = 0
docstring_absent_count = 0
for my_tuple in test_file_handle.docstring_tuples:
if my_tuple.original_docstring:
docstring_present_count += 1
else:
docstring_absent_count += 1
        self.assertEqual(docstring_present_count, 8)
        self.assertEqual(docstring_absent_count, 5)
def test_get_metadata_dictionary(self):
test_file_handle = PythonFileHandler(os.path.join(os.path.dirname(__file__), 'sample_tests.py'))
        # Check that the docstring list is not empty
self.assertTrue(test_file_handle.docstring_tuples)
# Let's check each docstring metadata
# All dictionaries will always have __changed__ key
# SampleMPPTests only has @tags class1
my_dict = test_file_handle.get_metadata_dictionary(class_name = 'SampleMPPTests', method_name = None)
self.assertEqual(len(my_dict), 2)
self.assertEqual(my_dict['tags'], "class1")
# SampleMPPTests' test_1 should be empty.
my_dict = test_file_handle.get_metadata_dictionary(class_name = 'SampleMPPTests', method_name = 'test_1')
self.assertEqual(len(my_dict), 1)
# SampleMPPTests' test_2 should only have @tags test2.
my_dict = test_file_handle.get_metadata_dictionary(class_name = 'SampleMPPTests', method_name = 'test_2')
self.assertEqual(len(my_dict), 2)
self.assertEqual(my_dict['tags'], "test2")
# SampleMPPTests' test_3 should only have @product_version prod1:
my_dict = test_file_handle.get_metadata_dictionary(class_name = 'SampleMPPTests', method_name = 'test_3')
self.assertEqual(len(my_dict), 2)
self.assertEqual(my_dict['product_version'], "prod1:")
# SampleSQLTests has @tags sql smoke, and @author sql_author
my_dict = test_file_handle.get_metadata_dictionary(class_name = 'SampleSQLTests', method_name = None)
self.assertEqual(len(my_dict), 3)
self.assertEqual(my_dict['tags'], "sql smoke")
self.assertEqual(my_dict['author'], "sql_author")
# SampleSQLTests' test_explicit_1 has nothing
my_dict = test_file_handle.get_metadata_dictionary(class_name = 'SampleSQLTests', method_name = 'test_explicit_1')
self.assertEqual(len(my_dict), 1)
# SampleSQLTests' test_explicit_2 has @tags explicit
my_dict = test_file_handle.get_metadata_dictionary(class_name = 'SampleSQLTests', method_name = 'test_explicit_2')
self.assertEqual(len(my_dict), 2)
self.assertEqual(my_dict['tags'], "explicit")
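    # For reference, the metadata assertions in this file imply a class docstring for
    # SampleSQLTests roughly like the sketch below (a plausible reconstruction of
    # sample_tests.py, not its actual contents):
    #
    #     class SampleSQLTests(SQLTestCase):
    #         """
    #         Comment here
    #
    #         Some space above
    #         @tags sql smoke
    #         @author sql_author
    #         """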
def test_generate_new_docstring(self):
test_file_handle = PythonFileHandler(os.path.join(os.path.dirname(__file__), 'sample_tests.py'))
metadata_dictionary = test_file_handle.get_metadata_dictionary(class_name = 'SampleSQLTests', method_name = 'test_explicit_2')
metadata_dictionary['__changed__'] = True
original_docstring = ''
for my_tuple in test_file_handle.docstring_tuples:
if my_tuple.class_name == 'SampleSQLTests' and my_tuple.method_name == 'test_explicit_2':
original_docstring = my_tuple.original_docstring
offset = my_tuple.offset
break
new_docstring = test_file_handle.generate_new_docstring(original_docstring, metadata_dictionary, offset)
        self.assertEqual(new_docstring.strip('\n'), original_docstring)
# Original tags is explicit. Change it to blah
metadata_dictionary['tags'] = "blah"
new_docstring = test_file_handle.generate_new_docstring(original_docstring, metadata_dictionary, offset)
self.assertTrue("@tags blah" in new_docstring)
self.assertTrue("@tags test_explicit_2" not in new_docstring)
# Add something to metadata_dictionary that wasn't there before
metadata_dictionary['author'] = 'mel'
new_docstring = test_file_handle.generate_new_docstring(original_docstring, metadata_dictionary, offset)
self.assertTrue("@author mel" in new_docstring)
def test_generate_new_docstring_complicated(self):
# Work with more complicated docstring: SampleSQLTests
test_file_handle = PythonFileHandler(os.path.join(os.path.dirname(__file__), 'sample_tests.py'))
metadata_dictionary = test_file_handle.get_metadata_dictionary(class_name = 'SampleSQLTests', method_name = None)
metadata_dictionary['__changed__'] = True
original_docstring = ''
for my_tuple in test_file_handle.docstring_tuples:
            if my_tuple.class_name == 'SampleSQLTests' and my_tuple.method_name is None:
original_docstring = my_tuple.original_docstring
offset = my_tuple.offset
break
new_docstring = test_file_handle.generate_new_docstring(original_docstring, metadata_dictionary, offset)
        self.assertEqual(new_docstring.strip('\n'), original_docstring)
# Original author is sql_author. Change it to blah
metadata_dictionary['author'] = "blah"
new_docstring = test_file_handle.generate_new_docstring(original_docstring, metadata_dictionary, offset)
self.assertTrue("@author blah" in new_docstring)
self.assertTrue("@author sql_author" not in new_docstring)
# Verify that the comment is still there
self.assertTrue("Comment here" in new_docstring)
self.assertTrue("Some space above" in new_docstring)
# Add something to metadata_dictionary that wasn't there before
metadata_dictionary['bugs'] = 'mpp-1'
new_docstring = test_file_handle.generate_new_docstring(original_docstring, metadata_dictionary, offset)
self.assertTrue("@bugs mpp-1" in new_docstring)
def test_update_docstring_class(self):
test_file_handle = PythonFileHandler(os.path.join(os.path.dirname(__file__), 'sample_tests.py'))
metadata_dictionary = test_file_handle.get_metadata_dictionary(class_name = 'SampleSQLTests', method_name = None)
metadata_dictionary['__changed__'] = True
# Original author is sql_author. Change it to bal_author
metadata_dictionary['author'] = "bal_author"
# Add something to metadata_dictionary that wasn't there before
metadata_dictionary['bugs'] = 'mpp-1'
test_file_handle.update_docstring(class_name = 'SampleSQLTests', method_name = None, metadata_dictionary = metadata_dictionary)
# Go through the docstring_tuples and find our tuple
tuple_found = None
for my_tuple in test_file_handle.docstring_tuples:
            if my_tuple.class_name == 'SampleSQLTests' and my_tuple.method_name is None:
tuple_found = my_tuple
break
self.assertTrue(tuple_found is not None)
self.assertTrue("@author bal_author" in tuple_found.new_docstring)
self.assertTrue("@author sql_author" not in tuple_found.new_docstring)
# Verify that the comment is still there
self.assertTrue("Comment here" in tuple_found.new_docstring)
self.assertTrue("Some space above" in tuple_found.new_docstring)
# Verify that new metadata got added with 4 spaces
self.assertTrue(" @bugs mpp-1" in tuple_found.new_docstring)
def test_update_docstring_method(self):
test_file_handle = PythonFileHandler(os.path.join(os.path.dirname(__file__), 'sample_tests.py'))
metadata_dictionary = test_file_handle.get_metadata_dictionary(class_name = 'SampleSQLTests', method_name = "test_explicit_2")
metadata_dictionary['__changed__'] = True
# Add something to metadata_dictionary that wasn't there before
metadata_dictionary['bugs'] = 'mpp-2'
test_file_handle.update_docstring(class_name = 'SampleSQLTests', method_name = 'test_explicit_2', metadata_dictionary = metadata_dictionary)
# Go through the docstring_tuples and find our tuple
tuple_found = None
for my_tuple in test_file_handle.docstring_tuples:
if my_tuple.class_name == 'SampleSQLTests' and my_tuple.method_name == 'test_explicit_2':
tuple_found = my_tuple
break
self.assertTrue(tuple_found is not None)
# Verify that new metadata got added with 8 spaces for method
self.assertTrue(" @bugs mpp-2" in tuple_found.new_docstring)
def test_update_docstring_no_original_class(self):
test_file_handle = PythonFileHandler(os.path.join(os.path.dirname(__file__), 'sample_tests.py'))
metadata_dictionary = test_file_handle.get_metadata_dictionary(class_name = 'SampleTestsNoDocstring', method_name = None)
metadata_dictionary['__changed__'] = True
# Add something to metadata_dictionary that wasn't there before
metadata_dictionary['bugs'] = 'mpp-3'
metadata_dictionary['tags'] = 'smoke'
test_file_handle.update_docstring(class_name = 'SampleTestsNoDocstring', method_name = None, metadata_dictionary = metadata_dictionary)
# Go through the docstring_tuples and find our tuple
tuple_found = None
for my_tuple in test_file_handle.docstring_tuples:
            if my_tuple.class_name == 'SampleTestsNoDocstring' and my_tuple.method_name is None:
tuple_found = my_tuple
break
self.assertTrue(tuple_found is not None)
# Verify that new metadata got added with 4 spaces for class
self.assertTrue(" @bugs mpp-3" in tuple_found.new_docstring)
self.assertTrue(" @tags smoke" in tuple_found.new_docstring)
self.assertTrue(tuple_found.original_docstring is None)
def test_update_docstring_no_original_method(self):
test_file_handle = PythonFileHandler(os.path.join(os.path.dirname(__file__), 'sample_tests.py'))
metadata_dictionary = test_file_handle.get_metadata_dictionary(class_name = 'SampleTestsNoDocstring', method_name = 'test_no_1')
metadata_dictionary['__changed__'] = True
# Add something to metadata_dictionary that wasn't there before
metadata_dictionary['bugs'] = 'mpp-4'
metadata_dictionary['tags'] = 'smokey'
test_file_handle.update_docstring(class_name = 'SampleTestsNoDocstring', method_name = 'test_no_1', metadata_dictionary = metadata_dictionary)
# Go through the docstring_tuples and find our tuple
tuple_found = None
for my_tuple in test_file_handle.docstring_tuples:
if my_tuple.class_name == 'SampleTestsNoDocstring' and my_tuple.method_name == 'test_no_1':
tuple_found = my_tuple
break
self.assertTrue(tuple_found is not None)
# Verify that new metadata got added with 4 spaces for class
self.assertTrue(" @bugs mpp-4" in tuple_found.new_docstring)
self.assertTrue(" @tags smokey" in tuple_found.new_docstring)
self.assertTrue(tuple_found.original_docstring is None)
def test_update_file_existing_docstring(self):
test_file_handle = PythonFileHandler(os.path.join(os.path.dirname(__file__), 'sample_tests.py'))
metadata_dictionary_class = test_file_handle.get_metadata_dictionary(class_name = 'SampleSQLTests', method_name = None)
metadata_dictionary_class['__changed__'] = True
# Original author is sql_author. Change it to bal_author
metadata_dictionary_class['author'] = "bal_author"
# Add something to metadata_dictionary that wasn't there before
metadata_dictionary_class['bugs'] = 'mpp-1'
# Modify method one as well
metadata_dictionary_method = test_file_handle.get_metadata_dictionary(class_name = 'SampleMPPTests', method_name = 'test_3')
metadata_dictionary_method['__changed__'] = True
# Original product_version is prod1:. Delete it.
metadata_dictionary_method.pop('product_version')
# Add something to metadata_dictionary that wasn't there before
metadata_dictionary_method['tags'] = 'smoke'
new_file = os.path.join(os.path.dirname(__file__), 'sample_tests_new.py')
test_file_handle.update_docstring(class_name = 'SampleSQLTests', method_name = None, metadata_dictionary = metadata_dictionary_class)
test_file_handle.update_docstring(class_name = 'SampleMPPTests', method_name = 'test_3', metadata_dictionary = metadata_dictionary_method)
test_file_handle.update_file(new_file)
# Verify that new file exists
self.assertTrue(os.path.exists(new_file))
# Now, get the docstring from new file
new_file_handle = PythonFileHandler(new_file)
new_class_tuple = None
new_method_tuple = None
for my_tuple in new_file_handle.docstring_tuples:
            if my_tuple.class_name == 'SampleSQLTests' and my_tuple.method_name is None:
new_class_tuple = my_tuple
if my_tuple.class_name == 'SampleMPPTests' and my_tuple.method_name == 'test_3':
new_method_tuple = my_tuple
self.assertTrue(new_class_tuple is not None)
self.assertTrue(new_method_tuple is not None)
# Verify original docstring of new_file
self.assertTrue(" Comment here" in new_class_tuple.original_docstring)
self.assertTrue(" @author bal_author" in new_class_tuple.original_docstring)
self.assertTrue(" @bugs mpp-1" in new_class_tuple.original_docstring)
self.assertTrue(" @tags smoke" in new_method_tuple.original_docstring)
self.assertTrue("@product_version" not in new_method_tuple.original_docstring)
# Verify that update_file in-place works
new_class_dict = new_file_handle.get_metadata_dictionary(class_name = 'SampleSQLTests', method_name = None)
new_method_dict = new_file_handle.get_metadata_dictionary(class_name = 'SampleMPPTests', method_name = 'test_3')
new_class_dict['__changed__'] = True
new_method_dict['__changed__'] = True
new_class_dict.pop('author')
new_class_dict.pop('tags')
new_class_dict.pop('bugs')
new_method_dict.pop('tags')
new_file_handle.update_docstring(class_name = 'SampleSQLTests', method_name = None, metadata_dictionary = new_class_dict)
new_file_handle.update_docstring(class_name = 'SampleMPPTests', method_name = 'test_3', metadata_dictionary = new_method_dict)
new_file_handle.update_file()
new_file_handle = PythonFileHandler(new_file)
new_class_dict = new_file_handle.get_metadata_dictionary(class_name = 'SampleSQLTests', method_name = None)
new_method_dict = new_file_handle.get_metadata_dictionary(class_name = 'SampleMPPTests', method_name = 'test_3')
# Should have no keys (except for __changed__)
self.assertEqual(len(new_class_dict), 1)
self.assertEqual(len(new_method_dict), 1)
os.remove(new_file)
@unittest.skip("Failing for now...")
def test_update_file_no_docstring(self):
test_file_handle = PythonFileHandler(os.path.join(os.path.dirname(__file__), 'sample_tests.py'))
metadata_dictionary_class = test_file_handle.get_metadata_dictionary(class_name = 'SampleTestsNoDocstring', method_name = None)
metadata_dictionary_class['__changed__'] = True
# Add something to metadata_dictionary that wasn't there before
metadata_dictionary_class['bugs'] = 'mpp-1'
# Modify method one as well
metadata_dictionary_method = test_file_handle.get_metadata_dictionary(class_name = 'SampleTestsNoDocstring', method_name = 'test_no_1')
metadata_dictionary_method['__changed__'] = True
# Add something to metadata_dictionary that wasn't there before
metadata_dictionary_method['tags'] = 'smoke'
new_file = os.path.join(os.path.dirname(__file__), 'sample_tests_new.py')
test_file_handle.update_docstring(class_name = 'SampleTestsNoDocstring', method_name = None, metadata_dictionary = metadata_dictionary_class)
test_file_handle.update_docstring(class_name = 'SampleTestsNoDocstring', method_name = 'test_no_1', metadata_dictionary = metadata_dictionary_method)
test_file_handle.update_file(new_file)
# Verify that new file exists
self.assertTrue(os.path.exists(new_file))
# Now, get the docstring from new file
new_file_handle = PythonFileHandler(new_file)
new_class_tuple = None
new_method_tuple = None
for my_tuple in new_file_handle.docstring_tuples:
            if my_tuple.class_name == 'SampleTestsNoDocstring' and my_tuple.method_name is None:
new_class_tuple = my_tuple
if my_tuple.class_name == 'SampleTestsNoDocstring' and my_tuple.method_name == 'test_no_1':
new_method_tuple = my_tuple
self.assertTrue(new_class_tuple is not None)
self.assertTrue(new_method_tuple is not None)
# Verify original docstring of new_file
self.assertTrue(new_class_tuple.original_docstring is not None)
self.assertTrue(new_method_tuple.original_docstring is not None)
self.assertTrue(" @bugs mpp-1" in new_class_tuple.original_docstring)
self.assertTrue(" @tags smoke" in new_method_tuple.original_docstring)
# Verify that update_file in-place works
new_class_dict = new_file_handle.get_metadata_dictionary(class_name = 'SampleTestsNoDocstring', method_name = None)
new_method_dict = new_file_handle.get_metadata_dictionary(class_name = 'SampleTestsNoDocstring', method_name = 'test_no_1')
new_class_dict['__changed__'] = True
new_method_dict['__changed__'] = True
new_class_dict.pop('bugs')
new_method_dict.pop('tags')
new_file_handle.update_docstring(class_name = 'SampleTestsNoDocstring', method_name = None, metadata_dictionary = new_class_dict)
new_file_handle.update_docstring(class_name = 'SampleTestsNoDocstring', method_name = 'test_no_1', metadata_dictionary = new_method_dict)
new_file_handle.update_file()
new_file_handle = PythonFileHandler(new_file)
new_class_dict = new_file_handle.get_metadata_dictionary(class_name = 'SampleTestsNoDocstring', method_name = None)
new_method_dict = new_file_handle.get_metadata_dictionary(class_name = 'SampleTestsNoDocstring', method_name = 'test_no_1')
# Should have no keys (except for __changed__)
self.assertEqual(len(new_class_dict), 1)
self.assertEqual(len(new_method_dict), 1)
os.remove(new_file)
class SQLFileHandlerTests(unittest.TestCase):
def test_parser(self):
test_file_handle = SQLFileHandler(os.path.join(os.path.dirname(__file__), 'sql', 'query01.sql'))
# Check that docstring is not empty
self.assertTrue(test_file_handle.original_docstring)
# Verify that the original docstring is correct
self.assertTrue("-- @tags tag1 tag2 tag3 bug-3" in test_file_handle.original_docstring)
self.assertTrue("-- comment" in test_file_handle.original_docstring)
self.assertTrue("--@bugs MPP-1" in test_file_handle.original_docstring)
test_file_handle = SQLFileHandler(os.path.join(os.path.dirname(__file__), 'sql', 'query02.sql'))
# Check that docstring is not empty
self.assertTrue(test_file_handle.original_docstring)
# Verify that the original docstring is correct
self.assertTrue("-- @product_version gpdb:" in test_file_handle.original_docstring)
test_file_handle = SQLFileHandler(os.path.join(os.path.dirname(__file__), 'sql', 'query_nodocstring.sql'))
# Check that docstring is empty
        self.assertTrue(test_file_handle.original_docstring == "")
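    # For reference, the assertions above imply that query01.sql begins with a
    # comment-style metadata header along these lines (a plausible sketch; the
    # trailing SELECT statement is made up):
    #
    #     -- @tags tag1 tag2 tag3 bug-3
    #     -- comment
    #     --@bugs MPP-1
    #     SELECT 1;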
def test_get_metadata_dictionary(self):
# All dictionaries will always have __changed__ key
# query01 has tags and bugs
test_file_handle = SQLFileHandler(os.path.join(os.path.dirname(__file__), 'sql', 'query01.sql'))
self.assertTrue(test_file_handle.original_docstring)
my_dict = test_file_handle.get_metadata_dictionary()
self.assertEqual(len(my_dict), 3)
self.assertEqual(my_dict['tags'], "tag1 tag2 tag3 bug-3")
self.assertEqual(my_dict['bugs'], "MPP-1")
# query02 has product_version
test_file_handle = SQLFileHandler(os.path.join(os.path.dirname(__file__), 'sql', 'query02.sql'))
self.assertTrue(test_file_handle.original_docstring)
my_dict = test_file_handle.get_metadata_dictionary()
self.assertEqual(len(my_dict), 2)
self.assertEqual(my_dict['product_version'], "gpdb:")
# query_nodocstring has nothing
test_file_handle = SQLFileHandler(os.path.join(os.path.dirname(__file__), 'sql', 'query_nodocstring.sql'))
self.assertTrue(test_file_handle.original_docstring == "")
my_dict = test_file_handle.get_metadata_dictionary()
self.assertEqual(len(my_dict), 1)
def test_update_docstring_existing(self):
# Query01 (scenario that has some docstring)
test_file_handle = SQLFileHandler(os.path.join(os.path.dirname(__file__), 'sql', 'query01.sql'))
self.assertTrue(test_file_handle.original_docstring)
my_dict = test_file_handle.get_metadata_dictionary()
my_dict['__changed__'] = True
# Change original tags to smoke
my_dict['tags'] = 'smoke'
# Remove bugs
my_dict.pop('bugs')
# Add author
my_dict['author'] = 'blah'
test_file_handle.update_docstring(my_dict)
# Verify that new_docstring exists
self.assertTrue(test_file_handle.new_docstring)
# Verify that tags is updated
self.assertTrue("@tags smoke" in test_file_handle.new_docstring)
# Verify that bugs is removed
self.assertTrue('@bugs' not in test_file_handle.new_docstring)
# Verify that author is added
self.assertTrue('@author blah' in test_file_handle.new_docstring)
# Verify that the comment is still there
self.assertTrue('-- comment' in test_file_handle.new_docstring)
def test_update_docstring_new(self):
        # query_nodocstring (scenario with no docstring)
test_file_handle = SQLFileHandler(os.path.join(os.path.dirname(__file__), 'sql', 'query_nodocstring.sql'))
self.assertTrue(test_file_handle.original_docstring == "")
my_dict = test_file_handle.get_metadata_dictionary()
my_dict['__changed__'] = True
# Add tags
my_dict['tags'] = 'smoke'
# Add author
my_dict['author'] = 'blah'
test_file_handle.update_docstring(my_dict)
# Verify that new_docstring exists
self.assertTrue(test_file_handle.new_docstring)
# Verify that tags is added
self.assertTrue("@tags smoke" in test_file_handle.new_docstring)
# Verify that author is added
self.assertTrue('@author blah' in test_file_handle.new_docstring)
def test_update_file_existing_docstring(self):
# Query01 (scenario that has some docstring)
test_file_handle = SQLFileHandler(os.path.join(os.path.dirname(__file__), 'sql', 'query01.sql'))
self.assertTrue(test_file_handle.original_docstring)
my_dict = test_file_handle.get_metadata_dictionary()
my_dict['__changed__'] = True
# Change original tags to smoke
my_dict['tags'] = 'smoke'
# Remove bugs
my_dict.pop('bugs')
# Add author
my_dict['author'] = 'blah'
new_file = os.path.join(os.path.dirname(__file__), 'sql', 'query01_new.sql')
test_file_handle.update_docstring(my_dict)
test_file_handle.update_file(new_file)
# Verify that new file exists
self.assertTrue(os.path.exists(new_file))
# Now, get the metadata dictionary from new file
new_file_handle = SQLFileHandler(new_file)
self.assertTrue(new_file_handle.original_docstring)
new_dict = new_file_handle.get_metadata_dictionary()
# Verify the updated metadata in new_dict's original_docstring
# Verify that tags is updated
self.assertTrue("@tags smoke" in new_file_handle.original_docstring)
# Verify that bugs is removed
self.assertTrue('@bugs' not in new_file_handle.original_docstring)
# Verify that author is added
self.assertTrue('@author blah' in new_file_handle.original_docstring)
# Verify that the comment is still there
self.assertTrue('-- comment' in new_file_handle.original_docstring)
# Verify that all metadata can be removed and update_file in-place works
new_dict['__changed__'] = True
new_dict.pop('tags')
new_dict.pop('author')
new_file_handle.update_docstring(new_dict)
new_file_handle.update_file()
# Get the file content
new_file_content = None
with open(new_file, "r") as new_file_object:
new_file_content = new_file_object.read().replace('\n', '')
self.assertTrue(new_file_content is not None)
self.assertTrue('tags' not in new_file_content)
self.assertTrue('author' not in new_file_content)
self.assertTrue('-- comment' in new_file_content)
os.remove(new_file)
def test_update_file_no_docstring(self):
        # query_nodocstring (scenario with no docstring)
test_file_handle = SQLFileHandler(os.path.join(os.path.dirname(__file__), 'sql', 'query_nodocstring.sql'))
self.assertTrue(test_file_handle.original_docstring == "")
my_dict = test_file_handle.get_metadata_dictionary()
my_dict['__changed__'] = True
# Add tags
my_dict['tags'] = 'smoke'
# Add author
my_dict['author'] = 'blah'
new_file = os.path.join(os.path.dirname(__file__), 'sql', 'query_nodocstring_new.sql')
test_file_handle.update_docstring(my_dict)
test_file_handle.update_file(new_file)
# Verify that new file exists
self.assertTrue(os.path.exists(new_file))
# Now, get the metadata dictionary from new file
new_file_handle = SQLFileHandler(new_file)
self.assertTrue(new_file_handle.original_docstring)
new_dict = new_file_handle.get_metadata_dictionary()
# Verify the new metadata in new_dict's original_docstring
# Verify that tags is added
self.assertTrue("@tags smoke" in new_file_handle.original_docstring)
# Verify that author is added
self.assertTrue('@author blah' in new_file_handle.original_docstring)
# Verify that all metadata can be removed and update_file in-place works
new_dict['__changed__'] = True
new_dict.pop('tags')
new_dict.pop('author')
new_file_handle.update_docstring(new_dict)
new_file_handle.update_file()
# Get the file content
new_file_content = None
with open(new_file, "r") as new_file_object:
new_file_content = new_file_object.read().replace('\n', '')
self.assertTrue(new_file_content is not None)
self.assertTrue('tags' not in new_file_content)
self.assertTrue('author' not in new_file_content)
self.assertTrue('--' not in new_file_content)
os.remove(new_file)
|
StarcoderdataPython
|
6588684
|
<filename>run.py
#@<NAME>
import json
import yaml
import os
import sys
with open("config.json","r") as jFile:
jData=jFile.read()
jData=json.loads(jData)
with open("docker-compose.yml","r") as cFile:
yData=(yaml.safe_load(cFile))
domain=jData["DOMAIN"]
subdomain=jData["SUBDOMAIN"]
hostport=str(jData["CHISEL_PORT"])
port=jData["RDP_PORT"]
passwd=jData["PASSWD"]
local=jData["LOCAL_CHISEL"]
name=jData["CONTAINER_NAME"]
nodejsPort=jData["NODEJS_PORT"]
appRepoLink=jData["APP_REPO_LINK"]
appFile=jData["APP_FILE"]
appDir=jData["APP_DIR_NAME"]
if local not in ["yes","no"]:
print("Error in config file: LOCAL must be either yes or no")
sys.exit(-1)
if (type(port)) != int:
print("Error in config file: PORT must be an integer")
sys.exit(-1)
os.system("docker run -d --name "+name+" --hostname terminalserver --shm-size 1g -p "+str(port)+":3389 -p 2244:22 -p "+str(nodejsPort)+":3000 danielguerra/ubuntu-xrdp")
cmd="docker exec -ti "+name+" /bin/bash -c \"echo -e \\\""+passwd+"\n"+passwd+"\n\n\n\\\" | (passwd ubuntu)\""
os.system("docker exec uxrdp-node /bin/bash -c \" cd ~ && git clone "+appRepoLink+" "+appDir+"\"")
os.system("docker exec -ti "+name+" /bin/bash -c \"cd ~/"+appDir+"/ && npm install && pm2 start "+appFile+"\"")
os.system(cmd)
if(local=="yes"):
os.system("docker exec "+name+" /bin/bash -c \"chisel client http://"+subdomain+"."+domain+":"+hostport+" R:"+str(port)+":localhost:3389 R:"+str(nodejsPort)+":localhost:"+str(nodejsPort)+"\"")
|
StarcoderdataPython
|
3217755
|
import pandas as pd
import numpy as np
import tensorflow as tf
import os
import matplotlib.pyplot as plt
import seaborn as sns
import PIL
from typing import List
# EfficientNet
from tensorflow.keras.applications import EfficientNetB7, ResNet50
from tensorflow.keras.applications.efficientnet import preprocess_input
# Data Augmentation
from tensorflow.keras.preprocessing.image import ImageDataGenerator
# Model Layers
from tensorflow.keras import Model, Input
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, MaxPool2D , Flatten, Dropout, BatchNormalization
# Compiling and Callbacks
from tensorflow.keras.optimizers import SGD,Adam
from tensorflow.keras.losses import BinaryCrossentropy
from tensorflow.keras.callbacks import ReduceLROnPlateau, ModelCheckpoint, EarlyStopping
#-----------------------------------------------------------------------------------------------------
# Competition Directory
comp_dir="/kaggle/input/ranzcr-clip-catheter-line-classification/"
# Get Training Data Labels
df_train=pd.read_csv(comp_dir+"train.csv").sample(frac=1).reset_index(drop=True)
# Get Training/Testing Data Paths
test_files = os.listdir(comp_dir+"test")
df_test = pd.DataFrame({"StudyInstanceUID": test_files})
image_size = 512
batch_size = 16
num_epochs = 12
learn_rate = 1e-03
df_train.StudyInstanceUID += ".jpg"
#-----------------------------------------------------------------------------------------------------
label_cols=df_train.columns.tolist()
label_cols.remove("StudyInstanceUID")
label_cols.remove("PatientID")
datagen=ImageDataGenerator(rescale=1./255.)
test_datagen=ImageDataGenerator(rescale=1./255.)
train_generator=datagen.flow_from_dataframe(
dataframe=df_train[:21000],
directory=comp_dir+"train",
x_col="StudyInstanceUID",
y_col=label_cols,
batch_size=batch_size,
seed=42,
shuffle=True,
color_mode="rgb",
class_mode="raw",
target_size=(image_size,image_size),
interpolation="bilinear")
valid_generator=test_datagen.flow_from_dataframe(
dataframe=df_train[21000:],
directory=comp_dir+"train",
x_col="StudyInstanceUID",
y_col=label_cols,
batch_size=batch_size,
seed=42,
shuffle=True,
color_mode="rgb",
class_mode="raw",
target_size=(image_size,image_size),
interpolation="bilinear")
test_generator=test_datagen.flow_from_dataframe(
dataframe=df_test,
directory=comp_dir+"test",
x_col="StudyInstanceUID",
batch_size=1,
seed=42,
shuffle=False,
color_mode="rgb",
class_mode=None,
target_size=(image_size,image_size),
interpolation="bilinear")
#-----------------------------------------------------------------------------------------------------
base_model = ResNet50(include_top=False,
weights=None,
input_shape=(image_size, image_size, 3))
base_model.load_weights("../input/keras-pretrained-models/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5", by_name=True)
base_model.trainable = False
#-----------------------------------------------------------------------------------------------------
inp = Input(shape = (image_size,image_size,3))
x = base_model(inp)
x = Flatten()(x)
output1 = Dense(1, activation = 'sigmoid')(x)
output2 = Dense(1, activation = 'sigmoid')(x)
output3 = Dense(1, activation = 'sigmoid')(x)
output4 = Dense(1, activation = 'sigmoid')(x)
output5 = Dense(1, activation = 'sigmoid')(x)
output6 = Dense(1, activation = 'sigmoid')(x)
output7 = Dense(1, activation = 'sigmoid')(x)
output8 = Dense(1, activation = 'sigmoid')(x)
output9 = Dense(1, activation = 'sigmoid')(x)
output10 = Dense(1, activation = 'sigmoid')(x)
output11 = Dense(1, activation = 'sigmoid')(x)
model = Model(inp,[output1,output2,output3,output4,output5,output6,output7,output8,output9,output10,output11])
sgd = SGD(lr=learn_rate, momentum=.9, nesterov=False)
model.compile(optimizer=sgd,
loss = ["binary_crossentropy" for i in range(11)],
metrics = ["accuracy"])
def generator_wrapper(generator):
for batch_x,batch_y in generator:
yield (batch_x,[batch_y[:,i] for i in range(11)])
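# Note (added for clarity): the model defined above has 11 separate single-unit sigmoid
# outputs, while flow_from_dataframe with class_mode="raw" yields a single
# (batch_size, 11) label array; generator_wrapper splits that array into a list of
# 11 per-output target vectors so it matches the model's output structure.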
STEP_SIZE_TRAIN = train_generator.n//train_generator.batch_size
STEP_SIZE_VALID = valid_generator.n//valid_generator.batch_size
STEP_SIZE_TEST = test_generator.n//test_generator.batch_size
history = model.fit_generator(generator=generator_wrapper(train_generator),
steps_per_epoch=STEP_SIZE_TRAIN,
validation_data=generator_wrapper(valid_generator),
validation_steps=STEP_SIZE_VALID,
epochs=num_epochs,verbose=2)
test_generator.reset()
pred = model.predict_generator(test_generator,
steps=STEP_SIZE_TEST,
verbose=1)
# Create Submission df
df_submission = pd.DataFrame(np.squeeze(pred)).transpose()
df_submission = df_submission.rename(columns=dict(zip(range(len(label_cols)), label_cols)))
df_submission["StudyInstanceUID"] = test_files
df_submission.to_csv("submission.csv", index=False)
plt.plot(history.history['loss'], label='Training Set')
plt.plot(history.history['val_loss'], label='Validation Set')
plt.title('Training and Validation Loss')
plt.ylabel('Loss')
plt.xlabel('Num Epochs')
plt.legend(loc="upper left")
plt.savefig("loss.png")  # save before show(), otherwise the saved figure is blank
plt.show()
|
StarcoderdataPython
|
1684917
|
# coding:utf-8
from ihome import create_app, db
# Database management commands
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
app = create_app('develop')
manager = Manager(app)
Migrate(app, db)
manager.add_command('db', MigrateCommand)
if __name__ == '__main__':
app.run(host='127.0.0.1', port=8000, debug=True)
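# Note: with flask_script wired up this way, the registered 'db' migration commands
# are typically driven by handing control to the Manager (a sketch of the usual
# pattern, not part of the original script):
#
#     if __name__ == '__main__':
#         manager.run()   # enables e.g. `python manage.py db init/migrate/upgrade`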
|
StarcoderdataPython
|
6684676
|
import os
import sys
import random
import math
import re
import time
import numpy as np
import cv2
import matplotlib
import matplotlib.pyplot as plt
# Root directory of the project
ROOT_DIR = os.path.abspath("../../")
print(ROOT_DIR)
# Import Mask RCNN
sys.path.append(ROOT_DIR) # To find local version of the library
from mrcnn.config import Config
from mrcnn import utils
import mrcnn.model as modellib
from mrcnn import visualize
from mrcnn.model import log
# Directory to save logs and trained model
MODEL_DIR = os.path.join(ROOT_DIR, "logs")
# Local path to trained weights file
COCO_MODEL_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
# Download COCO trained weights from Releases if needed
if not os.path.exists(COCO_MODEL_PATH):
utils.download_trained_weights(COCO_MODEL_PATH)
class ShapesConfig(Config):
"""Configuration for training on the toy shapes dataset.
Derives from the base Config class and overrides values specific
to the toy shapes dataset.
"""
# Give the configuration a recognizable name
NAME = "shapes"
# Train on 1 GPU and 8 images per GPU. We can put multiple images on each
# GPU because the images are small. Batch size is 8 (GPUs * images/GPU).
GPU_COUNT = 1
IMAGES_PER_GPU = 8
# Number of classes (including background)
    NUM_CLASSES = 1 + 1 # background + 1 shape class
# Use small images for faster training. Set the limits of the small side
# the large side, and that determines the image shape.
IMAGE_MIN_DIM = 448
IMAGE_MAX_DIM = 448
IMAGE_SHAPE = [448, 448, 3]
# Use smaller anchors because our image and objects are small
RPN_ANCHOR_SCALES = (8, 16, 32, 64, 128) # anchor side in pixels
# Reduce training ROIs per image because the images are small and have
# few objects. Aim to allow ROI sampling to pick 33% positive ROIs.
TRAIN_ROIS_PER_IMAGE = 32
# Use a small epoch since the data is simple
STEPS_PER_EPOCH = 100
# use small validation steps since the epoch is small
VALIDATION_STEPS = 5
config = ShapesConfig()
config.display()
def get_ax(rows=1, cols=1, size=8):
"""Return a Matplotlib Axes array to be used in
all visualizations in the notebook. Provide a
central point to control graph sizes.
Change the default size attribute to control the size
of rendered images
"""
_, ax = plt.subplots(rows, cols, figsize=(size * cols, size * rows))
return ax
from PIL import Image
class SpaceNetDataset(utils.Dataset):
    """Loads the SpaceNet data for Mask R-CNN training: RGB image tiles and
    their corresponding mask images are read from disk (unlike the toy shapes
    dataset this class was adapted from, which generated images on the fly).
    """
def get_obj_index(self, image):
n = np.max(image)
return n
    def load_shapes(self, img_list, img_floder, mask_floder):
        """Register the requested images with the dataset.
        img_list: list of image file names to register.
        img_floder, mask_floder: directories containing the images and their masks.
        """
# Add classes
self.add_class("shapes", 1, "shapes")
# self.add_class("shapes", 2, "others")
# self.add_class("shapes", 3, "triangle")
# Add images
# Generate random specifications of images (i.e. color and
# list of shapes sizes and locations). This is more compact than
# actual images. Images are generated on the fly in load_image().
# imglist = os.listdir(img_floder)
        for i in img_list:
            # Get the image width and height
# filestr = imglist[i]
image_id = i.split("_")[-1][3:-4]
# print(imglist[i],"-->",cv_img.shape[1],"--->",cv_img.shape[0])
# print("id-->", i, " imglist[", i, "]-->", imglist[i],"filestr-->",filestr)
# filestr = filestr.split("_")[1]
mask_path = mask_floder + "/" + "mask_" + i
img_path = img_floder + "/" + i
# yaml_path = dataset_root_path + "labelme_json/" + filestr + "_json/info.yaml"
# print(i)
cv_img = cv2.imread(img_path)
self.add_image("shapes", image_id=image_id, path=img_path,
width=cv_img.shape[1], height=cv_img.shape[0],
mask_path=mask_path)
def draw_mask(self, num_obj, mask, image, image_id):
# print("draw_mask-->",image_id)
# print("self.image_info",self.image_info)
info = self.image_info[image_id]
for index in range(num_obj):
for i in range(info['width']):
for j in range(info['height']):
at_pixel = image.getpixel((i, j))
if at_pixel == index + 1:
mask[j, i, index] = 1
return mask
# def load_image(self, image_id):
# """Generate an image from the specs of the given image ID.
# Typically this function loads the image from a file, but
# in this case it generates the image on the fly from the
# specs in image_info.
# """
# # info = self.image_info[image_id]
# # bg_color = np.array(info['bg_color']).reshape([1, 1, 3])
# # image = np.ones([info['height'], info['width'], 3], dtype=np.uint8)
# # image = image * bg_color.astype(np.uint8)
# # for shape, color, dims in info['shapes']:
# # image = self.draw_shape(image, shape, dims, color)
# img_dir = "/home/ly/data/dl_data/spacenet/AOI_5_Khartoum_Train/RGB-PanSharpen"
# img_name = "RGB-PanSharpen_AOI_5_Khartoum_img{}.tif".format(image_id)
# img_path = os.path.join(img_dir, img_name)
# if not os.path.isfile(img_path):
# pass
# else:
# image = cv2.imread(img_path)
# return image
def image_reference(self, image_id):
"""Return the shapes data of the image."""
info = self.image_info[image_id]
if info["source"] == "shapes":
return info["shapes"]
else:
super(self.__class__).image_reference(self, image_id)
def load_mask(self, image_id):
"""Generate instance masks for shapes of the given image ID.
"""
global iter_num
# print("image_id",image_id)
info = self.image_info[image_id]
count = 1 # number of object
img = Image.open(info['mask_path'])
num_obj = self.get_obj_index(img)
# print("num_obj:",num_obj)
mask = np.zeros([info['height'], info['width'], 1], dtype=np.uint8)
mask = self.draw_mask(num_obj, mask, img, image_id)
# print(mask.shape)
occlusion = np.logical_not(mask[:, :, -1]).astype(np.uint8)
for i in range(count - 1, -1, -1):
mask[:, :, i] = mask[:, :, i] * occlusion
occlusion = np.logical_and(occlusion, np.logical_not(mask[:, :, i]))
# labels = []
# labels = self.from_yaml_get_class(image_id)
# labels_form = []
# for i in range(len(labels)):
# if labels[i].find("tongue") != -1:
# # print "box"
# labels_form.append("tongue")
# class_ids = np.array([self.class_names.index(s) for s in labels_form])
class_ids = np.array([1])
return mask, class_ids.astype(np.int32)
img_dir = "/home/ly/data/dl_data/spacenet/AOI_5_Khartoum_Train/RGB-PanSharpen"
mask_dir = "/home/ly/data/dl_data/spacenet/AOI_5_Khartoum_Train/mask"
img_id_list = [ ]
imglist = os.listdir(img_dir)
for i in imglist:
img_id = i.split('_')[-1][3:-4]
img_id_list.append(img_id)
dataset_train = SpaceNetDataset()
dataset_train.load_shapes(imglist[:-50],img_dir, mask_dir)
dataset_train.prepare()
#Validation dataset
dataset_val = SpaceNetDataset()
dataset_val.load_shapes(imglist[-50:], img_dir, mask_dir)
dataset_val.prepare()
model = modellib.MaskRCNN(mode="training", config=config,
model_dir=MODEL_DIR)
# Which weights to start with?
init_with = "coco" # imagenet, coco, or last
import tensorflow as tf
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.5  # allocate at most 50% of GPU memory
config.gpu_options.allow_growth = True #allocate dynamically
sess = tf.Session(config = config)
if init_with == "imagenet":
model.load_weights(model.get_imagenet_weights(), by_name=True)
elif init_with == "coco":
# Load weights trained on MS COCO, but skip layers that
# are different due to the different number of classes
# See README for instructions to download the COCO weights
model.load_weights(COCO_MODEL_PATH, by_name=True,
exclude=["mrcnn_class_logits", "mrcnn_bbox_fc",
"mrcnn_bbox", "mrcnn_mask"])
elif init_with == "last":
# Load the last model you trained and continue training
model.load_weights(model.find_last()[1], by_name=True)
# Train the head branches
# Passing layers="heads" freezes all layers except the head
# layers. You can also pass a regular expression to select
# which layers to train by name pattern.
model.train(dataset_train, dataset_val,
learning_rate=0.01,
epochs=1,
layers='heads')
# Fine tune all layers
# Passing layers="all" trains all layers. You can also
# pass a regular expression to select which layers to
# train by name pattern.
model.train(dataset_train, dataset_val,
learning_rate=0.01 / 10,
epochs=5,
layers="all")
|
StarcoderdataPython
|
9693732
|
import sys
import random
n = int(sys.argv[1])
a = int(sys.argv[2])
b = int(sys.argv[3])
print(n)
for _ in range(n):
print(random.randint(a, b))
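# Example invocation (the file name is hypothetical): `python gen_case.py 5 1 100`
# prints 5 on the first line, followed by five random integers drawn from [1, 100]
# (random.randint is inclusive on both ends).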
|
StarcoderdataPython
|
3418686
|
print("Loading Libraries...")
import os
import numpy as np
import json
import tensorflow as tf
import nibabel
import sys
import nibabel as nib
import datetime
import tkinter
from tkinter import filedialog
import SimpleITK as sitk
from skimage import io
from skimage.io import imsave
from skimage.segmentation import mark_boundaries
from skimage.transform import resize
from skimage.exposure import rescale_intensity
from keras import backend as K
from keras.models import model_from_json
print("Loading Libraries Ended.")
print('-'*30)
root = tkinter.Tk()
root.withdraw()
desktop = os.path.join(os.path.join(os.environ['USERPROFILE']), 'Desktop')
currentDate = datetime.datetime.now().strftime("%m-%d-%y %H-%M")
nii_save_path = desktop + "\\PSGizmoLog\\Predict\\" + currentDate + "\\" #save
if not os.path.exists(nii_save_path):
os.makedirs(nii_save_path)
imageRows = int(512/2)
imageCols = int(512/2)
smooth = 1.
global affineValue
def Dice_Coef(y_true, y_pred):
y_true_f = K.flatten(y_true)
y_pred_f = K.flatten(y_pred)
intersection = K.sum(y_true_f * y_pred_f)
return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
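# Added note: this is the smoothed (soft) Dice coefficient,
#     Dice = (2 * sum(y_true * y_pred) + smooth) / (sum(y_true) + sum(y_pred) + smooth),
# where smooth = 1 guards against division by zero on empty masks; Dice_Coef_Loss
# below simply negates it so that better overlap yields a lower loss.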
def Dice_Coef_Loss(y_true, y_pred):
return -Dice_Coef(y_true, y_pred)
def Adapt_Data(imgs):
imgs_p = np.ndarray((imgs.shape[0], imageRows, imageCols), dtype='float32')
for i in range(imgs.shape[0]):
imgs_p[i] = resize(imgs[i], (imageCols, imageRows), preserve_range=True)
imgs_p = imgs_p[..., np.newaxis]
print("File is resized.");
print('-'*30)
return imgs_p
def Load_Prediction_Images():
print("Load .npy to predict:")
prediction_data_path = filedialog.askopenfilename()
imgst = np.load(prediction_data_path)
return imgst
def nifti_format(imgst, layerst):
print("Exporting...")
if not os.path.exists(nii_save_path +"\\Nifti"):
os.makedirs(nii_save_path +"\\Nifti")
filtered_image = sitk.GetImageFromArray(imgst)
writer = sitk.ImageFileWriter()
writer.SetFileName(nii_save_path +"\\Nifti\\Liver.nii.gz")
writer.Execute(filtered_image)
filtered_mask = sitk.GetImageFromArray(layerst)
writer = sitk.ImageFileWriter()
writer.SetFileName(nii_save_path + "\\Nifti\\Layer.nii.gz")
writer.Execute(filtered_mask)
print("Exporting is completed.")
print('-'*30)
ch = input("Do you want to check the results using MRIcron? (y/n): ")
if ch == 'y' or ch == 'Y':
os.startfile(".\\AdditionalSources\\MRIcron\\mricron.exe")
#TODO
def png_format(imgToPredict, predictedImage):
if not os.path.exists(nii_save_path + "PNG"):
os.mkdir(nii_save_path + "PNG")
if not os.path.exists(nii_save_path + "PNG\\Numpy"):
os.mkdir(nii_save_path + "PNG\\Numpy")
np.save(nii_save_path + "PNG\\Numpy" + '\\imgs_mask_test.npy', predictedImage)
print('Saving predicted masks to files...')
print('-' * 30)
print('Saving Our Predictions in the Directory preds')
print('-' * 30)
for k in range(len(predictedImage)):
a=rescale_intensity(imgToPredict[k][:,:,0],out_range=(-1,1))
b=(predictedImage[k][:,:,0]).astype(np.uint8)
io.imsave(os.path.join(nii_save_path + "PNG\\", str(k) + '_pred.png'), mark_boundaries(a, b,color=(1,1,0), mode='thick'))
print('Saving predictions is completed!')
print('-' * 30)
def Predict():
#Load Normalization Values
print("Load JSON files directory:")
json_data_path = filedialog.askdirectory()
print("Selected JSON path: " + json_data_path)
normalizationFile = open(json_data_path + '\\normalization.json', 'r')
normalizationString = normalizationFile.read()
normalizationData = json.loads(normalizationString)
normalizationFile.close()
#Load Model
jsonFile = open( json_data_path + '\\model.json', 'r')
loadedModelJson = jsonFile.read()
model = model_from_json(loadedModelJson)
jsonFile.close()
#model.summary()
imgToPredict = Load_Prediction_Images()
imgToPredict = Adapt_Data(imgToPredict)
mean = normalizationData["mean"]
std = normalizationData["std"]
imgToPredict = imgToPredict.astype('float32')
imgToPredict -= mean
imgToPredict /= std
print("Load weights.h5 file: ")
model_data_path = filedialog.askopenfilename()
print(model_data_path + "is weights.h5 path")
print('-'*30)
model.load_weights( model_data_path)
print("Model is loaded.")
print('-'*30)
predictedImageAsNumpy = model.predict(imgToPredict, verbose=1)
print("Prediction is completed.")
print('-'*30)
return imgToPredict, predictedImageAsNumpy
def Export_Prediction():
imgToPredict, predictedImage = Predict()
print("Exporting predictions...")
print('-'*30)
new_images = []
imgst = np.ndarray((imageRows, imageCols, 0), dtype='float32')
layerst = np.ndarray((imageRows, imageCols, 0), dtype='float32')
print("Slices are being adapted...")
for k in range(len(predictedImage)):
a=rescale_intensity(imgToPredict[k][:,:,0],out_range=(-1,1))
b=(predictedImage[k][:,:,0]).astype(np.uint8)
imgst = np.dstack((imgst, a))
layerst = np.dstack((layerst, b))
print("Slices are adapted.")
print('-'*30)
print("1: Mask and Resized CT in nifti format")
print("2: Slices in png format")
print("3: Transparent png format")
print("4: GIF Format")
choice = input()
if(choice == '1'):
nifti_format(imgst,layerst)
elif(choice == '2'):
png_format(imgToPredict, predictedImage)
else:
print("Invalid choice")
        while choice != '1' and choice != '2':
choice = input()
if(choice == '1'):
nifti_format(imgst,layerst)
elif(choice == '2'):
png_format(imgToPredict, predictedImage)
print('-'*30)
if __name__ == '__main__':
Export_Prediction()
input("Press ENTER to Exit")
|
StarcoderdataPython
|
11282038
|
class Node:
def __init__(self, key, val, prev=None, next=None):
self.key = key
self.val = val
self.prev = prev
self.next = next
class LRUCache:
"""
@param: capacity: An integer
"""
def __init__(self, capacity):
self.capacity = capacity
self.count = 0
self.dummy = Node(-1, 0)
self.tail = self.dummy
self.node_map = {}
"""
@param: key: An integer
@return: An integer
"""
def get(self, key):
if key not in self.node_map:
return -1
node = self.node_map[key]
self.__moveToHead(node)
return node.val
"""
@param: key: An integer
@param: value: An integer
@return: nothing
"""
def set(self, key, value):
if key not in self.node_map:
if self.count == self.capacity:
self.__removeTail()
else:
self.count += 1
            node = Node(key, value, prev=self.tail)
            self.node_map[key] = node
            self.tail.next = node
            self.tail = node
            # a newly inserted entry is the most recently used, so move it to the head
            self.__moveToHead(node)
else:
node = self.node_map[key]
node.val = value
self.__moveToHead(node)
def __removeTail(self):
if self.tail is self.dummy:
return
del self.node_map[self.tail.key]
self.tail = self.tail.prev
self.tail.next = None
def __moveToHead(self, node):
if node is self.dummy.next:
return
if node is self.tail:
self.tail = node.prev
if node.next is not None:
node.next.prev = node.prev
if node.prev is not None:
node.prev.next = node.next
node.next = self.dummy.next
node.prev = self.dummy
if self.dummy.next:
self.dummy.next.prev = node
self.dummy.next = node
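# Usage sketch (illustrative addition, not part of the original solution):
if __name__ == '__main__':
    cache = LRUCache(2)
    cache.set(1, 1)
    cache.set(2, 2)
    print(cache.get(1))   # 1; key 1 becomes the most recently used
    cache.set(3, 3)       # capacity reached: evicts key 2, the least recently used
    print(cache.get(2))   # -1, since key 2 was evicted
    print(cache.get(3))   # 3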
|
StarcoderdataPython
|
3550867
|
<gh_stars>0
import sys
import unittest
from utils import sysfont
class FontTest(unittest.TestCase):
__tags__ = []
def test_init(self):
sysfont.init()
def test_list_fonts(self):
sansfonts = [f for f in sysfont.list_fonts() if "sans" in f[0]]
self.assertGreaterEqual(len(sansfonts), 1)
def test_get_fonts(self):
fontnames = ["sans", "arial", "helvetica", "times new roman", "serif"]
        # At least a few of the name/style combinations must be found.
success = 0
for fname in fontnames:
count = len(sysfont.get_fonts(fname))
if count >= 1:
success += 1
count = len(sysfont.get_fonts(fname, sysfont.STYLE_BOLD))
if count >= 1:
success += 1
count = len(sysfont.get_fonts(fname, sysfont.STYLE_ITALIC))
if count >= 1:
success += 1
count = len(sysfont.get_fonts(fname, sysfont.STYLE_ITALIC |
sysfont.STYLE_BOLD))
if count >= 1:
success += 1
self.assertGreaterEqual(success, 4,
"did not meet enough font criteria for get_fonts()")
def test_get_font(self):
fontnames = ["sans", "arial", "helvetica", "times new roman", "serif"]
# At least two fonts must be found.
success = 0
for fname in fontnames:
fontfile = sysfont.get_font(fname)
if fontfile is not None:
success += 1
self.assertGreaterEqual(success, 2,
"could not find the required fonts for get_font()")
if __name__ == '__main__':
sys.exit(unittest.main())
|
StarcoderdataPython
|
3365179
|
from core.himesis import Himesis
import uuid
class Hlayer1rule0(Himesis):
def __init__(self):
"""
Creates the himesis graph representing the DSLTrans rule layer1rule0.
"""
# Flag this instance as compiled now
self.is_compiled = True
super(Hlayer1rule0, self).__init__(name='Hlayer1rule0', num_nodes=0, edges=[])
# Set the graph attributes
self["mm__"] = ['HimesisMM']
self["name"] = """layer1rule0"""
self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'layer1rule0')
# match model. We only support one match model
self.add_node()
self.vs[0]["mm__"] = """MatchModel"""
# apply model node
self.add_node()
self.vs[1]["mm__"] = """ApplyModel"""
# paired with relation between match and apply models
self.add_node()
self.vs[2]["mm__"] = """paired_with"""
# match class ImplementationModule(layer1rule0class0) node
self.add_node()
self.vs[3]["mm__"] = """ImplementationModule"""
self.vs[3]["attr1"] = """+"""
# match_contains node for class ImplementationModule(layer1rule0class0)
self.add_node()
self.vs[4]["mm__"] = """match_contains"""
# match class ClientServerInterface(layer1rule0class1) node
self.add_node()
self.vs[5]["mm__"] = """ClientServerInterface"""
self.vs[5]["attr1"] = """+"""
# match_contains node for class ClientServerInterface(layer1rule0class1)
self.add_node()
self.vs[6]["mm__"] = """match_contains"""
# apply class ImplementationModule(layer1rule0class2) node
self.add_node()
self.vs[7]["mm__"] = """ImplementationModule"""
self.vs[7]["attr1"] = """1"""
# apply_contains node for class ImplementationModule(layer1rule0class2)
self.add_node()
self.vs[8]["mm__"] = """apply_contains"""
# apply class StructDeclaration(layer1rule0class3) node
self.add_node()
self.vs[9]["mm__"] = """StructDeclaration"""
self.vs[9]["attr1"] = """1"""
# apply_contains node for class StructDeclaration(layer1rule0class3)
self.add_node()
self.vs[10]["mm__"] = """apply_contains"""
# match association ImplementationModule--contents-->ClientServerInterface node
self.add_node()
self.vs[11]["attr1"] = """contents"""
self.vs[11]["mm__"] = """directLink_S"""
# apply association ImplementationModule--contents-->StructDeclaration node
self.add_node()
self.vs[12]["attr1"] = """contents"""
self.vs[12]["mm__"] = """directLink_T"""
# backward association ImplementationModule---->ImplementationModule node
self.add_node()
self.vs[13]["mm__"] = """backward_link"""
# backward association ClientServerInterface---->StructDeclaration node
self.add_node()
self.vs[14]["mm__"] = """backward_link"""
# Add the edges
self.add_edges([
(0,4), # matchmodel -> match_contains
(4,3), # match_contains -> match_class ImplementationModule(layer1rule0class0)
(0,6), # matchmodel -> match_contains
(6,5), # match_contains -> match_class ClientServerInterface(layer1rule0class1)
(1,8), # applymodel -> apply_contains
(8,7), # apply_contains -> apply_class ImplementationModule(layer1rule0class2)
(1,10), # applymodel -> apply_contains
(10,9), # apply_contains -> apply_class StructDeclaration(layer1rule0class3)
(3,11), # match_class ImplementationModule(layer1rule0class0) -> association contents
(11,5), # association contents -> match_class ClientServerInterface(layer1rule0class1)
(7,12), # apply_class ImplementationModule(layer1rule0class2) -> association contents
(12,9), # association contents -> apply_class StructDeclaration(layer1rule0class3)
(7,13), # apply_class ImplementationModule(layer1rule0class2) -> backward_association
(13,3), # backward_association -> apply_class ImplementationModule(layer1rule0class0)
(9,14), # apply_class StructDeclaration(layer1rule0class3) -> backward_association
(14,5), # backward_association -> apply_class ClientServerInterface(layer1rule0class1)
(0,2), # matchmodel -> pairedwith
(2,1) # pairedwith -> applyModel
])
# Add the attribute equations
self["equations"] = [((7,'__ApplyAttribute'),('constant','ImplementationModule')), ((9,'__ApplyAttribute'),('constant','ClientServerStructIData')), ]
|
StarcoderdataPython
|
118962
|
<filename>pydy/codegen/cython_code.py
#!/usr/bin/env python
import os
import sys
import shutil
import tempfile
import importlib
import subprocess
from collections import defaultdict
from .c_code import CMatrixGenerator
from ..utils import wrap_and_indent
class CythonMatrixGenerator(object):
"""This class generates the Cython code for evaluating a sequence of
matrices. It can compile the code and return a Python function."""
_pyx_template = \
"""\
import numpy as np
cimport numpy as np
cimport cython
cdef extern from "{prefix}_c.h":
void evaluate(
{header_args}
)
@cython.boundscheck(False)
@cython.wraparound(False)
def eval(
{python_args}
):
evaluate(
{c_args}
)
return (
{output}
)\
"""
_setup_py_template = """\
#!/usr/bin/env python
from distutils.core import setup
from distutils.extension import Extension
from Cython.Build import cythonize
import numpy
extension = Extension(name="{prefix}",
sources=["{prefix}.pyx",
"{prefix}_c.c"],
include_dirs=[numpy.get_include()])
setup(name="{prefix}",
ext_modules=cythonize([extension]))\
"""
_module_counter = 0
def __init__(self, arguments, matrices, prefix='pydy_codegen'):
"""
Parameters
==========
arguments : sequences of sequences of SymPy Symbol or Function.
Each of the sequences will be converted to input arrays in the
Cython function. All of the symbols/functions contained in
``matrices`` need to be in the sequences, but the sequences can
also contain extra symbols/functions that are not contained in
the matrices.
matrices : sequence of SymPy.Matrix
A sequence of the matrices that should be evaluated in the
function. The expressions should contain only sympy.Symbol or
sympy.Function that are functions of me.dynamicsymbols._t.
prefix : string, optional
The desired prefix for the generated files.
"""
self.prefix = prefix
self.matrices = matrices
self.arguments = arguments
self.num_matrices = len(matrices)
self.num_arguments = len(arguments)
self.c_matrix_generator = CMatrixGenerator(arguments, matrices)
self._generate_code_blocks()
def _generate_code_blocks(self):
lines = defaultdict(list)
hd = 'double* {}_{},'
py = "np.ndarray[np.double_t, ndim=1, mode='c'] {}_{},"
c = '<double*> {}_{}.data,'
out = 'output_{}.reshape({}, {}),'
out_vec = 'output_{},'
for i in range(self.num_arguments):
lines['header_args'].append(hd.format('input', i))
lines['python_args'].append(py.format('input', i))
lines['c_args'].append(c.format('input', i))
for i, matrix in enumerate(self.matrices):
lines['header_args'].append(hd.format('output', i))
lines['python_args'].append(py.format('output', i))
lines['c_args'].append(c.format('output', i))
nr, nc = matrix.shape
if nc == 1:
lines['output'].append(out_vec.format(i))
else:
lines['output'].append(out.format(i, nr, nc))
indents = {'header_args': 18,
'python_args': 9,
'c_args': 13,
'output': 12}
self.code_blocks = {k: wrap_and_indent(v, indents[k])[:-1] for k, v
in lines.items()}
def doprint(self):
"""Returns the text of the four source files needed to compile
Cython wrapper that evaluates the provided SymPy matrices.
Returns
=======
setup_py : string
The text of the setup.py file used to compile the Cython
extension.
cython_source : string
The text of the Cython pyx file which includes the wrapper
function ``eval``.
c_header : string
The text of the C header file that exposes the evaluate
function.
c_source : string
The text of the C source file containing the function that
evaluates the matrices.
"""
c_header, c_source = self.c_matrix_generator.doprint(
prefix=self.prefix + '_c')
filling = {'prefix': self.prefix}
filling.update(self.code_blocks)
cython_source = self._pyx_template.format(**filling)
setup_py = self._setup_py_template.format(**filling)
return setup_py, cython_source, c_header, c_source
def write(self, path=None):
"""Writes the four source files needed to compile the Cython
function to the current working directory.
Parameters
==========
path : string
The absolute or relative path to an existing directory to place
the files instead of the cwd.
"""
if path is None:
path = os.getcwd()
self.c_matrix_generator.write(self.prefix + '_c')
setup_py, pyx, c_header, c_source = self.doprint()
with open(os.path.join(path, self.prefix + '_setup.py'), 'w') as f:
f.write(setup_py)
with open(os.path.join(path, self.prefix + '.pyx'), 'w') as f:
f.write(pyx)
def compile(self, tmp_dir=None):
"""Returns a function which evaluates the matrices.
Parameters
==========
tmp_dir : string
The path to an existing or non-existing directory where all of
the generated files will be stored.
"""
base_prefix = self.prefix
if tmp_dir is None:
codedir = tempfile.mkdtemp(".pydy_compile")
else:
codedir = os.path.abspath(tmp_dir)
if not os.path.exists(codedir):
os.makedirs(codedir)
self.prefix = '{}_{}'.format(base_prefix,
CythonMatrixGenerator._module_counter)
workingdir = os.getcwd()
os.chdir(codedir)
try:
sys.path.append(codedir)
self.write()
cmd = [sys.executable, self.prefix + '_setup.py', 'build_ext',
'--inplace']
subprocess.call(cmd, stderr=subprocess.STDOUT,
stdout=subprocess.PIPE)
cython_module = importlib.import_module(self.prefix)
except:
raise Exception('Failed to compile and import Cython module.')
finally:
sys.path.remove(codedir)
CythonMatrixGenerator._module_counter += 1
os.chdir(workingdir)
if tmp_dir is None:
shutil.rmtree(codedir)
self.prefix = base_prefix
return getattr(cython_module, 'eval')
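# A minimal usage sketch (hypothetical, not part of the original module): it
# assumes SymPy, NumPy, Cython and a C compiler are available, and that the
# generated ``eval`` takes one flattened NumPy array per argument sequence
# followed by one pre-allocated output array per matrix, as the code blocks
# built in _generate_code_blocks suggest. The symbols and matrix are
# illustrative only.
#
#     import numpy as np
#     import sympy as sm
#     a, b = sm.symbols('a b')
#     gen = CythonMatrixGenerator([(a, b)], [sm.Matrix([a + b, a*b])])
#     eval_matrices = gen.compile()
#     out = np.empty(2)
#     eval_matrices(np.array([1.0, 2.0]), out)  # out should become [3.0, 2.0]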
|
StarcoderdataPython
|
93197
|
import os
from pyBigstick.nucleus import Nucleus
import streamlit as st
import numpy as np
import plotly.express as px
from barChartPlotly import plotly_barcharts_3d
from PIL import Image
he4_image = Image.open('assets/he4.png')
nucl_image = Image.open('assets/nucl_symbol.png')
table_image = Image.open('assets/table.jpg')
scattering_image = Image.open('assets/scattering.jpeg')
deexcitation_image = Image.open('assets/deexcitation.png')
lvl_image = Image.open('assets/Energy_levels.png')
shells_image = Image.open('assets/shells.png')
bs = os.getcwd() +'/src/bigstick.x'
header_container = st.container()
intro_container = st.container()
bs_container = st.container()
states_container = st.container()
densities_container = st.container()
hide_table_row_index = """
<style>
tbody th {display:none}
.blank {display:none}
</style>
"""
light_nuclei = ['F', 'Ne', 'Na', 'Mg', 'Al', 'Si', 'P', 'S', 'Cl']
with header_container:
st.title('pyBigstick')
st.markdown("""This streamlit app visualizes the nuclear transitions calculated by [pyBigstick](https://github.com/noctildon/pyBigstick),
including the energy levels and the density matrices.""")
with intro_container:
st.subheader("Basic knowledge about nuclear physics")
    st.markdown('Physicists usually use a symbol + 2 numbers to represent a unique nucleus')
st.image(nucl_image, width=500)
    st.markdown('For example, the following represents He (Helium) with mass number 4 and atomic number 2, or equivalently 2 protons and 2 neutrons.')
st.image(he4_image,width=300)
    st.markdown('The atomic number is uniquely determined by the element symbol, so it is sometimes omitted.')
st.text('And this is the well-known periodic table')
st.image(table_image,width=800)
    st.markdown('Experimentalists use neutrinos (extremely small and light particles) to hit the nucleus. This process is called "scattering".')
st.image(scattering_image,width=800)
st.markdown("""Before scattering the nucleus has lowest possbile energy (ground state). After scattering nucleus gain some energy from the neutrinos,
being called "excited nucleus" or "excited state". Then there is a chance that the excited nucleus would drop back to the ground state by emitting gamma ray.
""")
col1, col2 = st.columns(2)
with col1:
st.image(deexcitation_image,width=400)
with col2:
st.image(lvl_image,width=400)
st.markdown("""What happen in the scattering is that some of the nucleons get excited to the orbit with high energy.
The core algorithm of pyBigstick is to iterate all possible combinations and transitions of the nucleons.
And the density matrices describe how nucleons move among the orbits by talking us a probability-like value.
""")
st.image(shells_image,width=700)
with bs_container:
st.subheader("Control panel")
st.markdown("""Input the info of the interested nucleus, eg. F19, Be10.
Not sure which nucleus to pick? check out [this](https://periodictable.com/Isotopes/009.19/index.html).
(Not all of the nucleus is possible to calculate).""")
col1_1, col1_2 = st.columns(2)
with col1_1:
s1 = st.selectbox('The nucleus to calculate', light_nuclei, index=0)
with col1_2:
s2 = st.selectbox('Mass number', range(9,41), index=10)
col2_1, col2_2 = st.columns(2)
with col2_1:
        n_states = st.selectbox('Number of states to calculate (more states always take more time)', range(1,7), index=2)
with col2_2:
        maxiter = st.selectbox('Number of iterations (more iterations give more accurate results, but take longer)', np.arange(50,510,10), index=5)
s1 = s1.lower()
nucl_name = f'{s1}{s2}'
    st.text(f'Calculating {nucl_name}...')
nu = Nucleus(nucl_name, n_states=n_states, maxiter=maxiter)
if st.button('Clean the previous result and rerun'):
        st.write(f'Successfully cleaned nucleus {nucl_name}. Running {nucl_name} again...')
nu.clean()
if not nu.check():
nu.script_save()
nu.prepare()
nu.run(bs=bs)
nu.save_results()
with states_container:
st.subheader('Energy level states')
st.markdown("""When the scattering happens to a nucleus, the nucleus could be excited to higher state.
In general, initially the nucleus is in ground state (the state with the lowest energy).
Then after scattering, it is excited to some higher state with energy higher than ground state.
We also label the state with n. Ground state has n=1. First excited state has n=2. Second excited has n=3, and so on.""")
fig = px.bar(nu.states, x='state', y='Ex',
labels={
'state': 'State',
'Ex': 'Excitation energy (MeV)'
})
st.plotly_chart(fig, use_container_width=True)
if st.checkbox('Show all states data'):
st.text('States')
st.write(nu.states)
with densities_container:
st.subheader('Density matrices')
st.markdown("""The amp (transition amplitdue) in the last column below is (to some degree) proportional to the probability that
a nucleon moves from one orbit to another, given the condition that the nucleus jumps from one state to another (say from n=1 to n=2).
Jt and Tt are the spin and isospin of the transition, respectively. They are the attributes of a transition.
A transition could have multiple values of Jt. Tt can be either 0 or 1. Most of the amp is zero.""")
col1, col2, col3, col4 = st.columns(4)
with col1:
statei = st.selectbox('Initial state', nu.states['state'])
with col2:
statej = st.selectbox('Final state', nu.states['state'])
with col3:
Jt = st.selectbox('Jt', np.unique(nu.densities['Jt']))
with col4:
Tt = st.selectbox('Tt', [0,1])
filter_densities = nu.densities.loc[(nu.densities['statei']==statei) & (nu.densities['statej']==statej) &\
(nu.densities['Jt']==Jt) & (nu.densities['Tt']==Tt)]
st.subheader('Non-zero elements')
st.markdown(hide_table_row_index, unsafe_allow_html=True)
st.table(filter_densities)
st.subheader('3D plot of the density matrices')
st.text('The plot only shows non-zero elements.')
if not filter_densities.empty:
fig = plotly_barcharts_3d(filter_densities['orba'], filter_densities['orbb'], filter_densities['amp'],
x_title='orbit a', y_title='orbit b', z_title='amp')
fig.update_layout(width=700, height=700, yaxis = dict(scaleanchor = 'x'))
st.plotly_chart(fig, use_container_width=True)
else:
st.text('All elements are zero, so the plot is skipped.')
if st.checkbox('Show all raw densities data'):
st.text('Density matrices')
st.write(nu.densities)
|
StarcoderdataPython
|
5007040
|
# coding: utf-8
from __future__ import annotations
from datetime import date, datetime # noqa: F401
import re # noqa: F401
from typing import Any, Dict, List, Optional # noqa: F401
from pydantic import AnyUrl, BaseModel, EmailStr, validator # noqa: F401
from acapy_wrapper.models.attach_decorator import AttachDecorator
from acapy_wrapper.models.v20_cred_format import V20CredFormat
from acapy_wrapper.models.v20_cred_preview import V20CredPreview
class V20CredProposal(BaseModel):
"""NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
Do not edit the class manually.
V20CredProposal - a model defined in OpenAPI
id: The id of this V20CredProposal [Optional].
type: The type of this V20CredProposal [Optional].
comment: The comment of this V20CredProposal [Optional].
credential_preview: The credential_preview of this V20CredProposal [Optional].
filtersattach: The filtersattach of this V20CredProposal.
formats: The formats of this V20CredProposal.
"""
id: Optional[str] = None
type: Optional[str] = None
comment: Optional[str] = None
credential_preview: Optional[V20CredPreview] = None
filtersattach: List[AttachDecorator]
formats: List[V20CredFormat]
V20CredProposal.update_forward_refs()
|
StarcoderdataPython
|
8123371
|
# -*- coding: iso-8859-1 -*-
# Maintainer: joaander
from hoomd import *
from hoomd import md
context.initialize()
import unittest
import os
import math
import numpy
# tests dihedral.table
class dihedral_table_tests (unittest.TestCase):
def setUp(self):
print
snap = data.make_snapshot(N=40,
box=data.boxdim(L=100),
particle_types = ['A'],
bond_types = [],
angle_types = [],
dihedral_types = ['dihedralA'],
improper_types = [])
if comm.get_rank() == 0:
snap.dihedrals.resize(10);
for i in range(10):
x = numpy.array([i, 0, 0], dtype=numpy.float32)
numpy.random.seed(10)
snap.particles.position[4*i+0,:] = x;
x += numpy.random.random(3) * 0.1;
snap.particles.position[4*i+1,:] = x;
x += numpy.random.random(3) * 0.1;
snap.particles.position[4*i+2,:] = x;
x += numpy.random.random(3) * 0.1;
snap.particles.position[4*i+3,:] = x;
snap.dihedrals.group[i,:] = [4*i+0, 4*i+1, 4*i+2, 4*i+3];
self.sys = init.read_snapshot(snap)
context.current.sorter.set_params(grid=8)
    # test to see that we can create a dihedral.table
def test_create(self):
md.dihedral.table(width=100);
# test setting the table
def test_set_coeff(self):
def har(theta, kappa, theta_0):
V = 0.5 * kappa * (theta-theta_0)**2;
T = -kappa*(theta-theta_0);
return (V, T)
harmonic = md.dihedral.table(width=1000)
harmonic.dihedral_coeff.set('dihedralA', func=har, coeff=dict(kappa=1,theta_0=.1))
all = group.all();
md.integrate.mode_standard(dt=0.005);
md.integrate.nve(all);
run(100);
# test coefficient not set checking
def test_set_coeff_fail(self):
harmonic = md.dihedral.table(width=123)
all = group.all();
md.integrate.mode_standard(dt=0.005);
md.integrate.nve(all);
self.assertRaises(RuntimeError, run, 100);
# compare against harmonic dihedral
def test_harmonic_compare(self):
harmonic_1 = md.dihedral.table(width=1000)
harmonic_1.dihedral_coeff.set('dihedralA', func=lambda theta: (0.5*1*( 1 + math.cos(theta)), 0.5*1*math.sin(theta)),coeff=dict())
harmonic_2 = md.dihedral.harmonic()
harmonic_2.dihedral_coeff.set('dihedralA', k=1.0, d=1,n=1)
md.integrate.mode_standard(dt=0.005);
all = group.all()
md.integrate.nve(all)
run(1)
for i in range(len(self.sys.particles)):
f_1 = harmonic_1.forces[i]
f_2 = harmonic_2.forces[i]
# we have to have a very rough tolerance (~10%) because
# of 1) discretization of the potential and 2) different handling of precision issues in both potentials
self.assertAlmostEqual(f_1.energy, f_2.energy,3)
self.assertAlmostEqual(f_1.force[0], f_2.force[0],1)
self.assertAlmostEqual(f_1.force[1], f_2.force[1],1)
self.assertAlmostEqual(f_1.force[2], f_2.force[2],1)
def tearDown(self):
del self.sys
context.initialize();
if __name__ == '__main__':
unittest.main(argv = ['test.py', '-v'])
|
StarcoderdataPython
|
5167587
|
<reponame>andreyvit/pyjamas
def init():
JS("""
// Set up event dispatchers.
$wnd.__dispatchEvent = function() {
if ($wnd.event.returnValue == null) {
$wnd.event.returnValue = true;
if (!DOM.previewEvent($wnd.event))
return;
}
var listener, curElem = this;
while (curElem && !(listener = curElem.__listener))
curElem = curElem.parentElement;
if (listener)
DOM.dispatchEvent($wnd.event, curElem, listener);
};
$wnd.__dispatchDblClickEvent = function() {
var newEvent = $doc.createEventObject();
this.fireEvent('onclick', newEvent);
if (this.__eventBits & 2)
$wnd.__dispatchEvent.call(this);
};
$doc.body.onclick =
$doc.body.onmousedown =
$doc.body.onmouseup =
$doc.body.onmousemove =
$doc.body.onkeydown =
$doc.body.onkeypress =
$doc.body.onkeyup =
$doc.body.onfocus =
$doc.body.onblur =
$doc.body.ondblclick = $wnd.__dispatchEvent;
""")
def compare(elem1, elem2):
JS("""
if (!elem1 && !elem2)
return true;
else if (!elem1 || !elem2)
return false;
return (elem1.uniqueID == elem2.uniqueID);
""")
def createInputRadio(group):
JS("""
return $doc.createElement("<INPUT type='RADIO' name='" + group + "'>");
""")
def eventGetCurrentTarget(event):
return event.currentEventTarget
def eventGetTarget(evt):
JS("""
var elem = evt.srcElement;
return elem ? elem : null;
""")
def eventGetToElement(evt):
JS("""
return evt.toElement ? evt.toElement : null;
""")
def eventPreventDefault(evt):
JS("""
evt.returnValue = false;
""")
def eventToString(evt):
JS("""
if (evt.toString) return evt.toString();
return "[object Event]";
""")
def getBodyOffsetTop():
JS("""
return $doc.body.parentElement.clientTop;
""")
def getBodyOffsetLeft():
JS("""
return $doc.body.parentElement.clientLeft;
""")
def getAbsoluteLeft(elem):
JS("""
// getBoundingClientRect() throws a JS exception if the elem is not attached
// to the document, so we wrap it in a try/catch block
var zoomMultiple = $doc.body.parentElement.offsetWidth /
$doc.body.offsetWidth;
try {
return Math.floor((elem.getBoundingClientRect().left / zoomMultiple) +
$doc.body.parentElement.scrollLeft );
} catch (e) {
return 0;
}
""")
def getAbsoluteTop(elem):
JS("""
// getBoundingClientRect() throws a JS exception if the elem is not attached
// to the document, so we wrap it in a try/catch block
var zoomMultiple = $doc.body.parentElement.offsetWidth /
$doc.body.offsetWidth;
try {
return Math.floor((elem.getBoundingClientRect().top / zoomMultiple) +
$doc.body.parentElement.scrollTop );
} catch (e) {
return 0;
}
""")
def getChild(elem, index):
JS("""
var child = elem.children[index];
return child ? child : null;
""")
def getChildCount(elem):
JS("""
return elem.children.length;
""")
def getChildIndex(parent, child):
JS("""
var count = parent.children.length;
for (var i = 0; i < count; ++i) {
if (child.uniqueID == parent.children[i].uniqueID)
return i;
}
return -1;
""")
def getFirstChild(elem):
JS("""
var child = elem.firstChild;
return child ? child : null;
""")
def getInnerText(elem):
JS("""
var ret = elem.innerText;
return (ret == null) ? null : ret;
""")
def getNextSibling(elem):
JS("""
var sib = elem.nextSibling;
return sib ? sib : null;
""")
def getParent(elem):
JS("""
var parent = elem.parentElement;
return parent ? parent : null;
""")
def insertChild(parent, child, index):
JS("""
if (index == parent.children.length)
parent.appendChild(child);
else
parent.insertBefore(child, parent.children[index]);
""")
def insertListItem(select, text, value, index):
JS("""
var newOption = document.createElement("Option");
if(index==-1) {
select.add(newOption);
} else {
select.add(newOption,index);
}
newOption.text=text;
newOption.value=value;
""")
def isOrHasChild(parent, child):
JS("""
while (child) {
if (parent.uniqueID == child.uniqueID)
return true;
child = child.parentElement;
}
return false;
""")
def releaseCapture(elem):
JS("""
elem.releaseCapture();
""")
def setCapture(elem):
JS("""
elem.setCapture();
""")
def setInnerText(elem, text):
JS("""
if (!text)
text = '';
elem.innerText = text;
""")
def sinkEvents(elem, bits):
JS("""
elem.__eventBits = bits;
elem.onclick = (bits & 0x00001) ? $wnd.__dispatchEvent : null;
elem.ondblclick = (bits & 0x00002) ? $wnd.__dispatchDblClickEvent : null;
elem.onmousedown = (bits & 0x00004) ? $wnd.__dispatchEvent : null;
elem.onmouseup = (bits & 0x00008) ? $wnd.__dispatchEvent : null;
elem.onmouseover = (bits & 0x00010) ? $wnd.__dispatchEvent : null;
elem.onmouseout = (bits & 0x00020) ? $wnd.__dispatchEvent : null;
elem.onmousemove = (bits & 0x00040) ? $wnd.__dispatchEvent : null;
elem.onkeydown = (bits & 0x00080) ? $wnd.__dispatchEvent : null;
elem.onkeypress = (bits & 0x00100) ? $wnd.__dispatchEvent : null;
elem.onkeyup = (bits & 0x00200) ? $wnd.__dispatchEvent : null;
elem.onchange = (bits & 0x00400) ? $wnd.__dispatchEvent : null;
elem.onfocus = (bits & 0x00800) ? $wnd.__dispatchEvent : null;
elem.onblur = (bits & 0x01000) ? $wnd.__dispatchEvent : null;
elem.onlosecapture = (bits & 0x02000) ? $wnd.__dispatchEvent : null;
elem.onscroll = (bits & 0x04000) ? $wnd.__dispatchEvent : null;
elem.onload = (bits & 0x08000) ? $wnd.__dispatchEvent : null;
elem.onerror = (bits & 0x10000) ? $wnd.__dispatchEvent : null;
elem.oncontextmenu = (bits & 0x20000) ? $wnd.__dispatchEvent : null;
""")
def toString(elem):
JS("""
return elem.outerHTML;
""")
|
StarcoderdataPython
|
6495814
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 06 17:39:01 2015
@author: tsz
"""
from __future__ import division
import numpy as np
class PV(object):
"""
Implementation of the PV class.
"""
def __init__(self, environment, method, area=0.0, peak_power=0.0, eta_noct=0.18, radiation_noct=1000.0,
t_cell_noct=45.0, t_ambient_noct=20.0, alpha_noct=0, beta=0, gamma=0, tau_alpha=0.9):
"""
Parameters
----------
environment : environment object
Common to all other objects. Includes time and weather instances
method : integer
- `0` : Calculate PV power based on an area in m^2 equipped with PV panels
- `1` : Calculate PV power based on the installed PV peak power in kWp
area : float, optional
PV unit installation area in m^2
Requires ``method=0``
peak_power : float, optional
PV peak power installation in kWp
Requires ``method=1``
eta_noct : float, optional
Electrical efficiency at NOCT conditions (without unit)
NOCT conditions: See manufacturer's data sheets or
Duffie, Beckman - Solar Engineering of Thermal Processes (4th ed.), page 759
Requires ``method=0``
radiation_noct : float, optional
Nominal solar radiation at NOCT conditions (in W/m^2)
NOCT conditions: See manufacturer's data sheets or
Duffie, Beckman - Solar Engineering of Thermal Processes (4th ed.), page 759
t_cell_noct : float, optional
Nominal cell temperature at NOCT conditions (in degree Celsius)
NOCT conditions: See manufacturer's data sheets or
Duffie, Beckman - Solar Engineering of Thermal Processes (4th ed.), page 759
t_ambient_noct : float, optional
Nominal ambient air temperature at NOCT conditions (in degree Celsius)
NOCT conditions: See manufacturer's data sheets or
Duffie, Beckman - Solar Engineering of Thermal Processes (4th ed.), page 759
alpha_noct : float, optional
Temperature coefficient at NOCT conditions (without unit)
NOCT conditions: See manufacturer's data sheets or
Duffie, Beckman - Solar Engineering of Thermal Processes (4th ed.), page 759
beta : float, optional
Slope, the angle (in degree) between the plane of the surface in
question and the horizontal. 0 <= beta <= 180. If beta > 90, the
surface faces downwards.
gamma : float, optional
Surface azimuth angle. The deviation of the projection on a
horizontal plane of the normal to the surface from the local
meridian, with zero due south, east negative, and west positive.
-180 <= gamma <= 180
tau_alpha : float
Optical properties of the PV unit. Product of absorption and
            transmission coefficients.
According to Duffie, Beckman - Solar Engineering of Thermal
Processes (4th ed.), page 758, this value is typically close to 0.9
"""
self._kind = "pv"
self.environment = environment
self.method = method
self.area = area
self.peak_power = peak_power
self.eta_noct = eta_noct
self.radiation_noct = radiation_noct
self.t_cell_noct = t_cell_noct
self.t_ambient_noct = t_ambient_noct
self.alpha_noct = alpha_noct
self.beta = beta
self.gamma = gamma
self.tau_alpha = tau_alpha
self.total_power = np.zeros(environment.timer.timesteps_total)
self.total_radiation = np.zeros(environment.timer.timesteps_total)
self.current_power = np.zeros(environment.timer.timesteps_horizon)
@property
def kind(self):
return self._kind
def getNominalValues(self):
"""
        Return the collector's efficiency, nominal radiation, nominal cell
        temperature, nominal ambient temperature and temperature coefficient.
"""
return (self.eta_noct, self.radiation_noct, self.t_cell_noct, self.t_ambient_noct, self.alpha_noct)
def _computePowerArea(self, currentValues=True):
"""
Compute PV electric output power based on a certain area equipped with PV panels
Parameters
----------
currentValues : bool, optional
If True, returns values of current horizon (default: True).
If False, returns annual values.
Returns
-------
res_tuple : tuple
2d tuple holding power array in Watt and radiation array in W/m^2
"""
# Get radiation on a tilted surface
radiation = self.environment.weather.getRadiationTiltedSurface(beta=self.beta,
gamma=self.gamma,
update=True,
currentValues=currentValues)
# If no temperature coefficient is given, a simple PV model is applied
if self.alpha_noct == 0:
power = self.area * self.eta_noct * radiation[0]
else:
# Get ambient temperature
getTemperature = self.environment.weather.getWeatherForecast
t_ambient = getTemperature(getTAmbient=True, currentValues=currentValues)
# Compute the cell temperature.
# Assumption: Wind velocity is 1 m/s (same as NOCT conditions)
# The resulting equation is based on equation 23.3.3 (page 758,
# <NAME> - Solar Engineering of Thermal Processes, 4th ed)
# as well as equation 3 (Skroplaki, Palyvos - 2009 - On the
# temperature dependence of photovoltaic module electrical
# performance. A review of efficiency-power correlations.)
# Introduce a few abbreviations
a1 = (self.t_cell_noct - self.t_ambient_noct) * radiation[0] / self.radiation_noct
denominator = 1 - a1 * self.alpha_noct * self.eta_noct / self.tau_alpha
numerator = 1 - self.alpha_noct * (t_ambient[0] - self.t_cell_noct + a1)
eta = self.eta_noct * numerator / denominator
# Compute power
power = self.area * eta * radiation[0]
return (power, radiation[0])
def _computePowerPeakInstallation(self, currentValues=True):
"""
Compute PV electric output power based on a given PV peak power installation
Parameters
----------
currentValues : bool, optional
If True, returns values of current horizon (default: True).
If False, returns annual values.
Returns
-------
res_tuple : tuple
2d tuple holding power array in Watt and radiation array in W/m^2
"""
# Get radiation on a tilted surface
radiation = self.environment.weather.getRadiationTiltedSurface(beta=self.beta,
gamma=self.gamma,
update=True,
currentValues=currentValues)
# Get ambient temperature
getTemperature = self.environment.weather.getWeatherForecast
t_ambient = getTemperature(getTAmbient=True, currentValues=currentValues)
# Calculation of PV power output according to:
# "A novel model for photovoltaic array performance prediction"
# <NAME> et. al., in Applied Energy 84 (2007), pp. 1187-1198
# Constants:
q = 1.602 * np.power(10.0, -19.0)
n = 1.3
K = 1.38 * np.power(10.0, -23.0)
Kelvin = 273.15
# Reference module parameters (monocrystalline module):
P_max_mp = 250.0
V_mp = 31.1
I_mp = 8.05
R_s = 0.012
Alpha_module = 1.21
Beta_module = 0.058
Gamma_module = 0.012
# Variables:
I_sc = 0.0 # Short circuit current
V_oc = 0.0 # Open circuit voltage
V_oc_norm = 0.0 # Normalized open circuit voltage
FF_o = 0.0 # Fill factor at ideal PV module
FF = 0.0 # Fill factor
P_module = np.zeros(len(radiation[0])) # PV power output per module
        # Straightforward calculation of PV power:
for i in range(len(radiation[0])):
if radiation[0][i] <= 0.0:
I_sc = 0.0
else:
I_sc = I_mp * np.power((radiation[0][i] / self.radiation_noct), Alpha_module)
if (t_ambient[0][i] + Kelvin) == 0.0 or radiation[0][i] <= 0.0:
V_oc = 0.0
else:
if (1 + Beta_module * np.log(self.radiation_noct / radiation[0][i])) == 0.0:
V_oc = 0.0
else:
V_oc = (V_mp / (1 + Beta_module * np.log(self.radiation_noct / radiation[0][i]))) * \
np.power(((self.t_ambient_noct + Kelvin) / (t_ambient[0][i] + Kelvin)), Gamma_module)
if (t_ambient[0][i] + Kelvin) == 0.0:
V_oc_norm = 0.0
else:
V_oc_norm = (V_oc / (n * K * (t_ambient[0][i] + Kelvin) / q))
if (1 + V_oc_norm) == 0.0:
FF_o = 0.0
else:
FF_o = (V_oc_norm - np.log(V_oc_norm + 0.72)) / (1 + V_oc_norm)
if V_oc == 0.0 or I_sc == 0.0:
FF = 0.0
else:
FF = FF_o * (1 - (R_s / (V_oc / I_sc)))
P_module[i] = FF * V_oc * I_sc
n_modules = int(1000.0*self.peak_power/P_max_mp)
power = np.array(P_module)*n_modules
return (power, radiation[0])
def getPower(self, currentValues=True, updatePower=True):
"""
Get the PV generation.
Parameters
----------
currentValues : Boolean, optional
- True : Return the PV generation array for the current forecasting
horizon
- False : Return the entire PV generation array for all previous
time steps
updatePower : Boolean, optional
- True: Compute the PV generation forecast for the upcoming horizon
- False: Do not compute a new PV generation forecast
"""
current_timestep = self.environment.timer.current_timestep
timesteps = self.environment.timer.timesteps_horizon
if self.method in (0, 1):
if updatePower:
if self.method == 0:
(current_power, currentRadiation) = self._computePowerArea(currentValues=currentValues)
elif self.method == 1:
(current_power, currentRadiation) = self._computePowerPeakInstallation(currentValues=currentValues)
if currentValues:
self.current_power = current_power
self.total_power[current_timestep:(current_timestep + timesteps)] = current_power
self.total_radiation[current_timestep:(current_timestep + timesteps)] = currentRadiation
return self.current_power
else:
self.total_power = current_power
self.total_radiation = currentRadiation
return self.total_power
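# A minimal usage sketch (hypothetical, not part of the original module): it
# assumes an `environment` object from the surrounding package that provides
# the timer and weather instances described in the constructor docstring; the
# numeric values are illustrative only.
#
#     pv = PV(environment, method=1, peak_power=10.0, beta=30.0, gamma=0.0)
#     power = pv.getPower()  # power array in W for the current forecast horizon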
|
StarcoderdataPython
|
3429345
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2012 pyReScene
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
from __future__ import division
import optparse
import sys
import os
import glob
import re
try:
import _preamble
except ImportError:
pass
import rescene
from rescene.rar import RarReader, BlockType, COMPR_STORING
def check_compression(srr_file):
for block in RarReader(srr_file):
if block.rawtype == BlockType.RarPackedFile:
if block.compression_method != COMPR_STORING:
return True
return False
def check_empty(srr_file):
for block in RarReader(srr_file):
if block.rawtype == BlockType.RarPackedFile:
return False
return True
def check_image(srr_file, noproof):
    # ".jpeg" needs the leading dot so it matches os.path.splitext() output
    images = (".jpg", ".jpeg", ".png", ".bmp", ".gif")
    for block in RarReader(srr_file):
        if (block.rawtype == BlockType.SrrStoredFile and
                os.path.splitext(block.file_name)[1] in images):
            if noproof and "proof" in block.file_name.lower():
                return False
            return True
    return False
def check_repack(srr_file):
tmatch = ("rpk", "repack", "-r.part01.rar", "-r.rar")
for block in RarReader(srr_file):
if block.rawtype == BlockType.SrrRarFile:
matchf = lambda keyword: keyword in block.file_name
if any(map(matchf, tmatch)):
return True
return False
def check_nfos(srr_file):
    # count NFOs across all blocks (the counter must not be reset per block)
    nfo_count = 0
    for block in RarReader(srr_file):
        if (block.rawtype == BlockType.SrrStoredFile and
                block.file_name[-4:].lower() == ".nfo"):
            nfo_count += 1
    return nfo_count > 1
def check_duplicates(srr_file):
found = []
for block in RarReader(srr_file):
if (block.rawtype == BlockType.SrrStoredFile):
if found.count(block.file_name):
return True
found.append(block.file_name)
return False
def check_for_possible_nonscene(srr_file):
for block in RarReader(srr_file):
if (block.rawtype == BlockType.SrrRarFile and
block.file_name != block.file_name.lower()):
return True
return False
def check_availability_stored_files(srr_file):
for block in RarReader(srr_file):
if block.rawtype == BlockType.SrrStoredFile:
return False
return True
def check_for_no_ext(srr_file, extension):
for block in RarReader(srr_file):
if (block.rawtype == BlockType.SrrStoredFile and
block.file_name.lower().endswith(extension)):
return False
return True
def check_for_ext(srr_file, extension):
for block in RarReader(srr_file):
if (block.rawtype == BlockType.SrrStoredFile and
block.file_name.lower().endswith(extension)):
return True
return False
rar_sizes = 0 # bytes
def check(srr_file):
try:
result = False
if options.verify or options.multiple:
info = rescene.info(srr_file)
global rar_sizes
rar_sizes += sum([info['rar_files'][f].file_size
for f in info['rar_files']])
if options.multiple:
sets = []
for f in info["rar_files"]:
ms = "^(.*?)(.part\d+.rar|(.[rstuv]\d\d|.rar))$"
base = re.match(ms, f, re.IGNORECASE).group(1)
if not base in sets:
sets.append(base)
result |= len(info["archived_files"]) > len(sets)
# print(sets) # useful to check ordering
if options.dirfix:
if "dirfix" in srr_file.lower() or "nfofix" in srr_file.lower():
print(srr_file)
if options.lowercase:
group = srr_file[:-4].rsplit("-")[-1]
if group == group.lower():
result |= True
if "." in group: # does not have a group name
result |= True
fn = os.path.split(srr_file)[1]
if fn == fn.lower():
result |= True
if options.compressed:
result |= check_compression(srr_file)
if options.empty:
result |= check_empty(srr_file)
if options.image or options.noproof:
result |= check_image(srr_file, options.noproof)
if options.repack:
result |= check_repack(srr_file)
if options.nfos:
result |= check_nfos(srr_file)
if options.duplicates:
result |= check_duplicates(srr_file)
if options.peer2peer:
result |= check_for_possible_nonscene(srr_file)
if options.nofiles:
result |= check_availability_stored_files(srr_file)
if options.nosfv:
result |= check_for_no_ext(srr_file, ".sfv")
if options.nonfo:
result |= check_for_no_ext(srr_file, ".nfo")
if options.txt:
result |= check_for_ext(srr_file, ".txt")
if result and options.output_dir:
print("Moving %s." % srr_file)
srr_name = os.path.basename(srr_file)
# move the SRR to the given directory
os.renames(srr_file, os.path.join(options.output_dir, srr_name))
if result:
print(os.path.basename(srr_file))
except (EnvironmentError, Exception) as err:
# the storing of a srr_file failed -> corrupt SRR
print("Something wrong with reading %s" % srr_file)
print(err)
def main(options, args):
for element in args:
if os.path.isdir(element):
for srr_file in glob.iglob(element + "/*.srr"):
check(srr_file)
elif os.path.isfile(element) and element.endswith(".srr"):
check(element)
else:
print("WTF are you supplying me?")
if rar_sizes:
print("%d bytes" % rar_sizes)
print("%.2f KiB" % (rar_sizes / 1024))
print("%.2f MiB" % (rar_sizes / 1024 / 1024))
print("%.2f GiB" % (rar_sizes / 1024 / 1024 / 1024))
print("%.2f TiB" % (rar_sizes / 1024 / 1024 / 1024 / 1024))
if __name__ == '__main__':
parser = optparse.OptionParser(
usage="Usage: %prog [directories] [srrs] [options]'\n"
"This tool will list compressed, empty or SRR files with images.\n"
"It optionally moves them to a given output directory.",
version="%prog 1.0 (2012-09-20)") # --help, --version
parser.add_option("-c", "--compressed", help="list compressed SRRs",
action="store_true", dest="compressed", default=False)
parser.add_option("-e", "--empty", help="list SRRs with no RAR data",
action="store_true", dest="empty", default=False)
parser.add_option("-v", "--verify", help="check whole SRR for correctness "
"and return full RAR sizes at the end",
action="store_true", dest="verify", default=False)
parser.add_option("-t", "--nfos", help="two or more NFOs",
action="store_true", dest="nfos", default=False)
parser.add_option("-u", "--duplicates", help="the same file is stored twice",
action="store_true", dest="duplicates", default=False)
parser.add_option("-p", "--noproof", help="list SRRs with images that "
"do not contain the word proof",
action="store_true", dest="noproof", default=False)
parser.add_option("-i", "--image", help="list SRRs with stored images",
action="store_true", dest="image", default=False)
parser.add_option("-f", "--nofiles", help="list SRRs if no files are stored",
action="store_true", dest="nofiles", default=False)
parser.add_option("-m", "--multiple",
help="list SRRs with multiple archived files",
action="store_true", dest="multiple", default=False)
parser.add_option("-s", "--nosfv", help="list SRRs without SFV",
action="store_true", dest="nosfv", default=False)
parser.add_option("-n", "--nonfo", help="list SRRs without NFO",
action="store_true", dest="nonfo", default=False)
parser.add_option("--txt", help="list SRRs with TXT files",
action="store_true", dest="txt", default=False)
parser.add_option("-r", "--repack", help="list SRRs with -rpk., -r. in RAR name",
action="store_true", dest="repack", default=False)
parser.add_option("-2", "--p2p", help="not all RARs are lower case",
action="store_true", dest="peer2peer", default=False)
parser.add_option("-l", help="list lower case/no group names",
action="store_true", dest="lowercase", default=False)
parser.add_option("-d", "--dirfix", help="dirfixes and nfofixes",
action="store_true", dest="dirfix", default=False)
parser.add_option("-o", dest="output_dir", metavar="DIRECTORY",
help="moves the matched SRR files to the given DIRECTORY")
# no arguments given
if len(sys.argv) < 2:
print(parser.format_help())
else:
(options, args) = parser.parse_args()
main(options, args)
"""
>>> list = os.listdir(".")
>>> def get_name(name):
... return name[:-4].rsplit("-")[-1]
...
>>> get_name("Zebraman.2.Attack.On.Zebra.City.2011.720p.BluRay.x264-Japhson.srr")
'Japhson'
>>> for e in list:
... n = get_name(e)
... if n == n.lower():
... print(e)
...
"""
|
StarcoderdataPython
|
6436451
|
<filename>stools-core/src/dev/python/scheduler.py
__author__ = 'lfischer'
# @author <NAME>
#
# Copyright 2016 University of Zurich
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import time
import collections
from kazoo.client import KazooClient
from kazoo.recipe.watchers import DataWatch
from kazoo.protocol.states import EventType
import logging
import json
#
# This script reads the sendgraph of a storm topology from zookeeper, creates a
# schedule based on the sendgraph and write this schedule back into zookeeper.
#
# At the moment there are two methods supported:
# - scheduleOneWorker -> schedules all work to one worker
# - schedulerMetis -> use METIS to schedule the work
#
#zk_connect_string = "127.0.0.1:2000"
#zk_connect_string = "claudio01.ifi.uzh.ch:2181"
zk_connect_string = "tentacle.ifi.uzh.ch:2181"
metrics_zk_path = "/stools/scheduler/metrics" # where we expect the metrics to be written to
schedule_zk_path = "/stools/scheduler/schedule" # where we will write the schedules to
#workerbeats_zk_path = "/storm/workerbeats/" # where we go to find out how many workers per topo there are
metis_binary = "/Users/lfischer/tools/metis-5.1.0/build/Darwin-x86_64/programs/gpmetis"
# def create_empty_sendgraph():
# """ This method creates a two dimentional default dictionary that can be used to reconstruct the sendgraph. """
# return collections.defaultdict(collections.defaultdict)
def byteArrayToInt(byteArray):
""" This method converts the contents of the ZK nodes, which were long values in Java, into int values. """
return int(byteArray.encode('hex'), 16)
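# A worked example (this module targets Python 2, where str.encode('hex') is
# available): byteArrayToInt('\x01\x00') -> int('0100', 16) -> 256.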
def scheduleOneWorker(num_workers, jsonSendGraph):
"""This scheduler assigns all work to only one scheduler."""
all_tasks = set()
schedule = ""
# get a list of all tasks
for node in jsonSendGraph['nodes']:
all_tasks.add(int(node['name']))
print("assigning all tasks to one partition.")
# we simulate the output of a metis schedule, which one line per vertex, each having one number
# which is the partition (worker) it should be assigned to.
for task in all_tasks:
if len(schedule) > 0:
schedule += "\n"
schedule += "0"
return schedule
def createMetisGraphFile(jsonSendgraph):
"""This method creates the contents that a graph input file needs to have in order to be processable by METIS
(http://glaros.dtc.umn.edu/gkhome/projects/gp/products?q=views/metis). A METIS input file consists of one header
line and of n lines, where n is the number of vertices in the graph. For our purposes the header line looks as
follows:
n m 011 1
n and m are the number of vertices and edges in the graph respectively while the latter two elements tell METIS
that we will have one weight on each vertex and that the edges are also weighted.
All remaining lines are of the form
    vw v1 w1 ... vN wN
    where the ith line contains information about the ith vertex. On each line vw is the weight of the vertex,
    v1..vN are the vertices adjacent to the ith vertex and w1..wN are the weights of the corresponding edges.
more information about this format can be found here: http://glaros.dtc.umn.edu/gkhome/fetch/sw/metis/manual.pdf
(page 10 at the very top).
"""
graph_file = "tmp.graph"
# default is a dictionary of a default dictionary with a default weight of 0
edge_weights = collections.defaultdict(lambda: collections.defaultdict(lambda: 0)) # default value is 0
vertex_weights = collections.defaultdict(lambda: 1) # default weight of 1 - zero is not supported by metis
max_node_id = 0
# build the connection matrix
edge_count = 0
for link in jsonSendgraph['links']:
sourceId = int(link['source'])
targetId = int(link['target'])
value = int(link['value'])
if sourceId > max_node_id:
max_node_id = sourceId
if targetId > max_node_id:
max_node_id = targetId
if sourceId != targetId:
if edge_weights[sourceId][targetId] == 0: # only count the edge if it does not yet exist
edge_count += 1
edge_weights[sourceId][targetId] += value
# metis requires a reverse link, so we put a value of 1 if it does not yet exist
if edge_weights[targetId][sourceId] == 0:
edge_weights[targetId][sourceId] = 1
vertex_weights[targetId] += value
vertex_weights[sourceId] += value # count outgoing messages as well to vertex weight
# count nodes and links
vertex_count = max_node_id
graph_file_content = "{n} {m} 011 1".format(n=vertex_count, m=edge_count)
for from_vertex in range(1, vertex_count + 1): # indexed from 1..num_Vertex
vertex_line = ""
for to_vertex in edge_weights[from_vertex].keys():
if len(vertex_line) > 0:
vertex_line += " "
vertex_line += "{v} {w}".format(v=to_vertex, w=edge_weights[from_vertex][to_vertex])
# prepend vertex weight
vertex_line = "{vw} ".format(vw=vertex_weights[from_vertex]) + vertex_line
graph_file_content = graph_file_content + "\n" + vertex_line
with open(graph_file, 'w') as f:
f.write(graph_file_content)
# print("Metis file content:\n{0}".format(graph_file_content))
return graph_file
def schedulerMetis(num_workers, jsonSendgraph):
"""This scheduler uses METIS to partition the graph and schedule the work."""
import subprocess
schedule = ""
# Create METIS schedule
print("Creating METIS graph file for {0} partitions".format(num_workers))
graph_file = createMetisGraphFile(jsonSendgraph)
partition_file = "{gf}.part.{partitions}".format(gf=graph_file, partitions=num_workers)
# todo: objtype=cut is not necessary I think
print("Calling METIS")
try:
metis_output = subprocess.check_output([metis_binary, "-objtype=vol", graph_file, str(num_workers)])
#print("metis output:\n{0}".format(metis_output))
# read the partition file
with open(partition_file) as f:
schedule = ""
for line in f.readlines():
if len(line) > 0:
schedule += line
except subprocess.CalledProcessError as e:
print("something went wrong when calling metis:\n" + e.output)
# print(schedule)
# exit(1)
return schedule
# here we configure the scheduler currently in use (either schedulerOneWorker or schedulerMetis)
scheduler = schedulerMetis
# scheduler = scheduleOneWorker
def schedule(zk, topology):
topology_metrics_path = metrics_zk_path + "/" + topology
topology_sendgraph_path = topology_metrics_path + "/sendgraph"
topology_config_path = topology_metrics_path + "/config"
topology_schedule_path = schedule_zk_path + "/" + topology
print("scheduling using data at {0} writing schedule into {1}".
format(topology_metrics_path, topology_schedule_path))
jsonStr = ""
retry = 0
while len(jsonStr) == 0:
retry += 1
if retry >= 24: # try for two minutes
raise RuntimeError("Could not find send graph data in Zookeeper")
elif retry > 1:
print("Could not find send graph data in Zookeeper. Retrying in 10 seconds...")
            time.sleep(10)  # wait for 10 seconds before retrying
# extract the sendgraph from zookeeper
(jsonStr, node_stat) = zk.get(topology_sendgraph_path)
# parse the json graph
jsonSendGraph = json.loads(jsonStr)
# write json to file for debugging
with open("tmp.graph.json", 'w') as f:
f.write(jsonStr)
# get number of workers for topology
(nw_value, nw_stat) = zk.get(topology_config_path + "/topology.workers")
num_workers = byteArrayToInt(nw_value)
# the output of the schedules need to be the same as the partitioning files of METIS:
# one line per vertex, each having one number which is the partition (worker) it should
# be assigned to.
schedule = scheduler(num_workers, jsonSendGraph)
#schedule = "" # turn the scheduler off
if len(schedule) > 0:
debug = False
if debug:
print("computed schedule for topo {0}:\n{1}".format(topology, schedule))
else:
# write schedule to zk
print("writing schedule to zookeeper")
zk.ensure_path(topology_schedule_path)
zk.set(topology_schedule_path, bytes(bytearray(schedule)))
print("schedule written")
else:
print("No schedule computed")
def main_loop():
logging.basicConfig()
zk = KazooClient(hosts=zk_connect_string)
zk.start()
# make sure the root folders for the sendgraph and the schedules exist
zk.ensure_path(metrics_zk_path)
zk.ensure_path(schedule_zk_path)
for topology in zk.get_children(metrics_zk_path):
topology_metrics_zk_path = metrics_zk_path + "/" + topology
print("registering watcher schedule for " + topology_metrics_zk_path)
# register a data watch for each
def watchFunc(data, stat, event):
#print("watch called")
if event is not None and event.type == EventType.CHANGED:
print("new sendgraph data for {0} at {1}".format(topology, byteArrayToInt(data)))
schedule(zk, topology)
return True # returning false will disable the watch
# install data watch
#DataWatch(zk, topology_metrics_zk_path, func=watchFunc)
# if there is some data already, schedule immediately
if len(zk.get_children(topology_metrics_zk_path)):
print("existing sendgraph data for {0}".format(topology))
schedule(zk, topology)
# while 1:
# # wait until we get cancelled
# time.sleep(0.1)
if __name__ == '__main__':
try:
main_loop()
except KeyboardInterrupt:
print >> sys.stderr, '\nExiting by user request.\n'
sys.exit(0)
|
StarcoderdataPython
|
6625489
|
<reponame>minhtannguyen/transformer-mgk<filename>language-modeling/fast_transformers/__init__.py<gh_stars>1-10
"""Provide a library with fast transformer implementations."""
__author__ = ""
__copyright__ = ""
__license__ = "MIT"
__maintainer__ = ""
__email__ = ""
__url__ = "https://github.com/idiap/fast-transformers"
__version__ = "0.4.0"
|
StarcoderdataPython
|
1971809
|
<gh_stars>10-100
from util import Storage
class MockProtocol(Storage):
pass
class MockContact(Storage):
pass
|
StarcoderdataPython
|
6410499
|
import os
import re
from django.db.models import Q
from storage.models import RootPath, RelPath, File, ExcludeDir
from logger import init_logging
logger = init_logging(__name__)
class Scan(object):
"""Abstract functionality for scanning folders.
Either FullScan or QuickScan should be instantiated."""
def __init__(self, rp=None):
if rp is None:
self.root_paths = list(RootPath.objects.all())
else:
self.root_paths = list(RootPath.objects.filter(path__icontains=rp))
if len(self.root_paths) == 0:
raise Exception("No root paths found from: {0}".format(rp))
def scan(self):
"""Scan each root path in turn and update the database"""
# Iterate over each of the managed root paths
for root_path in self.root_paths:
            # Build the list of regexes
regexes = []
query = Q(root_path=root_path) | Q(root_path=None)
for rec in ExcludeDir.objects.filter(query):
regexes.append(re.compile(rec.regex))
# Iterate over each of the files in the current root path
for root, dirs, files in os.walk(root_path.abspath):
#
# Get the path to the current directory
#
rel_path = RelPath.getrelpath(root, root_path=root_path)
#
# Continue if directory is excluded
#
skip = False
i = 0
while (i < len(regexes)) and not skip:
if regexes[i].search(rel_path.abspath):
logger.debug(u"Skipping: {0}".format(rel_path.abspath))
skip=True
i += 1
if skip:
continue
#
# Get the list of known files so we can remove any
# that have been deleted
#
known_files = list(File.objects.filter(path=rel_path))
#
# Iterate over the files in the current directory and ensure
# the hash is up to date (QuickScan or FullScan).
# Remove each file from the list of known files on the way.
#
for fname in files:
file = self.file(rel_path, fname)
if file is None:
self.add_file(rel_path, fname)
else:
known_files.remove(file)
if self.needs_rehash(file):
self.update_file(file)
else:
logger.debug(u"No change: {0}".format(file.abspath))
#
# What's left in known_files has been deleted
# Remove from db
#
for file in known_files:
logger.debug(u"Removing from known_files: {0}".format(file.abspath))
file.mark_deleted()
def file(self, rel_path, fname):
"""Answer the File object if present or None."""
files = File.objects.filter(path=rel_path, name=fname, deleted=None)
assert len(files) <= 1, "Found more than one matching file"
if len(files) == 0:
return None
else:
return files[0]
def add_file(self, rel_path, fname):
"""Add the supplied fname to the db"""
logger.debug(u"Adding: {0}".format(fname))
file = File(path=rel_path, name=fname)
file.update_details()
return
def update_file(self, file):
logger.debug(u"Updating: {0}".format(file.abspath))
file.update_details()
return
class QuickScan(Scan):
"""QuickScan assumes that a file with the same mtime and size hasn't
changed, so doesn't require its hash to be recalculated."""
def needs_rehash(self, file):
return file.os_stats_changed()
class FullScan(Scan):
"""FullScan rehashes every file, regardless of whether it has been
changed or not"""
def needs_rehash(self, file):
return True
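# A minimal usage sketch (hypothetical, not part of the original module): it
# assumes the Django app is configured and the storage models are populated;
# 'media' is an illustrative root-path filter.
#
#     QuickScan().scan()           # rehash only files whose os stats changed
#     FullScan(rp='media').scan()  # rehash every file under matching root paths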
|
StarcoderdataPython
|
12840218
|
<gh_stars>1-10
import hikari
import tanjun
from avgamah.core.client import Client
pussy_component = tanjun.Component()
@pussy_component.with_slash_command
@tanjun.with_own_permission_check(
hikari.Permissions.SEND_MESSAGES
| hikari.Permissions.VIEW_CHANNEL
| hikari.Permissions.EMBED_LINKS
)
@tanjun.with_nsfw_check
@tanjun.as_slash_command("pussy", "Cute pussy cats.")
async def pussy(ctx: tanjun.abc.Context):
await ctx.shards.reddit_cache.reddit_sender(ctx, "pussy")
@tanjun.as_loader
def load_components(client: Client):
client.add_component(pussy_component.copy())
|
StarcoderdataPython
|
1671866
|
# A part of pdfrw (pdfrw.googlecode.com)
# Copyright (C) 2006-2012 <NAME>, Austin, Texas
# MIT license -- See LICENSE.txt for details
class PdfObject(str):
''' A PdfObject is a textual representation of any PDF file object
other than an array, dict or string. It has an indirect attribute
which defaults to False.
'''
indirect = False
|
StarcoderdataPython
|
6633694
|
<filename>hybridpy/dataset/triploader.py
__author__ = 'astyler'
import pandas as pd
import numpy as np
import math
import osmapping
from scipy.signal import butter, filtfilt
def load(fname):
trip = pd.read_csv(fname)
elapsed = np.cumsum(trip.PeriodMS / 1000.0)
elapsed -= elapsed[0]
trip['ElapsedSeconds'] = elapsed
# smooth speed
b, a = butter(2, 0.5)
trip['SpeedFilt'] = filtfilt(b, a, trip.Speed)
    # use __setitem__ so the column is actually created on the DataFrame
    trip['Acceleration'] = trip['SpeedFilt'].diff()
    trip.loc[0, 'Acceleration'] = 0
# smooth noisy elevation measurements
b, a = butter(4, 0.05)
trip['ElevationFilt'] = filtfilt(b, a, trip.Elevation)
locations = trip[['Latitude', 'Longitude']].values
# add heading
headings = [compute_heading(lat1=here[0], lat2=there[0], lon1=here[1], lon2=there[1]) for here, there in zip(locations[0:-1], locations[1:])]
headings.append(headings[-1])
trip['HeadingRaw'] = headings
filtered_headings = [headings[0]]
for heading, speed in zip(headings[1:], trip['SpeedFilt'].values[1:]):
if speed < 1:
filtered_headings.append(filtered_headings[-1])
else:
filtered_headings.append(heading)
b, a = butter(2, 0.2)
trip['HeadingCosF'] = filtfilt(b,a,np.cos(filtered_headings))
trip['HeadingSinF'] = filtfilt(b,a,np.sin(filtered_headings))
# add gradient
planar_distances = [osmapping.haversine(here, there)+1.0 for here, there in zip(locations[0:-1], locations[1:])]
trip['GradientRaw'] = trip.Elevation.diff() / ([1.0] + planar_distances)
trip.loc[0, 'GradientRaw'] = trip.loc[1, 'GradientRaw']
return trip
def compute_heading(lat1, lat2, lon1, lon2):
lat1, lat2, lon1, lon2 = map(math.radians, [lat1, lat2, lon1, lon2])
return math.atan2(math.cos(lat1) * math.sin(lat2) - math.sin(lat1) * math.cos(lat2) * math.cos(lon2 - lon1),
math.sin(lon2 - lon1) * math.cos(lat2))
|
StarcoderdataPython
|
8101675
|
<gh_stars>1-10
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
from pmdarima.arima import ndiffs
import sys
#from statsmodels.graphics.tsaplots import plot_pacf
from statsmodels.tsa.stattools import acf, pacf
if __name__ == "__main__":
data = pd.read_csv(sys.argv[1])
    d = int(sys.argv[2])  # differencing order must be an int for Series.diff()
#print(d)
#print(d)
#lag_acf = acf(data["value"].diff(d).dropna(), nlags=30)
#lag_pacf = pacf(data["value"].dropna(), nlags=25)
    if d == 0:
lag_pacf = pacf(data["value"].dropna(), nlags=25)
else:
lag_pacf = pacf(data["value"].diff(d).dropna(), nlags=25)
#print(lag_pacf)
f = open("./ts_analysis/pacf.txt", "w")
#f = open("pacf.txt", "w")
for i in lag_pacf:
f.write(str(i)+'\n')
f.close()
#plot_pacf(data["value"].diff(d).dropna())
#lt.savefig("../desktop/for_redis/pacf.png")
#plt.savefig("pacf.png")
    # Inspect the ACF plot; the argument is the data after differencing
#plot_acf(data["time_imputed"].diff(1))
#plt.show()
|
StarcoderdataPython
|
6515482
|
<filename>motion_detection.py
#####################################################################
# Import Libriries
#####################################################################
import cv2
import numpy as np
import time
#####################################################################
print("\n[INFO] Read frames from the webcam / file\n")
#####################################################################
input_file = 'videos/test_fire_2.mp4'
if isinstance(input_file, str):
video_source = input_file
input_file_name = input_file[:-4]
video = cv2.VideoCapture(video_source)
else:
video_source = 0
input_file_name = "videos/webcam"
video = cv2.VideoCapture(video_source)
time.sleep(2)
if video.isOpened() == False:
print("[INFO] Unable to read the camera feed")
#####################################################################
# Background Extraction
#####################################################################
# Subtractors
knnSubtractor = cv2.createBackgroundSubtractorKNN(100, 400, True)
# Motion detection parameters
percentage = 0.2 # percent
thresholdCount = 1500
movementText = "Movement is Detected"
textColor = (255, 255, 255)
titleTextPosition = (50, 50)
titleTextSize = 1.2
motionTextPosition = (50, 50)
frameIdx = 0
#####################################################################
# Write video settings: Save video + detection on the disk
#####################################################################
save_output = True
fourcc = cv2.VideoWriter_fourcc(*'MP4V')
writer = None
print("[INFO] Processing...(Press q to stop)")
while(1):
# Return Value and the current frame
ret, frame = video.read()
frameIdx += 1
print('[INFO] Frame Number: %d' % (frameIdx))
# Check if a current frame actually exist
if not ret:
break
if writer is None:
(frame_height, frame_width) = frame.shape[:2]
output_file_name = input_file_name + \
'_motion_detection_{}_{}'.format(frame_height, frame_width)+'.mp4'
writer = cv2.VideoWriter(
output_file_name, fourcc, 20.0, (frame_width, frame_height))
output_motion_file_name = input_file_name + \
'_motion_{}_{}'.format(frame_height, frame_width)+'.mp4'
writer_motion = cv2.VideoWriter(
output_motion_file_name, fourcc, 20.0, (frame_width, frame_height),0)
#############
pixel_total = frame_height * frame_width
thresholdCount = (percentage * pixel_total) / 100
print('[INFO] frame_height={}, frame_width={}'.format(
frame_height, frame_width))
print('[INFO] Number of pixels of the frame: {}'.format(pixel_total))
print('[INFO] Number of pixels to trigger Detection ({}%) : {}'.format(percentage,
thresholdCount))
print("\n[INFO] Perform Movement Detection: KNN")
#####################################################################
tic = time.time()
knnMask = knnSubtractor.apply(frame)
toc = time.time()
knnPixelCount = np.count_nonzero(knnMask)
knnPixelPercentage = (knnPixelCount*100.0)/pixel_total
print('[INFO] Processing time (Movement Detection): {0:2.2f} ms'.format(
(toc-tic)*1000))
print('[INFO] Percentage of Moving Pixel: {0:2.4f} % ({1:d})'.format(
knnPixelPercentage, knnPixelCount))
if (knnPixelCount > thresholdCount) and (frameIdx > 1):
cv2.putText(frame, movementText, motionTextPosition,
cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, cv2.LINE_AA)
# Display Results
#####################################################################
cv2.imshow('Original', frame)
cv2.imshow('Movement: KNN', knnMask)
cv2.moveWindow('Original', 50, 50)
cv2.moveWindow('Movement: KNN', frame_width, 50)
# Record Video
writer.write(frame) if save_output else 0
writer_motion.write(knnMask) if save_output else 0
# if the `q` key was pressed, break from the loop
#####################################################################
key = cv2.waitKey(1) & 0xFF
if key == ord("q"):
break
video.release()
cv2.destroyAllWindows()
|
StarcoderdataPython
|
4950667
|
from rest_framework import serializers
from .models import Notes
from .models import User
from rest_framework_simplejwt.tokens import RefreshToken
from . import string_to_JSX
class NotesInfoSerializer(serializers.ModelSerializer):
"""This serializer just gives a brief description of a Note"""
# A custom serializer field
owner_name = serializers.SerializerMethodField(read_only=True)
class Meta:
model = Notes
# all the field we want to be returned
fields = ['id','title','owner_name','created_on','last_modified','favourite','global_file']
def get_owner_name(self,obj):
"""owner_name should contain name of the owner of the Note"""
return obj.owner.username
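# A hypothetical serialized Note as produced by NotesInfoSerializer (the values
# and timestamp format are illustrative only and depend on the Notes model and
# DRF settings):
#
#     {"id": 1, "title": "My first note", "owner_name": "alice",
#      "created_on": "2021-01-01T10:00:00Z", "last_modified": "2021-01-02T09:30:00Z",
#      "favourite": false, "global_file": false}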
class NotesDetailSerializer(serializers.ModelSerializer):
"""Detail description of the Note"""
graph_content = serializers.SerializerMethodField(read_only=True)
class Meta:
model = Notes
fields = '__all__'
extra_fields = ['graph_content']
extra_kwargs={
"owner":{"read_only":True}
}
def get_field_names(self, declared_fields, info):
"""Overriding get_field_names function to include the extra_fields in the list of fields
**implementation taken from
https://stackoverflow.com/questions/38245414/django-rest-framework-how-to-include-all-fields-and-a-related-field-in-mo
**
"""
expanded_fields = super(NotesDetailSerializer, self).get_field_names(declared_fields, info)
if getattr(self.Meta, 'extra_fields', None):
return expanded_fields + self.Meta.extra_fields
else:
return expanded_fields
def get_graph_content(self,obj):
"""graph_content is the JSX string of user content"""
return string_to_JSX.convert_to_JSX_string(obj.content)
class UserSerializer(serializers.ModelSerializer):
"""Serializer for User"""
full_name = serializers.SerializerMethodField(read_only=True)
class Meta:
model = User
fields = ['id','username','first_name','last_name','full_name','image']
extra_kwargs = {
'password':{
'write_only':True,
'style':{'input_type':'password'}
}
}
def get_full_name(self,obj):
"""full_name is combination of first and last name"""
full_name = f"{obj.first_name} {obj.last_name}"
return full_name
class UserSerializerWithToken(serializers.ModelSerializer):
"""Same as UserSerializer but will have one additional field for JWT token """
token = serializers.SerializerMethodField(read_only=True)
full_name = serializers.SerializerMethodField(read_only=True)
class Meta:
model = User
fields = ['id','username','first_name','last_name','token','full_name','image']
extra_kwargs = {
'password':{
'write_only':True,
'style':{'input_type':'password'}
}
}
def get_token(self,obj):
token = RefreshToken.for_user(obj)
return str(token.access_token)
def get_full_name(self,obj):
full_name = f"{obj.first_name} {obj.last_name}"
return full_name
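# Illustrative usage sketch (not part of the original module), assuming a
# standard DRF project: serialize the requesting user's notes in a read-only
# view. The `owner` filter and URL wiring are assumptions of this example.
from rest_framework.decorators import api_view
from rest_framework.response import Response

@api_view(['GET'])
def list_notes_example(request):
    """Example view returning brief descriptions of the requesting user's notes."""
    notes = Notes.objects.filter(owner=request.user)
    serializer = NotesInfoSerializer(notes, many=True)
    return Response(serializer.data)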
|
StarcoderdataPython
|
3583188
|
<reponame>ropable/wastd<gh_stars>1-10
from rest_framework.serializers import ModelSerializer
from rest_framework_gis.serializers import GeoFeatureModelSerializer
from taxonomy.models import (
Community,
Crossreference,
HbvFamily,
HbvGenus,
HbvGroup,
HbvName,
HbvParent,
HbvSpecies,
HbvSupra,
HbvVernacular,
HbvXref,
Taxon,
Vernacular,
)
class HbvNameSerializer(ModelSerializer):
class Meta:
model = HbvName
fields = "__all__"
class HbvSupraSerializer(ModelSerializer):
class Meta:
model = HbvSupra
fields = "__all__"
class HbvGroupSerializer(ModelSerializer):
class Meta:
model = HbvGroup
fields = "__all__"
class HbvFamilySerializer(ModelSerializer):
class Meta:
model = HbvFamily
fields = "__all__"
class HbvGenusSerializer(ModelSerializer):
class Meta:
model = HbvGenus
fields = "__all__"
class HbvSpeciesSerializer(ModelSerializer):
class Meta:
model = HbvSpecies
fields = "__all__"
class HbvVernacularSerializer(ModelSerializer):
class Meta:
model = HbvVernacular
fields = "__all__"
class HbvXrefSerializer(ModelSerializer):
class Meta:
model = HbvXref
fields = "__all__"
class HbvParentSerializer(ModelSerializer):
class Meta:
model = HbvParent
fields = "__all__"
class TaxonSerializer(ModelSerializer):
"""Serializer for Taxon.
Includes a summary of conservation status which is ingested into WACensus.
Example:
NAME_ID | CONSV_CODE | LIST_CODE | EPBC | WA_IUCN | IUCN_CRITERIA
228 | 3 | Priority | | |
297 | T | WCA_1991 | EN | VU | D1+2
436 | T | WCA_1991 | EN | EN | B1+2c
"""
class Meta:
model = Taxon
fields = (
"pk",
"name_id",
"name",
"rank",
"parent",
"author",
"current",
"publication_status",
"vernacular_name",
"vernacular_names",
"canonical_name",
"taxonomic_name",
"paraphyletic_groups"
)
class FastTaxonSerializer(ModelSerializer):
"""Minimal serializer for Taxon to be used in other serializers.
"""
class Meta:
model = Taxon
fields = (
"pk",
"name_id",
"canonical_name",
"taxonomic_name",
"vernacular_names",
)
class VernacularSerializer(ModelSerializer):
taxon = FastTaxonSerializer(many=False)
class Meta:
model = Vernacular
fields = (
"ogc_fid",
"taxon",
"name",
"language",
"preferred",
)
class CrossreferenceSerializer(ModelSerializer):
predecessor = FastTaxonSerializer(many=False)
successor = FastTaxonSerializer(many=False)
class Meta:
model = Crossreference
fields = (
"xref_id",
"predecessor",
"successor",
"reason",
"authorised_by",
"authorised_on",
"effective_to",
"comments",
)
class CommunitySerializer(GeoFeatureModelSerializer):
class Meta:
model = Community
geo_field = "eoo"
fields = ["code", "name", "description", "eoo"]
|
StarcoderdataPython
|
6446655
|
# MultimediaCase for Raspberry Pi - by Joy-IT
# Addon published under MIT-License
import sys
sys.path.append('/storage/.kodi/addons/virtual.rpi-tools/lib')
sys.path.append('/storage/.kodi/addons/script.module.pyserial/lib')
import xbmc
import xbmcaddon
import xbmcgui
import subprocess
import time
import os
import serial
addon = xbmcaddon.Addon()
addonname = addon.getAddonInfo('name')
_localize_ = addon.getLocalizedString
monitor = xbmc.Monitor()
Cancel = False
status_Fan = False
status_LearningMode = False
os.system("touch /storage/.config/autostart.sh")
os.system("rm /storage/.kodi/temp/functions.txt && touch /storage/.kodi/temp/functions.txt")
flags = ["python /storage/.kodi/addons/script.module.MultimediaCase/lib/fan.py &\n", "python /storage/.kodi/addons/script.module.MultimediaCase/lib/shutdown-function.py &"]
with open("/storage/.config/autostart.sh","r") as log, open("/storage/.kodi/temp/functions.txt","w") as file:
for line in log:
if not any(flag in line for flag in flags):
file.write(line)
with open("/storage/.kodi/temp/functions.txt", "a") as log:
log.write("python /storage/.kodi/addons/script.module.MultimediaCase/lib/shutdown-function.py &\n")
def fanControll():
global status_Fan
global Cancel
fan = xbmcgui.Dialog().select(_localize_(32001), [_localize_(32002),_localize_(32003),_localize_(32021)])
if fan == -1:
Cancel = True
if fan == 1:
xbmcgui.Dialog().ok(_localize_(32004),_localize_(32005))
if fan == 0:
with open("/storage/.kodi/temp/functions.txt", "a") as log:
log.write("python /storage/.kodi/addons/script.module.MultimediaCase/lib/fan.py &\n")
xbmcgui.Dialog().ok(_localize_(32004),_localize_(32006))
status_Fan = True
if fan == 2:
xbmcgui.Dialog().ok(_localize_(32022),_localize_(32023))
fanControll()
def learningMode():
global status_LearningMode
global Cancel
if Cancel == False:
learning_Mode = xbmcgui.Dialog().select(_localize_(32007), [_localize_(32008),_localize_(32009),_localize_(32021)])
if learning_Mode == -1:
Cancel = True
if learning_Mode == 1:
xbmcgui.Dialog().ok(_localize_(32010),_localize_(32011))
if learning_Mode == 0:
ser = serial.Serial(port='/dev/serial0', baudrate = 38400, parity=serial.PARITY_NONE, stopbits=serial.STOPBITS_ONE, bytesize=serial.EIGHTBITS, timeout=1)
ser.write(str.encode('\x0D'))
ser.write(str.encode('X04'))
ser.write(str.encode('\x0D'))
xbmcgui.Dialog().textviewer(_localize_(32012),_localize_(32013))
xbmcgui.Dialog().ok(_localize_(32010),_localize_(32014))
status_LearningMode = True
if learning_Mode == 2:
xbmcgui.Dialog().ok(_localize_(32024),_localize_(32025))
learningMode()
while not monitor.abortRequested():
fanControll()
learningMode()
if Cancel == False:
os.system("rm /storage/.config/autostart.sh && touch /storage/.config/autostart.sh")
os.system("cp /storage/.kodi/temp/functions.txt /storage/.config/autostart.sh")
os.system("rm /storage/.kodi/temp/functions.txt")
if status_Fan == False and status_LearningMode == False:
xbmcgui.Dialog().ok(addonname, _localize_(32015))
elif status_Fan == True and status_LearningMode == False:
xbmcgui.Dialog().ok(addonname, _localize_(32016))
elif status_Fan == False and status_LearningMode == True:
xbmcgui.Dialog().ok(addonname, _localize_(32017))
elif status_Fan == True and status_LearningMode == True:
xbmcgui.Dialog().ok(addonname, _localize_(32018))
xbmcgui.Dialog().ok(addonname, _localize_(32019))
os.system ("reboot")
else:
os.system("rm /storage/.kodi/temp/functions.txt")
xbmcgui.Dialog().ok(addonname, _localize_(32020))
break
|
StarcoderdataPython
|
4999927
|
from django.core.mail import backends
from django.test import TestCase
from .test_backends import ErrorRaisingBackend
from django_mail_admin.connections import connections
class ConnectionTest(TestCase):
def test_get_connection(self):
# Ensure ConnectionHandler returns the right connection
self.assertTrue(isinstance(connections['error'], ErrorRaisingBackend))
self.assertTrue(isinstance(connections['locmem'], backends.locmem.EmailBackend))
|
StarcoderdataPython
|
8109507
|
def recursive_digit_sum(n, k):
    def repeat_to_length(s, wanted):
        # For an integer n this computes n * k rather than repeating the digits,
        # but the super digit is unchanged: a number and its digit sum are
        # congruent modulo 9, so both forms reduce to the same single digit.
        return s * wanted
def sum_digits(n):
r = 0
while n:
r, n = r + n % 10, n // 10
return r
p = repeat_to_length(n, k)
while len(str(p)) != 1:
p = sum_digits(int(p))
return p
print(recursive_digit_sum(148,3))
print(recursive_digit_sum(123,3))
print(recursive_digit_sum(861568688536788, 100000))
|
StarcoderdataPython
|
4852265
|
from discord.ext import commands
import discord
import random
class Tips(commands.Cog):
"""Commands for providing tips about using the bot."""
def __init__(self, bot, config):
self.bot = bot
self.config = config[__name__.split(".")[-1]]
self.tips = ["Tip of this dick in your ass"]
@commands.command()
async def tip(self, ctx):
"""Get a random tip about using the bot."""
index = random.randrange(len(self.tips))
await ctx.send(f"**Tip #{index+1}:** {self.tips[index]}")
|
StarcoderdataPython
|
1966538
|
from typing import List, Union
from structlog import get_logger
from app.api.client.apis_helper import Apis
from app.api.client.base_api_caller import BaseApiCaller
from app.core.config import settings
logger = get_logger()
def search_newly_added_videos(
published_after: str,
next_page_token: str,
content_type: str = "video",
order_by: str = "date",
):
"""
Fetches the newly added videos
Args:
published_after:
next_page_token:
content_type:
order_by:
Returns:
"""
result = None
logger.info(f"Searching for newly published youtube videos.")
# Checking if api key is already assigned & is a valid API key
if settings.google_api_key:
resp = _youtube_search_videos_caller(
content_type=content_type,
order_by=order_by,
published_after=published_after,
api_key=settings.google_api_key,
next_page_token=next_page_token,
)
if resp:
return resp
# If the API key has become invalid, finding a valid key from the list
for api_key in settings.api_keys:
resp = _youtube_search_videos_caller(
content_type=content_type,
order_by=order_by,
published_after=published_after,
api_key=api_key,
next_page_token=next_page_token,
)
if resp:
settings.google_api_key = api_key
result = resp
break
if not result:
logger.error(f"Please add a new valid API key at {settings.keys_file_path}")
logger.error(f"Retrying after {settings.sleep_interval} seconds..")
return result
def _youtube_search_videos_caller(
content_type, order_by, published_after, api_key, next_page_token
):
"""
API builder & caller method for youtube search API
"""
params = {
"type": content_type,
"order": order_by,
"publishedAfter": published_after,
"maxResults": 50,
"key": api_key,
"q": settings.video_query_string,
}
if next_page_token:
params["pageToken"] = next_page_token
youtube_search_uri = Apis.search_api
_base_api_caller = BaseApiCaller(
base_url=settings.youtube_base_url, path=youtube_search_uri, params=params
)
return _base_api_caller.get()
def get_video_details(video_ids: Union[str, List[str]]):
"""
Method to get details for given video(s)
"""
result = None
if isinstance(video_ids, list):
video_ids = ",".join(video_ids)
# Checking if api key is already assigned & is a valid API key
if settings.google_api_key:
resp = _video_details_caller(video_ids, settings.google_api_key)
if resp:
return resp
# If the API key has become invalid, finding a valid key from the list
for api_key in settings.api_keys:
resp = _video_details_caller(video_ids, api_key)
if resp:
result = resp
break
if not result:
logger.error(f"Please add a new valid API key at {settings.keys_file_path}")
logger.error(f"Retrying after {settings.sleep_interval} seconds..")
return result
def _video_details_caller(video_ids, api_key):
"""
API builder & caller method for youtube video details API
"""
params = {
"key": api_key,
"id": video_ids,
"part": "snippet",
}
youtube_video_details_uri = Apis.video_details
_base_api_caller = BaseApiCaller(
base_url=settings.youtube_base_url,
path=youtube_video_details_uri,
params=params,
)
return _base_api_caller.get()
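# Illustrative usage sketch (not part of the original module): one polling pass
# that searches for recently published videos and then fetches details for the
# returned IDs. The response shape ("items", "id.videoId") follows the public
# YouTube Data API v3 and is an assumption here; adjust if BaseApiCaller
# post-processes responses differently.
def poll_once_example(published_after: str):
    search_resp = search_newly_added_videos(
        published_after=published_after, next_page_token=None
    )
    if not search_resp:
        return None
    video_ids = [
        item["id"]["videoId"]
        for item in search_resp.get("items", [])
        if item.get("id", {}).get("videoId")
    ]
    return get_video_details(video_ids) if video_ids else None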
|
StarcoderdataPython
|
4828915
|
<filename>opencv_load_image_bonus.py
# HOW TO RUN
# python3 opencv_load_image_bonus.py --image images/floppy_disk.jpg --output output.jpg
# python3 opencv_load_image_bonus.py --image images/floppy_disk.jpg
# import the necessary modules
import argparse
import cv2
# initialize the argument parser and establish the arguments required
parser = argparse.ArgumentParser()
parser.add_argument('--image', required=True, help='Path to the image')
parser.add_argument('--output', default='output.jpg', help='Path to the output image')
args = vars(parser.parse_args())
def load_image(filename):
"""
:param filename: path to the image file
:return: a numpy array if the image file exists
else None
"""
image = cv2.imread(filename)
# validate if the image was loaded correctly
if image is None:
return None
return image
# load the image from where the file is located
image = load_image(args['image'])
# validate if the image was loaded properly
if image is None:
print("Do nothing")
else:
# display the image and waits for any key to be pressed;
# remove any created gui window from the screen & memory;
# and then save the image directly to your current folder
cv2.imshow("Image", image)
cv2.waitKey(0)
cv2.imwrite(args['output'], image)
|
StarcoderdataPython
|
1870536
|
<filename>Regs/Block_E/RE113.py
from ..IReg import IReg
class RE113(IReg):
def __init__(self):
self._header = ['REG',
'COD_PART',
'COD_MOD',
'SER',
'SUB',
'NUM-DOC',
'DT_DOC',
'COD_ITEM',
'VL_AJ_ITEM',
'CHV_DECe']
self._hierarchy = "5"
|
StarcoderdataPython
|
3376673
|
from neural.loss.naive_entropy import NaiveEntropy
from neural.loss.mse import MeanSquaredError
__all__ = ['NaiveEntropy', 'MeanSquaredError']
|
StarcoderdataPython
|
6492746
|
#!/usr/bin/env python
"""
Copyright 2017 ThoughtSpot
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from __future__ import print_function
import sys
import argparse
import csv
import os.path
import subprocess
import time
from subprocess import check_output
"""
Delete records from tables based on a CSV document.
Each line of the description file has the following format: table_name, col1, col2, etc.
Each line of the data file has the following format: table_name, val1, val2, etc.
The delete will be of the format DELETE FROM table_name WHERE col1 = val1 AND col2 = val2, etc.
ASSUMPTIONS:
* the format of TQL output will not change
* this script will be run on the appliance and be able to call TQL to execute delete commands.
* fields do not contain the separator value.
"""
def main():
"""
Reads a description file for what to delete and then deletes the appropriate records.
"""
args = parse_args()
if check_args(args):
read_descriptions(args)
generate_deletes(args)
def parse_args():
"""
Parse the command line arguments.
:return: An args dictionary with command line arguments.
"""
parser = argparse.ArgumentParser()
parser.add_argument(
"-f", "--filename", help="path to file with records to delete"
)
parser.add_argument(
"-t", "--table", help="name of the table to delete records from"
)
parser.add_argument("-d", "--database", help="database to delete from")
parser.add_argument(
"-s",
"--schema",
default="falcon_default_schema",
help="schema to delete from",
)
parser.add_argument(
"-p", "--separator", default="|", help="separator to use in data"
)
args = parser.parse_args()
return args
# description of the tables in the database.
# { table1 : { column_name : type, ... }, table2 : { column_name : type, ...} }
descriptions = {}
def check_args(args):
args_are_good = True
if args.filename is None or os.path.isfile(args.filename) is False:
eprint("Delete file %s was not found." % args.filename)
args_are_good = False
if args.table is None:
eprint("Table was not specified.")
args_are_good = False
if args.database is None:
eprint("Database was not specified.")
args_are_good = False
return args_are_good
def read_descriptions(args):
"""
Reads the table descriptions from the database schema and populates the descriptions.
WARNING: This depends on the format of tql output not changing.
:param args: Command line arguments.
"""
table_list = check_output(
'echo "show tables %s;" | tql' % args.database, shell=True
).split(
"\n"
)
for table in table_list:
table_details = table.split("|")
if len(table_details) >= 2:
schema_name = table_details[0].strip()
table_name = table_details[1].strip()
schema = descriptions.get(schema_name, None)
if schema is None:
schema = {}
table = schema.get(table_name, None)
if table is None:
table = {}
column_list = check_output(
'echo "show table %s.%s.%s;" | tql'
% (args.database, schema_name, table_name),
shell=True,
).split(
"\n"
)
for column in column_list:
column_details = column.split("|")
if len(column_details) >= 2:
column_name = column_details[0].strip()
column_type = column_details[2].strip()
table[column_name] = column_type
schema[table_name] = table
descriptions[schema_name] = schema
# print (descriptions)
def generate_deletes(args):
"""
    Creates and executes the delete statements from the values file.
:param args: Command line arguments.
"""
start = time.time()
nbr_deletes = 0
# get the column descriptions.
columns = descriptions.get(args.schema, {}).get(args.table, None)
if columns is None:
eprint("Table %s.%s not found." % (args.schema, args.table))
return
tmpfile = "/tmp/deleteme"
with open(args.filename, "rb") as valuefile:
filereader = csv.DictReader(valuefile, delimiter="|", quotechar='"')
with open(tmpfile, "w") as deletefile:
for values in filereader:
delete_stmt = "DELETE FROM %s.%s.%s WHERE " % (
args.database, args.schema, args.table
)
first = True
for key in values.keys():
if not first:
delete_stmt += " AND "
else:
first = False
# TODO see if I need to un-quote non-numeric. Might need to re-do desc file.
if "int" in columns[key] or "double" in columns[key]:
delete_stmt += ("%s = %s" % (key, values[key]))
else:
delete_stmt += ("%s = '%s'" % (key, values[key]))
delete_stmt += ";\n"
deletefile.write(delete_stmt)
nbr_deletes += 1
subprocess.call("cat %s | tql" % tmpfile, shell=True)
finish = time.time()
print(
"Executed %d deletes in %s seconds." % (nbr_deletes, (finish - start))
)
def eprint(*args, **kwargs):
"""Prints to standard error"""
print(*args, file=sys.stderr, **kwargs)
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
320227
|
"""
Submarine.py
PURPOSE:
Creates the Submarine class that is usable by players.
"""
from __future__ import annotations
from Objects.Vessels.Vessel import TraditionalVessel
# Due to circular import, cannot import form Settings.py
GREEN = '\033[32m' # Green
YELLOW = '\033[33m' # Yellow
RED = '\033[31m' # Red
class TraditionalSubmarine(TraditionalVessel):
"""The Submarine vessel class used for Traditional Battleship."""
def __init__(self, nation: str, name: str, pennant: int) -> None:
"""
Initializes a new Submarine.
Parameter(s):
- nation: The affiliated nation
- name: The name of the vessel
- pennant: The id number of the vessel
"""
super().__init__(nation, name, pennant)
self.type = 'Submarine'
self.abbrev = 'SM'
self.symbol = 'S'
self.hp = 4
self.hits_received = 0
self.health_color = f'{GREEN}'
self.enemy_hp_color = f'{RED}'
def __copy__(self) -> TraditionalSubmarine:
ret = TraditionalSubmarine(self.nation, self.name, self.pennant)
ret.hp = self.hp
ret.hits_received = self.hits_received
ret.health_color = self.health_color
ret.enemy_hp_color = self.enemy_hp_color
ret.sunk = self.sunk
ret.bow = self.bow
ret.hit = self.hit
return ret
def update_health_color(self) -> None:
if self.hp == 4:
return
elif self.hp > 1:
self.health_color = f'{YELLOW}'
self.enemy_hp_color = f'{YELLOW}'
else:
self.health_color = f'{RED}'
self.enemy_hp_color = f'{GREEN}'
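# Illustrative usage sketch (not part of the original module): construct a
# submarine, apply two hits and refresh its display colour. The nation, name
# and pennant values below are made up for the example.
if __name__ == '__main__':
    sub = TraditionalSubmarine('UK', 'HMS Example', 101)
    sub.hp -= 2
    sub.hits_received += 2
    sub.update_health_color()
    print(sub.type, sub.hp, sub.health_color + 'damaged' + '\033[0m')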
|
StarcoderdataPython
|
6546774
|
# Step 3. Publish Scored Data
# Sample Python script designed to save scored data into a
# target (sink) datastore.
from azureml.core import Run, Workspace, Datastore, Dataset
from azureml.data.datapath import DataPath
import pandas as pd
import os
import argparse
# Get current run
current_run = Run.get_context()
# Get associated AML workspace
ws = current_run.experiment.workspace
# Get default datastore
ds = ws.get_default_datastore()
# Get inferencing dataset
scored_dataset = current_run.input_datasets['scored_data']
scored_data_df = scored_dataset.to_pandas_dataframe()
################################# MODIFY #################################
# You can optionally save data to a separate datastore in this step.
##########################################################################
# Save dataset to ./outputs dir
os.makedirs('./outputs', exist_ok=True)
scored_data_df.to_csv(os.path.join('outputs', 'scored_data.csv'), index=False)
|
StarcoderdataPython
|
1805334
|
#!/usr/bin/env python
"""
Tutorial to demonstrate running parameter estimation on a binary neutron star
system taking into account tidal deformabilities.
This example estimates the masses using a uniform prior in both component masses
and also estimates the tidal deformabilities using a uniform prior in both
tidal deformabilities
"""
import numpy as np
import bilby
# Specify the output directory and the name of the simulation.
outdir = 'outdir'
label = 'bns_example'
bilby.core.utils.setup_logger(outdir=outdir, label=label)
# Set up a random seed for result reproducibility. This is optional!
np.random.seed(88170235)
# We are going to inject a binary neutron star waveform. We first establish a
# dictionary of parameters that includes all of the different waveform
# parameters, including masses of the two black holes (mass_1, mass_2),
# aligned spins of both black holes (chi_1, chi_2), etc.
injection_parameters = dict(
mass_1=1.5, mass_2=1.3, chi_1=0.02, chi_2=0.02, luminosity_distance=50.,
theta_jn=0.4, psi=2.659, phase=1.3, geocent_time=1126259642.413,
ra=1.375, dec=-1.2108, lambda_1=400, lambda_2=450)
# Set the duration and sampling frequency of the data segment that we're going
# to inject the signal into. For the
# TaylorF2 waveform, we cut the signal close to the isco frequency
duration = 32
sampling_frequency = 2 * 1024
start_time = injection_parameters['geocent_time'] + 2 - duration
# Fixed arguments passed into the source model. The analysis starts at 40 Hz.
waveform_arguments = dict(waveform_approximant='IMRPhenomPv2_NRTidal',
reference_frequency=50., minimum_frequency=40.0)
# Create the waveform_generator using a LAL Binary Neutron Star source function
waveform_generator = bilby.gw.WaveformGenerator(
duration=duration, sampling_frequency=sampling_frequency,
frequency_domain_source_model=bilby.gw.source.lal_binary_neutron_star,
parameter_conversion=bilby.gw.conversion.convert_to_lal_binary_neutron_star_parameters,
waveform_arguments=waveform_arguments)
# Set up interferometers. In this case we'll use three interferometers
# (LIGO-Hanford (H1), LIGO-Livingston (L1), and Virgo (V1)).
# These default to their design sensitivity and start at 40 Hz.
interferometers = bilby.gw.detector.InterferometerList(['H1', 'L1', 'V1'])
for interferometer in interferometers:
interferometer.minimum_frequency = 40
interferometers.set_strain_data_from_power_spectral_densities(
sampling_frequency=sampling_frequency, duration=duration,
start_time=start_time)
interferometers.inject_signal(parameters=injection_parameters,
waveform_generator=waveform_generator)
# Load the default prior for binary neutron stars.
# We're going to sample in chirp_mass, symmetric_mass_ratio, lambda_tilde, and
# delta_lambda rather than mass_1, mass_2, lambda_1, and lambda_2.
# BNS have aligned spins by default, if you want to allow precessing spins
# pass aligned_spin=False to the BNSPriorDict
priors = bilby.gw.prior.BNSPriorDict()
for key in ['psi', 'geocent_time', 'ra', 'dec', 'chi_1', 'chi_2',
'theta_jn', 'luminosity_distance', 'phase']:
priors[key] = injection_parameters[key]
priors.pop('mass_ratio')
priors.pop('lambda_1')
priors.pop('lambda_2')
priors['chirp_mass'] = bilby.core.prior.Gaussian(
1.215, 0.1, name='chirp_mass', unit='$M_{\\odot}$')
priors['symmetric_mass_ratio'] = bilby.core.prior.Uniform(
0.1, 0.25, name='symmetric_mass_ratio')
priors['lambda_tilde'] = bilby.core.prior.Uniform(0, 5000, name='lambda_tilde')
priors['delta_lambda'] = bilby.core.prior.Uniform(
-5000, 5000, name='delta_lambda')
# Initialise the likelihood by passing in the interferometer data (IFOs)
# and the waveform generator
likelihood = bilby.gw.GravitationalWaveTransient(
interferometers=interferometers, waveform_generator=waveform_generator,
time_marginalization=False, phase_marginalization=False,
distance_marginalization=False, priors=priors)
# Run sampler. In this case we're going to use the `nestle` sampler
result = bilby.run_sampler(
likelihood=likelihood, priors=priors, sampler='nestle', npoints=100,
injection_parameters=injection_parameters, outdir=outdir, label=label,
conversion_function=bilby.gw.conversion.generate_all_bns_parameters)
result.plot_corner()
|
StarcoderdataPython
|
6661850
|
#!/usr/bin/env python
#-*- encoding: utf-8 -*-
'''
purpose: implements an SMTP email client
author : <EMAIL>
date : 2017-06-21
history:
'''
import sys
import os
import smtplib
import mimetypes
import email
from email.utils import COMMASPACE
from email.utils import parseaddr, formataddr
from email.header import Header
from email.Encoders import encode_base64
class MailClient(object):
class Scheme(object):
def __init__(self, scheme_data):
'''
            scheme_data format:
ssl://smtp_host/port?user=&password=&aliasName=
non_ssl://smtp_host/port?user=&password=&aliasName=
'''
self.scheme_data = scheme_data
self.parse(self.scheme_data)
def parse(self, scheme_data):
if scheme_data is not None and scheme_data != "":
self.scheme_data = scheme_data
segs = self.scheme_data.split(':')
# TODO check
if segs[0] == "ssl":
self.ssl = True
else:
self.ssl = False
other_data = segs[1].lstrip('//')
segs = other_data.split('?')
s_segs = segs[0].split('/')
self.host = s_segs[0]
self.port = int(s_segs[1])
s_segs = segs[1].split('&')
for seg in s_segs:
sub_seg = seg.split('=')
k = sub_seg[0]
v = ""
if len(sub_seg) > 1:
v = sub_seg[1]
if k == 'user':
self.user = v
elif k == 'password':
self.password = v
elif k == 'aliasName':
self.alias_name = v
else:
pass
def __init__(self, scheme):
self.scheme = MailClient.Scheme(scheme)
def __format_addr(self, s):
name, addr = parseaddr(s)
return formataddr((\
Header(name, 'utf-8').encode(), \
addr.encode('utf-8') if isinstance(addr, unicode) else addr))
def connect(self):
if self.scheme.ssl:
self.cli = smtplib.SMTP_SSL(self.scheme.host, port = self.scheme.port)
self.cli.ehlo()
#self.cli.starttls()
self.cli.ehlo()
else:
self.cli = smtplib.SMTP(self.scheme.host, port = self.scheme.port)
return True
def login(self):
try:
self.cli.login(self.scheme.user, self.scheme.password)
except smtplib.SMTPAuthenticationError, e:
return -1
return 0
def logout(self):
self.cli.close()
def send_mail(self, recipients, msg):
'''
        recipients is the list of recipient addresses
        msg is the string produced by calling as_string() on a MIMEMultipart message
'''
msg_obj = email.message_from_string(msg)
del msg_obj["From"]
del msg_obj["To"]
#print self.scheme.alias_name
msg_obj["From"] = self.__format_addr(u'%s <%s>' %(unicode(self.scheme.alias_name, "utf-8"), self.scheme.user))
msg_obj["To"] = COMMASPACE.join(recipients)
ret = -1
reason = ""
try:
result = self.cli.sendmail(self.scheme.user, recipients, msg_obj.as_string())
if len(result) <= 0:
ret = 0
else:
ret = -1
except smtplib.SMTPRecipientsRefused, e:
ret = -2
for r in e.recipients.keys():
d = e.recipients[r]
reason = "%s; %s:%s" %(reason, r, d[1])
except smtplib.SMTPDataError, e:
ret = -3
reason = e[1]
except smtplib.SMTPServerDisconnected, e:
ret = -4
reason = e[0]
except smtplib.SMTPSenderRefused, e:
ret = -5
reason = e[1]
return (ret, reason)
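# Illustrative usage sketch (not part of the original module): build a scheme
# string, connect, log in and send a prepared MIMEMultipart message. The host,
# credentials and recipient below are placeholders, not working values.
if __name__ == '__main__':
    from email.mime.multipart import MIMEMultipart
    from email.mime.text import MIMEText

    msg = MIMEMultipart()
    msg.attach(MIMEText('hello from MailClient', 'plain', 'utf-8'))
    client = MailClient('ssl://smtp.example.com/465?user=<EMAIL>&password=<PASSWORD>&aliasName=Sender')
    if client.connect() and client.login() == 0:
        ret, reason = client.send_mail(['<EMAIL>'], msg.as_string())
        print(ret, reason)
        client.logout()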
|
StarcoderdataPython
|
6487064
|
<filename>venv/lib/python3.8/site-packages/azureml/_cli/folder/folder_commands.py<gh_stars>0
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import os
from azureml._cli.folder.folder_subgroup import FolderSubGroup
from azureml._cli.cli_command import command
from azureml._cli import argument
from azureml.exceptions import UserErrorException
@command(
subgroup_type=FolderSubGroup,
command="attach",
short_description="Attach a folder to an AzureML workspace "
"and optionally a specific experiment to use by default. "
"If experiment name is not specified, it defaults to the folder name.",
argument_list=[
argument.EXPERIMENT_NAME,
argument.PROJECT_PATH
])
def attach_folder_to_workspace_and_experiment(
workspace=None,
experiment_name=None,
path=None,
# We should enforce a logger
logger=None):
path = os.path.abspath(path)
if os.path.exists(path) and not os.path.isdir(path):
raise UserErrorException("The provided path [{}] must be a directory".format(path))
elif not os.path.exists(path):
logger.info("Creating non-existent path %s", path)
os.makedirs(path, exist_ok=True)
logger.debug("Workspace to attach is %s", workspace._workspace_id)
if experiment_name is None:
path = path.rstrip('\\/')
experiment_to_attach = os.path.basename(path)
logger.debug("No experiment name was provided")
else:
experiment_to_attach = experiment_name
logger.debug("Attaching folder %s to experiment %s", path, experiment_to_attach)
project = workspace._initialize_folder(experiment_to_attach, directory=path)
return project._serialize_to_dict()
|
StarcoderdataPython
|
1720005
|
from typing import List, Union, Type
from enum import Enum
__all__ = ("EnumConfig", "Config")
class ConfigType(Enum):
boolean = "boolean"
integer = "number"
string = "string"
class BaseConfig:
def __init__(self, *, name: str, description: str) -> None:
self.name = name
self.description = description
@classmethod
def from_dict(cls, conf_dict):
return cls(**conf_dict)
@property
def __dict__(self):
return {"name": self.name, "description": self.description}
def __repr__(self):
return f"<vscode.BaseConfig name={self.name} description={self.description}>"
class EnumConfig(BaseConfig):
def __repr__(self):
return f"<vscode.EnumConfig name={self.name} description={self.description}>"
class Config(BaseConfig):
def __init__(
self,
*,
name: str,
description: str,
input_type: Type[Union[str, int, bool]],
enums: List[EnumConfig] = [],
default=None,
) -> None:
if input_type not in (bool, str, int):
raise TypeError("input_type must be either the bool, str or int class")
types = {bool: ConfigType.boolean, str: ConfigType.string, int: ConfigType.integer}
input_type = types[input_type]
super().__init__(name=name, description=description)
self.type = input_type.name
self.default = default
self.enums = enums
@property
def __dict__(self) -> dict:
out = super().__dict__
out["type"] = self.type
out["default"] = self.default
if len(self.enums):
out["enum"] = [enum.name for enum in self.enums]
out["enumDescriptions"] = [enum.description for enum in self.enums]
return out
def __repr__(self):
return f"<vscode.Config name={self.name} description={self.description} type={self.type} default={self.default} enums={[repr(enum) for enum in self.enums]}>"
|
StarcoderdataPython
|
143836
|
# REQUIRES: bindings_python
# XFAIL: true
# RUN: %PYTHON% %s | FileCheck %s
import mlir
import circt
from circt.design_entry import Input, Output, module
from circt.esi import types
from circt.dialects import comb, hw
import sys
@module
class PolynomialCompute:
"""Module to compute ax^3 + bx^2 + cx + d for design-time coefficients"""
# Evaluate polynomial for 'x'.
x = Input(types.i32)
def __init__(self, coefficients: list[int], **kwargs):
"""coefficients is in 'd' -> 'a' order."""
self.__coefficients = coefficients
# Full result.
self.y = Output(types.i32)
def construct(self, mod):
"""Implement this module for input 'x'."""
x = mod.x
taps: list[mlir.ir.Value] = list()
runningPower: list[mlir.ir.Value] = list()
for power, coeff in enumerate(self.__coefficients):
coeffVal = hw.ConstantOp(types.i32,
mlir.ir.IntegerAttr.get(types.i32, coeff))
if power == 0:
newPartialSum = coeffVal.result
else:
partialSum = taps[-1]
if power == 1:
currPow = x
else:
x_power = [x for i in range(power - 1)]
currPow = comb.MulOp(types.i32, x_power + [runningPower[-1]]).result
newPartialSum = comb.AddOp(
types.i32,
[
partialSum,
comb.MulOp(types.i32, [coeffVal.result, currPow]).result
]).result
runningPower.append(currPow)
taps.append(newPartialSum)
# Final output
return {"y": taps[-1]}
def build(top):
i32 = mlir.ir.Type.parse("i32")
c23 = mlir.ir.IntegerAttr.get(i32, 23)
x = hw.ConstantOp(i32, c23)
poly = PolynomialCompute([62, 42, 6], x=x)
hw.OutputOp([poly.y])
mod = mlir.ir.Module.create()
with mlir.ir.InsertionPoint(mod.body), circt.support.BackedgeBuilder():
hw.HWModuleOp(name='top',
input_ports=[],
output_ports=[('y', mlir.ir.Type.parse("i32"))],
body_builder=build)
mod.operation.print()
# CHECK: hw.module @top() -> (%y: i32) {
# CHECK: %c23_i32 = hw.constant 23 : i32
# CHECK: [[REG0:%.+]] = "circt.design_entry.PolynomialCompute"(%c23_i32) : (i32) -> i32
# CHECK: hw.output [[REG0]] : i32
print("\n\n=== Verilog ===")
# CHECK-LABEL: === Verilog ===
pm = mlir.passmanager.PassManager.parse(
"hw-legalize-names,hw.module(hw-cleanup)")
pm.run(mod)
circt.export_verilog(mod, sys.stdout)
# CHECK: module PolynomialCompute(
# CHECK: input [31:0] x,
# CHECK: output [31:0] y);
# CHECK: assign y = 32'h3E + 32'h2A * x + 32'h6 * x * x;
|
StarcoderdataPython
|
1651236
|
<filename>pyqtgraph_extended/opengl/__init__.py
from pyqtgraph.opengl import *
from pyqtgraph_extensions.opengl import *
if __name__=="__main__":
def test_GLViewWidget():
view=GLViewWidget()
ai=GLAxisItem()
view.addItem(ai)
view.show()
return view
view=test_GLViewWidget()
|
StarcoderdataPython
|
3255461
|
#
# Transmission Line Simulator
#
# Author(s): <NAME>
# Created: Aug-28-2017
#
# Copied from scipy so I don't have to import the whole scipy library.
import numpy as np
def gaussian(M, std, sym=True):
"""Return a Gaussian window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
std : float
The standard deviation, sigma.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The Gaussian window is defined as
.. math:: w(n) = e^{ -\frac{1}{2}\left(\frac{n}{\sigma}\right)^2 }
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.gaussian(51, std=7)
>>> plt.plot(window)
>>> plt.title(r"Gaussian window ($\sigma$=7)")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title(r"Frequency response of the Gaussian window ($\sigma$=7)")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if _len_guards(M):
return np.ones(M)
M, needs_trunc = _extend(M, sym)
n = np.arange(0, M) - (M - 1.0) / 2.0
sig2 = 2 * std * std
w = np.exp(-n ** 2 / sig2)
return _truncate(w, needs_trunc)
def _len_guards(M):
"""Handle small or incorrect window lengths"""
if int(M) != M or M < 0:
        raise ValueError('Window length M must be a non-negative integer')
return M <= 1
def _extend(M, sym):
"""Extend window by 1 sample if needed for DFT-even symmetry"""
if not sym:
return M + 1, True
else:
return M, False
def _truncate(w, needed):
"""Truncate window by 1 sample if needed for DFT-even symmetry"""
if needed:
return w[:-1]
else:
return w
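# Minimal usage sketch (not part of the original module): build a 51-point
# symmetric Gaussian window with sigma = 7, mirroring the docstring example.
if __name__ == '__main__':
    window = gaussian(51, std=7)
    print(window.shape, window.max())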
|
StarcoderdataPython
|
34524
|
def print_lol(arr):
    """Recursively print every item of a (possibly nested) list."""
    for row in arr:
        if isinstance(row, list):
            print_lol(row)
        else:
            print(row)
|
StarcoderdataPython
|
5172610
|
<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
*****************************************
Author: zhlinh
Email: <EMAIL>
Version: 0.0.1
Created Time: 2016-02-22
Last_modify: 2016-02-22
******************************************
'''
'''
Given a collection of intervals, merge all overlapping intervals.
For example,
Given [1,3],[2,6],[8,10],[15,18],
return [1,6],[8,10],[15,18].
'''
# Definition for an interval.
class Interval(object):
def __init__(self, s=0, e=0):
self.start = s
self.end = e
class Solution(object):
def merge(self, intervals):
"""
:type intervals: List[Interval]
:rtype: List[Interval]
"""
if len(intervals) < 2:
return intervals
intervals.sort(key = lambda x: x.start)
prev = intervals[0]
res = []
for i in range(1, len(intervals)):
if prev.end >= intervals[i].start:
prev.end = max(prev.end, intervals[i].end)
else:
res.append(prev)
prev = intervals[i]
res.append(prev)
return res
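# Illustrative usage sketch (not part of the original solution): merge the
# intervals from the problem statement and print the merged ranges.
if __name__ == '__main__':
    data = [Interval(1, 3), Interval(2, 6), Interval(8, 10), Interval(15, 18)]
    merged = Solution().merge(data)
    print([[iv.start, iv.end] for iv in merged])  # [[1, 6], [8, 10], [15, 18]]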
|
StarcoderdataPython
|
4861130
|
# Generated by Django 3.2.11 on 2022-01-27 12:43
from django.db import migrations
from spritstat.models import Location, LocationType
def set_name(apps, schema_editor):
# Create the name from the address/city/plz/region_name fields
locations = Location.objects.all()
for loc in locations:
if loc.type == LocationType.NAMED:
loc.name = f"{loc.address}, {loc.postal_code} {loc.city}"
else:
loc.name = loc.region_name
loc.save()
class Migration(migrations.Migration):
dependencies = [
("spritstat", "0007_auto_20220127_1203"),
]
operations = [
migrations.RunPython(set_name),
]
|
StarcoderdataPython
|
1791046
|
<filename>ultraopt/optimizer/random_opt.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author : <NAME>
# @Contact : <EMAIL>
import numpy as np
from ultraopt.optimizer.base_opt import BaseOptimizer
class RandomOptimizer(BaseOptimizer):
def _new_result(self, budget, vectors: np.ndarray, losses: np.ndarray):
pass
def _get_config(self, budget, max_budget):
return self.pick_random_initial_config(budget, origin="Random Search")
def get_available_max_budget(self):
for budget in reversed(sorted(self.budgets)):
if self.budget2obvs[budget]["losses"]:
return budget
return self.budgets[0]
|
StarcoderdataPython
|
217394
|
# Copyright 2018 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""":mod:`microprobe.passes.structure` module
"""
# Futures
from __future__ import absolute_import, division
# Built-in modules
# Third party modules
from six.moves import range
# Own modules
import microprobe.passes
from microprobe.code.address import InstructionAddress
from microprobe.code.bbl import replicate_bbls
from microprobe.utils.logger import get_logger
# Local modules
# Constants
LOG = get_logger(__name__)
__all__ = ['SimpleBuildingBlockPass', 'GenericCodeStructurePass']
# Functions
# Classes
class SimpleBuildingBlockPass(microprobe.passes.Pass):
"""SimpleBuildingBlockPass Class.
This :class:`~.Pass` adds a single building block of a given instruction
size to the given building block. The pass fails if the buiding block
size differs in ratio more than the threshold provided.
"""
def __init__(self, bblsize, threshold=0.1):
"""Create a SimpleBuildingBlockPass object.
:param bblsize: Size of the building block to add
:type bblsize: :class:`int`
:param threshold: Allowed deviation from the size provided
(Default value = 0.1)
:type threshold: :class:`float`
:return: A new SimpleBuildingBlockPass object.
:rtype: :class:`SimpleBuildingBlockPass`
"""
super(SimpleBuildingBlockPass, self).__init__()
self._bblsize = bblsize
self._description = "Create a basic block with '%d' " \
"instructions" % self._bblsize
self._threshold = threshold
def __call__(self, building_block, dummy):
"""
:param building_block:
:param dummy:
"""
building_block.cfg.add_bbl(size=self._bblsize)
def check(self, building_block, dummy_target):
"""
:param building_block:
:param dummy_target:
"""
pass_ok = True
pass_ok = pass_ok and (len(building_block.cfg.bbls) == 1)
for bbl in building_block.cfg.bbls:
LOG.debug("BBL size: %d", bbl.size)
LOG.debug("Expected BBL size: %d", self._bblsize)
if (abs((bbl.size * 1.0) / self._bblsize) - 1) > self._threshold:
LOG.warning(
"Percentage deviation: %.2f", abs(
(bbl.size * 1.0) / self._bblsize
) - 1
)
pass_ok = False
return pass_ok
class GenericCodeStructurePass(microprobe.passes.Pass):
""" """
def __init__(self, model):
"""
:param model:
"""
super(GenericCodeStructurePass, self).__init__()
self._model = model
def __call__(self, building_block, target):
"""
:param building_block:
:param target:
"""
descriptors = self._model(building_block.code_size)
current_displacement = 0
orig_bbls = building_block.cfg.bbls[:]
iteration_register = None
first = True
for idx, elem in enumerate(descriptors):
chunks, displacement, iterations = elem
LOG.debug("Descriptor info:")
LOG.debug(" Chunks: %d", chunks)
LOG.debug(" Displacement: %d", displacement)
LOG.debug(" Iterations: %d", iterations)
if chunks < 1 or iterations < 1:
continue
for dummy in range(0, iterations):
first_chunk_in_iteration = None
last_chunk_in_iteration = None
for dummy in range(0, chunks):
if first:
LOG.debug("First Chunk - No copy")
first = False
current_chunk = orig_bbls
# prev_chunk = None
else:
LOG.debug("Other Chunk - Replicating")
# prev_chunk = current_chunk
current_chunk = replicate_bbls(
orig_bbls,
displacement=current_displacement
)
building_block.cfg.add_bbls(current_chunk)
if first_chunk_in_iteration is None:
first_chunk_in_iteration = current_chunk
last_chunk_in_iteration = current_chunk
current_displacement = current_displacement + displacement
first_chunk_in_iteration = first_chunk_in_iteration[0]
last_chunk_in_iteration = last_chunk_in_iteration[-1]
if iterations > 1:
LOG.debug("Adding loop control instructions")
if iteration_register is None:
iteration_register = \
target.get_register_for_address_arithmetic(
building_block.context)
building_block.context.add_reserved_registers(
[iteration_register]
)
LOG.debug(
"Using register '%s' as loop control "
"register", iteration_register
)
# set the number of iteration to the first
newins = target.set_register(
iteration_register, 0, building_block.context
)
# pylint: disable=E1103
first_chunk_in_iteration.insert_instr(
newins, before=first_chunk_in_iteration.instrs[0]
)
# pylint: enable=E1103
# set the branch to the last
label = "chunk_%d" % (idx)
newins = target.add_to_register(iteration_register, 1)
newins += target.compare_and_branch(
iteration_register, iterations, "<", label,
building_block.context
)
# pylint: disable=E1103
first_chunk_in_iteration.instrs[0].setlabel(label)
last_chunk_in_iteration.insert_instr(
newins, after=last_chunk_in_iteration.instrs[-1]
)
# pylint: enable=E1103
# link_BBLS
prev_bbl = None
for bbl in building_block.cfg.bbls:
if prev_bbl is not None:
LOG.debug("Linking with previous chunk")
source = InstructionAddress(
base_address="code",
displacement=prev_bbl.instrs[-1].address.displacement +
prev_bbl.instrs[-1].architecture_type.format.length
)
newins = target.branch_unconditional_relative(
source, bbl.instrs[0]
)
prev_bbl.insert_instr([newins], after=prev_bbl.instrs[-1])
prev_bbl = bbl
# exit(0)
def check(self, building_block, target):
"""
:param building_block:
:param target:
"""
raise NotImplementedError
|
StarcoderdataPython
|
1671258
|
# Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from aws_cdk.assertions import Template
from aws_ddk_core.base import BaseStack
from aws_ddk_core.resources import KinesisStreamsFactory
def test_data_stream(test_stack: BaseStack) -> None:
KinesisStreamsFactory.data_stream(
scope=test_stack, id="dummy-stream-1", environment_id="dev", stream_name="dummy-stream"
)
template = Template.from_stack(test_stack)
template.has_resource_properties(
"AWS::Kinesis::Stream",
props={
"Name": "dummy-stream",
},
)
|
StarcoderdataPython
|
1999288
|
# coding: utf-8
import model
import model_exceptions
import view
class Control(object):
def __init__(self, file_name=None):
'''
parameters
----------
file_name : str
            name for the json file (without the .json extension); if None, the
            file will receive the default name.
'''
self._model = model.Model(file_name)
self._view = view.View(self, f'Todo {self._model.file_name()}')
self._view.show_todos(self._model.file())
def _save_and_show(func, *args, **kwargs):
'''
Decorator to save and update the view.
parameters
----------
func : function
return
------
inner_func : function
'''
def inner_func(self, *args, **kwargs):
func(self, *args, **kwargs)
self._model.save_file()
self._view.show_todos(self._model.file())
return inner_func
@_save_and_show
def inser_task(self, task):
'''
        Will try to insert a new task.
parameters
----------
task : str
'''
try:
self._model.inser_task(task)
self._view.current().set('')
self._view.show_message(f'\"{task}\" added with success.')
except model_exceptions.AlreadyStored:
self._view.show_alert(f'{task} already stored.')
except ValueError:
self._view.show_alert('Must contain some value.')
@_save_and_show
def updat_task(self, *tasks):
'''
        Will update the status of each selected task.
        parameters
----------
tasks : str
'''
if tasks:
try:
for task in tasks:
self._model.updat_task(task)
self._view.show_message('Update complete.')
except model_exceptions.NotStored:
self._view.show_alert(f'{task} not stored.')
else:
self._view.show_alert('Must select some value.')
@_save_and_show
def delet_task(self, *tasks):
'''
        Will delete each selected task.
        parameters
----------
tasks : str
'''
if tasks:
            if self._view.askquestion('Do you really want to delete?'):
for task in tasks:
try:
self._model.delet_task(task)
except model_exceptions.NotStored:
self._view.show_alert(f'{task} not stored.')
else:
self._view.show_alert('Must select some value.')
def main(self):
'''
Initialize the gui.
'''
self._view.main()
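# Illustrative usage sketch (not part of the original module): create a
# controller backed by a 'todos.json' file and start the GUI loop. Assumes the
# sibling model/view modules imported above are importable; the file name is
# an example value.
if __name__ == '__main__':
    Control('todos').main()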
|
StarcoderdataPython
|
1754524
|
<reponame>DanielSBrown/osf.io
"""
With email confirmation enabled, the `date_confirmed` is used to filter users
for e.g. search results. This requires setting this field for all users
registered before confirmation was added. This migration sets each user's
`date_confirmed` to his / her `date_registered`.
"""
from website.app import init_app
from website import models
app = init_app()
def add_date_confirmed():
for user in models.User.find():
if user.date_confirmed is None:
user.date_confirmed = user.date_registered
user.save()
if __name__ == '__main__':
add_date_confirmed()
|
StarcoderdataPython
|
6619549
|
<gh_stars>0
# !/usr/bin/env python
# -*- coding: utf-8 -*-
# -------------------------------------------#
# author: <NAME> #
# email: <EMAIL> #
# -------------------------------------------#
from __future__ import absolute_import, unicode_literals
import sys
from xmnlp.config import path as C_PATH
from . import sentiment
if sys.version_info[0] == 2:
reload(sys)
sys.setdefaultencoding('utf8')
model = None
def loader():
"""load model"""
global model
if model is None:
print("(Lazy Load) Loading model...")
model = sentiment.Sentiment()
model.load(C_PATH.sentiment['model']['sentiment'])
def predict(text, stopword=None):
"""predict sentiment"""
loader()
return model.predict(text, stopword=stopword)
def load(path):
"""load model from path"""
global model
model = sentiment.Sentiment()
model.load(path)
|
StarcoderdataPython
|
1879513
|
import logging
from typing import List
logger = logging.getLogger(__name__)
class GameInfo:
def __init__(self):
self.server_id = None
self.server_name = None
self.match_guid = None
self.game_mode = None
self.mutator_index = None
self.rumble_mutator = None
def parse_game_info_actor(self, actor_data: dict, game_event_actor: dict, objects: List[str]):
"""
Parses game actor
:param actor_data: game replication info
:param game_event_actor: soccar game event actor
:param objects: object list from the decompiled replay
:return: self
"""
# There is no GameServerID if you play alone
self.server_id = ''
if 'ProjectX.GRI_X:GameServerID' in actor_data:
self.server_id = str(actor_data['ProjectX.GRI_X:GameServerID']['q_word'])
self.server_name = actor_data['Engine.GameReplicationInfo:ServerName']
# A custom lobby doesn't have a MatchGUID
self.match_guid = actor_data.get('ProjectX.GRI_X:MatchGUID', '')
self.playlist = actor_data['ProjectX.GRI_X:ReplicatedGamePlaylist']
self.mutator_index = actor_data.get('ProjectX.GRI_X:ReplicatedGameMutatorIndex', 0)
if 'TAGame.GameEvent_Soccar_TA:SubRulesArchetype' in game_event_actor:
# Only used for rumble stats
# TODO can this contain any other mutators?
self.rumble_mutator = objects[game_event_actor['TAGame.GameEvent_Soccar_TA:SubRulesArchetype']]
logger.info('Created game info from actor')
return self
|
StarcoderdataPython
|
4939046
|
<filename>src/lobster.py
#!/usr/bin/env python
#
# lobster.py - lobster
#
# (c) gdifiore 2018 <<EMAIL>>
#
import os
import sys
import json
from lobster_json import *
from bs4 import BeautifulSoup
type = sys.argv[1]
file = sys.argv[2]
theme = sys.argv[3]
if type == "simple":
def writeToHTML(title, header, content):
html_file = theme + ".html"
path = "themes\\" + html_file
soup = BeautifulSoup(open(path), "html.parser")
for i in soup.find_all('title'):
i.string = title
for i in soup.find_all(class_='header'):
i.string = header
for i in soup.find_all(class_='content'):
i.string = content
#print(soup)
finished = theme + "_finished.html"
with open(finished, "w") as text_file:
text_file.write(str(soup))
lobster_data = readJSON(file)
title = getTitle(lobster_data)
header = getHeader(lobster_data)
content= getContent(lobster_data)
writeToHTML(title, header, content)
if type == "blog":
def writeToHTMLBlog(title, header, content, author, date):
html_file = theme + ".html"
path = "themes\\" + html_file
soup = BeautifulSoup(open(path), "html.parser")
for i in soup.find_all('title'):
i.string = title
for i in soup.find_all(class_='header'):
i.string = header
for i in soup.find_all(class_='content'):
i.string = content
for i in soup.find_all(class_='author'):
i.string = author
for i in soup.find_all(class_='date'):
i.string = date
#print(soup)
finished = theme + "_finished.html"
with open(finished, "w") as text_file:
text_file.write(str(soup))
lobster_data = readJSON(file)
title = getTitle(lobster_data)
header = getHeader(lobster_data)
content= getContent(lobster_data)
author = getAuthor(lobster_data)
date = getDate(lobster_data)
writeToHTMLBlog(title, header, content, author, date)
else:
print(sys.argv[1])
print("failure")
|
StarcoderdataPython
|
11269970
|
<reponame>rmrector/service.stinger.notification<filename>python/libs/quickjson.py
import collections
import json
import sys
import xbmc
movie_properties = ['imdbnumber', 'tag']
nostingertags_filter = {'and': [{'field': 'tag', 'operator':'isnot', 'value':'duringcreditsstinger'}, {'field': 'tag', 'operator':'isnot', 'value':'aftercreditsstinger'}]}
def get_movies(sort_method='sorttitle', ascending=True, limit=None, properties=None, listfilter=None):
json_request = get_base_json_request('VideoLibrary.GetMovies')
json_request['params']['properties'] = properties if properties != None else movie_properties
json_request['params']['sort'] = {'method': sort_method, 'order': 'ascending' if ascending else 'descending'}
if listfilter:
json_request['params']['filter'] = listfilter
if limit:
json_request['params']['limits'] = {'end': limit}
json_result = execute_jsonrpc(json_request)
if _check_json_result(json_result, 'movies', json_request):
return json_result['result']['movies']
else:
return []
def get_movie_details(movie_id, properties=None):
json_request = get_base_json_request('VideoLibrary.GetMovieDetails')
json_request['params']['movieid'] = movie_id
json_request['params']['properties'] = properties if properties != None else movie_properties
json_result = json.loads(xbmc.executeJSONRPC(json.dumps(json_request)))
if _check_json_result(json_result, 'moviedetails', json_request):
return json_result['result']['moviedetails']
def set_movie_details(movie_id, **movie_details):
json_request = get_base_json_request('VideoLibrary.SetMovieDetails')
json_request['params']['movieid'] = movie_id
for param, value in movie_details.items():
json_request['params'][param] = value
json_result = execute_jsonrpc(json_request)
_check_json_result(json_result, 'OK', json_request)
def get_base_json_request(method):
return {'jsonrpc': '2.0', 'method': method, 'params': {}, 'id': 1}
def execute_jsonrpc(jsonrpc_command):
if isinstance(jsonrpc_command, dict):
jsonrpc_command = json.dumps(jsonrpc_command)
json_result = xbmc.executeJSONRPC(jsonrpc_command)
return json.loads(json_result)
def _check_json_result(json_result, result_key, json_request):
if 'error' in json_result:
raise JSONException(json_request, json_result)
return 'result' in json_result and result_key in json_result['result']
class JSONException(Exception):
def __init__(self, json_request, json_result):
self.json_request = json_request
self.json_result = json_result
message = "There was an error with a JSON-RPC request.\nRequest: "
message += json.dumps(json_request, skipkeys=True, ensure_ascii=False, indent=2, cls=LogJSONEncoder)
message += "\nResult: "
message += json.dumps(json_result, skipkeys=True, ensure_ascii=False, indent=2, cls=LogJSONEncoder)
super(JSONException, self).__init__(message)
class LogJSONEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, collections.Mapping):
return dict((key, obj[key]) for key in obj.keys())
if isinstance(obj, collections.Iterable):
return list(obj)
if hasattr(obj, '__dict__'):
return obj.__dict__
return str(obj)
|
StarcoderdataPython
|
9750084
|
<gh_stars>1-10
#
# Shoulder
# Copyright (C) 2018 Assured Information Security, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import io
import typing
from dataclasses import dataclass, field
from shoulder.gadget.gadget_properties import GadgetProperties
@dataclass
class properties(GadgetProperties):
""" Properties of the C++ function_definition gadget """
gadget_name: str = "shoulder.cxx.function_definition"
""" The name of the C function to generate """
name: str = "function_definition"
""" The name of the C function to generate """
args: typing.List[typing.Tuple[str, str]] = \
field(default_factory= lambda: [])
""" List of argmuent type/name tuples for the generated function """
return_type: str = "uint64_t"
""" Return type for the generated function """
inline: bool = True
""" Set True to declare function as inline """
const: bool = False
""" Set True to declare function as const """
noexcept: bool = False
""" Set True to declare function as noexcept """
indent: int = 0
""" The indentation level to generate at """
def function_definition(decorated):
"""
A decorator gadget that generates a C++ function declaration. The function
decorated by this gadget should generate the function's body.
Usage:
properties.name = "my_function"
properties.return_type = "uint64_t"
properties.args = [("uint64_t", "arg1")]
@function_definition
function(generator, outfile, ...):
outfile.write("contents inside function body")
Generates:
inline uint64_t my_function(uint64_t arg1)
{ contents inside function body }
"""
def function_definition_decorator(generator, outfile, *args, **kwargs):
properties = generator.gadgets["shoulder.cxx.function_definition"]
indent_str = ""
for level in range(0, properties.indent):
indent_str += "\t"
outfile.write(indent_str)
if properties.inline == True:
outfile.write("inline ")
outfile.write(str(properties.return_type) + " ")
outfile.write(str(properties.name))
if len(properties.args) == 0:
outfile.write("(void)\n")
else:
outfile.write("(")
for idx, arg_pair in enumerate(properties.args):
if idx > 0:
outfile.write(", ")
outfile.write(str(arg_pair[0]) + " ")
outfile.write(str(arg_pair[1]))
outfile.write(")\n")
outfile.write(indent_str + "{")
contents = io.StringIO()
decorated(generator, contents, *args, **kwargs)
lines = contents.getvalue().splitlines()
if len(lines) == 1:
outfile.write(" ")
outfile.write(lines[0])
outfile.write(" ")
elif len(lines) > 1:
outfile.write("\n")
for line in lines:
outfile.write(indent_str + "\t" + line + "\n")
outfile.write(indent_str)
outfile.write("}\n\n")
return function_definition_decorator
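# Minimal, hypothetical usage sketch of the gadget above (not part of the
# original file): the _FakeGenerator class is an assumption made purely for
# illustration, since the real shoulder generator object is not shown here.
class _FakeGenerator:
    def __init__(self):
        self.gadgets = {"shoulder.cxx.function_definition": properties()}
@function_definition
def _example_body(generator, outfile):
    outfile.write("return 42;")
if __name__ == "__main__":
    _gen = _FakeGenerator()
    _gen.gadgets["shoulder.cxx.function_definition"].name = "get_answer"
    _buf = io.StringIO()
    _example_body(_gen, _buf)
    print(_buf.getvalue())  # inline uint64_t get_answer(void)\n{ return 42; }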
|
StarcoderdataPython
|
4850923
|
#!/usr/bin/env python
class Solution:
def divide(self, dividend, divisor):
"""
:type dividend: int
:type divisor: int
:rtype: int
"""
n = 31
quotient = 0
flip = 1
if dividend < 0:
dividend = -dividend
flip = -flip
if divisor < 0:
divisor = -divisor
flip = -flip
while n >= 0:
print('n = %d' % n)
d = divisor << n
if dividend >= d:
print('shift %d bits' % n)
dividend -= d
quotient += 1 << n
print('new dividend: %d' % dividend)
n -= 1
if dividend < divisor:
if flip < 0:
quotient = -quotient
if quotient < -2147483648 or quotient > 2147483647:
quotient = 2147483647
return quotient
sol = Solution()
dividend, divisor = 10, 3
dividend, divisor = 7, -3
dividend, divisor = 0, -3
dividend, divisor = 1, 1
dividend, divisor = -2147483648, -1
print(sol.divide(dividend, divisor))
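# Worked example of the shift-and-subtract idea used above (added for clarity,
# not part of the original snippet): for 10 / 3, the shifted divisor is
# subtracted whenever it still fits under the remaining dividend:
#   3 << 1 = 6 <= 10 -> quotient += 1 << 1 (2), dividend = 4
#   3 << 0 = 3 <= 4  -> quotient += 1 << 0 (1), dividend = 1
# leaving quotient 3 and remainder 1.
assert Solution().divide(10, 3) == 3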
|
StarcoderdataPython
|
3389278
|
<filename>avwx_api/views.py<gh_stars>0
"""
<NAME> - <EMAIL>
avwx_api.views - Routes and views for the Quart application
"""
# pylint: disable=W0702
# stdlib
from dataclasses import asdict
# library
import avwx
from quart import Response, jsonify
from quart_openapi.cors import crossdomain
# module
from avwx_api import app
# Static Web Pages
@app.route("/")
@app.route("/home")
async def home() -> Response:
"""
Returns static home page
"""
return await app.send_static_file("html/home.html")
# API Routing Errors
@app.route("/api")
async def no_report() -> Response:
"""
Returns no report msg
"""
return jsonify({"error": "No report type given"}), 400
@app.route("/api/metar")
@app.route("/api/taf")
async def no_station() -> Response:
"""
Returns no station msg
"""
return jsonify({"error": "No station given"}), 400
@app.route("/api/station/<string:station>")
@crossdomain(origin="*")
async def station_endpoint(station: str) -> Response:
"""
Returns raw station info if available
"""
station = station.upper()
try:
return jsonify(asdict(avwx.Station.from_icao(station)))
except avwx.exceptions.BadStation:
return jsonify(
{
"error": f'Station ident "{station}" not found. Email me if data is missing :)'
}
)
|
StarcoderdataPython
|
11201419
|
class SchedulerFixed(object):
def __init__(self, fixed_lr: float):
super().__init__()
self.fixed_lr = fixed_lr
def get_learning_rate(self, step: int):
return self.fixed_lr
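# Usage sketch (illustrative values, not from the original file):
# scheduler = SchedulerFixed(fixed_lr=3e-4)
# scheduler.get_learning_rate(step=100)  # -> 0.0003 at every step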
|
StarcoderdataPython
|
5093503
|
<gh_stars>0
"""
Copyright (c) Contributors to the Open 3D Engine Project.
For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""
# Test Case Title : Check that network input can be created, received by the authority, and processed
# fmt: off
class Tests():
enter_game_mode = ("Entered game mode", "Failed to enter game mode")
exit_game_mode = ("Exited game mode", "Couldn't exit game mode")
find_network_player = ("Found network player", "Couldn't find network player")
found_lines = ("Expected log lines were found", "Expected log lines were not found")
found_unexpected_lines = ("Unexpected log lines were not found", "Unexpected log lines were found")
# fmt: on
def Multiplayer_AutoComponent_NetworkInput():
r"""
Summary:
Runs a test to make sure that network input can be sent from the autonomous player, received by the authority, and processed
Level Description:
- Dynamic
1. Although the level is empty, when the server and editor connect the server will spawn and replicate the player network prefab.
a. The player network prefab has a NetworkTestPlayerComponent.AutoComponent and a script canvas attached which will listen for the CreateInput and ProcessInput events.
        Print logs occur upon triggering the CreateInput and ProcessInput events along with their values; we are testing to make sure the expected events and values are received.
- Static
1. This is an empty level. All the logic occurs on the Player.network.spawnable (see the above Dynamic description)
Expected Outcome:
We should see editor logs stating that network input has been created and processed.
However, if the script receives unexpected values for the Process event we will see print logs for bad data as well.
:return:
"""
import azlmbr.legacy.general as general
from editor_python_test_tools.utils import Report
from editor_python_test_tools.utils import Tracer
from editor_python_test_tools.utils import TestHelper as helper
from ly_remote_console.remote_console_commands import RemoteConsole as RemoteConsole
def find_expected_line(expected_line):
found_lines = [printInfo.message.strip() for printInfo in section_tracer.prints]
return expected_line in found_lines
def find_unexpected_line(expected_line):
return not find_expected_line(expected_line)
unexpected_lines = [
'AutoComponent_NetworkInput received bad fwdback!',
'AutoComponent_NetworkInput received bad leftright!',
]
expected_lines = [
'AutoComponent_NetworkInput ProcessInput called!',
'AutoComponent_NetworkInput CreateInput called!',
]
expected_lines_server = [
'(Script) - AutoComponent_NetworkInput ProcessInput called!',
]
level_name = "AutoComponent_NetworkInput"
helper.init_idle()
general.set_cvar_integer('editorsv_port', 33452)
# 1) Open Level
helper.open_level("Multiplayer", level_name)
with Tracer() as section_tracer:
# 2) Enter game mode
helper.multiplayer_enter_game_mode(Tests.enter_game_mode)
# 3) Make sure the network player was spawned
player_id = general.find_game_entity("Player")
Report.critical_result(Tests.find_network_player, player_id.IsValid())
# 4) Check the editor logs for expected and unexpected log output
EXPECTEDLINE_WAIT_TIME_SECONDS = 1.0
for expected_line in expected_lines :
helper.wait_for_condition(lambda: find_expected_line(expected_line), EXPECTEDLINE_WAIT_TIME_SECONDS)
Report.result(Tests.found_lines, find_expected_line(expected_line))
general.idle_wait_frames(1)
for unexpected_line in unexpected_lines :
Report.result(Tests.found_unexpected_lines, find_unexpected_line(unexpected_line))
# 5) Check the ServerLauncher logs for expected log output
# Since the editor has started a server launcher, the RemoteConsole with the default port=4600 will automatically be able to read the server logs
server_console = RemoteConsole()
server_console.start()
for line in expected_lines_server:
assert server_console.expect_log_line(line, EXPECTEDLINE_WAIT_TIME_SECONDS), f"Expected line not found: {line}"
server_console.stop()
# Exit game mode
helper.exit_game_mode(Tests.exit_game_mode)
if __name__ == "__main__":
from editor_python_test_tools.utils import Report
Report.start_test(Multiplayer_AutoComponent_NetworkInput)
|
StarcoderdataPython
|
1926688
|
'''
Created on 07-May-2017
@author: <NAME>
'''
print("Program to check whether a character is vowel or consonant")
char=input("Enter a character...")
vowel=('a','e','i','o','u')
if char.isalpha() and len(char)==1:
for i in vowel:
if char == i:
val=1
else:
val=0
if val == 1:
print("Vowel")
elif val == 0:
print("Consonant")
else:
print("Enter a valid character")
|
StarcoderdataPython
|
1808726
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-09-18 06:41
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import geoposition.fields
import mptt.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Address',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('street', models.CharField(help_text='Street Name', max_length=255, verbose_name='Street')),
('street_number', models.CharField(help_text='House Number', max_length=55, verbose_name='Street Number')),
('supplement', models.CharField(blank=True, help_text='Additional Information (Appartment Number,...)', max_length=255, null=True, verbose_name='Address Supplements')),
('zipcode', models.CharField(blank=True, help_text='Area Code', max_length=55, null=True, verbose_name='Zip Code')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Created at')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='Updated at')),
],
options={
'verbose_name': 'Address',
'verbose_name_plural': 'Addresses',
'ordering': ['street', 'street_number'],
},
),
migrations.CreateModel(
name='Area',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(help_text='Name of Area in the hierarchy. E.g.: City or Country', max_length=255, verbose_name='Area Name')),
('geobox_upper', geoposition.fields.GeopositionField(blank=True, help_text='Upper-Left Geoposition (latitude and longitude) of the Area', max_length=42, null=True, verbose_name='Upper-Left Geoposition Boundary')),
('geobox_lower', geoposition.fields.GeopositionField(blank=True, help_text='Lower-Right Geoposition (latitude and longitude) of the Area', max_length=42, null=True, verbose_name='Lower-Right Geoposition Boundary')),
('geobox_center', geoposition.fields.GeopositionField(blank=True, help_text='Centered Geoposition (latitude and longitude) of the Area', max_length=42, null=True, verbose_name='Centered Geoposition')),
('verified', models.BooleanField(default=False, help_text='If the specified area is verified to be existent and relevant', verbose_name='Verified Area')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Created at')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='Updated at')),
('lft', models.PositiveIntegerField(db_index=True, editable=False)),
('rght', models.PositiveIntegerField(db_index=True, editable=False)),
('tree_id', models.PositiveIntegerField(db_index=True, editable=False)),
('level', models.PositiveIntegerField(db_index=True, editable=False)),
('adjacent_areas', mptt.fields.TreeManyToManyField(blank=True, help_text='Areas that are neighbors or subsets of this area.', related_name='_area_adjacent_areas_+', to='utils.Area')),
('parent', mptt.fields.TreeForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='children', to='utils.Area')),
],
options={
'verbose_name': 'Area',
'verbose_name_plural': 'Areas',
'ordering': ['name'],
},
),
migrations.CreateModel(
name='Language',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('code', models.CharField(help_text='Standard language code representation. Ex.: en_AU', max_length=10, verbose_name='Language Code')),
('name', models.CharField(help_text='Name of the Language. Ex.: English', max_length=100, verbose_name='Language Name')),
('name_en', models.CharField(help_text='Name of the Language. Ex.: English', max_length=100, null=True, verbose_name='Language Name')),
('name_de', models.CharField(help_text='Name of the Language. Ex.: English', max_length=100, null=True, verbose_name='Language Name')),
('name_pt', models.CharField(help_text='Name of the Language. Ex.: English', max_length=100, null=True, verbose_name='Language Name')),
('name_es', models.CharField(help_text='Name of the Language. Ex.: English', max_length=100, null=True, verbose_name='Language Name')),
('country', models.CharField(blank=True, help_text='Name of the Country. Ex.: Australia', max_length=100, null=True, verbose_name='Country')),
('country_en', models.CharField(blank=True, help_text='Name of the Country. Ex.: Australia', max_length=100, null=True, verbose_name='Country')),
('country_de', models.CharField(blank=True, help_text='Name of the Country. Ex.: Australia', max_length=100, null=True, verbose_name='Country')),
('country_pt', models.CharField(blank=True, help_text='Name of the Country. Ex.: Australia', max_length=100, null=True, verbose_name='Country')),
('country_es', models.CharField(blank=True, help_text='Name of the Country. Ex.: Australia', max_length=100, null=True, verbose_name='Country')),
('supported', models.BooleanField(default=False, help_text='If the language is supported in the App Translation Sytem', verbose_name='Translation supported')),
],
options={
'verbose_name': 'Language',
'verbose_name_plural': 'Language',
'ordering': ['name', 'country'],
},
),
migrations.AddField(
model_name='address',
name='area',
field=models.ForeignKey(help_text='Owner of the Item or Service', on_delete=django.db.models.deletion.PROTECT, related_name='addresses', to='utils.Area'),
),
migrations.AddField(
model_name='address',
name='user',
field=models.ForeignKey(blank=True, help_text='User creating the Address', null=True, on_delete=django.db.models.deletion.PROTECT, related_name='addresses', to=settings.AUTH_USER_MODEL),
),
]
|
StarcoderdataPython
|
6702504
|
<reponame>atsgen/tf-api-client
#!/usr/bin/env python
"""
python %prog [options] <in_schema.xsd> <out_schema.xsd>
Synopsis:
Prepare schema document. Replace include and import elements.
Examples:
python %prog myschema.xsd
python %prog myschema.xsd newschema.xsd
python %prog -f myschema.xsd newschema.xsd
cat infile.xsd | python %prog > outfile.xsd
"""
from __future__ import print_function
#
# Imports
from future import standard_library
standard_library.install_aliases()
from builtins import input
from builtins import object
import sys
import os
import urllib.request, urllib.error, urllib.parse
import ftplib
import copy
import types
from optparse import OptionParser, Values
import itertools
from lxml import etree
#
# Globals and constants
#
# Functions for external use
def process_include_files(infile, outfile, inpath=''):
options = Values({
'force': False,
})
prep_schema_doc(infile, outfile, inpath, options)
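# Hypothetical call (file names are placeholders): resolves every include /
# import referenced by the input schema and writes one merged document.
# process_include_files('myschema.xsd', 'myschema_merged.xsd', inpath='myschema.xsd')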
#
# Classes
class Params(object):
members = ('base_url', 'already_processed', 'parent_url', )
def __init__(self):
self.base_url = None
self.already_processed = set()
self.parent_url = None
def __setattr__(self, name, value):
if name not in self.members:
raise AttributeError('Class %s has no set-able attribute "%s"' % (
self.__class__.__name__, name, ))
self.__dict__[name] = value
class SchemaIOError(IOError):
pass
#
# Functions for internal use and testing
def clear_includes_and_imports(node):
namespace = node.nsmap[node.prefix]
child_iter1 = node.iterfind('{%s}include' % (namespace, ))
child_iter2 = node.iterfind('{%s}import' % (namespace, ))
for child in itertools.chain(child_iter1, child_iter2):
repl = etree.Comment(etree.tostring(child))
node.replace(child, repl)
def resolve_ref(node, params, options):
content = None
url = node.get('schemaLocation')
if not url:
msg = '*** Warning: missing "schemaLocation" attribute in %s\n' % (
params.parent_url, )
sys.stderr.write(msg)
return None
# Uncomment the next line to help track down missing schemaLocation etc.
# print '(resolve_ref) url: %s\n parent-url: %s' % (url, params.parent_url, )
if params.base_url and not (
url.startswith('/') or
url.startswith('http:') or
url.startswith('ftp:')
):
locn = '%s/%s' % (params.base_url, url, )
else:
locn = url
if locn is not None:
schema_name = os.path.split(locn)
if schema_name not in params.already_processed:
params.already_processed.add(schema_name)
print('trace --')
print(' base : %s' % (params.base_url, ))
print(' parent: %s' % (params.parent_url, ))
print(' locn : %s' % (locn, ))
if locn.startswith('http:') or locn.startswith('ftp:'):
try:
urlfile = urllib.request.urlopen(locn)
content = urlfile.read()
urlfile.close()
params.parent_url = locn
params.base_url = os.path.split(locn)[0]
except urllib.error.HTTPError as exp:
msg = "Can't find file %s referenced in %s." % (
locn, params.parent_url, )
raise SchemaIOError(msg)
else:
if os.path.exists(locn):
infile = open(locn)
content = infile.read()
infile.close()
params.parent_url = locn
params.base_url = os.path.split(locn)[0]
if content is None:
msg = "Can't find file %s referenced in %s." % (
locn, params.parent_url, )
raise SchemaIOError(msg)
## if content is None:
## msg = "Can't find file %s referenced in %s." % (
## locn, params.parent_url, )
## raise SchemaIOError(msg)
return content
def collect_inserts(node, params, inserts, options):
namespace = node.nsmap[node.prefix]
child_iter1 = node.iterfind('{%s}include' % (namespace, ))
child_iter2 = node.iterfind('{%s}import' % (namespace, ))
for child in itertools.chain(child_iter1, child_iter2):
collect_inserts_aux(child, params, inserts, options)
def collect_inserts_aux(child, params, inserts, options):
save_base_url = params.base_url
string_content = resolve_ref(child, params, options)
if string_content is not None:
root = etree.fromstring(string_content, base_url=params.base_url)
for child1 in root:
if not isinstance(child1, etree._Comment):
namespace = child1.nsmap[child1.prefix]
if (child1.tag != '{%s}include' % (namespace, ) and
child1.tag != '{%s' % (namespace, )):
comment = etree.Comment(etree.tostring(child))
inserts.append(comment)
inserts.append(child1)
collect_inserts(root, params, inserts, options)
params.base_url = save_base_url
def make_file(outFileName, options):
outFile = None
if (not options.force) and os.path.exists(outFileName):
        reply = input('File %s exists. Overwrite? (y/n): ' % outFileName)
if reply == 'y':
outFile = open(outFileName, 'w')
else:
outFile = open(outFileName, 'w')
return outFile
def prep_schema_doc(infile, outfile, inpath, options):
doc1 = etree.parse(infile)
root1 = doc1.getroot()
params = Params()
params.parent_url = infile
params.base_url = os.path.split(inpath)[0]
inserts = []
collect_inserts(root1, params, inserts, options)
root2 = copy.copy(root1)
clear_includes_and_imports(root2)
for insert_node in inserts:
root2.append(insert_node)
doc2 = etree.ElementTree(root2)
doc2.write(outfile)
return doc2
def prep_schema(inpath, outpath, options):
if inpath:
infile = open(inpath, 'r')
else:
infile = sys.stdin
if outpath:
outfile = make_file(outpath, options)
else:
outfile = sys.stdout
if outfile is None:
return
prep_schema_doc(infile, outfile, inpath, options)
if inpath:
infile.close()
if outpath:
outfile.close()
USAGE_TEXT = __doc__
def usage(parser):
parser.print_help()
sys.exit(1)
def main():
parser = OptionParser(USAGE_TEXT)
parser.add_option("-f", "--force", action="store_true",
dest="force", default=False,
help="force overwrite without asking")
(options, args) = parser.parse_args()
if len(args) == 2:
inpath = args[0]
outpath = args[1]
elif len(args) == 1:
inpath = args[0]
outpath = None
elif len(args) == 0:
inpath = None
outpath = None
else:
usage(parser)
prep_schema(inpath, outpath, options)
if __name__ == "__main__":
#import pdb; pdb.set_trace()
main()
|
StarcoderdataPython
|
93359
|
<reponame>anushkrishnav/QiskitBot
import discord
from discord.ext import commands
import asyncio
import subprocess
import logging
import re
logger = logging.getLogger(__name__)
class DocInfo(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command(name = 'docs')
async def docs(self, ctx, *, arg):
if re.match('^`[A-Za-z]{1,20}`$', arg):
out = subprocess.run( f'echo "from qiskit import *\nhelp({arg[1:-1]})" | python3',
shell = True, text = True, capture_output = True)
if out.returncode == 0:
embed = discord.Embed(title = f'Info on {arg}',
description = f'```py{out.stdout}```')
await ctx.send(embed = embed)
else:
embed = discord.Embed(title = 'Error',
description = 'Try again with a correct class or function name and with a limit of 20 characters.')
await ctx.send(embed = embed)
def setup(bot):
bot.add_cog(DocInfo(bot))
|
StarcoderdataPython
|
11223579
|
import cv2
import numpy as np
import alglib.processing as processing
lower1 = np.array([0, 30, 0])
upper1 = np.array([180, 190, 256])
lower2 = np.array([26, 0, 0])
upper2 = np.array([40, 45, 256])
lower3 = np.array([25, 0, 0])
upper3 = np.array([160, 256, 256])
lower4 = np.array([160, 125, 0])
upper4 = np.array([180, 256, 256])
def filtered_frame(frame):
hsv_filer1 = processing.hsv_mask(frame, lower1, upper1)
hsv_filer2 = processing.hsv_mask(frame, lower2, upper2)
hsv_filer3 = processing.hsv_mask(frame, lower3, upper3)
hsv_filer4 = processing.hsv_mask(frame, lower4, upper4)
blocking = cv2.bitwise_or(hsv_filer3, hsv_filer4)
cv2.bitwise_not(hsv_filer2, hsv_filer2)
blocking_filter = cv2.bitwise_and(hsv_filer2, blocking)
cv2.bitwise_not(blocking_filter, blocking_filter)
hsv_filter_final = cv2.bitwise_and(hsv_filer1, blocking_filter)
return cv2.bitwise_and(frame, frame, mask=hsv_filter_final), hsv_filter_final
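# Hypothetical usage sketch (not part of the original module; the file name is
# an assumption): any BGR frame read with OpenCV works as input.
# frame = cv2.imread("sample.jpg")
# masked_frame, mask = filtered_frame(frame)
# cv2.imwrite("sample_filtered.jpg", masked_frame)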
|
StarcoderdataPython
|
6595401
|
<filename>src/year2018/day09b.py
"""2018 - Day 9 Part 2: <NAME>.
Amused by the speed of your answer, the Elves are curious:
What would the new winning Elf's score be if the number of the last marble were
100 times larger?
"""
from src.year2018.day09a import Game
from src.year2018.day09a import parse_task
def solve(task: str) -> int:
"""Get the winner score if the last marble is 100 times larger."""
players_number, marbles = parse_task(task)
game = Game(players_number, marbles * 100)
while not game.finished:
game.make_turn()
return game.winner.score
|
StarcoderdataPython
|
6655813
|
import matplotlib.pyplot as plt
# Function to plot the graphs of a model's training history.
def visualize(history):
# "Accuratezza"
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
# "Perdita"
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
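# Hypothetical usage sketch (not in the original file): `history` is whatever a
# Keras-style model.fit(...) call returns, e.g.
# history = model.fit(x_train, y_train, validation_split=0.2, epochs=10)
# visualize(history)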
|
StarcoderdataPython
|
392520
|
'''Communicate with local or remote peercoin-daemon via JSON-RPC'''
from operator import itemgetter
from .common import Provider
from pypeerassets.exceptions import InsufficientFunds
from btcpy.structs.transaction import MutableTxIn, Sequence, ScriptSig
from decimal import Decimal, getcontext
getcontext().prec = 6
try:
from peercoin_rpc import Client
except:
raise EnvironmentError("peercoin_rpc library is required for this to work,\
use pip to install it.")
class RpcNode(Client, Provider):
'''JSON-RPC connection to local Peercoin node'''
def select_inputs(self, address: str, amount: int) -> dict:
        '''Finds appropriate UTXOs to include in the raw transaction, while being careful
        to never spend old transactions with a lot of coin age.
        Argument is an integer; returns a list of appropriate UTXOs.'''
utxos = []
utxo_sum = Decimal(-0.01) # starts from negative due to minimal fee
for tx in sorted(self.listunspent(address=address), key=itemgetter('confirmations')):
if tx["address"] not in (self.pa_parameters.P2TH_addr,
self.pa_parameters.test_P2TH_addr):
utxos.append(
MutableTxIn(txid=tx['txid'],
txout=tx['vout'],
sequence=Sequence.max(),
script_sig=ScriptSig.empty())
)
utxo_sum += Decimal(tx["amount"])
if utxo_sum >= amount:
return {'utxos': utxos, 'total': utxo_sum}
if utxo_sum < amount:
raise InsufficientFunds("Insufficient funds.")
raise Exception("undefined behavior :.(")
@property
def is_testnet(self) -> bool:
'''check if node is configured to use testnet or mainnet'''
if self.getinfo()["testnet"] is True:
return True
else:
return False
@property
def network(self) -> str:
'''return which network is the node operating on.'''
if self.is_testnet:
return "tsprk"
else:
return "sprk"
def listunspent(
self,
address: str="",
minconf: int=1,
maxconf: int=999999,
) -> list:
'''list UTXOs
modified version to allow filtering by address.
'''
if address:
return self.req("listunspent", [minconf, maxconf, [address]])
return self.req("listunspent", [minconf, maxconf])
|
StarcoderdataPython
|
106525
|
<reponame>code-review-doctor/orchestra
from rest_framework import permissions
from orchestra.models import Worker
from orchestra.models import Todo
from orchestra.models import TodoQA
class IsAssociatedWithTodosProject(permissions.BasePermission):
"""
    Ensures that a user's worker is associated with the todo's project.
"""
def has_object_permission(self, request, view, obj):
worker = Worker.objects.get(user=request.user)
if isinstance(obj, Todo):
project = obj.project
elif isinstance(obj, TodoQA):
project = obj.todo.project
else:
project = None
return (
project and
(worker.is_project_admin() or
worker.assignments.filter(task__project=project).exists()))
class IsAssociatedWithProject(permissions.BasePermission):
"""
Ensures that a user's worker is associated with the request's
`project`.
"""
def has_permission(self, request, view):
"""
We pass project_id as a payload in cases when the request
is either POST, PUT or PATCH. It can be passed via query param
not only in a GET request, but also in the requests listed above
(when applying a filter).
"""
worker = Worker.objects.get(user=request.user)
if worker.is_project_admin():
return True
todo_id = request.data.get('todo')
if todo_id is None:
todo_id = view.kwargs.get('pk')
project_id = request.data.get(
'project') or request.data.get('project__id')
if project_id is None:
project_id = request.query_params.get(
'project') or request.query_params.get('project__id')
if project_id is None and todo_id is not None:
project_id = Todo.objects.get(id=todo_id).project.id
return worker.assignments.filter(task__project__id=project_id).exists()
class IsAssociatedWithTask(permissions.BasePermission):
"""
Ensures that a user's worker is associated with the request's
`task`.
"""
def has_permission(self, request, view):
worker = Worker.objects.get(user=request.user)
if worker.is_project_admin():
return True
if request.method == 'GET':
task_id = request.query_params.get('task')
return worker.assignments.filter(task=task_id).exists()
return False
|
StarcoderdataPython
|
1658236
|
<filename>src/estimate_biomass.py
import argparse
from pathlib import Path
import cv2
import numpy as np
import pandas as pd
import torch
import keypoints_detector.config as cfg
from keypoints_detector.model import keypoint_detector
from keypoints_detector.predict import predict, show_prediction
from scale_detector.scale_detector import read_scale
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input_dir", type=Path, default="./images/",
help="Input images directory.")
parser.add_argument("-o", "--output_dir", type=Path, default="../results/",
help="Outputs directory.")
return parser.parse_args()
def filter_cnts(cnts):
cnts_filtered = []
bboxes_fit = []
bboxes = []
for cnt in cnts:
if cv2.contourArea(cnt) < 1000:
continue
rect_tilted = cv2.minAreaRect(cnt)
if 0.5 < rect_tilted[1][0] / rect_tilted[1][1] < 2:
continue
rect = cv2.boundingRect(cnt)
bboxes.append(rect)
cnts_filtered.append(cnt)
bboxes_fit.append(rect_tilted)
return cnts_filtered, bboxes_fit, bboxes
def simple_segmenter(img):
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(gray, 150, 255, cv2.THRESH_BINARY)
cnts, hier = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
cnts, bboxes_fit, bboxes = filter_cnts(cnts)
return cnts, bboxes_fit, bboxes, thresh
def calc_dist(pt1, pt2):
return np.sqrt(np.sum(np.square(pt1 - pt2)))
def fit_curve(bbox, points, keypoints_num):
bbox_w = bbox[0] - bbox[2]
bbox_l = bbox[1] - bbox[3]
pts_ind = [0, 1] if bbox_w <= bbox_l else [1, 0]
x = points[: keypoints_num - 2][:, pts_ind[0]]
y = points[: keypoints_num - 2][:, pts_ind[1]]
model = np.poly1d(np.polyfit(x, y, 5))
range_pts = [x[0], x[-1]]
lin_sp = np.arange(min(range_pts), max(range_pts), 10)
res = [lin_sp, model(lin_sp)]
pts = np.round(np.hstack((np.resize(res[pts_ind[0]], (res[pts_ind[0]].shape[0], 1)),
np.resize(res[pts_ind[1]], (res[pts_ind[1]].shape[0], 1)))))
pts_num = pts.shape[0]
# TODO: create different error handling function
if pts_num == 0:
pts = np.reshape(bbox, ((bbox.size // 2), 2))
return pts
list_ind = [0, pts_num - 1]
distances = [calc_dist(pts[0], points[0]), calc_dist(pts[0], points[keypoints_num - 3])]
position = list_ind[distances.index(min(distances))]
insertion_ind = [keypoints_num - 3, 0] if position == 0 else [0, keypoints_num - 3]
inserted = np.append(pts, points[insertion_ind[0]])
inserted = np.insert(inserted, 0, points[insertion_ind[1]])
pts = np.reshape(inserted, ((inserted.size // 2), 2))
return pts
def calc_dimensions(length_pts, width_pts, scale_ratio):
points = width_pts.astype(np.uint64)
right, left = points
width = calc_dist(left, right) * scale_ratio
len_parts = [calc_dist(length_pts[i], length_pts[i + 1]) for i in range(len(length_pts) - 1)]
length = np.sum(np.asarray(len_parts)) * scale_ratio
return length, width
def calculate_mass(predicted, scale, img_path):
keypoints_num = 7
scale_ratio = scale["um"] / scale["pix"]
density = 1.04
results = []
lengths_points = []
for i, (bbox, points) in enumerate(zip(predicted["bboxes"], predicted["keypoints"])):
length_pts = fit_curve(bbox, points, keypoints_num)
lengths_points.append(length_pts)
length, width = calc_dimensions(length_pts, points[-2:], scale_ratio)
class_name = cfg.INSTANCE_CATEGORY_NAMES[predicted["labels"][i]]
        if length and not (np.isnan(length) or np.isnan(width)):
            R = length / width
            # If the mass of the species Echiniscus is estimated, use a different equation
if class_name == 'echiniscus':
mass = (1 / 12) * length * np.pi * (length / R) ** 2 * density * 10 ** -6 # [ug]
else:
mass = length * np.pi * (length / (2 * R)) ** 2 * density * 10 ** -6 # [ug]
info = {"img_path": img_path,
"id": i,
"class": class_name,
"length": length,
"width": width,
"biomass": mass}
# print(info)
results.append(info)
return pd.DataFrame(results), lengths_points
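# Worked example of the general mass formula above (illustrative numbers, not
# measured data): for length = 200 um, width = 50 um and density = 1.04,
# R = 200 / 50 = 4 and mass = 200 * pi * (200 / (2 * 4))**2 * 1.04 * 1e-6,
# which is about 0.41 ug.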
def main(args):
args.output_dir.mkdir(exist_ok=True, parents=True)
images_extensions = ("png", "tif", "jpg", "jpeg")
images_paths = []
for ext in images_extensions:
ext_paths = list(args.input_dir.glob(f"*.{ext}"))
images_paths.extend(ext_paths)
print(f"IMG PATHS: {images_paths}")
# images = [Path("./images/krio5_OM_1.5_5.jpg")]
model = keypoint_detector()
model.load_state_dict(torch.load("keypoints_detector/checkpoints/keypoints_detector.pth"))
for img_path in images_paths:
try:
img = cv2.imread(str(img_path), 1)
cnts, bboxes_fit, bboxes, thresh = simple_segmenter(img)
image_scale, img = read_scale(img, bboxes, device="cpu")
predicted = predict(model, img, device=cfg.DEVICE)
results_df, lengths_points = calculate_mass(predicted, image_scale, img_path)
img = predicted["image"]
if not results_df.empty:
mass_total = results_df["biomass"].sum()
mass_mean = results_df["biomass"].mean()
mass_std = results_df["biomass"].std()
print(f"image path: {img_path}\n"
f"Image scale: {image_scale}\n"
f"Image total mass: {mass_total} ug")
print("-" * 50)
info_dict = {"scale": (f"Scale: {image_scale['um']} um", (50, 50)),
"number": (f"Animal number: {predicted['bboxes'].shape[0]}", (50, 100)),
"mass": (f"Total biomass: {round(mass_total, 5)} ug", (50, 150)),
"mean": (f"Animal mass mean: {round(mass_mean, 5)} ug", (50, 200)),
"std": (f"Animal mass std: {round(mass_std, 5)} ug", (50, 250))}
for text, position in info_dict.values():
img = cv2.putText(img, text, position,
cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2, cv2.LINE_AA)
for pts in lengths_points:
img = cv2.polylines(img, [pts.astype(np.int32)], False, (255, 0, 0), 1)
else:
print("-" * 50)
print(f"Mass calculation results empty for file: {str(img_path)}")
print("-" * 50)
results_df.to_csv(args.output_dir / f"{img_path.stem}_results.csv")
cv2.imwrite(str(args.output_dir / f"{img_path.stem}_results.jpg"), img)
cv2.imshow('predicted', cv2.resize(img, (0, 0), fx=0.6, fy=0.6))
cv2.waitKey(2500)
except Exception as e:
print(e)
if __name__ == '__main__':
main(parse_args())
# img = cv2.imread("./images/krio5_OM_1.5_5.jpg")
# show_prediction(img)
|
StarcoderdataPython
|
5036212
|
<filename>mobius/management/commands/create_admin_user.py
from django.core.management.base import BaseCommand, CommandError
class Command(BaseCommand):
def handle(self, *args, **options):
from django.contrib.auth import get_user_model
user, created = get_user_model().objects.get_or_create(username="admin", is_superuser=1, is_staff=1)
if created:
user.set_password("<PASSWORD>")
user.save()
|
StarcoderdataPython
|
1914711
|
<reponame>cescalara/icecube_tools
import numpy as np
from pytest import approx
from icecube_tools.utils.vMF import get_kappa, get_theta_p
from icecube_tools.detector.angular_resolution import AngularResolution
def test_kappa_conversion():
theta_1sigma = 1.0
kappa = get_kappa(theta_1sigma, 0.68)
theta_p = get_theta_p(kappa, 0.68)
assert theta_1sigma == approx(theta_p)
def test_angular_resolution():
# Load
ang_res = AngularResolution.from_dataset(
"20181018",
ret_ang_err_p=0.9,
offset=0.4,
)
# Sample
ra = 0.0 # rad
dec = np.pi / 4 # rad
Etrue = 1e5 # GeV
ang_res.sample(Etrue, (ra, dec))
# Return angular error
assert ang_res.ret_ang_err == ang_res.get_ret_ang_err(Etrue)
|
StarcoderdataPython
|
5074531
|
<reponame>fmi-basel/dl-utils<filename>tests/test_runner.py<gh_stars>0
from dlutils.prediction.runner import runner
import pytest
import numpy as np
def test_runner(n_vals=100):
'''test runner with functions
'''
def generator_fn(val):
return val % 2, val
def processor_fn(key, val):
return key, val * 2
container = {0: [], 1: []}
def output_fn(key, val):
container[key].append(val)
runner(generator_fn, processor_fn, output_fn, range(n_vals))
for key, vals in container.items():
assert len(vals) == n_vals / 2
if key == 0:
np.testing.assert_array_equal(vals, np.arange(n_vals)[::2] * 2)
elif key == 1:
np.testing.assert_array_equal(vals, np.arange(n_vals)[1::2] * 2)
else:
assert False
def test_runner_from_generator(k_runs=5, n_vals=10):
'''test runner with generators
'''
def generator_fn(val):
for k in range(k_runs):
yield val % 2, val
def processor_fn(key, val):
for k in range(k_runs):
yield key, val * 2
container = {0: [], 1: []}
def output_fn(key, val):
container[key].append(val)
runner(generator_fn, processor_fn, output_fn, range(n_vals))
for key, vals in container.items():
assert len(vals) == n_vals / 2 * k_runs**2
if key == 0:
np.testing.assert_array_equal(
np.unique(vals),
np.arange(n_vals)[::2] * 2)
elif key == 1:
np.testing.assert_array_equal(
np.unique(vals),
np.arange(n_vals)[1::2] * 2)
else:
assert False
@pytest.mark.parametrize("prep_fails, processor_fails, post_fails",
[[True, False, False], [False, True, False],
[False, False, True], [False, False, False]])
def test_exception_handling(prep_fails, processor_fails, post_fails):
'''test error handling
'''
n_vals = 100
class CustomException(Exception):
pass
def fail(val):
raise CustomException('')
def do_nothing(val):
return val
args = [
fail if prep_fails else do_nothing,
fail if processor_fails else do_nothing,
fail if post_fails else do_nothing,
]
if prep_fails or processor_fails or post_fails:
with pytest.raises(CustomException):
runner(*args, range(n_vals))
else:
runner(*args, range(n_vals))
if __name__ == '__main__':
import logging
logging.basicConfig(level=logging.DEBUG)
test_exception_handling(False, False, True)
|
StarcoderdataPython
|
5075694
|
#!/usr/bin/env python
#coding: utf8
# some random homework, for neu.edu, TELE5330
import subprocess
# because the requirement is too clever to use `sys`
ARGV = subprocess.sys.argv
def nslookup(n):
stdout, _ = subprocess.Popen(
'nslookup "%s" | grep name' % n,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
shell=True
).communicate()
try:
return stdout[stdout.index('name =')+6:-2].strip()
except ValueError:
return None
if len(ARGV)!=4:
exit('Missing parameters: <IP> <IP> <hostname>')
# parse the first ARGV
# why would anyone use `nslookup` and not `dig` or `socket.gethostbyname()`?
print 'IP address = %s' % ARGV[1]
# No Regexp? Really?
ip = nslookup(ARGV[1])
print 'Hostname %s' % ('= %s' % ip if ip else 'not found')
# parse the second ARGV
print 'IP address = %s' % ARGV[2]
ret = subprocess.os.system('ping -c1 -t2 "%s" 2>&1 >/dev/null' % ARGV[2])
print 'Status = %s' % (['Up', 'Down'][bool(ret)])
# parse the third ARGV
stdout, _ = subprocess.Popen(
'nmap -n -oG - "%s" | grep "Ports:"' % ARGV[3],
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
shell=True
).communicate()
print 'open TCP connections = %s' % stdout.count('/open/tcp//')
try:
f_count = stdout[stdout.index(' filtered (')+11:].strip()[:-1]
except ValueError:
f_count = 0
print 'filtered TCP connections = %s' % f_count
|
StarcoderdataPython
|
3341937
|
import datetime
from typing import Optional
from aioftx.types import Side
from aioftx.http import HTTPMethod, PaginatedResponse, Request, Response
from pydantic import BaseModel, Field
from ..quotes.schemas import Quote
from ..shared.schemas import Option, OptionStatus, OptionType
class QuoteRequest(BaseModel):
id: int
option: Option
side: Side
size: float
time: datetime.datetime
request_expiry: datetime.datetime
status: OptionStatus
limit_price: Optional[float]
class MyQuoteRequest(BaseModel):
id: int
option: Option
side: Side
size: float
time: datetime.datetime
request_expiry: datetime.datetime
status: OptionStatus
limit_price: Optional[bool]
quotes: list[Quote]
hide_limit_price: bool
""" Methods """
class GetQuoteRequestsRequest(Request):
path = "/options/requests"
class GetQuoteRequestsResponse(PaginatedResponse[QuoteRequest]):
pass
class GetMyQuoteRequestsRequest(Request):
path = "/options/my_requests"
class GetMyQuoteRequestsResponse(PaginatedResponse[MyQuoteRequest]):
pass
class CreateQuoteRequestRequest(Request):
http_method = HTTPMethod.POST
path = "/options/requests"
underlying: str
type: OptionType
strike: float
expiry: int
side: Side
size: float
limit_price: Optional[float]
hide_limit_price: bool = True
request_expiry: Optional[int] = None # unix timestamp
counterparty_id: Optional[
int
] = None # when specified, makes the request private to the specified counterparty
class CreateQuoteRequestResponse(Response[QuoteRequest]):
pass
class DeleteQuoteRequestRequest(Request):
http_method = HTTPMethod.DELETE
path = "/options/requests/{request_id}"
request_id: int = Field(..., path=True)
class DeleteQuoteRequestResponse(Response[QuoteRequest]):
pass
|
StarcoderdataPython
|
1738101
|
from django.contrib.auth import get_user_model
from home.models import Student, Teacher
from django.http import HttpRequest
from django.utils.translation import ugettext_lazy as _
from allauth.account import app_settings as allauth_settings
from allauth.account.forms import ResetPasswordForm
from allauth.utils import email_address_exists, generate_unique_username
from allauth.account.adapter import get_adapter
from allauth.account.utils import setup_user_email
from rest_framework import serializers
from rest_auth.serializers import PasswordResetSerializer
User = get_user_model()
class SignupSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ("id", "name", "email", "password")
extra_kwargs = {
"password": {"write_only": True, "style": {"input_type": "password"}},
"email": {
"required": True,
"allow_blank": False,
},
}
def _get_request(self):
request = self.context.get("request")
if (
request
and not isinstance(request, HttpRequest)
and hasattr(request, "_request")
):
request = request._request
return request
def validate_email(self, email):
email = get_adapter().clean_email(email)
if allauth_settings.UNIQUE_EMAIL:
if email and email_address_exists(email):
raise serializers.ValidationError(
_("A user is already registered with this e-mail address.")
)
return email
def create(self, validated_data):
user = User(
email=validated_data.get("email"),
name=validated_data.get("name"),
username=generate_unique_username(
[validated_data.get("name"), validated_data.get("email"), "user"]
),
)
user.set_password(validated_data.get("password"))
user.save()
request = self._get_request()
setup_user_email(request, user, [])
return user
def save(self, request=None):
"""rest_auth passes request so we must override to accept it"""
return super().save()
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ["id", "email", "name"]
class PasswordSerializer(PasswordResetSerializer):
"""Custom serializer for rest_auth to solve reset password error"""
password_reset_form_class = ResetPasswordForm
class TeacherSerializer(serializers.ModelSerializer):
class Meta:
model = Teacher
fields = "__all__"
class StudentSerializer(serializers.ModelSerializer):
class Meta:
model = Student
fields = "__all__"
|
StarcoderdataPython
|
102930
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import absolute_import
from rest_framework_bulk.routes import BulkRouter
from rest_framework_nested.routers import NestedSimpleRouter
__all__ = ('BulkRouter', 'BulkNestedRouter')
# Map of HTTP verbs to rest_framework_bulk operations.
BULK_OPERATIONS_MAP = {
'put': 'bulk_update',
'patch': 'partial_bulk_update',
'delete': 'bulk_destroy',
}
class BulkNestedRouter(NestedSimpleRouter):
"""
Bulk-enabled nested router.
"""
def __init__(self, *args, **kwargs):
super(BulkNestedRouter, self).__init__(*args, **kwargs)
self.routes[0].mapping.update(BULK_OPERATIONS_MAP)
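# Hypothetical registration sketch (viewset names are assumptions): nested
# routes registered this way also accept bulk PUT/PATCH/DELETE requests.
# parent_router = BulkRouter()
# parent_router.register(r'authors', AuthorViewSet)
# nested_router = BulkNestedRouter(parent_router, r'authors', lookup='author')
# nested_router.register(r'books', BookViewSet, basename='author-books')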
|
StarcoderdataPython
|
1871733
|
<gh_stars>1-10
# flake8: noqa
from .version import __version__
default_app_config = 'entity_history.apps.EntityHistoryConfig'
|
StarcoderdataPython
|
11200600
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from .import attestation_pb2 as attestation__pb2
from .import beacon_block_pb2 as beacon__block__pb2
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
from .import validator_pb2 as validator__pb2
class BeaconNodeValidatorStub(object):
"""Beacon node validator API
The beacon node validator API enables a validator to connect
and perform its obligations on the Ethereum 2.0 phase 0 beacon chain.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetDuties = channel.unary_unary(
'/ethereum.eth.v1alpha1.BeaconNodeValidator/GetDuties',
request_serializer=validator__pb2.DutiesRequest.SerializeToString,
response_deserializer=validator__pb2.DutiesResponse.FromString,
)
self.StreamDuties = channel.stream_stream(
'/ethereum.eth.v1alpha1.BeaconNodeValidator/StreamDuties',
request_serializer=validator__pb2.DutiesRequest.SerializeToString,
response_deserializer=validator__pb2.DutiesResponse.FromString,
)
self.DomainData = channel.unary_unary(
'/ethereum.eth.v1alpha1.BeaconNodeValidator/DomainData',
request_serializer=validator__pb2.DomainRequest.SerializeToString,
response_deserializer=validator__pb2.DomainResponse.FromString,
)
self.WaitForChainStart = channel.unary_stream(
'/ethereum.eth.v1alpha1.BeaconNodeValidator/WaitForChainStart',
request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
response_deserializer=validator__pb2.ChainStartResponse.FromString,
)
self.WaitForSynced = channel.unary_stream(
'/ethereum.eth.v1alpha1.BeaconNodeValidator/WaitForSynced',
request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
response_deserializer=validator__pb2.SyncedResponse.FromString,
)
self.WaitForActivation = channel.unary_stream(
'/ethereum.eth.v1alpha1.BeaconNodeValidator/WaitForActivation',
request_serializer=validator__pb2.ValidatorActivationRequest.SerializeToString,
response_deserializer=validator__pb2.ValidatorActivationResponse.FromString,
)
self.ValidatorIndex = channel.unary_unary(
'/ethereum.eth.v1alpha1.BeaconNodeValidator/ValidatorIndex',
request_serializer=validator__pb2.ValidatorIndexRequest.SerializeToString,
response_deserializer=validator__pb2.ValidatorIndexResponse.FromString,
)
self.ValidatorStatus = channel.unary_unary(
'/ethereum.eth.v1alpha1.BeaconNodeValidator/ValidatorStatus',
request_serializer=validator__pb2.ValidatorStatusRequest.SerializeToString,
response_deserializer=validator__pb2.ValidatorStatusResponse.FromString,
)
self.GetBlock = channel.unary_unary(
'/ethereum.eth.v1alpha1.BeaconNodeValidator/GetBlock',
request_serializer=validator__pb2.BlockRequest.SerializeToString,
response_deserializer=beacon__block__pb2.BeaconBlock.FromString,
)
self.ProposeBlock = channel.unary_unary(
'/ethereum.eth.v1alpha1.BeaconNodeValidator/ProposeBlock',
request_serializer=beacon__block__pb2.SignedBeaconBlock.SerializeToString,
response_deserializer=validator__pb2.ProposeResponse.FromString,
)
self.GetAttestationData = channel.unary_unary(
'/ethereum.eth.v1alpha1.BeaconNodeValidator/GetAttestationData',
request_serializer=validator__pb2.AttestationDataRequest.SerializeToString,
response_deserializer=attestation__pb2.AttestationData.FromString,
)
self.ProposeAttestation = channel.unary_unary(
'/ethereum.eth.v1alpha1.BeaconNodeValidator/ProposeAttestation',
request_serializer=attestation__pb2.Attestation.SerializeToString,
response_deserializer=validator__pb2.AttestResponse.FromString,
)
self.SubmitAggregateSelectionProof = channel.unary_unary(
'/ethereum.eth.v1alpha1.BeaconNodeValidator/SubmitAggregateSelectionProof',
request_serializer=validator__pb2.AggregateSelectionRequest.SerializeToString,
response_deserializer=validator__pb2.AggregateSelectionResponse.FromString,
)
self.SubmitSignedAggregateSelectionProof = channel.unary_unary(
'/ethereum.eth.v1alpha1.BeaconNodeValidator/SubmitSignedAggregateSelectionProof',
request_serializer=validator__pb2.SignedAggregateSubmitRequest.SerializeToString,
response_deserializer=validator__pb2.SignedAggregateSubmitResponse.FromString,
)
self.ProposeExit = channel.unary_unary(
'/ethereum.eth.v1alpha1.BeaconNodeValidator/ProposeExit',
request_serializer=beacon__block__pb2.SignedVoluntaryExit.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.SubscribeCommitteeSubnets = channel.unary_unary(
'/ethereum.eth.v1alpha1.BeaconNodeValidator/SubscribeCommitteeSubnets',
request_serializer=validator__pb2.CommitteeSubnetsSubscribeRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
class BeaconNodeValidatorServicer(object):
"""Beacon node validator API
The beacon node validator API enables a validator to connect
and perform its obligations on the Ethereum 2.0 phase 0 beacon chain.
"""
def GetDuties(self, request, context):
"""Retrieves validator duties for the requested validators.
The duties consist of:
Proposer - the validator that creates a beacon chain block.
Attester — a validator that is part of a committee that needs to sign off on a beacon chain
block while simultaneously creating a cross link to a recent shard block on a particular shard chain.
The server returns a list of duties which are the actions should be performed by validators for a given epoch.
Validator duties should be polled every epoch, but due to chain reorg of >MIN_SEED_LOOKAHEAD could occur,
the validator duties could chain. For complete safety, it is recommended to poll at every slot to ensure
validator is fully aware of any sudden chain reorg.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def StreamDuties(self, request_iterator, context):
"""Stream validator duties for the requested validators.
The duties consist of:
Proposer - the validator that creates a beacon chain block.
Attester — a validator that is part of a committee that needs to sign off on a beacon chain
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DomainData(self, request, context):
"""DomainData fetches the current BLS signature domain version information from the
running beacon node's state. This information is used when validators sign
blocks and attestations appropriately based on their duty.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def WaitForChainStart(self, request, context):
"""WaitForChainStart queries the logs of the Validator Deposit Contract on the Ethereum
proof-of-work chain to verify the beacon chain has started its runtime and
validators are ready to begin their responsibilities.
If the chain has not yet started, this endpoint starts a server-side stream which updates
the client when the beacon chain is ready.
This RPC is deprecated. Please use WaitForSynced.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def WaitForSynced(self, request, context):
"""WaitForSynced checks if the beacon node is synced and ready to communicate with the validator.
If the node is not synced yet, this endpoint starts a server-side stream which updates
the validator client when the beacon chain is ready.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def WaitForActivation(self, request, context):
"""WaitForActivation checks if a validator public key exists in the active validator
registry of the current beacon state. If the validator is NOT yet active, it starts a
server-side stream which updates the client whenever the validator becomes active in
the beacon node's state.
The input to this endpoint is a list of validator public keys, and the corresponding
stream will respond until at least a single corresponding validator to those
keys is activated.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ValidatorIndex(self, request, context):
"""ValidatorIndex retrieves a validator's index location in the beacon state's
validator registry looking up whether the validator exists based on its
public key. This method returns NOT_FOUND if no index is found for the public key
specified in the request.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ValidatorStatus(self, request, context):
"""ValidatorStatus returns a validator's status based on the current epoch.
The request can specify either a validator's public key or validator index.
The status response can be one of the following:
DEPOSITED - validator's deposit has been recognized by Ethereum 1, not yet recognized by Ethereum 2.
PENDING - validator is in Ethereum 2's activation queue.
ACTIVE - validator is active.
EXITING - validator has initiated an an exit request, or has dropped below the ejection balance and is being kicked out.
EXITED - validator is no longer validating.
SLASHING - validator has been kicked out due to meeting a slashing condition.
UNKNOWN_STATUS - validator does not have a known status in the network.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetBlock(self, request, context):
"""Retrieves the latest valid beacon block to be proposed on the beacon chain.
The server returns a new beacon block, without proposer signature, that can be
proposed on the beacon chain. The block should be filled with all the necessary
data for proposer to sign.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ProposeBlock(self, request, context):
"""Sends the newly signed beacon block to beacon node.
The validator sends the newly signed beacon block to the beacon node so the beacon block can
be included in the beacon chain. The beacon node is expected to validate and process the
beacon block into its state.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetAttestationData(self, request, context):
"""Retrieves the latest valid attestation data to be attested on the beacon chain.
The server returns the latest valid data which represents the correct vote
for the head of the beacon chain,
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ProposeAttestation(self, request, context):
"""Sends the newly signed attestation to beacon node.
The validator sends the newly signed attestation to the beacon node for the attestation to
be included in the beacon chain. The beacon node is expected to validate and publish attestation on
appropriate committee subnet.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SubmitAggregateSelectionProof(self, request, context):
"""Submit selection proof to the beacon node to aggregate all matching wire attestations with the same data root.
the beacon node responses with an aggregate and proof object back to validator to sign over.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SubmitSignedAggregateSelectionProof(self, request, context):
"""Submit a signed aggregate and proof object, the beacon node will broadcast the
signed aggregated attestation and proof object.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ProposeExit(self, request, context):
"""Propose to leave the list of active validators.
The beacon node is expected to validate the request and make it available for inclusion in
the next proposed block.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SubscribeCommitteeSubnets(self, request, context):
"""Subscribe to particular committee ID subnets given validator's duty.
The beacon node is expected to subscribe to the committee ID subnet given by the request. With this,
beacon node serving attesters can find persistent peers on the subnet to publish attestation,
and beacon node serving aggregator can join the subnet.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_BeaconNodeValidatorServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetDuties': grpc.unary_unary_rpc_method_handler(
servicer.GetDuties,
request_deserializer=validator__pb2.DutiesRequest.FromString,
response_serializer=validator__pb2.DutiesResponse.SerializeToString,
),
'StreamDuties': grpc.stream_stream_rpc_method_handler(
servicer.StreamDuties,
request_deserializer=validator__pb2.DutiesRequest.FromString,
response_serializer=validator__pb2.DutiesResponse.SerializeToString,
),
'DomainData': grpc.unary_unary_rpc_method_handler(
servicer.DomainData,
request_deserializer=validator__pb2.DomainRequest.FromString,
response_serializer=validator__pb2.DomainResponse.SerializeToString,
),
'WaitForChainStart': grpc.unary_stream_rpc_method_handler(
servicer.WaitForChainStart,
request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
response_serializer=validator__pb2.ChainStartResponse.SerializeToString,
),
'WaitForSynced': grpc.unary_stream_rpc_method_handler(
servicer.WaitForSynced,
request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
response_serializer=validator__pb2.SyncedResponse.SerializeToString,
),
'WaitForActivation': grpc.unary_stream_rpc_method_handler(
servicer.WaitForActivation,
request_deserializer=validator__pb2.ValidatorActivationRequest.FromString,
response_serializer=validator__pb2.ValidatorActivationResponse.SerializeToString,
),
'ValidatorIndex': grpc.unary_unary_rpc_method_handler(
servicer.ValidatorIndex,
request_deserializer=validator__pb2.ValidatorIndexRequest.FromString,
response_serializer=validator__pb2.ValidatorIndexResponse.SerializeToString,
),
'ValidatorStatus': grpc.unary_unary_rpc_method_handler(
servicer.ValidatorStatus,
request_deserializer=validator__pb2.ValidatorStatusRequest.FromString,
response_serializer=validator__pb2.ValidatorStatusResponse.SerializeToString,
),
'GetBlock': grpc.unary_unary_rpc_method_handler(
servicer.GetBlock,
request_deserializer=validator__pb2.BlockRequest.FromString,
response_serializer=beacon__block__pb2.BeaconBlock.SerializeToString,
),
'ProposeBlock': grpc.unary_unary_rpc_method_handler(
servicer.ProposeBlock,
request_deserializer=beacon__block__pb2.SignedBeaconBlock.FromString,
response_serializer=validator__pb2.ProposeResponse.SerializeToString,
),
'GetAttestationData': grpc.unary_unary_rpc_method_handler(
servicer.GetAttestationData,
request_deserializer=validator__pb2.AttestationDataRequest.FromString,
response_serializer=attestation__pb2.AttestationData.SerializeToString,
),
'ProposeAttestation': grpc.unary_unary_rpc_method_handler(
servicer.ProposeAttestation,
request_deserializer=attestation__pb2.Attestation.FromString,
response_serializer=validator__pb2.AttestResponse.SerializeToString,
),
'SubmitAggregateSelectionProof': grpc.unary_unary_rpc_method_handler(
servicer.SubmitAggregateSelectionProof,
request_deserializer=validator__pb2.AggregateSelectionRequest.FromString,
response_serializer=validator__pb2.AggregateSelectionResponse.SerializeToString,
),
'SubmitSignedAggregateSelectionProof': grpc.unary_unary_rpc_method_handler(
servicer.SubmitSignedAggregateSelectionProof,
request_deserializer=validator__pb2.SignedAggregateSubmitRequest.FromString,
response_serializer=validator__pb2.SignedAggregateSubmitResponse.SerializeToString,
),
'ProposeExit': grpc.unary_unary_rpc_method_handler(
servicer.ProposeExit,
request_deserializer=beacon__block__pb2.SignedVoluntaryExit.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
'SubscribeCommitteeSubnets': grpc.unary_unary_rpc_method_handler(
servicer.SubscribeCommitteeSubnets,
request_deserializer=validator__pb2.CommitteeSubnetsSubscribeRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'ethereum.eth.v1alpha1.BeaconNodeValidator', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
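# Hedged usage sketch (not part of the generated stub): one way a server process might
# register this servicer. The subclass name, worker count and port below are illustrative
# assumptions; a real implementation overrides the RPC methods with working logic.
if __name__ == '__main__':
    from concurrent import futures

    class _ExampleValidatorServicer(BeaconNodeValidatorServicer):
        """Placeholder that keeps the UNIMPLEMENTED behaviour of the base class."""
        pass

    server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
    add_BeaconNodeValidatorServicer_to_server(_ExampleValidatorServicer(), server)
    server.add_insecure_port('[::]:4000')
    server.start()
    server.wait_for_termination()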
|
StarcoderdataPython
|
1779636
|
from api.mon.backends.abstract import AbstractMonitoringBackend
from api.mon.backends.abstract.server import AbstractMonitoringServer
# noinspection PyAbstractClass
class DummyMonitoring(AbstractMonitoringBackend):
pass
class DummyMonitoringServer(AbstractMonitoringServer):
"""
Dummy model for representing a monitoring server in a DC.
"""
Meta = AbstractMonitoringServer.Meta
def get_monitoring(dc, **kwargs):
return DummyMonitoring(dc, **kwargs)
# noinspection PyUnusedLocal
def del_monitoring(dc):
return True
MonitoringBackendClass = DummyMonitoring
MonitoringServerClass = DummyMonitoringServer
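# Hedged usage sketch: how callers are expected to use this dummy backend. The `dc`
# value is illustrative; in the real project it is a DC model instance passed in by
# the monitoring API layer.
# backend = get_monitoring(dc)          # returns a DummyMonitoring bound to the DC
# assert isinstance(backend, MonitoringBackendClass)
# del_monitoring(dc)                    # no-op teardown, always returns True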
|
StarcoderdataPython
|
1779983
|
<gh_stars>0
import json
import kiteconnect.exceptions as ex
import logging
from six.moves.urllib.parse import urljoin
import requests
from os import path
from kiteconnect import KiteConnect, KiteTicker
log = logging.getLogger(__name__)
class KiteExt(KiteConnect):
def login_with_credentials(self, userid, password, pin):
self.headers = {
'x-kite-version': '3',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.128 Safari/537.36'
}
self.user_id = userid
self.password = password
self.twofa = pin
self.reqsession = requests.Session()
r = self.reqsession.post(self.root + self._routes['api.login'], data={
'user_id': self.user_id,
            'password': self.password
})
r = self.reqsession.post(self.root + self._routes['api.twofa'], data={
'request_id': r.json()['data']['request_id'],
'twofa_value': self.twofa,
'user_id': r.json()['data']['user_id']
})
self.enctoken = r.cookies.get('enctoken')
self.public_token = r.cookies.get('public_token')
self.user_id = r.cookies.get('user_id')
self.headers['Authorization'] = 'enctoken {}'.format(self.enctoken)
def __init__(self, api_key='kitefront', userid=None, *args, **kw):
KiteConnect.__init__(self, api_key=api_key,
*args, **kw)
if userid is not None:
self.user_id = userid
self._routes.update({
'api.login': '/api/login',
'api.twofa': '/api/twofa',
'api.misdata': '/margins/equity'
})
def set_headers(self, enctoken, userid=None):
self.public_token = enctoken
self.enctoken = enctoken
if userid is not None:
self.user_id = userid
if self.user_id is None:
raise Exception(
f'userid cannot be none, either login with credentials first or set userid here')
self.headers = {
'x-kite-version': '3',
'Authorization': 'enctoken {}'.format(self.enctoken)
}
def kws(self, api_key='kitefront'):
return KiteTicker(api_key=api_key, access_token=self.public_token+'&user_id='+self.user_id, root='wss://ws.zerodha.com')
def ticker(self, api_key='kitefront', enctoken=None, userid=None):
if enctoken is not None:
self.enctoken = enctoken
if userid is not None:
self.user_id = userid
if self.user_id is None:
raise Exception(
f'userid cannot be none, either login with credentials first or set userid here')
return KiteTicker(api_key=api_key, access_token=self.enctoken+'&user_id='+self.user_id, root='wss://ws.zerodha.com')
# NOTE NEW
def _request(self, route, method, url_args=None, params=None, is_json=False):
'''Make an HTTP request.'''
# Form a restful URL
if url_args:
uri = self._routes[route].format(**url_args)
else:
uri = self._routes[route]
url = urljoin(self.root, uri)
headers = self.headers
# Custom headers
# headers = {
# 'X-Kite-Version': '3', # For version 3
# 'User-Agent': self._user_agent()
# }
# if self.api_key and self.access_token:
# # set authorization header
# auth_header = self.api_key + ':' + self.access_token
# headers['Authorization'] = 'token {}'.format(auth_header)
if self.debug:
log.debug('Request: {method} {url} {params} {headers}'.format(
method=method, url=url, params=params, headers=headers))
try:
r = self.reqsession.request(method,
url,
json=params if (
method in ['POST', 'PUT'] and is_json) else None,
data=params if (
method in ['POST', 'PUT'] and not is_json) else None,
params=params if method in [
'GET', 'DELETE'] else None,
headers=headers,
verify=not self.disable_ssl,
allow_redirects=True,
timeout=self.timeout,
proxies=self.proxies)
# Any requests lib related exceptions are raised here - http://docs.python-requests.org/en/master/_modules/requests/exceptions/
except Exception as e:
raise e
if self.debug:
log.debug('Response: {code} {content}'.format(
code=r.status_code, content=r.content))
# Validate the content type.
if 'json' in r.headers['content-type']:
try:
data = json.loads(r.content.decode('utf8'))
except ValueError:
raise ex.DataException('Could not parse the JSON response received from the server: {content}'.format(
content=r.content))
# api error
if data.get('error_type'):
# Call session hook if its registered and TokenException is raised
if self.session_expiry_hook and r.status_code == 403 and data['error_type'] == 'TokenException':
self.session_expiry_hook()
# native Kite errors
exp = getattr(ex, data['error_type'], ex.GeneralException)
raise exp(data['message'], code=r.status_code)
return data['data']
elif 'csv' in r.headers['content-type']:
return r.content
else:
raise ex.DataException('Unknown Content-Type ({content_type}) with response: ({content})'.format(
content_type=r.headers['content-type'],
content=r.content))
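# Hedged usage sketch: authenticating with credentials and reusing the session for both
# REST calls and the websocket ticker. The user id, password and PIN are placeholders.
if __name__ == '__main__':
    kite = KiteExt()
    kite.login_with_credentials(userid='AB1234', password='secret', pin='123456')
    print(kite.profile())      # regular KiteConnect call, authorised via the enctoken header
    ticker = kite.kws()        # KiteTicker bound to the same browser-style session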
|
StarcoderdataPython
|
6575104
|
"""
========================================================
06. Remove epochs based on peak-to-peak (PTP) amplitudes
========================================================
Epochs with peak-to-peak (PTP) amplitudes above the thresholds defined
in the 'reject' parameter are removed from the data.
This step will drop epochs containing non-biological artifacts
but also epochs containing biological artifacts not sufficiently
corrected by the ICA or the SSP processing.
"""
import itertools
import logging
from typing import Optional
import mne
from mne.utils import BunchConst
from mne.parallel import parallel_func
from mne_bids import BIDSPath
import config
from config import gen_log_kwargs, on_error, failsafe_run
logger = logging.getLogger('mne-bids-pipeline')
@failsafe_run(on_error=on_error, script_path=__file__)
def drop_ptp(*, cfg, subject, session=None):
bids_path = BIDSPath(subject=subject,
session=session,
task=cfg.task,
acquisition=cfg.acq,
run=None,
recording=cfg.rec,
space=cfg.space,
suffix='epo',
extension='.fif',
datatype=cfg.datatype,
root=cfg.deriv_root,
check=False)
infile_processing = cfg.spatial_filter
fname_in = bids_path.copy().update(processing=infile_processing)
fname_out = bids_path.copy().update(processing='clean')
msg = f'Input: {fname_in}, Output: {fname_out}'
logger.info(**gen_log_kwargs(message=msg, subject=subject,
session=session))
# Get rejection parameters and drop bad epochs
epochs = mne.read_epochs(fname_in, preload=True)
reject = config.get_reject(epochs=epochs)
if cfg.ica_reject is not None:
for ch_type, threshold in cfg.ica_reject.items():
if (ch_type in reject and
threshold < reject[ch_type]):
# This can only ever happen in case of
# reject = 'autoreject_global'
msg = (f'Adjusting PTP rejection threshold proposed by '
f'autoreject, as it is greater than ica_reject: '
f'{ch_type}: {reject[ch_type]} -> {threshold}')
logger.info(**gen_log_kwargs(message=msg,
subject=subject, session=session))
reject[ch_type] = threshold
msg = f'Using PTP rejection thresholds: {reject}'
logger.info(**gen_log_kwargs(message=msg, subject=subject,
session=session))
n_epochs_before_reject = len(epochs)
epochs.reject_tmin = cfg.reject_tmin
epochs.reject_tmax = cfg.reject_tmax
epochs.drop_bad(reject=reject)
n_epochs_after_reject = len(epochs)
if 0 < n_epochs_after_reject < 0.5 * n_epochs_before_reject:
msg = ('More than 50% of all epochs rejected. Please check the '
'rejection thresholds.')
logger.warning(**gen_log_kwargs(message=msg, subject=subject,
session=session))
elif n_epochs_after_reject == 0:
raise RuntimeError('No epochs remaining after peak-to-peak-based '
'rejection. Cannot continue.')
    msg = 'Saving cleaned, baseline-corrected epochs …'
    logger.info(**gen_log_kwargs(message=msg, subject=subject,
                                 session=session))
epochs.apply_baseline(cfg.baseline)
epochs.save(fname_out, overwrite=True)
def get_config(
subject: Optional[str] = None,
session: Optional[str] = None
) -> BunchConst:
cfg = BunchConst(
task=config.get_task(),
datatype=config.get_datatype(),
acq=config.acq,
rec=config.rec,
space=config.space,
baseline=config.baseline,
reject_tmin=config.reject_tmin,
reject_tmax=config.reject_tmax,
spatial_filter=config.spatial_filter,
ica_reject=config.get_ica_reject(),
deriv_root=config.get_deriv_root(),
decim=config.decim
)
return cfg
def main():
"""Run epochs."""
parallel, run_func, _ = parallel_func(drop_ptp, n_jobs=config.get_n_jobs())
logs = parallel(
run_func(cfg=get_config(), subject=subject, session=session)
for subject, session in
itertools.product(config.get_subjects(),
config.get_sessions())
)
config.save_logs(logs)
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
8116754
|
<filename>katana-nfv_mon/katana/utils/threadingUtis/threadingUtils.py
import threading
import logging
from katana.utils.mongoUtils import mongoUtils
from katana.utils.nfvoUtils import osmUtils
# Create the logger
logger = logging.getLogger(__name__)
stream_handler = logging.StreamHandler()
formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s %(message)s")
stream_formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s %(message)s")
stream_handler.setFormatter(stream_formatter)
logger.setLevel(logging.DEBUG)
logger.addHandler(stream_handler)
class MonThread(threading.Thread):
"""
Class that implements a per Network Service thread for monitoring purposes
"""
def __init__(self, ns, ns_status, ns_name):
super().__init__()
self.ns = ns
self.ns_status = ns_status
self.ns_name = ns_name
        # Event used to signal the thread to stop; named _stop_event so it does not
        # shadow threading.Thread's internal _stop() method (which would break join())
        self._stop_event = threading.Event()
def run(self):
"""
The function that will run to check the NS status
"""
while not self.stopped():
target_nfvo = mongoUtils.find("nfvo", {"id": self.ns["nfvo-id"]})
if target_nfvo["type"] == "OSM":
target_nfvo_obj = osmUtils.Osm(
target_nfvo["id"],
target_nfvo["nfvoip"],
target_nfvo["nfvousername"],
target_nfvo["nfvopassword"],
)
else:
logger.error("Not supported NFVO type")
return
insr = target_nfvo_obj.getNsr(self.ns["nfvo_inst_ns"])
if not insr:
self.ns_status.labels(self.ns["slice_id"], self.ns_name).set(2)
elif insr["operational-status"] == "terminating":
self.ns_status.labels(self.ns["slice_id"], self.ns_name).set(4)
elif insr["operational-status"] != "running":
self.ns_status.labels(self.ns["slice_id"], self.ns_name).set(3)
            self._stop_event.wait(timeout=10)
def stopped(self):
"""
Checks if the thread has stopped
"""
        return self._stop_event.is_set()
def stop(self):
"""
Stops the thread
"""
        self._stop_event.set()
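# Hedged usage sketch: starting and stopping a monitoring thread for one network service.
# The `ns` record and the gauge are illustrative stand-ins; in the real service the record
# comes from MongoDB and ns_status is a prometheus_client Gauge labelled by slice and NS name.
# ns = {"nfvo-id": "osm-1", "nfvo_inst_ns": "ns-uuid", "slice_id": "slice-1"}
# mon = MonThread(ns, ns_status, "my-ns")
# mon.start()
# ...
# mon.stop()
# mon.join()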
|
StarcoderdataPython
|
11364775
|
<gh_stars>0
from django.contrib.auth.models import AbstractUser
from django.db import models
class User(AbstractUser):
def get_short_name(self):
return self.username
class Article(models.Model):
STATUS = (("Draft", "Default"), ("Publish", "Publish"))
users = models.ForeignKey(User, null=True, on_delete=models.CASCADE, related_name="Article",
related_query_name="Art")
title = models.CharField(max_length=250)
Label = models.CharField(max_length=200, blank=True)
body = models.TextField(blank=True)
updated_on = models.DateTimeField(auto_now=True)
created_on = models.DateTimeField(auto_now_add=True)
status = models.CharField(max_length=20, choices=STATUS)
class Meta:
ordering = ['-created_on']
def __str__(self):
return self.title
class Comment(models.Model):
COMMENT_CHOICES = (("Publish", "Publish"), ("Spam", "Spam"))
articles = models.ForeignKey(Article, null=True, on_delete=models.CASCADE, related_name='Comment',
related_query_name='Commas')
commenter_name = models.CharField(max_length=20)
comment_body = models.CharField(max_length=1000)
comment_time = models.DateTimeField(auto_now=True)
comment_choice = models.CharField(max_length=200, choices=COMMENT_CHOICES)
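# Hedged usage sketch (e.g. from `python manage.py shell`): creating an article and a
# comment against it. All field values are illustrative.
# author = User.objects.create_user(username="alice", password="example-pass")
# article = Article.objects.create(users=author, title="First post",
#                                  body="Hello world", status="Publish")
# Comment.objects.create(articles=article, commenter_name="bob",
#                        comment_body="Nice post", comment_choice="Publish")
# author.Article.count()   # reverse lookup through related_name="Article"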
|
StarcoderdataPython
|
3434051
|
"""
Carros: Escreva uma função que armazene informações sobre um carro em um dicionário. A função sempre deve receber o
nome de um fabricante e um modelo. Um número de arbitrário de argumentos nomeados deverá ser aceito. Chame a função
com as informações necessárias e dois outros pares nme-valor, por exemplo, uma cor ou um opcional. Sua função deve ser
apropriada para uma chama como esta:
car = make_car('subaru', 'outback', color='blue', tow_package = True) Mostre o dicionário devolvido para garantir que todas informa
ções foram armazenadas corretamente.
"""
def make_car(car, model, **car_info):
"""Exibe informações sobre um carro."""
car_information = {}
car_information['car'] = name_car
car_information['model'] = modelo
for key, value in car_info.items():
car_information[key] = value
return car_information
car_list = ['Argo', 'Onix', 'Nivus']
print('Choose one of the options listed below:')
for car in car_list:
    print('\t -' + car)
name_car = input('Enter the car name:\n')
if name_car not in car_list:
    print('Please choose only a car from the list.')
    modelo = 'Unknown'
    ano = None
elif name_car == 'Argo':
    modelo = 'Fiat'
    ano = 2021
elif name_car == 'Onix':
    modelo = 'Chevrolet'
    ano = 2021
else:
    modelo = 'VolksWagen'
    ano = 2020
car_profile = make_car(name_car, modelo, year=ano)
print(car_profile)
|
StarcoderdataPython
|
9707887
|
#-*-coding:utf-8-*-
from scrapy import cmdline
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings
from FangSpider.spiders import NewFangSpider
# cmdline.execute("scrapy crawl LjSpider -o LjSpider.csv".split())
# cmdline.execute("scrapy crawl LjSpider -o LjSpider.xml".split())
# cmdline.execute("scrapy crawl LjSpider -o LjSpider.pickle".split())
# cmdline.execute("scrapy crawl LjSpider -o LjSpider.marshal".split())
# cmdline.execute("scrapy crawl LjSpider -o LjSpider.json".split())
# cmdline.execute("scrapy crawl LjSpider -o ftp://user:[email protected]/path/to/LjSpider.csv".split())
settings = get_project_settings()
process = CrawlerProcess(settings=settings)
process.crawl(NewFangSpider.NewfangspiderSpider)
process.start()
|
StarcoderdataPython
|
252764
|
<reponame>tkaneko0204/python-rackclient<gh_stars>0
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
from rackclient import exceptions
from rackclient.v1 import base
class Process(base.Resource):
def __repr__(self):
return "<Process: %s>" % self.name
class ProcessManager(base.Manager):
resource_class = Process
def list(self, gid):
"""
Get a list of all processes in the specified group.
:param gid: ID of the group.
:rtype: list of Process.
"""
return self._list("/groups/%s/processes" % gid, "processes")
def get(self, gid, pid):
"""
        Get a process.
:param gid: ID of the group.
:param pid: ID of the process to get.
:rtype: Process.
"""
return self._get("/groups/%s/processes/%s" % (gid, pid), "process")
def create(self, gid, ppid=None, **kwargs):
"""
Create a process.
        If you give a ppid (parent process ID),
        all other parameters will be inherited by the child process,
        but you can override them.
Parameters in kwargs:
:param name: Name of the new process
:param nova_flavor_id: ID of a flavor
:param glance_image_id: ID of a glance image
:param keypair_id: ID of a keypair
:param list securitygroup_ids: List of IDs of securitygroups
:param userdata: file type object or string of script
:param dict args: Dict of key-value pairs to be stored as metadata
"""
securitygroup_ids = kwargs.get('securitygroup_ids')
if securitygroup_ids is not None and not isinstance(
securitygroup_ids, list):
raise exceptions.CommandError("securitygroup_ids must be a list")
userdata = kwargs.get('userdata')
if userdata:
if hasattr(userdata, 'read'):
userdata = userdata.read()
userdata_b64 = base64.b64encode(userdata)
args = kwargs.get('args')
if args is not None and not isinstance(args, dict):
raise exceptions.CommandError("args must be a dict")
body = {
"process": {
"ppid": ppid,
"name": kwargs.get('name'),
"nova_flavor_id": kwargs.get('nova_flavor_id'),
"glance_image_id": kwargs.get('glance_image_id'),
"keypair_id": kwargs.get('keypair_id'),
"securitygroup_ids": securitygroup_ids,
"userdata": userdata_b64 if userdata else userdata,
"args": args
}
}
return self._create("/groups/%s/processes" % gid, body, "process")
def update(self, gid, pid, app_status):
"""
Update status of process.
:param gid: ID of the group.
:param pid: ID of the process.
:param app_status: Application layer status of the process.
"""
body = {
"process": {
"app_status": app_status
}
}
return self._update("/groups/%s/processes/%s" %
(gid, pid), body, "process")
def delete(self, gid, pid):
"""
Delete a process.
:param gid: ID of the group.
:param pid: ID of the process to delete.
"""
self._delete("/groups/%s/processes/%s" % (gid, pid))
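# Hedged usage sketch: booting and updating a process through this manager. The client
# wiring and all IDs are illustrative; the real manager is reached via the rackclient
# top-level client object.
# processes = client.processes                # ProcessManager instance
# proc = processes.create('group-id', name='worker-1',
#                         nova_flavor_id='2', glance_image_id='image-uuid',
#                         keypair_id='keypair-id', args={'role': 'worker'})
# processes.update('group-id', proc.pid, app_status='ACTIVE')
# processes.delete('group-id', proc.pid)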
|
StarcoderdataPython
|
3263322
|
from elasticsearch_dsl import DocType, Search, Date, Integer, Keyword, Text, Ip
from elasticsearch_dsl import connections, InnerDoc, Nested, Object
class IoTDetailsDoc(DocType):
'''
Document storage for IoT IP cache
'''
id = Text(analyzer='snowball', fields={'raw': Keyword()})
time = Keyword()
ip = Ip()
filetype = Text()
tag_name = Text()
public_tag_name = Text()
tag_description = Text()
tag_class = Text()
tag_group_name = Text()
class Index:
name = 'sfn-iot-details'
@classmethod
def get_indexable(cls):
return cls.get_model().get_objects()
@classmethod
def from_obj(cls, obj):
return cls(
id=obj.id,
time=obj.time,
ip=obj.ip,
filetype=obj.filetype,
tag_name=obj.tag_name,
public_tag_name=obj.public_tag_name,
tag_description=obj.family_description,
tag_class=obj.tag_class,
tag_group_name=obj.tag_group_name
)
def save(self, **kwargs):
return super(IoTDetailsDoc, self).save(**kwargs)
class SFNIOT(InnerDoc):
event_type = Text()
domain_name = Text(analyzer='snowball', fields={'raw': Keyword()})
device_name = Text(analyzer='snowball', fields={'raw': Keyword()})
host = Text(analyzer='snowball', fields={'raw': Keyword()})
threat_id = Text(analyzer='snowball')
threat_name = Text(analyzer='snowball')
tag_name = Text(fields={'raw': Keyword()})
tag_class = Text(fields={'raw': Keyword()})
tag_group = Text(fields={'raw': Keyword()})
tag_description = Text(analyzer='snowball')
public_tag_name = Text(analyzer='snowball')
confidence_level = Integer()
sample_date = Date()
file_type = Text(fields={'raw': Keyword()})
updated_at = Date()
processed = Integer()
src_ip = Ip()
dst_ip = Ip()
class IoTEventDoc(DocType):
'''
    Each event is its own entity in the DB. This is the structure of that entity.
'''
IoT = Object(SFNIOT)
class Index:
name = 'iot-*'
@classmethod
def get_indexable(cls):
return cls.get_model().get_objects()
@classmethod
def from_obj(cls, obj):
return cls(
id=obj.id,
domain_name=obj.domain_name,
device_name=obj.device_name,
host=obj.host,
threat_id=obj.threat_id,
event_tag=obj.event_tag,
created_at=obj.created_at,
updated_at=obj.updated_at,
processed=obj.processed,
src_ip=obj.src_ip,
dst_ip=obj.dst_ip
)
def save(self, **kwargs):
return super(IoTEventDoc, self).save(**kwargs)
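# Hedged usage sketch: writing one IoT detail record. Host name and field values are
# illustrative placeholders; a reachable Elasticsearch cluster is assumed.
# connections.create_connection(hosts=['localhost'])
# IoTDetailsDoc.init()                        # create the sfn-iot-details mapping if missing
# IoTDetailsDoc(ip='10.0.0.5', filetype='pe', tag_name='mirai',
#               tag_class='botnet', tag_group_name='iot').save()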
|
StarcoderdataPython
|
6504812
|
<filename>fabtools/require/mercurial.py<gh_stars>0
"""
Mercurial
=========
This module provides high-level tools for managing `Mercurial`_ repositories.
.. _Mercurial: http://mercurial.selenic.com/
"""
from __future__ import with_statement
from fabric.api import run
from fabtools import mercurial
from fabtools.files import is_dir
from fabtools.system import UnsupportedFamily
def command():
"""
Require the ``hg`` command-line tool.
Example::
from fabric.api import run
from fabtools import require
require.mercurial.command()
run('hg --help')
"""
from fabtools.require.deb import package as require_deb_package
from fabtools.require.rpm import package as require_rpm_package
from fabtools.require.portage import package as require_portage_package
from fabtools.system import distrib_family
res = run('hg --version', quiet=True)
if res.failed:
family = distrib_family()
if family == 'debian':
require_deb_package('mercurial')
elif family == 'gentoo':
require_portage_package('mercurial')
elif family == 'redhat':
require_rpm_package('mercurial')
else:
raise UnsupportedFamily(supported=['debian', 'redhat', 'gentoo'])
def working_copy(remote_url, path=None, branch="default", update=True,
use_sudo=False, user=None):
"""
Require a working copy of the repository from the ``remote_url``.
The ``path`` is optional, and defaults to the last segment of the
remote repository URL.
If the ``path`` does not exist, this will clone the remote
repository and check out the specified branch.
If the ``path`` exists and ``update`` is ``True``, it will pull
changes from the remote repository, check out the specified branch,
then update the working copy.
If the ``path`` exists and ``update`` is ``False``, it will only
check out the specified branch, without pulling remote changesets.
:param remote_url: URL of the remote repository
:type remote_url: str
:param path: Absolute or relative path of the working copy on the
filesystem. If this directory doesn't exist yet, a new
working copy is created through ``hg clone``. If the
directory does exist *and* ``update == True``, a
``hg pull && hg up`` is issued. If ``path is None`` the
``hg clone`` is issued in the current working directory and
the directory name of the working copy is created by ``hg``.
:type path: str
:param branch: Branch or tag to check out. If the given value is a tag
name, update must be ``False`` or consecutive calls will
fail.
:type branch: str
:param update: Whether or not to pull and update remote changesets.
:type update: bool
:param use_sudo: If ``True`` execute ``hg`` with
:func:`fabric.operations.sudo`, else with
:func:`fabric.operations.run`.
:type use_sudo: bool
:param user: If ``use_sudo is True``, run :func:`fabric.operations.sudo`
with the given user. If ``use_sudo is False`` this parameter
has no effect.
:type user: str
"""
command()
if path is None:
path = remote_url.split('/')[-1]
if is_dir(path, use_sudo=use_sudo):
mercurial.pull(path, use_sudo=use_sudo, user=user)
if update:
mercurial.update(path=path, branch=branch, use_sudo=use_sudo,
user=user)
elif not is_dir(path, use_sudo=use_sudo):
mercurial.clone(remote_url, path=path, use_sudo=use_sudo, user=user)
mercurial.update(path=path, branch=branch, use_sudo=use_sudo, user=user)
else:
raise ValueError("Invalid combination of parameters.")
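# Hedged usage sketch: requiring a working copy from inside a fabric task. The repository
# URL and target path are illustrative.
# from fabric.api import task
# from fabtools import require
#
# @task
# def deploy():
#     require.mercurial.working_copy('https://hg.example.com/project',
#                                    path='/srv/project', branch='default',
#                                    update=True, use_sudo=True)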
|
StarcoderdataPython
|
1991617
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""=================================================================
@Project : Algorithm_YuweiYin/LeetCode-All-Solution/Python3
@File : LC-0003-Longest-Substring-Without-Repeating-Characters.py
@Author : [YuweiYin](https://github.com/YuweiYin)
@Date : 2022-01-06
=================================================================="""
# import functools
import sys
import time
# from typing import List
"""
LeetCode - 0003 - (Medium) - Longest Substring Without Repeating Characters
https://leetcode.com/problems/longest-substring-without-repeating-characters/
Description & Requirement:
Given a string s, find the length of the longest substring without repeating characters.
Example 1:
Input: s = "abcabcbb"
Output: 3
Explanation: The answer is "abc", with the length of 3.
Example 2:
Input: s = "bbbbb"
Output: 1
Explanation: The answer is "b", with the length of 1.
Example 3:
Input: s = "pwwkew"
Output: 3
Explanation: The answer is "wke", with the length of 3.
Notice that the answer must be a substring, "pwke" is a subsequence and not a substring.
Constraints:
0 <= s.length <= 5 * 10^4
s consists of English letters, digits, symbols and spaces.
"""
class Solution:
def lengthOfLongestSubstring(self, s: str) -> int:
# exception case
if not isinstance(s, str) or len(s) <= 0:
return 0
# border case
if len(s) == 1:
return 1
if len(s) == 2:
return 1 if s[0] == s[1] else 2
# main method: Slide window & Two pointers
return self._lengthOfLongestSubstring(s)
def _lengthOfLongestSubstring(self, s: str) -> int:
len_s = len(s) # now, 3 <= len_s
longest_length = 1 # default: the first char only, so 1
current_length = 1
# longest_length cannot be longer than the number of distinct chars in s, so the upper bound can be determined
max_length = 0
set_stat = set({})
for ch in s:
if ch not in set_stat:
set_stat.add(ch)
max_length += 1
if max_length == len_s: # all chars are distinct in s
return max_length
# use a dict to guarantee there's no repeated char in current substring
dict_subs = dict({}) # key: char; value: the index of this char in the whole string
# if a char is in the current substring, then its value of dict_subs is >= 0; else, the value is -1
left_subs = 0 # the left index of current slide window
dict_subs[s[left_subs]] = left_subs
right_subs = 1 # the right index of current slide window
while right_subs < len_s:
if s[right_subs] in dict_subs:
if dict_subs[s[right_subs]] >= 0: # this char has existed in the current substring, slide window now
# now, s[repeated_index] == s[right_subs], so s[left_subs: repeated_index + 1] should be disregarded
# continue from s[repeated_index + 1: right_subs]
# find the repeated char by the value (index) of dict_subs, re-start from its right one
repeated_index = dict_subs[s[right_subs]]
if left_subs == repeated_index: # the leftmost char in substring is the one that is repeated
dict_subs[s[right_subs]] = right_subs
left_subs += 1
right_subs += 1
continue # keep the size of window and slide it by 1 step (keep current_length the same, too)
while left_subs < repeated_index: # remove all char in s[left_subs: repeated_index]
# assert s[left_subs] in dict_subs
dict_subs[s[left_subs]] = -1 # remove from dict_subs
current_length -= 1 # (shrink) decrease current_length
left_subs += 1
# now, left_subs == repeated_index
left_subs += 1 # continue from s[repeated_index + 1: right_subs]
dict_subs[s[left_subs]] = left_subs
dict_subs[s[right_subs]] = right_subs # use right_subs to replace repeated_index
right_subs += 1
else: # this char is not in the current substring, put it in
current_length += 1
longest_length = max(longest_length, current_length)
if longest_length == max_length:
return max_length
dict_subs[s[right_subs]] = right_subs # put it in the dict to avoid repetition
right_subs += 1 # keep moving
else: # new char, just put it in and expand the window
current_length += 1
longest_length = max(longest_length, current_length)
if longest_length == max_length:
return max_length
dict_subs[s[right_subs]] = right_subs # put it in the dict to avoid repetition
right_subs += 1 # keep moving
return longest_length
def main():
# Example 1: Output: 3
# s = "abcabcbb"
# Example 2: Output: 1
# s = "bbbbb"
# Example 3: Output: 3
# s = "pwwkew"
# Example 4: Output: 3
# s = "abcbacbb"
# Example 5: Output: 6
s = "wobgrovw"
# init instance
solution = Solution()
# run & time
start = time.process_time()
ans = solution.lengthOfLongestSubstring(s)
end = time.process_time()
# show answer
print('\nAnswer:')
print(ans)
# show time consumption
print('Running Time: %.5f ms' % ((end - start) * 1000))
if __name__ == "__main__":
sys.exit(main())
|
StarcoderdataPython
|
12846192
|
LEAGUES = [
'Scottish Premiership',
'Italy Serie A',
'French Ligue 1',
'Spanish Segunda Division',
'Australian A-League',
'Italy Serie B',
'Dutch Eredivisie',
'Mexican Primera Division Torneo Clausura',
'Russian Premier Liga',
'Spanish Primera Division',
'English League One',
'UEFA Europa League',
'Mexican Primera Division Torneo Apertura',
'German Bundesliga',
'South African ABSA Premier League',
'Austrian T-Mobile Bundesliga',
'Barclays Premier League',
'English League Two',
'Greek Super League',
'German 2. Bundesliga',
'United Soccer League',
'Chinese Super League',
'UEFA Champions League',
'Portuguese Liga',
'English League Championship',
'Belgian Jupiler League',
'Norwegian Tippeligaen',
'Turkish Turkcell Super Lig',
'Danish SAS-Ligaen',
'Japanese J League',
'Swedish Allsvenskan',
'Swiss Raiffeisen Super League',
'Brasileiro Série A',
'Major League Soccer',
'Argentina Primera Division',
'French Ligue 2'
]
TEAMS = {
'Manchester City': 94.25,
'Bayern Munich': 93.96,
'Liverpool': 92.92,
'Barcelona': 91.22,
'Paris Saint-Germain': 87.79,
'Chelsea': 85.96,
'Real Madrid': 85.23,
'Tottenham Hotspur': 85.23,
'Juventus': 84.07,
'Borussia Dortmund': 83.63,
'Atlet<NAME>': 83.11,
'<NAME>': 82.44,
'Ajax': 82.17,
'RB Leipzig': 81.72,
'Internazionale': 81.52,
'Napoli': 80.98,
'Manchester United': 79.79,
'Arsenal': 79.22,
'Everton': 78.53,
'FC Salzburg': 78.51,
'Atalanta': 78.14,
'FC Porto': 78.03,
'Valencia': 77.81,
'Benfica': 76.86,
'TSG Hoffenheim': 76.23,
'Leicester City': 75.82,
'Olympiacos': 75.73,
'AC Milan': 75.49,
'Sevilla FC': 74.86,
'Lyon': 74.57,
'Wolverhampton': 73.87,
'AS Roma': 73.69,
'Getafe': 73.57,
'Real Sociedad': 73.55,
'Athletic Bilbao': 73.38,
'Eibar': 73.31,
'Eintracht Frankfurt': 72.83,
'FC Krasnodar': 71.74,
'Real Betis': 71.51,
'Young Boys': 71.5,
'Villarreal': 71.3,
'Palmeiras': 71.27,
'<NAME>': 71.24,
'Zenit St Petersburg': 71.18,
'Lazio': 71.13,
'Crystal Palace': 71.09,
'VfL Wolfsburg': 70.72,
'Espanyol': 70.42,
'Leganes': 70.32,
'PSV': 70.14,
'<NAME>': 70.1,
'PAOK Salonika': 70.07,
'Newcastle': 69.88,
'West Ham United': 69.71,
'AFC Bournemouth': 69.62,
'Lille': 69.47,
'CSKA Moscow': 69.44,
'Galatasaray': 69.39,
'Fulham': 69.3,
'Sporting CP': 68.75,
'Southampton': 68.7,
'Flamengo': 68.34,
'<NAME>': 68.14,
'<NAME>': 68.02,
'Watford': 67.92,
'<NAME>': 67.77,
'Burnley': 67.76,
'Torino': 67.76,
'Mainz': 67.75,
'Genk': 67.46,
'FC Copenhagen': 67.37,
'Fiorentina': 67.14,
'Marseille': 66.56,
'Sampdoria': 66.48,
'<NAME>': 66.08,
'Alavés': 65.93,
'Club Brugge': 65.76,
'River Plate': 65.61,
'Boca Juniors': 65.43,
'Basel': 65.38,
'Lokomotiv Moscow': 65.26,
'Levante': 64.99,
'Dynamo Kyiv': 64.82,
'Bologna': 64.78,
'<NAME>': 64.76,
'Besiktas': 64.76,
'<NAME>': 64.72,
'Santos': 64.41,
'Real Valladolid': 64.29,
'St Etienne': 64.25,
'AS Monaco': 64.14,
'Osasuna': 64.11,
'<NAME>': 64.04,
'Montpellier': 63.95,
'Granada': 63.78,
'Mallorca': 63.77,
'Genoa': 63.48,
'Celtic': 63.39,
'Ludogorets': 63.34,
'Brighton and Hove Albion': 62.79,
'Norwich City': 62.76,
'Leeds United': 62.74,
'Nantes': 62.67,
'Guangzhou Evergrande': 62.64,
'Red Star Belgrade': 62.4,
'Grêmio': 62.21,
'Atlético Paranaense': 61.84,
'Sheffield United': 61.76,
'Club América': 61.7,
'FC Cologne': 61.4,
'Sassuolo': 61.3,
'<NAME>': 61.22,
'SC Freiburg': 60.9,
'Nice': 60.42,
'Angers': 60.15,
'<NAME>': 59.94,
'Los Angeles FC': 59.87,
'Stade Rennes': 59.76,
'Trabzonspor': 59.59,
'FC Augsburg': 59.56,
'Feyenoord': 59.49,
'Spal': 59.18,
'Monterrey': 58.88,
'Beijing Guoan': 58.81,
'Tigres UANL': 58.78,
'Cagliari': 58.67,
'1. FC Union Berlin': 58.66,
'Strasbourg': 58.39,
'Huddersfield Town': 58.38,
'Rangers': 58.36,
'FC Astana': 58.29,
'Braga': 58.28,
'Nimes': 58.22,
'<NAME>': 58.2,
'<NAME>': 58.2,
'SC Paderborn': 58.12,
'Internacional': 57.99,
'Udinese': 57.93,
'Jablonec': 57.83,
'Empoli': 57.74,
'Shanghai SIPG': 57.73,
'FC Midtjylland': 57.18,
'<NAME>': 57.05,
'São Paulo': 56.84,
'Bordeaux': 56.83,
'Corinthians': 56.28,
'Reims': 56.26,
'<NAME>': 56.08,
'<NAME>': 55.95,
'Toulouse': 55.55,
'Stoke City': 55.52,
'Brentford': 55.4,
'Kawasaki Frontale': 55.29,
'West Bromwich Albion': 55.12,
'Amiens': 54.82,
'AEK Athens': 54.46,
'Fenerbahce': 54.28,
'Racing Club': 54.18,
'Cardiff City': 54.00,
'Malmo FF': 53.82,
'FC Arsenal Tula': 53.55,
'Metz': 53.48,
'Frosinone': 53.36,
'Swansea City': 53.3,
'Brondby': 53.26,
'Vitesse': 53.01,
'Parma': 53.00,
'Molde': 52.92,
'Brest': 52.77,
'Dijon FCO': 52.75,
'<NAME>': 52.58,
'Bahía': 52.53,
'VfB Stuttgart': 52.47,
'Middlesbrough': 52.45,
'León': 52.45,
'Anderlecht': 52.41,
'CA Independiente': 52.38,
'Girona FC': 52.34,
'Standard Liege': 51.98,
'Bristol City': 51.95,
'Kashima Antlers': 51.94,
'Derby County': 51.83,
'Pachuca': 51.59,
'AZ': 51.32,
'Guimaraes': 51.04,
'Guingamp': 50.72,
'KAA Gent': 50.44,
'<NAME>': 50.03,
'FK Qarabag': 49.91,
'<NAME>': 49.76,
'Gazovik Orenburg': 49.68,
'<NAME>': 49.46,
'New York City FC': 49.34,
'<NAME>': 49.3,
'<NAME>': 49.27,
'<NAME>': 49.26,
'Apollon Limassol': 49.22,
'Cruzeiro': 49.03,
'Rostov': 48.88,
'AIK': 48.83,
'<NAME>': 48.74,
'Nottingham Forest': 48.59,
'Fluminense': 48.22,
'Rapid Vienna': 48.13,
'Defensa y Justicia': 48.07,
'Atlanta United FC': 48.06,
'ADO Den Haag': 47.8,
'<NAME>': 47.69,
'B<NAME>ov': 47.66,
'Blackburn': 47.63,
'Sheffield Wednesday': 47.59,
'Ceará': 47.18,
'Arizona United': 46.91,
'Hull City': 46.74,
'FC Utrecht': 46.5,
'Rio Ave': 46.41,
'Hammarby': 46.39,
'Jiangsu Suning FC': 46.38,
'Millwall': 46.37,
'FK Austria Vienna': 46.36,
'Desportivo Aves': 46.34,
'Birmingham': 46.25,
'Toluca': 46.21,
'Konyaspor': 46.04,
'Botafogo': 46.01,
'Portimonense': 45.95,
'Djurgardens IF': 45.8,
'FC Nordsjaelland': 45.73,
'Tijuana': 45.61,
'BK Hacken': 45.6,
'Hannover 96': 45.41,
'AEK Larnaca': 45.4,
'FC Luzern': 45.22,
'IFK Norrkoping': 45.14,
'FC Ufa': 45.09,
'Queens Park Rangers': 44.99,
'St. Truidense': 44.82,
'Boavista': 44.8,
'Lanus': 44.75,
'Pumas Unam': 44.64,
'<NAME>': 44.62,
'Moreirense': 44.44,
'<NAME>': 44.4,
'SK St<NAME>': 44.36,
'<NAME>': 44.21,
'<NAME>': 44.21,
'Sivasspor': 44.2,
'Guadalajara': 43.99,
'Hamburg SV': 43.91,
'Goztepe': 43.83,
'Preston North End': 43.78,
'Mouscron-Peruwelz': 43.73,
'Málaga': 43.71,
'KV Kortrijk': 43.71,
'Philadelphia Union': 43.65,
'Sporting Kansas City': 43.52,
'San Lorenzo': 43.5,
'Santa Clara': 43.46,
'<NAME>': 43.34,
'<NAME>': 43.32,
'Troyes': 43.27,
'Wigan': 43.27,
'Tigre': 43.21,
'Alanyaspor': 43.08,
'Wolfsberger AC': 42.98,
'Sunderland': 42.97,
'Banfield': 42.9,
'Videoton FC': 42.86,
'FC Groningen': 42.82,
'Union Santa Fe': 42.76,
'T<NAME>': 42.61,
'<NAME>': 42.61,
'FC Lugano': 42.58,
'Chapecoense AF': 42.54,
'Rosenborg': 42.42,
'Antwerp': 42.37,
'Cashpoint SC <NAME>': 42.26,
'<NAME>': 42.21,
'Seattle Sounders FC': 42.2,
'<NAME>': 42.17,
'Sporting de Charleroi': 42.09,
'New York Red Bulls': 42.07,
'Goiás': 42.06,
'Atromitos': 41.89,
'<NAME>': 41.86,
'Vorskla': 41.83,
"Newell's Old Boys": 41.61,
'Necaxa': 41.59,
'<NAME>': 41.39,
'Deportivo La Coruña': 41.33,
'Erzurumspor': 41.29,
'Odense BK': 41.26,
'FC Tokyo': 41.03,
'AGF Aarhus': 41.01,
'Caen': 40.93,
'Fortaleza': 40.83,
'Belenenses': 40.7,
'Chaves': 40.45,
'SV Zulte Waregem': 40.42,
'St Gallen': 40.4,
'Los Angeles Galaxy': 40.4,
'Colon Santa Fe': 40.26,
'FC Sion': 40.1,
'Esbjerg': 40.03,
'Mamelodi Sundowns': 39.98,
'Tondela': 39.98,
'Real Salt Lake': 39.97,
'Huracán': 39.85,
'Chicago Fire': 39.84,
'Atlas': 39.75,
'Morelia': 39.72,
'FC Dallas': 39.56,
'<NAME>': 39.55,
'Querétaro': 39.45,
'San Jose Earthquakes': 39.43,
'FC Zurich': 39.42,
'Estudiantes': 39.42,
'<NAME>': 39.31,
'Hebei China Fortune FC': 39.31,
'Reading': 39.26,
'Shanghai Greenland': 39.21,
'Avaí': 38.95,
'Rosario Central': 38.9,
'Kasimpasa': 38.86,
'Lorient': 38.86,
'Toronto FC': 38.86,
'Guangzhou RF': 38.74,
'Lens': 38.63,
'Sydney FC': 38.56,
'Valerenga': 38.53,
'Heracles': 38.4,
'Maritimo': 38.33,
'Heerenveen': 38.33,
'<NAME>': 38.24,
'Minnesota United FC': 38.22,
'<NAME>': 38.2,
'1. FC Nürnberg': 38.04,
'<NAME>': 38.03,
'Puebla': 37.85,
'Panathinaikos': 37.79,
'Ankaragucu': 37.73,
'PEC Zwolle': 37.54,
'<NAME>': 37.53,
'New England Revolution': 37.44,
'Bursaspor': 37.3,
'Aldosivi': 37.3,
'Barnsley': 37.3,
'<NAME>': 37.21,
'Thun': 37.2,
'<NAME>': 37.00,
'<NAME>': 36.99,
'AaB': 36.96,
'Cadiz': 36.86,
'SV Mattersburg': 36.85,
'Mont<NAME>': 36.82,
'Patronato': 36.77,
'<NAME>': 36.75,
'FC Ingolstadt 04': 36.73,
'<NAME>': 36.72,
'Kilmarnock': 36.62,
'<NAME>': 36.61,
'<NAME>': 36.48,
'<NAME>': 36.42,
'FC <NAME>': 36.38,
'<NAME>': 36.34,
'Aberdeen': 36.33,
'Kayserispor': 36.16,
'C.D. Nacional': 36.16,
'Almeria': 36.02,
'VVV Venlo': 35.95,
'Waasland-Beveren': 35.86,
'<NAME>': 35.84,
'FC T<NAME>': 35.73,
'Sporting Gijón': 35.67,
'<NAME>': 35.55,
'Gimnasia La Plata': 35.53,
'<NAME>': 35.52,
'Bodo/Glimt': 35.39,
'<NAME>': 35.27,
'DC United': 35.25,
'Consadole Sapporo': 35.14,
'Antalyaspor': 34.92,
'<NAME>': 34.82,
'Houston Dynamo': 34.79,
'<NAME>': 34.67,
'<NAME>': 34.59,
'Melbourne City': 34.57,
'F91 Dudelange': 34.49,
'Emmen': 34.46,
'1. FC Heidenheim 1846': 34.41,
'Hartberg': 34.3,
'Paris FC': 34.23,
'Albacete': 34.09,
'IFK Goteborg': 34.07,
'FC Wacker Innsbruck': 34.04,
'Orlando City SC': 33.94,
'OFI Crete': 33.93,
'Haugesund': 33.88,
'Asteras Tripolis': 33.84,
'Newcastle Jets': 33.78,
'Bidvest Wits': 33.7,
'<NAME>': 33.64,
'Urawa Red Diamonds': 33.57,
'Neuchate<NAME>': 33.54,
'Las Palmas': 33.52,
'Feirense': 33.38,
'Hibernian': 33.28,
'SD Huesca': 33.26,
'<NAME>': 33.25,
'Eupen': 33.19,
'SK Brann': 33.18,
'<NAME>': 33.13,
'Excelsior': 33.07,
'Sonderjyske': 32.99,
'KV Oostende': 32.85,
'<NAME>': 32.81,
'Kristiansund BK': 32.72,
'Lamia': 32.57,
'VfL Bochum': 32.34,
'<NAME>': 32.21,
'Larissa': 32.08,
'SV Sandhausen': 31.97,
'<NAME>': 31.67,
'Palermo': 31.44,
'<NAME>': 31.44,
'<NAME>': 31.43,
'Tenerife': 31.38,
'CSA': 31.38,
'SV Darmstadt 98': 31.35,
'Melbourne Victory': 31.32,
'Nashville SC': 31.29,
'<NAME>': 31,
'KSC Lokeren': 31,
'<NAME>': 30.95,
'Extremadura UD': 30.94,
'Elche': 30.89,
'Odd BK': 30.89,
'Real Oviedo': 30.87,
'Gamba Osaka': 30.76,
'Reno 1868 FC': 30.57,
'1. FC Magdeburg': 30.45,
'Lobos de la BUAP': 30.3,
'St. Pölten': 30.28,
'New York Red Bulls II': 30.21,
'Hobro IK': 30.13,
'<NAME>': 30.06,
'Benevento': 29.83,
'Veracruz': 29.73,
'Shenzhen FC': 29.66,
'Vendsyssel': 29.55,
'Östersunds FK': 29.53,
'FC Juárez': 29.48,
'Chateauroux': 29.36,
'Adelaide United': 29.25,
'De Graafschap': 29.25,
'Black Aces': 29.18,
'Lugo': 29.18,
'Cittadella': 29.16,
'FC St. Pauli': 29.04,
'Vejle': 29.03,
'Numancia': 28.98,
'T<NAME>': 28.68,
'AS Nancy Lorraine': 28.55,
'FC Xanthi': 28.54,
'<NAME>': 28.47,
'<NAME>': 28.47,
'Orléans': 28.4,
'Panetolikos': 28.33,
'<NAME>': 28.2,
'Colorado Rapids': 28.18,
'Orebro SK': 28.18,
'<NAME>': 27.98,
'Auxerre': 27.98,
'<NAME>': 27.94,
'Portsmouth': 27.65,
'Hearts': 27.65,
'Western Sydney FC': 27.57,
'SpVgg <NAME>': 27.42,
'Sarpsborg': 27.21,
'Panionios': 27.09,
'Crotone': 27.07,
'Bolton': 27.05,
'Valenciennes': 27.03,
'Pittsburgh Riverhounds': 27.02,
'<NAME>': 26.96,
'Perugia': 26.91,
'<NAME>': 26.87,
'Spezia': 26.33,
'<NAME>': 26.27,
'<NAME>': 26.25,
'Giannina': 26.12,
'<NAME>': 26.02,
'<NAME>': 25.86,
'<NAME>': 25.61,
'Motherwell': 25.53,
'GIF Sundsvall': 25.32,
'<NAME>': 24.83,
'<NAME>': 24.82,
'Cosenza': 24.61,
'FC Cincinnati': 24.46,
'Grenoble': 24.43,
'North Carolina FC': 24.33,
'<NAME>': 24.27,
'AD Alcorcon': 24.26,
'MSV Duisburg': 24.25,
'<NAME>': 24.13,
'Real Monarchs SLC': 24.09,
'Vancouver Whitecaps': 23.95,
'Scunthorpe': 23.88,
'Peterborough United': 23.86,
'IK Sirius': 23.78,
'Fresno FC': 23.66,
'Niort': 23.62,
'Rotherham United': 23.58,
'Cordoba': 23.39,
'IF Elfsborg': 23.35,
'Stabaek': 23.05,
'Louisville City FC': 22.79,
'Livingston': 22.74,
'Foggia': 22.69,
'Sochaux': 22.69,
'Viking FK': 22.65,
'Cremonese': 22.61,
'S<NAME>': 22.51,
'Mjondalen': 22.5,
'St Johnstone': 22.45,
'Lillestrom': 22.3,
'<NAME>': 22.2,
'San Antonio FC': 22.16,
'Fleetwood Town': 21.97,
'Ottawa Fury FC': 21.68,
'AC Ajaccio': 21.65,
'Coventry City': 21.61,
'SuperSport United': 21.58,
'<NAME>': 21.54,
'Bradford City': 21.53,
'Oxford United': 21.38,
'Beziers AS': 21.15,
'Lincoln City': 20.97,
'Highlands Park FC': 20.97,
'Polokwane City FC': 20.91,
'Helsingborgs IF': 20.78,
'NAC': 20.75,
'Bloem Celtic': 20.58,
'Ranheim': 20.56,
'<NAME>': 20.46,
'Brescia': 20.45,
'Stromsgodset': 20.41,
'Maritzburg Utd': 20.38,
'Orange County SC': 20.31,
'US Pescara': 20.15,
'Sacramento Republic FC': 20.03,
'New Mexico United': 19.99,
'Golden Arrows': 19.68,
'Gimnástic Tarragona': 19.51,
'Bristol Rovers': 19.43,
'AmaZulu': 19.28,
'Gillingham': 19.2,
'Shrewsbury Town': 18.97,
'F.B.C Unione Venezia': 18.89,
'Livorno': 18.84,
'Salernitana': 18.81,
'Portland Timbers 2': 18.71,
'Kalmar FF': 18.71,
'Charleston Battery': 18.69,
'Lecce': 18.66,
'Verona': 18.53,
'Tromso': 18.14,
'Austin Bold FC': 17.99,
'Chippa United': 17.97,
'Brisbane Roar': 17.89,
'Padova': 17.85,
'Oklahoma City Energy FC': 17.6,
'Mansfield Town': 17.08,
'Blackpool': 17.06,
'Ascoli': 17.06,
'Free State Stars': 17.00,
'Southend United': 16.98,
'Levadiakos': 16.8,
'Exeter City': 16.76,
'Saint Louis FC': 16.71,
'Red Star FC 93': 16.37,
'Hamilton Academical': 16.25,
'Baroka FC': 16.09,
'GFC Ajaccio': 15.91,
'<NAME>': 15.69,
'Falkenbergs FF': 15.6,
'Tranmere Rovers': 15.57,
'Carpi': 15.5,
'Vasby United': 15.27,
'Matsumoto Yamaga FC': 14.98,
'Central Coast Mariners': 14.88,
'Bury': 14.87,
'Charlotte Independence': 14.84,
'Wycombe Wanderers': 14.69,
'LA Galaxy II': 14.67,
'Oldham Athletic': 14.62,
'Swindon Town': 14.56,
'Black Leopards': 14.41,
'Plym<NAME>yle': 14.2,
'AFC Wimbledon': 14.07,
'El Paso Locomotive FC': 13.7,
'Las Vegas Lights FC': 13.18,
'Bethlehem Steel FC': 13.11,
'A<NAME>': 13.08,
'Forest Green Rovers': 12.88,
'Rochdale': 12.68,
'Dundee': 12.52,
'Rio Grande Valley FC Toros': 12.48,
'Northampton Town': 12.36,
'Loudoun United FC': 11.49,
'<NAME>': 11.05,
'Colchester United': 10.65,
'Carlisle United': 10.37,
'Stevenage': 10.24,
'Cheltenham Town': 10.22,
'Memphis 901 FC': 10.15,
'Walsall': 9.95,
'Swope Park Rangers': 9.9,
'<NAME>': 9.63,
'Birmingham Legion FC': 9.29,
'Morecambe': 8.8,
'Newport County': 8.19,
'Grimsby Town': 7.39,
'Crawley Town': 7.18,
'Port Vale': 6.94,
'Tulsa Roughnecks': 6.63,
'Colorado Springs Switchbacks FC': 6.45,
'Cambridge United': 6.41,
'Macclesfield': 6.28,
'Atlanta United 2': 4.73,
'Notts County': 4.52,
'Yeovil Town': 4.35,
'Tacoma Defiance': 4.14
}
|
StarcoderdataPython
|
3411660
|
import cv2
from dbr import DynamsoftBarcodeReader
dbr = DynamsoftBarcodeReader()
import time
import os
import sys
sys.path.append('../')
import config
results = None
# The callback function for receiving barcode results
def onBarcodeResult(data):
global results
results = data
def get_time():
localtime = time.localtime()
capturetime = time.strftime("%Y%m%d%H%M%S", localtime)
return capturetime
def read_barcode():
global results
video_width = 640
video_height = 480
vc = cv2.VideoCapture(0)
vc.set(3, video_width) #set width
vc.set(4, video_height) #set height
if vc.isOpened():
dbr.initLicense('LICENSE-KEY')
rval, frame = vc.read()
else:
return
windowName = "Barcode Reader"
max_buffer = 2
max_results = 10
barcodeTypes = config.barcodeTypes
image_format = 1 # 0: gray; 1: rgb888
dbr.startVideoMode(max_buffer, max_results, video_width, video_height, image_format, barcodeTypes, onBarcodeResult)
while True:
if results != None:
thickness = 2
color = (0,255,0)
for result in results:
print("barcode format: " + result[0])
print("barcode value: " + result[1])
x1 = result[2]
y1 = result[3]
x2 = result[4]
y2 = result[5]
x3 = result[6]
y3 = result[7]
x4 = result[8]
y4 = result[9]
cv2.line(frame, (x1, y1), (x2, y2), color, thickness)
cv2.line(frame, (x2, y2), (x3, y3), color, thickness)
cv2.line(frame, (x3, y3), (x4, y4), color, thickness)
cv2.line(frame, (x4, y4), (x1, y1), color, thickness)
results = None
cv2.imshow(windowName, frame)
rval, frame = vc.read()
# start = time.time()
try:
ret = dbr.appendVideoFrame(frame)
except:
pass
# cost = (time.time() - start) * 1000
# print('time cost: ' + str(cost) + ' ms')
# 'ESC' for quit
key = cv2.waitKey(1)
if key == 27:
break
dbr.stopVideoMode()
cv2.destroyWindow(windowName)
if __name__ == "__main__":
print("OpenCV version: " + cv2.__version__)
read_barcode()
|
StarcoderdataPython
|
200202
|
"""This module contains the general information for VnicIPv4StaticRoute ManagedObject."""
from ...ucsmo import ManagedObject
from ...ucscoremeta import MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class VnicIPv4StaticRouteConsts:
pass
class VnicIPv4StaticRoute(ManagedObject):
"""This is VnicIPv4StaticRoute class."""
consts = VnicIPv4StaticRouteConsts()
naming_props = set([u'addr'])
mo_meta = MoMeta("VnicIPv4StaticRoute", "vnicIPv4StaticRoute", "ipv4-route-[addr]", VersionMeta.Version101e, "InputOutput", 0x1ff, [], ["admin", "ls-compute", "ls-config", "ls-network", "ls-server"], [u'adaptorHostIscsiIf', u'adaptorVlan', u'vnicIPv4If'], [], ["Get"])
prop_meta = {
"addr": MoPropertyMeta("addr", "addr", "string", VersionMeta.Version101e, MoPropertyMeta.NAMING, 0x2, 0, 256, r"""((([0-9]){1,3}\.){3}[0-9]{1,3})""", [], []),
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version101e, MoPropertyMeta.INTERNAL, 0x4, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"def_gw": MoPropertyMeta("def_gw", "defGw", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, None, 0, 256, r"""((([0-9]){1,3}\.){3}[0-9]{1,3})""", [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, 0x8, 0, 256, None, [], []),
"gw_addr": MoPropertyMeta("gw_addr", "gwAddr", "string", VersionMeta.Version101e, MoPropertyMeta.READ_WRITE, 0x10, 0, 256, r"""((([0-9]){1,3}\.){3}[0-9]{1,3})""", [], []),
"gw_subnet": MoPropertyMeta("gw_subnet", "gwSubnet", "string", VersionMeta.Version101e, MoPropertyMeta.READ_WRITE, 0x20, 0, 256, r"""((([0-9]){1,3}\.){3}[0-9]{1,3})""", [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, 0x40, 0, 256, None, [], []),
"sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version101e, MoPropertyMeta.READ_WRITE, 0x80, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
"subnet": MoPropertyMeta("subnet", "subnet", "string", VersionMeta.Version101e, MoPropertyMeta.CREATE_ONLY, 0x100, 0, 256, r"""((([0-9]){1,3}\.){3}[0-9]{1,3})""", [], []),
}
prop_map = {
"addr": "addr",
"childAction": "child_action",
"defGw": "def_gw",
"dn": "dn",
"gwAddr": "gw_addr",
"gwSubnet": "gw_subnet",
"rn": "rn",
"sacl": "sacl",
"status": "status",
"subnet": "subnet",
}
def __init__(self, parent_mo_or_dn, addr, **kwargs):
self._dirty_mask = 0
self.addr = addr
self.child_action = None
self.def_gw = None
self.gw_addr = None
self.gw_subnet = None
self.sacl = None
self.status = None
self.subnet = None
ManagedObject.__init__(self, "VnicIPv4StaticRoute", parent_mo_or_dn, **kwargs)
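# Hedged usage sketch: attaching a static route to an existing vNIC IPv4 interface through
# a UCS handle. The host, credentials, parent DN and addresses are illustrative only.
# from ucsmsdk.ucshandle import UcsHandle
# handle = UcsHandle('ucsm-host', 'admin', 'password')
# handle.login()
# route = VnicIPv4StaticRoute(parent_mo_or_dn='org-root/ls-sp1/ipv4-profile',
#                             addr='10.10.0.0', gw_addr='10.10.0.1',
#                             gw_subnet='255.255.255.0', subnet='255.255.255.0')
# handle.add_mo(route)
# handle.commit()
# handle.logout()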
|
StarcoderdataPython
|
9719769
|
<reponame>datahounds/fantasy-premier-league<gh_stars>1-10
import pandas as pd
import sasoptpy as so
import os
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from subprocess import Popen, DEVNULL
from datasets import FplApiData
class SelectionModel:
def __init__(self, team_id, gw, forecasts_file):
'''Downloads data from fpl api
and combines with input from fplreview.com'''
# API data
api_data = FplApiData(team_id=team_id, gw=gw-1)
# players
self.players = api_data.players
# position IDs, names and squad limits
self.positions = api_data.positions
# team IDs and names
self.teams = api_data.teams
# current squad
self.current_squad = api_data.current_squad
# in the bank
self.bank = api_data.bank
# upcoming gameweek
self.gw = gw
# Review data
self.forecasts = api_data.make_opt_df(forecasts_file)
def solve_optimal_squad(self, budget=100):
players = self.forecasts.index
positions = self.positions.index
teams = self.teams.index
model_name = f'gw{self.gw}_{budget}budget'
model = so.Model(model_name)
# Variables
squad = model.add_variables(players,name='squad',
vartype=so.binary)
lineup = model.add_variables(players, name='lineup',
vartype=so.binary)
captain = model.add_variables(players, name='captain',
vartype=so.binary)
vicecap = model.add_variables(players, name='vicecap',
vartype=so.binary)
# Constraints
# 15 players in squad
squad_count = so.expr_sum(squad[p] for p in players)
model.add_constraint(squad_count == 15, name='squad_count')
# 11 players in starting lineup
model.add_constraint(so.expr_sum(lineup[p] for p in players) == 11,
name='lineup_count')
# 1 captain
model.add_constraint(so.expr_sum(captain[p] for p in players) == 1,
name='captain_count')
# 1 vice-captain
model.add_constraint(so.expr_sum(vicecap[p] for p in players) == 1,
name='vicecap_count')
# players in starting lineup must also be in squad
model.add_constraints((lineup[p] <= squad[p] for p in players),
name='lineup_squad_rel')
# captain must come from within squad
model.add_constraints((captain[p] <= lineup[p] for p in players),
name='captain_lineup_rel')
# vice-captain must come from within squad
model.add_constraints((vicecap[p] <= lineup[p] for p in players),
name='vicecap_lineup_rel')
# captain and vice-captain can't be same person
model.add_constraints((captain[p] + vicecap[p] <= 1 for p in players),
name='cap_vc_rel')
# count of each player per position in starting lineup
lineup_type_count = {
t: so.expr_sum(lineup[p] for p in players
if self.forecasts.loc[p, 'position_id'] == t)
for t in positions}
# count of all players in lineup must be at least 'squad_min_play'
# and no more than 'squad_max_play' for each position type
model.add_constraints(
(lineup_type_count[t] == [self.positions.loc[t, 'squad_min_play'],
self.positions.loc[t, 'squad_max_play']]
for t in positions),
name='valid_formation')
# count of each player per position in squad
squad_type_count = {
t: so.expr_sum(squad[p] for p in players
if self.forecasts.loc[p, 'position_id'] == t)
for t in positions}
# count of all players in squad must be equal to 'squad_select'
# for each position type
model.add_constraints(
(squad_type_count[t] == self.positions.loc[t, 'squad_select']
for t in positions),
name='valid_squad')
# total value of squad cannot exceed budget
price = so.expr_sum(
self.forecasts.loc[p, 'bv'] * squad[p] for p in players)
model.add_constraint(price <= budget, name='budget_limit')
# no more than 3 players per team
model.add_constraints(
(
so.expr_sum(squad[p] for p in players
if self.forecasts.loc[p, 'team_id'] == t)
<= 3 for t in teams),
name='team_limit'
)
# sum of starting 11 players, plus double captain score
# and upweight vice-captain
total_points = so.expr_sum(self.forecasts.loc[p, f'{self.gw}_pts']
* (lineup[p] + captain[p] + 0.1 * vicecap[p])
for p in players)
# Objective
model.set_objective(-total_points, sense='N', name='total_xp')
model.export_mps(f'{model_name}.mps')
command = f'cbc {model_name}.mps solve solu {model_name}.txt'
Popen(command, shell=False, stdout=DEVNULL).wait()
for v in model.get_variables():
v.set_value(0)
with open(f'{model_name}.txt', 'r') as f:
for line in f:
if 'objective value' in line:
continue
words = line.split()
var = model.get_variable(words[1])
var.set_value(float(words[2]))
picks = []
for p in players:
if squad[p].get_value() > .5:
lp = self.forecasts.loc[p]
is_captain = 1 if captain[p].get_value() > .5 else 0
is_lineup = 1 if lineup[p].get_value() > .5 else 0
is_vice = 1 if vicecap[p].get_value() > .5 else 0
position = self.positions.loc[lp['position_id'], 'position_name']
team = self.teams.loc[lp['team_id'], 'team_name']
picks.append([lp['web_name'], lp['position_id'], position, team,
lp['bv'], round(lp[f'{self.gw}_pts'], 2),
is_lineup, is_captain, is_vice])
picks_df = pd.DataFrame(
picks,
columns=['Name', 'Pos_id', 'Pos', 'Team', 'Price', 'xP', 'lineup',
'captain', 'vicecaptain']
).sort_values(by=['lineup', 'Pos_id', 'xP'], ascending=[False, True, True])
total_xp = so.expr_sum((lineup[p] + captain[p])
* self.forecasts.loc[p, f'{self.gw}_pts']
for p in players).get_value()
print(f'Total expected value for budget {budget:.1f}: {total_xp:.2f}')
os.remove(f'{model_name}.mps')
os.remove(f'{model_name}.txt')
return picks_df
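    # Hedged usage sketch: picking an optimal fresh squad for the upcoming gameweek.
    # The team id, gameweek and forecast file name are illustrative; the forecast CSV is
    # expected to follow the fplreview.com export format consumed by FplApiData.
    # model = SelectionModel(team_id=123456, gw=20, forecasts_file='fplreview_gw20.csv')
    # print(model.solve_optimal_squad(budget=100))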
def solve_multi_week(self, ft, horizon, decay_base=1.0):
# ToDo: absorb optimal squad method into this one
# compare your own team outcomes with the optimal squad
# useful for deciding when to wildcard
# ToDo: add parameter for helping to decide on when to use bench boost
'''
Solves multi-objective FPL problem with transfers
Parameters
----------
ft: integer
Number of available free transfers (currently)
horizon: integer
Number of weeks to consider in optimization
decay_base: float
Base for the decay function, default of 1 means no decay
'''
# Data
players = self.forecasts.index
positions = self.positions.index
teams = self.teams.index
current_squad = self.current_squad['player_id'].tolist()
itb = self.bank
gameweeks = list(range(self.gw, self.gw+horizon))
all_gw = [self.gw-1] + gameweeks
# Model
model_name = f'w{self.gw}_h{horizon}_d{decay_base}'
model = so.Model(model_name)
# Variables
squad = model.add_variables(
players, all_gw, name='squad', vartype=so.binary)
lineup = model.add_variables(
players, gameweeks, name='lineup', vartype=so.binary)
captain = model.add_variables(
players, gameweeks, name='captain', vartype=so.binary)
vicecap = model.add_variables(
players, gameweeks, name='vicecap', vartype=so.binary)
transfer_in = model.add_variables(
players, gameweeks, name='transfer_in', vartype=so.binary)
transfer_out = model.add_variables(
players, gameweeks, name='transfer_out', vartype=so.binary)
in_the_bank = model.add_variables(
all_gw, name='itb', vartype=so.continuous, lb=0)
free_transfers = model.add_variables(
all_gw, name='ft', vartype=so.integer, lb=1, ub=2)
penalized_transfers = model.add_variables(
gameweeks, name='pt', vartype=so.integer, lb=0)
# artificial binary variable to handle transfer logic
aux = model.add_variables(
gameweeks, name='aux', vartype=so.binary)
# Dictionaries
# sell prices of all players
sell_price = self.forecasts['sv'].to_dict()
# buy prices of all players
buy_price = self.forecasts['bv'].to_dict()
# total bank earned from selling players across gameweeks
sold_amount = {w: so.expr_sum(sell_price[p] * transfer_out[p,w]
for p in players)
for w in gameweeks}
# total bank spent on buying players across gameweeks
bought_amount = {w: so.expr_sum(buy_price[p] * transfer_in[p,w]
for p in players)
for w in gameweeks}
# player weekly forecast points
points_player_week = {(p,w): self.forecasts.loc[p, f'{w}_pts']
for p in players for w in gameweeks}
# number of transfers made each week
number_of_transfers = {w: so.expr_sum(transfer_out[p,w] for p in players)
for w in gameweeks}
# assume one transfer was made last week ?? why ??
number_of_transfers[self.gw-1] = 1
transfer_diff = {w: number_of_transfers[w] - free_transfers[w]
for w in gameweeks}
# Initial conditions
# set squad = 1 for all players currently in squad at previous GW deadline
model.add_constraints(
(squad[p, self.gw-1] == 1 for p in current_squad),
name='initial_squad_players')
# set squad = 0 for all other players
model.add_constraints(
(squad[p, self.gw-1] == 0 for p in players if p not in current_squad),
name='initial_squad_others')
# add current bank value
model.add_constraint(in_the_bank[self.gw-1] == itb, name='initial_itb')
# add current free transfers
model.add_constraint(free_transfers[self.gw-1] == ft, name='initial_ft')
# Constraints (per week)
# 15 players in squad
squad_count = {
w: so.expr_sum(squad[p, w] for p in players)
for w in gameweeks}
model.add_constraints(
(squad_count[w] == 15 for w in gameweeks), name='squad_count')
# 11 players in starting lineup
model.add_constraints(
(so.expr_sum(lineup[p,w] for p in players) == 11 for w in gameweeks),
name='lineup_count')
# 1 captain
model.add_constraints(
(so.expr_sum(captain[p,w] for p in players) == 1 for w in gameweeks),
name='captain_count')
# 1 vice-captain
model.add_constraints(
(so.expr_sum(vicecap[p,w] for p in players) == 1 for w in gameweeks),
name='vicecap_count')
# players in starting lineup must also be in squad
model.add_constraints(
(lineup[p,w] <= squad[p,w] for p in players for w in gameweeks),
name='lineup_squad_rel')
# captain must come from within squad
model.add_constraints(
(captain[p,w] <= lineup[p,w] for p in players for w in gameweeks),
name='captain_lineup_rel')
# vice-captain must come from within squad
model.add_constraints(
(vicecap[p,w] <= lineup[p,w] for p in players for w in gameweeks),
name='vicecap_lineup_rel')
# captain and vice-captain can't be same person
model.add_constraints(
(captain[p,w] + vicecap[p,w] <= 1 for p in players for w in gameweeks),
name='cap_vc_rel')
# count of each player per position in starting lineup
lineup_type_count = {
(t,w): so.expr_sum(lineup[p,w] for p in players
if self.forecasts.loc[p, 'position_id'] == t)
for t in positions for w in gameweeks}
# count of all players in lineup must be at least 'squad_min_play'
# and no more than 'squad_max_play' for each position type
model.add_constraints(
(
lineup_type_count[t,w] == [
self.positions.loc[t, 'squad_min_play'],
self.positions.loc[t, 'squad_max_play']]
for t in positions for w in gameweeks),
name='valid_formation')
# count of each player per position in squad
squad_type_count = {
(t,w): so.expr_sum(squad[p,w] for p in players
if self.forecasts.loc[p, 'position_id'] == t)
for t in positions for w in gameweeks}
# count of all players in squad must be equal to 'squad_select'
# for each position type
model.add_constraints(
(
squad_type_count[t,w] == self.positions.loc[t, 'squad_select']
for t in positions for w in gameweeks),
name='valid_squad')
# no more than 3 players per team
model.add_constraints(
(
so.expr_sum(squad[p,w] for p in players
if self.forecasts.loc[p, 'team_id'] == t)
<= 3 for t in teams for w in gameweeks),
name='team_limit')
# Transfer constraints
# squad is equal to squad from previous week, minus transfers out, plus in
model.add_constraints(
(
squad[p,w] == squad[p,w-1] + transfer_in[p,w] - transfer_out[p,w]
for p in players for w in gameweeks),
name='squad_transfer_rel')
# handles running bank balance (assumes no changes in player values)
model.add_constraints(
(
in_the_bank[w] == in_the_bank[w-1] + sold_amount[w] - bought_amount[w]
for w in gameweeks),
name='cont_budget')
# Free transfer constraints
        # each week starts with 1 free transfer, plus 1 more when one was banked (aux = 1)
model.add_constraints(
(free_transfers[w] == aux[w] + 1 for w in gameweeks),
name='aux_ft_rel')
        # if a free transfer was left unused last week, aux is forced to 1 (it is banked)
model.add_constraints(
(
free_transfers[w-1] - number_of_transfers[w-1] <= 2 * aux[w]
for w in gameweeks),
name='force_aux_1')
        # aux may only be 1 when at least one free transfer was left unused last week;
        # -14 is a big-M bound (at worst 15 transfers are made with a single free transfer)
model.add_constraints(
(
free_transfers[w-1] - number_of_transfers[w-1] >= aux[w]
+ (-14)*(1-aux[w])
for w in gameweeks),
name='force_aux_2')
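        # e.g. with 1 free transfer last week: if it was used, the difference is 0
        # and aux is forced to 0 (1 FT this week); if it was left unused, the
        # difference is 1 and aux is forced to 1 (2 FTs this week)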
        # transfers made beyond the free allowance are penalized ('hits')
model.add_constraints(
(penalized_transfers[w] >= transfer_diff[w] for w in gameweeks),
name='pen_transfer_rel')
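        # e.g. making 3 transfers with 1 free transfer gives transfer_diff = 2,
        # i.e. 2 penalized transfers and an 8-point deduction in the objective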
# Objectives
        # expected points of the starting 11, counting the captain twice and
        # giving the vice-captain a small bonus as a tie-breaker
gw_xp = {
w: so.expr_sum(points_player_week[p,w]
* (lineup[p,w] + captain[p,w]
+ 0.1*vicecap[p,w]) for p in players)
for w in gameweeks}
# subtract transfer costs
gw_total = {w: gw_xp[w] - 4 * penalized_transfers[w] for w in gameweeks}
total_xp = so.expr_sum(
gw_total[w] * pow(decay_base, w-self.gw) for w in gameweeks)
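        # e.g. decay_base=0.9 weights the current, next and following gameweeks
        # by 1.0, 0.9 and 0.81 respectively; decay_base=1.0 weights all weeks equally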
model.set_objective(-total_xp, sense='N', name='total_xp')
# Solve
model.export_mps(f'{model_name}.mps')
command = f'cbc {model_name}.mps solve solu {model_name}.txt'
        # split the command string so it runs without a shell on any platform;
        # DEVNULL suppresses the solver's console output
        process = Popen(command.split(), stdout=DEVNULL)
process.wait()
# Parsing
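        # each variable line of the CBC solution file is expected to look like:
        #   <index> <variable name> <value> <reduced cost>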
with open(f'{model_name}.txt', 'r') as f:
for line in f:
if 'objective value' in line:
continue
words = line.split()
var = model.get_variable(words[1])
var.set_value(float(words[2]))
# DataFrame generation
picks = []
for w in gameweeks:
for p in players:
if squad[p,w].get_value() + transfer_out[p,w].get_value() > .5:
lp = self.forecasts.loc[p]
is_captain = 1 if captain[p,w].get_value() > .5 else 0
is_lineup = 1 if lineup[p,w].get_value() > .5 else 0
is_vice = 1 if vicecap[p,w].get_value() > .5 else 0
is_transfer_in = 1 if transfer_in[p,w].get_value() > .5 else 0
is_transfer_out = 1 if transfer_out[p,w].get_value() > .5 else 0
position = self.positions.loc[lp['position_id'], 'position_name']
team = self.teams.loc[lp['team_id'], 'team_name']
picks.append([
w, lp['web_name'], lp['position_id'], position, team,
buy_price[p], sell_price[p], round(points_player_week[p,w],2),
is_lineup, is_captain, is_vice, is_transfer_in, is_transfer_out
])
picks_df = pd.DataFrame(
picks,
columns=['GW', 'Name', 'Pos_id', 'Pos', 'Team', 'BV', 'SV', 'xP',
'lineup', 'captain', 'vicecaptain', 'transfer_in', 'transfer_out']
).sort_values(
by=['GW', 'lineup', 'Pos_id', 'xP'],
ascending=[True, False, True, True])
total_xp = so.expr_sum(
(lineup[p,w] + captain[p,w]) * points_player_week[p,w]
for p in players for w in gameweeks
).get_value()
print('SUMMARY OF ACTIONS', '-----------', sep='\n')
for w in gameweeks:
print(f'GW {w}:')
            print(f'ITB={in_the_bank[w].get_value():.1f}',
f'FT={free_transfers[w].get_value()}',
f'PT={penalized_transfers[w].get_value()}')
for p in players:
if transfer_in[p,w].get_value() > .5:
print(f' Buy {p} - {self.forecasts["web_name"][p]}')
if transfer_out[p,w].get_value() > .5:
print(f' Sell {p} - {self.forecasts["web_name"][p]}')
print(f'\nTotal expected value: {total_xp:.2f} ({total_xp/horizon:.2f} / week)')
os.remove(f'{model_name}.mps')
os.remove(f'{model_name}.txt')
return picks_df
if __name__ == '__main__':
parser = ArgumentParser(
description='Optimises squad selection for given time horizon',
formatter_class=ArgumentDefaultsHelpFormatter)
    parser.add_argument('-t', type=str, default='269471',
help='unique ID of FPL manager', dest='team_id')
parser.add_argument('-w', type=int, required=True,
help='upcoming gameweek number', dest='gameweek')
parser.add_argument('-f', type=str, required=True,
help='path to FPLreview forecasts file',
dest='forecasts')
parser.add_argument('-ft', type=int, default=1,
help='number of free transfers for upcoming week',
dest='free_transfers')
parser.add_argument('-hz', type=int, default=5,
help='number of weeks to look forward',
dest='horizon')
parser.add_argument('-ud', type=float, default=1.0,
help='future uncertainty decay parameter',
dest='uncertainty')
args = parser.parse_args()
# make sure using correct separators for path names
forecasts = args.forecasts.replace('/', os.sep)
model = SelectionModel(team_id=args.team_id, gw=args.gameweek,
forecasts_file=forecasts)
print('OPTIMIZING ACTION PLAN')
picks = model.solve_multi_week(
ft=args.free_transfers, horizon=args.horizon, decay_base=args.uncertainty)
print('\nSQUAD PICKS:')
print(picks)
print('\n\nFINDING OPTIMAL SQUAD FOR UPCOMING WEEK')
optimal_squad = model.solve_optimal_squad()
print('\nOPTIMAL SQUAD:')
print(optimal_squad)
|
StarcoderdataPython
|
6666473
|
import flask
from contextvars_extras.context_management import bind_to_sandbox_context
class Flask(flask.Flask):
"""Flask app with contextvars extensions.
    This is a subclass of :class:`flask.Flask` that adds some integration with
    the :mod:`contextvars` module.
    Currently, it adds only 1 feature: it puts each HTTP request into its own context.
That means that inside a view function, you can freely change any context variables,
and your changes stay private to the current HTTP request.
    Once the HTTP request is handled, all context variables are automatically restored.
Example::
>>> import pytz
>>> from contextvars import ContextVar
>>> from contextvars_extras.integrations.flask import Flask
>>> timezone_var = ContextVar("timezone_var", default="UTC")
>>> flask_app = Flask(__name__)
>>> @flask_app.route("/test_url")
... def test_view_function():
... timezone_var.set("Antarctica/Troll")
... return timezone_var.get()
>>> client = flask_app.test_client()
>>> response = client.get("/test_url")
>>> response.data
b'Antarctica/Troll'
    # timezone_var was changed by test_view_function() above, but that
    # change isn't seen here, because each HTTP request is put into its own sandbox context.
>>> timezone_var.get()
'UTC'
"""
@bind_to_sandbox_context
def __call__(self, environ, start_response):
"""Call Flask as WSGI application.
This is the entry point to Flask application.
It just calls ``Flask.wsgi_app``, where all the interesting stuff happens.
Also, it puts each WSGI request into its own context (by calling
:func:`contextvars.copy_context`), so each HTTP request can modify context vars
without affecting other HTTP requests.
"""
return super().__call__(environ, start_response)
|
StarcoderdataPython
|
169163
|
<reponame>caioaraujo/bakery_payments_api_v2
# Generated by Django 2.1.5 on 2019-02-02 14:36
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('payment', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='payment',
name='value',
field=models.DecimalField(decimal_places=2, max_digits=15),
),
]
|
StarcoderdataPython
|
4939223
|
import os, sys
from argparse import ArgumentParser
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from tqdm import tqdm
from util.util_funcs import load_jsonl, replace_entities, extract_sents, store_jsonl
from util.logger import get_logger
DIR_PATH = os.path.abspath(os.getcwd())
FEVEROUS_PATH = DIR_PATH + "/FEVEROUS/src"
sys.path.insert(0, FEVEROUS_PATH)
from database.feverous_db import FeverousDB
from utils.wiki_page import WikiPage
logger = get_logger()
# TODO: The table parts are currently not used
def extract_table_text(table):
cell_ids = table.get_ids()
table_rows = []
for i, cell_id in enumerate(cell_ids):
if "table_caption" in cell_id:
continue
cell_id_list = cell_id.split("_")
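        # cell ids are assumed to look like '..._cell_<row>_<col>', so the
        # second-to-last token below is the row index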
row = int(cell_id_list[-2])
if len(table_rows) < row + 1:
table_rows.append(replace_entities(table.get_cell_content(cell_id)))
else:
table_rows[row] += " " + replace_entities(table.get_cell_content(cell_id))
return table_rows
def extract_tables(doc_json):
page = WikiPage(doc_json["title"], doc_json)
tables = page.get_tables()
tables_content = []
for table in tables:
table_rows = extract_table_text(table)
tables_content.append(table_rows)
return tables_content
def expand_table_id(db, table_id):
split_id = table_id.split("_")
doc_json = db.get_doc_json(split_id[0])
page = WikiPage(doc_json["title"], doc_json)
tables = page.get_tables()
result = []
for i, table in enumerate(tables):
cell_ids = table.get_ids()
for cell_id in cell_ids:
if not "_cell_" in cell_id:
continue
splitted_cell_id = cell_id.split("_")
row = int(splitted_cell_id[-2])
if "table_{}_{}".format(i, row) in table_id:
result.append("{}_{}".format(doc_json["title"], cell_id))
return result
def get_top_sents(db, doc_ids, claim, use_tables, n_gram_min, n_gram_max, nr_of_sents):
sent_ids = []
table_ids = []
all_sents = []
all_table_rows = []
for doc_id in doc_ids:
doc_json = db.get_doc_json(doc_id)
sents = extract_sents(doc_json)
for i in range(len(sents)):
sent_ids.append("{}_sentence_{}".format(doc_json["title"], i))
all_sents += sents
if use_tables:
tables_content = extract_tables(doc_json)
for i, table_content in enumerate(tables_content):
for j in range(len(table_content)):
table_ids.append("{}_table_{}_{}".format(doc_json["title"], i, j))
all_table_rows += table_content
sent_vectorizer = TfidfVectorizer(
analyzer="word", stop_words="english", ngram_range=(n_gram_min, n_gram_max)
)
sent_wm = sent_vectorizer.fit_transform(all_sents + all_table_rows)
claim_tfidf = sent_vectorizer.transform([claim])
cosine_similarities = cosine_similarity(claim_tfidf, sent_wm).flatten()
top_sents_indices = cosine_similarities.argsort()[: -nr_of_sents - 1 : -1]
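    # argsort is ascending, so the slice above takes the indices of the
    # nr_of_sents highest cosine similarities, most similar first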
top_sents = [
sent for i, sent in enumerate(sent_ids + table_ids) if i in top_sents_indices
]
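    # row-level table ids are expanded into their individual cell ids below,
    # then the row-level ids themselves are dropped and duplicates removed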
for sent in top_sents:
if "_table_" in sent:
top_sents += expand_table_id(db, sent)
top_sents = [sent for sent in top_sents if "_table_" not in sent]
top_sents = list(set(top_sents))
return top_sents
def get_top_sents_for_claims(
db_path, top_docs_file, nr_of_sents, use_tables, n_gram_min, n_gram_max
):
db = FeverousDB(db_path)
logger.info("Loading previously retrieved docs for claims...")
top_k_docs = load_jsonl(top_docs_file)
logger.info("Finished loading top docs")
result = []
for obj in tqdm(top_k_docs):
top_sents = get_top_sents(
db,
obj["docs"],
obj["claim"],
use_tables,
n_gram_min,
n_gram_max,
nr_of_sents,
)
obj["top_sents"] = top_sents
result.append(obj)
return result
def get_top_sents_for_claim(
db_path: str,
top_k_docs: list,
claim: str,
nr_of_sents: int,
n_gram_min=1,
n_gram_max=3,
):
""" Retrieves the top sentences for a claim from the previously retrieved documents
Parameters
----------
db_path : str
The path to the database file
    top_k_docs : list
        The previously retrieved top docs for the claim
    claim : str
        The claim to retrieve supporting sentences for
nr_of_sents : int
The number of sentences to retrieve
    n_gram_min : int
        The smallest n-gram to use in the retrieval (default is 1, i.e. unigrams)
    n_gram_max : int
        The largest n-gram to use in the retrieval (default is 3, i.e. trigrams)
"""
db = FeverousDB(db_path)
use_tables = False
top_sents = get_top_sents(
db, top_k_docs, claim, use_tables, n_gram_min, n_gram_max, nr_of_sents
)
return top_sents
def main():
parser = ArgumentParser(
description="Retrieves the most similar sentences from the given documents"
)
parser.add_argument(
"--db_path", default=None, type=str, help="Path to the FEVEROUS database"
)
parser.add_argument(
"--top_docs_file",
default=None,
type=str,
help="Path to the file for the top docs predictions",
)
parser.add_argument(
"--out_file",
default=None,
type=str,
help="Path to the output jsonl file, where the top k sentences should be stored",
)
parser.add_argument(
"--nr_of_sents",
default=5,
type=int,
help="The number of sentences to retrieve from each document",
)
parser.add_argument(
"--use_tables",
default=False,
action="store_true",
help="Tells the script if it should use table content when matching",
)
parser.add_argument(
"--n_gram_min",
default=1,
type=int,
help="The lower bound of the ngrams, e.g. 1 for unigrams and 2 for bigrams",
)
parser.add_argument(
"--n_gram_max",
default=1,
type=int,
help="The upper bound of the ngrams, e.g. 1 for unigrams and 2 for bigrams",
)
args = parser.parse_args()
if not args.db_path:
raise RuntimeError("Invalid database path")
if ".db" not in args.db_path:
raise RuntimeError("The database path should include the name of the .db file")
if not args.top_docs_file:
raise RuntimeError("Invalid top docs path")
if ".jsonl" not in args.top_docs_file:
raise RuntimeError(
"The top docs path should include the name of the .jsonl file"
)
if not args.out_file:
raise RuntimeError("Invalid output file path")
if ".jsonl" not in args.out_file:
raise RuntimeError(
"The output file path should include the name of the .jsonl file"
)
logger.info(
"Retrieving top {} sentences for each claim from the retrieved docs...".format(
args.nr_of_sents
)
)
top_sents = get_top_sents_for_claims(
args.db_path,
args.top_docs_file,
args.nr_of_sents,
args.use_tables,
args.n_gram_min,
args.n_gram_max,
)
logger.info("Finished retrieving top sentences")
logger.info("Storing top sentences...")
store_jsonl(top_sents, args.out_file)
logger.info("Top sents for each claim stored in {}".format(args.out_file))
if __name__ == "__main__":
main()
|
StarcoderdataPython
|