text: string (lengths 89 to 104k)
code_tokens: list
avg_line_len: float64 (7.91 to 980)
score: float64 (0 to 630)
def read_text_from_conll_file( file_name, layer_name=LAYER_CONLL, **kwargs ):
    ''' Reads the CONLL format syntactic analysis from given file, and returns as
        a Text object.

        The Text object has been tokenized for paragraphs, sentences, words, and it
        contains syntactic analyses aligned with word spans, in the layer *layer_name*
        (by default: LAYER_CONLL);

        Attached syntactic analyses are in the format as is the output of
        utils.normalise_alignments();

        Parameters
        -----------
        file_name : str
            Name of the input file; Should contain syntactically analysed text,
            following the CONLL format;

        layer_name : str
            Name of the Text's layer in which syntactic analyses are stored;
            Defaults to 'conll_syntax';

        For other parameters, see optional parameters of the methods:

        utils.normalise_alignments():
            "rep_miss_w_dummy", "fix_selfrefs", "keep_old", "mark_root";
        maltparser_support.align_CONLL_with_Text():
            "check_tokens", "add_word_ids";
    '''
    # 1) Load conll analysed text from file
    conll_lines = []
    in_f = codecs.open(file_name, mode='r', encoding='utf-8')
    for line in in_f:
        # Skip comment lines
        if line.startswith('#'):
            continue
        conll_lines.append( line.rstrip() )
    in_f.close()
    # 2) Extract sentences and word tokens
    sentences = []
    sentence  = []
    for i, line in enumerate( conll_lines ):
        if len(line) > 0 and '\t' in line:
            features = line.split('\t')
            if len(features) != 10:
                raise Exception(' In file '+file_name+', line '+str(i)+\
                                ' with unexpected format: "'+line+'" ')
            word_id = features[0]
            token   = features[1]
            sentence.append( token )
        elif len(line)==0 or re.match('^\s+$', line):
            # End of a sentence
            if sentence:
                # (!) Use double space instead of single space in order to distinguish
                #     word-tokenizing space from the single space in the multiwords
                #     (e.g. 'Rio de Janeiro' as a single word);
                sentences.append( '  '.join(sentence) )
            sentence = []
    if sentence:
        sentences.append( '  '.join(sentence) )
    # 3) Construct the estnltk's Text
    kwargs4text = {
        # Use custom tokenization utils in order to preserve exactly the same
        # tokenization as was in the input;
        "word_tokenizer": RegexpTokenizer("  ", gaps=True),
        "sentence_tokenizer": LineTokenizer()
    }
    from estnltk.text import Text
    text = Text( '\n'.join(sentences), **kwargs4text )
    # Tokenize up to the words layer
    text.tokenize_words()
    # 4) Align syntactic analyses with the Text
    alignments = align_CONLL_with_Text( conll_lines, text, None, **kwargs )
    normalise_alignments( alignments, data_type=CONLL_DATA, **kwargs )
    # Attach alignments to the text
    text[ layer_name ] = alignments
    return text
[ "def", "read_text_from_conll_file", "(", "file_name", ",", "layer_name", "=", "LAYER_CONLL", ",", "*", "*", "kwargs", ")", ":", "# 1) Load conll analysed text from file", "conll_lines", "=", "[", "]", "in_f", "=", "codecs", ".", "open", "(", "file_name", ",", "mode", "=", "'r'", ",", "encoding", "=", "'utf-8'", ")", "for", "line", "in", "in_f", ":", "# Skip comment lines", "if", "line", ".", "startswith", "(", "'#'", ")", ":", "continue", "conll_lines", ".", "append", "(", "line", ".", "rstrip", "(", ")", ")", "in_f", ".", "close", "(", ")", "# 2) Extract sentences and word tokens", "sentences", "=", "[", "]", "sentence", "=", "[", "]", "for", "i", ",", "line", "in", "enumerate", "(", "conll_lines", ")", ":", "if", "len", "(", "line", ")", ">", "0", "and", "'\\t'", "in", "line", ":", "features", "=", "line", ".", "split", "(", "'\\t'", ")", "if", "len", "(", "features", ")", "!=", "10", ":", "raise", "Exception", "(", "' In file '", "+", "in_file", "+", "', line '", "+", "str", "(", "i", ")", "+", "' with unexpected format: \"'", "+", "line", "+", "'\" '", ")", "word_id", "=", "features", "[", "0", "]", "token", "=", "features", "[", "1", "]", "sentence", ".", "append", "(", "token", ")", "elif", "len", "(", "line", ")", "==", "0", "or", "re", ".", "match", "(", "'^\\s+$'", ",", "line", ")", ":", "# End of a sentence ", "if", "sentence", ":", "# (!) Use double space instead of single space in order to distinguish", "# word-tokenizing space from the single space in the multiwords", "# (e.g. 'Rio de Janeiro' as a single word);", "sentences", ".", "append", "(", "' '", ".", "join", "(", "sentence", ")", ")", "sentence", "=", "[", "]", "if", "sentence", ":", "sentences", ".", "append", "(", "' '", ".", "join", "(", "sentence", ")", ")", "# 3) Construct the estnltk's Text", "kwargs4text", "=", "{", "# Use custom tokenization utils in order to preserve exactly the same ", "# tokenization as was in the input;", "\"word_tokenizer\"", ":", "RegexpTokenizer", "(", "\" \"", ",", "gaps", "=", "True", ")", ",", "\"sentence_tokenizer\"", ":", "LineTokenizer", "(", ")", "}", "from", "estnltk", ".", "text", "import", "Text", "text", "=", "Text", "(", "'\\n'", ".", "join", "(", "sentences", ")", ",", "*", "*", "kwargs4text", ")", "# Tokenize up to the words layer", "text", ".", "tokenize_words", "(", ")", "# 4) Align syntactic analyses with the Text", "alignments", "=", "align_CONLL_with_Text", "(", "conll_lines", ",", "text", ",", "None", ",", "*", "*", "kwargs", ")", "normalise_alignments", "(", "alignments", ",", "data_type", "=", "CONLL_DATA", ",", "*", "*", "kwargs", ")", "# Attach alignments to the text", "text", "[", "layer_name", "]", "=", "alignments", "return", "text" ]
40.012658
20.924051
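The CONLL reader above rejects any token line that does not split into exactly 10 tab-separated fields. A minimal sketch of that per-line check, with field values invented purely to show the 10-column layout:

line = "1\tRio_de_Janeiro\tRio_de_Janeiro\tS\tS\tprop\t2\t@ADVL\t_\t_"
features = line.split('\t')
assert len(features) == 10          # anything else raises in the reader above
word_id, token = features[0], features[1]
print(word_id, token)               # -> 1 Rio_de_Janeiro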
def create_groups(orientations, *groups, **kwargs): """ Create groups of an orientation measurement dataset """ grouped = [] # Copy all datasets to be safe (this could be bad for # memory usage, so can be disabled). if kwargs.pop('copy', True): orientations = [copy(o) for o in orientations] for o in orientations: # Get rid of and recreate group membership o.member_of = None try: grouped += o.members for a in o.members: a.member_of = o except AttributeError: pass def find(uid): try: val = next(x for x in orientations if x.hash == uid) if val in grouped: raise GroupedPlaneError("{} is already in a group." .format(val.hash)) return val except StopIteration: raise KeyError("No measurement of with hash {} found" .format(uid)) for uid_list in groups: vals = [find(uid) for uid in uid_list] o = GroupedOrientation(*vals, **kwargs) orientations.append(o) return orientations
[ "def", "create_groups", "(", "orientations", ",", "*", "groups", ",", "*", "*", "kwargs", ")", ":", "grouped", "=", "[", "]", "# Copy all datasets to be safe (this could be bad for", "# memory usage, so can be disabled).", "if", "kwargs", ".", "pop", "(", "'copy'", ",", "True", ")", ":", "orientations", "=", "[", "copy", "(", "o", ")", "for", "o", "in", "orientations", "]", "for", "o", "in", "orientations", ":", "# Get rid of and recreate group membership", "o", ".", "member_of", "=", "None", "try", ":", "grouped", "+=", "o", ".", "members", "for", "a", "in", "o", ".", "members", ":", "a", ".", "member_of", "=", "o", "except", "AttributeError", ":", "pass", "def", "find", "(", "uid", ")", ":", "try", ":", "val", "=", "next", "(", "x", "for", "x", "in", "orientations", "if", "x", ".", "hash", "==", "uid", ")", "if", "val", "in", "grouped", ":", "raise", "GroupedPlaneError", "(", "\"{} is already in a group.\"", ".", "format", "(", "val", ".", "hash", ")", ")", "return", "val", "except", "StopIteration", ":", "raise", "KeyError", "(", "\"No measurement of with hash {} found\"", ".", "format", "(", "uid", ")", ")", "for", "uid_list", "in", "groups", ":", "vals", "=", "[", "find", "(", "uid", ")", "for", "uid", "in", "uid_list", "]", "o", "=", "GroupedOrientation", "(", "*", "vals", ",", "*", "*", "kwargs", ")", "orientations", ".", "append", "(", "o", ")", "return", "orientations" ]
31.189189
16.594595
def _SkipFieldContents(tokenizer): """Skips over contents (value or message) of a field. Args: tokenizer: A tokenizer to parse the field name and values. """ # Try to guess the type of this field. # If this field is not a message, there should be a ":" between the # field name and the field value and also the field value should not # start with "{" or "<" which indicates the beginning of a message body. # If there is no ":" or there is a "{" or "<" after ":", this field has # to be a message or the input is ill-formed. if tokenizer.TryConsume(':') and not tokenizer.LookingAt( '{') and not tokenizer.LookingAt('<'): _SkipFieldValue(tokenizer) else: _SkipFieldMessage(tokenizer)
[ "def", "_SkipFieldContents", "(", "tokenizer", ")", ":", "# Try to guess the type of this field.", "# If this field is not a message, there should be a \":\" between the", "# field name and the field value and also the field value should not", "# start with \"{\" or \"<\" which indicates the beginning of a message body.", "# If there is no \":\" or there is a \"{\" or \"<\" after \":\", this field has", "# to be a message or the input is ill-formed.", "if", "tokenizer", ".", "TryConsume", "(", "':'", ")", "and", "not", "tokenizer", ".", "LookingAt", "(", "'{'", ")", "and", "not", "tokenizer", ".", "LookingAt", "(", "'<'", ")", ":", "_SkipFieldValue", "(", "tokenizer", ")", "else", ":", "_SkipFieldMessage", "(", "tokenizer", ")" ]
41.647059
18.117647
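The comment in _SkipFieldContents describes a small disambiguation rule: a ':' that is not followed by '{' or '<' marks a scalar value, otherwise the field is a nested message. A standalone toy illustration of that rule (not the protobuf tokenizer itself):

def is_scalar_field(field_text):
    # Strip the field name, then apply the rule from the comment above.
    rest = field_text.lstrip()
    i = 0
    while i < len(rest) and (rest[i].isalnum() or rest[i] in '_.[]'):
        i += 1
    rest = rest[i:].lstrip()
    if not rest.startswith(':'):
        return False                        # message body without ':'
    after = rest[1:].lstrip()
    return not after.startswith(('{', '<'))

print(is_scalar_field('foo: 42'))           # -> True  (scalar value)
print(is_scalar_field('bar { baz: 1 }'))    # -> False (message body)
print(is_scalar_field('bar: < baz: 1 >'))   # -> False (message body)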
def bulk_modify(self, *filters_or_records, **kwargs): """Shortcut to bulk modify records .. versionadded:: 2.17.0 Args: *filters_or_records (tuple) or (Record): Either a list of Records, or a list of filters. Keyword Args: values (dict): Dictionary of one or more 'field_name': 'new_value' pairs to update Notes: Requires Swimlane 2.17+ Examples: :: # Bulk update records by filter app.records.bulk_modify( # Query filters ('Field_1', 'equals', value1), ('Field_2', 'equals', value2), ... # New values for records values={ "Field_3": value3, "Field_4": value4, ... } ) # Bulk update records record1 = app.records.get(tracking_id='APP-1') record2 = app.records.get(tracking_id='APP-2') record3 = app.records.get(tracking_id='APP-3') app.records.bulk_modify(record1, record2, record3, values={"Field_Name": 'new value'}) Returns: :class:`string`: Bulk Modify Job ID """ values = kwargs.pop('values', None) if kwargs: raise ValueError('Unexpected arguments: {}'.format(kwargs)) if not values: raise ValueError('Must provide "values" as keyword argument') if not isinstance(values, dict): raise ValueError("values parameter must be dict of {'field_name': 'update_value'} pairs") _type = validate_filters_or_records(filters_or_records) request_payload = {} record_stub = record_factory(self._app) # build record_id list if _type is Record: request_payload['recordIds'] = [record.id for record in filters_or_records] # build filters else: filters = [] for filter_tuples in filters_or_records: field_name = record_stub.get_field(filter_tuples[0]) filters.append({ "fieldId": field_name.id, "filterType": filter_tuples[1], "value": field_name.get_report(filter_tuples[2]) }) request_payload['filters'] = filters # Ensure all values are wrapped in a bulk modification operation, defaulting to Replace if not provided for # backwards compatibility for field_name in list(values.keys()): modification_operation = values[field_name] if not isinstance(modification_operation, _BulkModificationOperation): values[field_name] = Replace(modification_operation) # build modifications modifications = [] for field_name, modification_operation in values.items(): # Lookup target field modification_field = record_stub.get_field(field_name) if not modification_field.bulk_modify_support: raise ValueError("Field '{}' of Type '{}', is not supported for bulk modify".format( field_name, modification_field.__class__.__name__ )) modifications.append({ "fieldId": { "value": modification_field.id, "type": "id" }, "value": modification_field.get_bulk_modify(modification_operation.value), "type": modification_operation.type }) request_payload['modifications'] = modifications response = self._swimlane.request('put', "app/{0}/record/batch".format(self._app.id), json=request_payload) # Update records if instances were used to submit bulk modify request after request was successful if _type is Record: for record in filters_or_records: for field_name, modification_operation in six.iteritems(values): record[field_name] = modification_operation.value return response.text
[ "def", "bulk_modify", "(", "self", ",", "*", "filters_or_records", ",", "*", "*", "kwargs", ")", ":", "values", "=", "kwargs", ".", "pop", "(", "'values'", ",", "None", ")", "if", "kwargs", ":", "raise", "ValueError", "(", "'Unexpected arguments: {}'", ".", "format", "(", "kwargs", ")", ")", "if", "not", "values", ":", "raise", "ValueError", "(", "'Must provide \"values\" as keyword argument'", ")", "if", "not", "isinstance", "(", "values", ",", "dict", ")", ":", "raise", "ValueError", "(", "\"values parameter must be dict of {'field_name': 'update_value'} pairs\"", ")", "_type", "=", "validate_filters_or_records", "(", "filters_or_records", ")", "request_payload", "=", "{", "}", "record_stub", "=", "record_factory", "(", "self", ".", "_app", ")", "# build record_id list", "if", "_type", "is", "Record", ":", "request_payload", "[", "'recordIds'", "]", "=", "[", "record", ".", "id", "for", "record", "in", "filters_or_records", "]", "# build filters", "else", ":", "filters", "=", "[", "]", "for", "filter_tuples", "in", "filters_or_records", ":", "field_name", "=", "record_stub", ".", "get_field", "(", "filter_tuples", "[", "0", "]", ")", "filters", ".", "append", "(", "{", "\"fieldId\"", ":", "field_name", ".", "id", ",", "\"filterType\"", ":", "filter_tuples", "[", "1", "]", ",", "\"value\"", ":", "field_name", ".", "get_report", "(", "filter_tuples", "[", "2", "]", ")", "}", ")", "request_payload", "[", "'filters'", "]", "=", "filters", "# Ensure all values are wrapped in a bulk modification operation, defaulting to Replace if not provided for", "# backwards compatibility", "for", "field_name", "in", "list", "(", "values", ".", "keys", "(", ")", ")", ":", "modification_operation", "=", "values", "[", "field_name", "]", "if", "not", "isinstance", "(", "modification_operation", ",", "_BulkModificationOperation", ")", ":", "values", "[", "field_name", "]", "=", "Replace", "(", "modification_operation", ")", "# build modifications", "modifications", "=", "[", "]", "for", "field_name", ",", "modification_operation", "in", "values", ".", "items", "(", ")", ":", "# Lookup target field", "modification_field", "=", "record_stub", ".", "get_field", "(", "field_name", ")", "if", "not", "modification_field", ".", "bulk_modify_support", ":", "raise", "ValueError", "(", "\"Field '{}' of Type '{}', is not supported for bulk modify\"", ".", "format", "(", "field_name", ",", "modification_field", ".", "__class__", ".", "__name__", ")", ")", "modifications", ".", "append", "(", "{", "\"fieldId\"", ":", "{", "\"value\"", ":", "modification_field", ".", "id", ",", "\"type\"", ":", "\"id\"", "}", ",", "\"value\"", ":", "modification_field", ".", "get_bulk_modify", "(", "modification_operation", ".", "value", ")", ",", "\"type\"", ":", "modification_operation", ".", "type", "}", ")", "request_payload", "[", "'modifications'", "]", "=", "modifications", "response", "=", "self", ".", "_swimlane", ".", "request", "(", "'put'", ",", "\"app/{0}/record/batch\"", ".", "format", "(", "self", ".", "_app", ".", "id", ")", ",", "json", "=", "request_payload", ")", "# Update records if instances were used to submit bulk modify request after request was successful", "if", "_type", "is", "Record", ":", "for", "record", "in", "filters_or_records", ":", "for", "field_name", ",", "modification_operation", "in", "six", ".", "iteritems", "(", "values", ")", ":", "record", "[", "field_name", "]", "=", "modification_operation", ".", "value", "return", "response", ".", "text" ]
36.910714
24.4375
def create_parser(self): """Create the CLI parser.""" parser = argparse.ArgumentParser( description=PROGRAM_DESCRIPTION, formatter_class=argparse.RawDescriptionHelpFormatter, epilog=textwrap.dedent(PROGRAM_EPILOG)) parser.add_argument( 'filename', metavar='FILE_NAME', nargs='*', help='the I/O file name') subparsers = parser.add_subparsers( dest='parser', title='markdown parser') subparsers.required = True # github + cmark + gitlab + commonmarker. github = subparsers.add_parser( 'github', aliases=['cmark', 'gitlab', 'commonmarker'], description='Use Commonmark rules to generate an output. If no \ option is selected, the default output will be an \ unordered list with the respective default values \ as listed below') megroup = github.add_mutually_exclusive_group() megroup.add_argument( '-u', '--unordered-list-marker', choices=md_parser['github']['list']['unordered']['bullet_markers'], nargs='?', const=md_parser['github']['list']['unordered']['default_marker'], help='set the marker and enables unordered list. Defaults to ' + md_parser['github']['list']['unordered']['default_marker']) megroup.add_argument( '-o', '--ordered-list-marker', choices=md_parser['github']['list']['ordered']['closing_markers'], nargs='?', const=md_parser['github']['list']['ordered'] ['default_closing_marker'], help='set the marker and enables ordered lists. Defaults to ' + md_parser['github']['list']['ordered']['default_closing_marker']) github.add_argument( '-l', '--header-levels', choices=[ str(i) for i in range(1, md_parser['github']['header']['max_levels'] + 1) ], nargs='?', const=str(md_parser['github']['header']['default_keep_levels']), help='set the maximum level of headers to be considered as part \ of the TOC. Defaults to ' + str( md_parser['github']['header']['default_keep_levels'])) github.set_defaults( header_levels=md_parser['github']['header']['default_keep_levels']) # Redcarpet. redcarpet = subparsers.add_parser( 'redcarpet', description='Use Redcarpet rules to generate an output. If no \ option is selected, the default output will be an \ unordered list with the respective default values \ as listed below. Gitlab rules are the same as \ Redcarpet except that conflicts are avoided with \ duplicate headers.') megroup = redcarpet.add_mutually_exclusive_group() megroup.add_argument( '-u', '--unordered-list-marker', choices=md_parser['redcarpet']['list']['unordered'] ['bullet_markers'], nargs='?', const=md_parser['redcarpet']['list']['unordered'] ['default_marker'], help='set the marker and enables unordered list. Defaults to ' + md_parser['redcarpet']['list']['unordered']['default_marker']) megroup.add_argument( '-o', '--ordered-list-marker', choices=md_parser['redcarpet']['list']['ordered'] ['closing_markers'], nargs='?', const=md_parser['redcarpet']['list']['ordered'] ['default_closing_marker'], help='set the marker and enables ordered lists. Defaults to ' + md_parser['redcarpet']['list']['ordered']['default_closing_marker'] ) redcarpet.add_argument( '-l', '--header-levels', choices=[ str(i) for i in range( 1, md_parser['redcarpet']['header']['max_levels'] + 1) ], nargs='?', const=str(md_parser['redcarpet']['header']['default_keep_levels']), help='set the maximum level of headers to be considered as part \ of the TOC. 
Defaults to ' + str( md_parser['redcarpet']['header']['default_keep_levels'])) redcarpet.set_defaults(header_levels=md_parser['redcarpet']['header'] ['default_keep_levels']) c_or_i = parser.add_mutually_exclusive_group() c_or_i.add_argument( '-c', '--no-list-coherence', action='store_true', help='avoids checking for TOC list coherence') c_or_i.add_argument( '-i', '--no-indentation', action='store_true', help='avoids adding indentations to the TOC') parser.add_argument( '-l', '--no-links', action='store_true', help='avoids adding links to the TOC') parser.add_argument( '-m', '--toc-marker', metavar='TOC_MARKER', help='set the string to be used as the marker for positioning the \ table of contents. Defaults to ' + common_defaults['toc_marker']) parser.add_argument( '-p', '--in-place', action='store_true', help='overwrite the input file') parser.add_argument( '-v', '--version', action='version', version=VERSION_NAME + ' ' + VERSION_NUMBER) parser.set_defaults(toc_marker=common_defaults['toc_marker']) parser.set_defaults(func=CliToApi().write_toc) return parser
[ "def", "create_parser", "(", "self", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "PROGRAM_DESCRIPTION", ",", "formatter_class", "=", "argparse", ".", "RawDescriptionHelpFormatter", ",", "epilog", "=", "textwrap", ".", "dedent", "(", "PROGRAM_EPILOG", ")", ")", "parser", ".", "add_argument", "(", "'filename'", ",", "metavar", "=", "'FILE_NAME'", ",", "nargs", "=", "'*'", ",", "help", "=", "'the I/O file name'", ")", "subparsers", "=", "parser", ".", "add_subparsers", "(", "dest", "=", "'parser'", ",", "title", "=", "'markdown parser'", ")", "subparsers", ".", "required", "=", "True", "# github + cmark + gitlab + commonmarker.", "github", "=", "subparsers", ".", "add_parser", "(", "'github'", ",", "aliases", "=", "[", "'cmark'", ",", "'gitlab'", ",", "'commonmarker'", "]", ",", "description", "=", "'Use Commonmark rules to generate an output. If no \\\n option is selected, the default output will be an \\\n unordered list with the respective default values \\\n as listed below'", ")", "megroup", "=", "github", ".", "add_mutually_exclusive_group", "(", ")", "megroup", ".", "add_argument", "(", "'-u'", ",", "'--unordered-list-marker'", ",", "choices", "=", "md_parser", "[", "'github'", "]", "[", "'list'", "]", "[", "'unordered'", "]", "[", "'bullet_markers'", "]", ",", "nargs", "=", "'?'", ",", "const", "=", "md_parser", "[", "'github'", "]", "[", "'list'", "]", "[", "'unordered'", "]", "[", "'default_marker'", "]", ",", "help", "=", "'set the marker and enables unordered list. Defaults to '", "+", "md_parser", "[", "'github'", "]", "[", "'list'", "]", "[", "'unordered'", "]", "[", "'default_marker'", "]", ")", "megroup", ".", "add_argument", "(", "'-o'", ",", "'--ordered-list-marker'", ",", "choices", "=", "md_parser", "[", "'github'", "]", "[", "'list'", "]", "[", "'ordered'", "]", "[", "'closing_markers'", "]", ",", "nargs", "=", "'?'", ",", "const", "=", "md_parser", "[", "'github'", "]", "[", "'list'", "]", "[", "'ordered'", "]", "[", "'default_closing_marker'", "]", ",", "help", "=", "'set the marker and enables ordered lists. Defaults to '", "+", "md_parser", "[", "'github'", "]", "[", "'list'", "]", "[", "'ordered'", "]", "[", "'default_closing_marker'", "]", ")", "github", ".", "add_argument", "(", "'-l'", ",", "'--header-levels'", ",", "choices", "=", "[", "str", "(", "i", ")", "for", "i", "in", "range", "(", "1", ",", "md_parser", "[", "'github'", "]", "[", "'header'", "]", "[", "'max_levels'", "]", "+", "1", ")", "]", ",", "nargs", "=", "'?'", ",", "const", "=", "str", "(", "md_parser", "[", "'github'", "]", "[", "'header'", "]", "[", "'default_keep_levels'", "]", ")", ",", "help", "=", "'set the maximum level of headers to be considered as part \\\n of the TOC. Defaults to '", "+", "str", "(", "md_parser", "[", "'github'", "]", "[", "'header'", "]", "[", "'default_keep_levels'", "]", ")", ")", "github", ".", "set_defaults", "(", "header_levels", "=", "md_parser", "[", "'github'", "]", "[", "'header'", "]", "[", "'default_keep_levels'", "]", ")", "# Redcarpet.", "redcarpet", "=", "subparsers", ".", "add_parser", "(", "'redcarpet'", ",", "description", "=", "'Use Redcarpet rules to generate an output. If no \\\n option is selected, the default output will be an \\\n unordered list with the respective default values \\\n as listed below. 
Gitlab rules are the same as \\\n Redcarpet except that conflicts are avoided with \\\n duplicate headers.'", ")", "megroup", "=", "redcarpet", ".", "add_mutually_exclusive_group", "(", ")", "megroup", ".", "add_argument", "(", "'-u'", ",", "'--unordered-list-marker'", ",", "choices", "=", "md_parser", "[", "'redcarpet'", "]", "[", "'list'", "]", "[", "'unordered'", "]", "[", "'bullet_markers'", "]", ",", "nargs", "=", "'?'", ",", "const", "=", "md_parser", "[", "'redcarpet'", "]", "[", "'list'", "]", "[", "'unordered'", "]", "[", "'default_marker'", "]", ",", "help", "=", "'set the marker and enables unordered list. Defaults to '", "+", "md_parser", "[", "'redcarpet'", "]", "[", "'list'", "]", "[", "'unordered'", "]", "[", "'default_marker'", "]", ")", "megroup", ".", "add_argument", "(", "'-o'", ",", "'--ordered-list-marker'", ",", "choices", "=", "md_parser", "[", "'redcarpet'", "]", "[", "'list'", "]", "[", "'ordered'", "]", "[", "'closing_markers'", "]", ",", "nargs", "=", "'?'", ",", "const", "=", "md_parser", "[", "'redcarpet'", "]", "[", "'list'", "]", "[", "'ordered'", "]", "[", "'default_closing_marker'", "]", ",", "help", "=", "'set the marker and enables ordered lists. Defaults to '", "+", "md_parser", "[", "'redcarpet'", "]", "[", "'list'", "]", "[", "'ordered'", "]", "[", "'default_closing_marker'", "]", ")", "redcarpet", ".", "add_argument", "(", "'-l'", ",", "'--header-levels'", ",", "choices", "=", "[", "str", "(", "i", ")", "for", "i", "in", "range", "(", "1", ",", "md_parser", "[", "'redcarpet'", "]", "[", "'header'", "]", "[", "'max_levels'", "]", "+", "1", ")", "]", ",", "nargs", "=", "'?'", ",", "const", "=", "str", "(", "md_parser", "[", "'redcarpet'", "]", "[", "'header'", "]", "[", "'default_keep_levels'", "]", ")", ",", "help", "=", "'set the maximum level of headers to be considered as part \\\n of the TOC. Defaults to '", "+", "str", "(", "md_parser", "[", "'redcarpet'", "]", "[", "'header'", "]", "[", "'default_keep_levels'", "]", ")", ")", "redcarpet", ".", "set_defaults", "(", "header_levels", "=", "md_parser", "[", "'redcarpet'", "]", "[", "'header'", "]", "[", "'default_keep_levels'", "]", ")", "c_or_i", "=", "parser", ".", "add_mutually_exclusive_group", "(", ")", "c_or_i", ".", "add_argument", "(", "'-c'", ",", "'--no-list-coherence'", ",", "action", "=", "'store_true'", ",", "help", "=", "'avoids checking for TOC list coherence'", ")", "c_or_i", ".", "add_argument", "(", "'-i'", ",", "'--no-indentation'", ",", "action", "=", "'store_true'", ",", "help", "=", "'avoids adding indentations to the TOC'", ")", "parser", ".", "add_argument", "(", "'-l'", ",", "'--no-links'", ",", "action", "=", "'store_true'", ",", "help", "=", "'avoids adding links to the TOC'", ")", "parser", ".", "add_argument", "(", "'-m'", ",", "'--toc-marker'", ",", "metavar", "=", "'TOC_MARKER'", ",", "help", "=", "'set the string to be used as the marker for positioning the \\\n table of contents. 
Defaults to '", "+", "common_defaults", "[", "'toc_marker'", "]", ")", "parser", ".", "add_argument", "(", "'-p'", ",", "'--in-place'", ",", "action", "=", "'store_true'", ",", "help", "=", "'overwrite the input file'", ")", "parser", ".", "add_argument", "(", "'-v'", ",", "'--version'", ",", "action", "=", "'version'", ",", "version", "=", "VERSION_NAME", "+", "' '", "+", "VERSION_NUMBER", ")", "parser", ".", "set_defaults", "(", "toc_marker", "=", "common_defaults", "[", "'toc_marker'", "]", ")", "parser", ".", "set_defaults", "(", "func", "=", "CliToApi", "(", ")", ".", "write_toc", ")", "return", "parser" ]
40.606897
19.668966
def get_pseudo_salt(length, *args): """ generate a pseudo salt (used, if user is wrong) """ temp = "".join([arg for arg in args]) return hash_hexdigest(temp)[:length]
[ "def", "get_pseudo_salt", "(", "length", ",", "*", "args", ")", ":", "temp", "=", "\"\"", ".", "join", "(", "[", "arg", "for", "arg", "in", "args", "]", ")", "return", "hash_hexdigest", "(", "temp", ")", "[", ":", "length", "]" ]
30.166667
2.833333
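get_pseudo_salt simply hashes the concatenated arguments and truncates the digest. hash_hexdigest is a project helper that is not shown here; the sketch below stubs it with hashlib.sha1 so the example is self-contained:

import hashlib

def hash_hexdigest(txt):                     # stand-in for the project's helper
    return hashlib.sha1(txt.encode("utf-8")).hexdigest()

def get_pseudo_salt(length, *args):
    temp = "".join([arg for arg in args])
    return hash_hexdigest(temp)[:length]

print(get_pseudo_salt(8, "unknown_user", "some_password"))   # 8 hex characters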
def _peer_bfd_tx(self, **kwargs): """Return the BFD minimum transmit interval XML. You should not use this method. You probably want `BGP.bfd`. Args: peer_ip (str): Peer IPv4 address for BFD setting. min_tx (str): BFD transmit interval in milliseconds (300, 500, etc) delete (bool): Remove the configuration if ``True``. Returns: XML to be passed to the switch. Raises: None """ method_name = 'rbridge_id_router_router_bgp_router_bgp_attributes_' \ 'neighbor_neighbor_ips_neighbor_addr_bfd_interval_min_tx' bfd_tx = getattr(self._rbridge, method_name) config = bfd_tx(**kwargs) if kwargs['delete']: tag = 'min-tx' config.find('.//*%s' % tag).set('operation', 'delete') return config
[ "def", "_peer_bfd_tx", "(", "self", ",", "*", "*", "kwargs", ")", ":", "method_name", "=", "'rbridge_id_router_router_bgp_router_bgp_attributes_'", "'neighbor_neighbor_ips_neighbor_addr_bfd_interval_min_tx'", "bfd_tx", "=", "getattr", "(", "self", ".", "_rbridge", ",", "method_name", ")", "config", "=", "bfd_tx", "(", "*", "*", "kwargs", ")", "if", "kwargs", "[", "'delete'", "]", ":", "tag", "=", "'min-tx'", "config", ".", "find", "(", "'.//*%s'", "%", "tag", ")", ".", "set", "(", "'operation'", ",", "'delete'", ")", "return", "config" ]
34.56
21
def put(self, source, rel_path, metadata=None): '''Copy a file to the repository Args: source: Absolute path to the source file, or a file-like object rel_path: path relative to the root of the repository ''' # This case should probably be deprecated. if not isinstance(rel_path, basestring): rel_path = rel_path.cache_key sink = self.put_stream(rel_path, metadata=metadata) try: copy_file_or_flo(source, sink) except (KeyboardInterrupt, SystemExit): path_ = self.path(rel_path) if os.path.exists(path_): os.remove(path_) raise sink.close() return os.path.join(self.cache_dir, rel_path)
[ "def", "put", "(", "self", ",", "source", ",", "rel_path", ",", "metadata", "=", "None", ")", ":", "# This case should probably be deprecated.", "if", "not", "isinstance", "(", "rel_path", ",", "basestring", ")", ":", "rel_path", "=", "rel_path", ".", "cache_key", "sink", "=", "self", ".", "put_stream", "(", "rel_path", ",", "metadata", "=", "metadata", ")", "try", ":", "copy_file_or_flo", "(", "source", ",", "sink", ")", "except", "(", "KeyboardInterrupt", ",", "SystemExit", ")", ":", "path_", "=", "self", ".", "path", "(", "rel_path", ")", "if", "os", ".", "path", ".", "exists", "(", "path_", ")", ":", "os", ".", "remove", "(", "path_", ")", "raise", "sink", ".", "close", "(", ")", "return", "os", ".", "path", ".", "join", "(", "self", ".", "cache_dir", ",", "rel_path", ")" ]
28.769231
21
def reorder_image(dst_order, src_arr, src_order):
    """Reorder src_arr, with order of color planes in src_order, as dst_order.
    """
    depth = src_arr.shape[2]
    if depth != len(src_order):
        raise ValueError("src_order (%s) does not match array depth (%d)" % (
            src_order, depth))

    bands = []
    if dst_order == src_order:
        return np.ascontiguousarray(src_arr)

    elif 'A' not in dst_order or 'A' in src_order:
        # <-- we don't have to add an alpha plane, just create a new view
        idx = np.array([src_order.index(c) for c in dst_order])
        return np.ascontiguousarray(src_arr[..., idx])

    else:
        # <-- dst order requires missing alpha channel
        indexes = [src_order.index(c) for c in dst_order.replace('A', '')]
        bands = [src_arr[..., idx, np.newaxis] for idx in indexes]

        ht, wd = src_arr.shape[:2]
        dst_type = src_arr.dtype
        dst_max_val = np.iinfo(dst_type).max
        alpha = np.full((ht, wd, 1), dst_max_val, dtype=dst_type)
        bands.insert(dst_order.index('A'), alpha)

        return np.concatenate(bands, axis=-1)
[ "def", "reorder_image", "(", "dst_order", ",", "src_arr", ",", "src_order", ")", ":", "depth", "=", "src_arr", ".", "shape", "[", "2", "]", "if", "depth", "!=", "len", "(", "src_order", ")", ":", "raise", "ValueError", "(", "\"src_order (%s) does not match array depth (%d)\"", "%", "(", "src_order", ",", "depth", ")", ")", "bands", "=", "[", "]", "if", "dst_order", "==", "src_order", ":", "return", "np", ".", "ascontiguousarray", "(", "src_arr", ")", "elif", "'A'", "not", "in", "dst_order", "or", "'A'", "in", "src_order", ":", "# <-- we don't have to add an alpha plane, just create a new view", "idx", "=", "np", ".", "array", "(", "[", "src_order", ".", "index", "(", "c", ")", "for", "c", "in", "dst_order", "]", ")", "return", "np", ".", "ascontiguousarray", "(", "src_arr", "[", "...", ",", "idx", "]", ")", "else", ":", "# <-- dst order requires missing alpha channel", "indexes", "=", "[", "src_order", ".", "index", "(", "c", ")", "for", "c", "in", "dst_order", ".", "replace", "(", "'A'", ",", "''", ")", "]", "bands", "=", "[", "src_arr", "[", "...", ",", "idx", ",", "np", ".", "newaxis", "]", "for", "idx", "in", "indexes", "]", "ht", ",", "wd", "=", "src_arr", ".", "shape", "[", ":", "2", "]", "dst_type", "=", "src_arr", ".", "dtype", "dst_max_val", "=", "np", ".", "iinfo", "(", "dst_type", ")", ".", "max", "alpha", "=", "np", ".", "full", "(", "(", "ht", ",", "wd", ",", "1", ")", ",", "dst_max_val", ",", "dtype", "=", "dst_type", ")", "bands", ".", "insert", "(", "dst_order", ".", "index", "(", "'A'", ")", ",", "alpha", ")", "return", "np", ".", "concatenate", "(", "bands", ",", "axis", "=", "-", "1", ")" ]
37.896552
18.655172
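The fast path of reorder_image builds the destination order with a single fancy-index view. A tiny self-contained numpy example of that step (a 3x3 RGB image reordered to BGR):

import numpy as np

src_order, dst_order = 'RGB', 'BGR'
src = np.zeros((3, 3, 3), dtype=np.uint8)
src[..., 0] = 255                                   # pure red in RGB
idx = np.array([src_order.index(c) for c in dst_order])
dst = np.ascontiguousarray(src[..., idx])
print(dst[0, 0])                                    # -> [  0   0 255]  (BGR)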
def draw_lines(): """ Draws a line between a set of random values """ r = numpy.random.randn(200) fig = pyplot.figure() ax = fig.add_subplot(111) ax.plot(r) ax.grid(True) pyplot.savefig(lines_filename)
[ "def", "draw_lines", "(", ")", ":", "r", "=", "numpy", ".", "random", ".", "randn", "(", "200", ")", "fig", "=", "pyplot", ".", "figure", "(", ")", "ax", "=", "fig", ".", "add_subplot", "(", "111", ")", "ax", ".", "plot", "(", "r", ")", "ax", ".", "grid", "(", "True", ")", "pyplot", ".", "savefig", "(", "lines_filename", ")" ]
19
16.666667
def decrypt_filedata(data, keys): '''Decrypts a file from Send''' # The last 16 bytes / 128 bits of data is the GCM tag # https://www.w3.org/TR/WebCryptoAPI/#aes-gcm-operations :- # 7. Let ciphertext be equal to C | T, where '|' denotes concatenation. data.seek(-16, 2) tag = data.read() # now truncate the file to only contain encrypted data data.seek(-16, 2) data.truncate() data.seek(0) plain = tempfile.NamedTemporaryFile(mode='w+b', delete=False) pbar = progbar(fileSize(data)) obj = Cryptodome.Cipher.AES.new(keys.encryptKey, Cryptodome.Cipher.AES.MODE_GCM, keys.encryptIV) prev_chunk = b'' for chunk in iter(lambda: data.read(CHUNK_SIZE), b''): plain.write(obj.decrypt(prev_chunk)) pbar.update(len(chunk)) prev_chunk = chunk plain.write(obj.decrypt_and_verify(prev_chunk, tag)) data.close() pbar.close() plain.seek(0) return plain
[ "def", "decrypt_filedata", "(", "data", ",", "keys", ")", ":", "# The last 16 bytes / 128 bits of data is the GCM tag", "# https://www.w3.org/TR/WebCryptoAPI/#aes-gcm-operations :-", "# 7. Let ciphertext be equal to C | T, where '|' denotes concatenation.", "data", ".", "seek", "(", "-", "16", ",", "2", ")", "tag", "=", "data", ".", "read", "(", ")", "# now truncate the file to only contain encrypted data", "data", ".", "seek", "(", "-", "16", ",", "2", ")", "data", ".", "truncate", "(", ")", "data", ".", "seek", "(", "0", ")", "plain", "=", "tempfile", ".", "NamedTemporaryFile", "(", "mode", "=", "'w+b'", ",", "delete", "=", "False", ")", "pbar", "=", "progbar", "(", "fileSize", "(", "data", ")", ")", "obj", "=", "Cryptodome", ".", "Cipher", ".", "AES", ".", "new", "(", "keys", ".", "encryptKey", ",", "Cryptodome", ".", "Cipher", ".", "AES", ".", "MODE_GCM", ",", "keys", ".", "encryptIV", ")", "prev_chunk", "=", "b''", "for", "chunk", "in", "iter", "(", "lambda", ":", "data", ".", "read", "(", "CHUNK_SIZE", ")", ",", "b''", ")", ":", "plain", ".", "write", "(", "obj", ".", "decrypt", "(", "prev_chunk", ")", ")", "pbar", ".", "update", "(", "len", "(", "chunk", ")", ")", "prev_chunk", "=", "chunk", "plain", ".", "write", "(", "obj", ".", "decrypt_and_verify", "(", "prev_chunk", ",", "tag", ")", ")", "data", ".", "close", "(", ")", "pbar", ".", "close", "(", ")", "plain", ".", "seek", "(", "0", ")", "return", "plain" ]
30.6
23.8
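decrypt_filedata relies on the AES-GCM layout of a Send download: the final 16 bytes are the authentication tag, everything before them is ciphertext. A sketch of just that tag-splitting step on an in-memory file (fake bytes, no decryption):

import io

data = io.BytesIO(b"ciphertext-bytes" + b"T" * 16)   # fake ciphertext + fake tag
data.seek(-16, 2)
tag = data.read()                                    # last 16 bytes: the GCM tag
data.seek(-16, 2)
data.truncate()                                      # keep only the ciphertext
data.seek(0)
print(len(tag), data.read())                         # -> 16 b'ciphertext-bytes'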
def decode(cls, phrase):
    """Calculate hexadecimal representation of the phrase.
    """
    phrase = phrase.split(" ")
    out = ""
    for i in range(len(phrase) // 3):
        word1, word2, word3 = phrase[3*i:3*i+3]
        w1 = cls.word_list.index(word1)
        w2 = cls.word_list.index(word2) % cls.n
        w3 = cls.word_list.index(word3) % cls.n
        x = w1 + cls.n * ((w2 - w1) % cls.n) + cls.n * cls.n * ((w3 - w2) % cls.n)
        out += endian_swap("%08x" % x)
    return out
[ "def", "decode", "(", "cls", ",", "phrase", ")", ":", "phrase", "=", "phrase", ".", "split", "(", "\" \"", ")", "out", "=", "\"\"", "for", "i", "in", "range", "(", "len", "(", "phrase", ")", "//", "3", ")", ":", "word1", ",", "word2", ",", "word3", "=", "phrase", "[", "3", "*", "i", ":", "3", "*", "i", "+", "3", "]", "w1", "=", "cls", ".", "word_list", ".", "index", "(", "word1", ")", "w2", "=", "cls", ".", "word_list", ".", "index", "(", "word2", ")", "%", "cls", ".", "n", "w3", "=", "cls", ".", "word_list", ".", "index", "(", "word3", ")", "%", "cls", ".", "n", "x", "=", "w1", "+", "cls", ".", "n", "*", "(", "(", "w2", "-", "w1", ")", "%", "cls", ".", "n", ")", "+", "cls", ".", "n", "*", "cls", ".", "n", "*", "(", "(", "w3", "-", "w2", ")", "%", "cls", ".", "n", ")", "out", "+=", "endian_swap", "(", "\"%08x\"", "%", "x", ")", "return", "out" ]
40.692308
11.692308
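decode() reverses a three-words-per-32-bit-word encoding using base-n arithmetic over the word list. A standalone sketch of that triplet arithmetic with a toy 8-word list (the real class uses a much larger word_list plus an endian_swap helper, both omitted here; the encoder is included only to show the round trip):

word_list = ["alpha", "bravo", "charlie", "delta",
             "echo", "foxtrot", "golf", "hotel"]
n = len(word_list)

def encode_triplet(x):
    w1 = x % n
    w2 = ((x // n) + w1) % n
    w3 = ((x // (n * n)) + w2) % n
    return [word_list[w1], word_list[w2], word_list[w3]]

def decode_triplet(words):                   # same arithmetic as decode() above
    w1 = word_list.index(words[0])
    w2 = word_list.index(words[1]) % n
    w3 = word_list.index(words[2]) % n
    return w1 + n * ((w2 - w1) % n) + n * n * ((w3 - w2) % n)

assert decode_triplet(encode_triplet(123)) == 123   # holds for any x < n**3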
def _setup_user_dir(self): """Returns user config dir, create it when it doesn't exist.""" user_dir = self._get_user_dir_path() rules_dir = user_dir.joinpath('rules') if not rules_dir.is_dir(): rules_dir.mkdir(parents=True) self.user_dir = user_dir
[ "def", "_setup_user_dir", "(", "self", ")", ":", "user_dir", "=", "self", ".", "_get_user_dir_path", "(", ")", "rules_dir", "=", "user_dir", ".", "joinpath", "(", "'rules'", ")", "if", "not", "rules_dir", ".", "is_dir", "(", ")", ":", "rules_dir", ".", "mkdir", "(", "parents", "=", "True", ")", "self", ".", "user_dir", "=", "user_dir" ]
36.75
9.875
def write(self, data): ''' Write some bytes to the transport. ''' # MUST use a lock here else gevent could raise an exception if 2 # greenlets try to write at the same time. I was hoping that # sendall() would do that blocking for me, but I guess not. May # require an eventsocket-like buffer to speed up under high load. self._write_lock.acquire() try: return super(GeventTransport, self).write(data) finally: self._write_lock.release()
[ "def", "write", "(", "self", ",", "data", ")", ":", "# MUST use a lock here else gevent could raise an exception if 2", "# greenlets try to write at the same time. I was hoping that", "# sendall() would do that blocking for me, but I guess not. May", "# require an eventsocket-like buffer to speed up under high load.", "self", ".", "_write_lock", ".", "acquire", "(", ")", "try", ":", "return", "super", "(", "GeventTransport", ",", "self", ")", ".", "write", "(", "data", ")", "finally", ":", "self", ".", "_write_lock", ".", "release", "(", ")" ]
40.692308
21.615385
def _parse_packet(rawdata):
    """ Returns a tuple (opcode, minusconf-data). opcode is None if this isn't a -conf packet."""
    if (len(rawdata) < len(_MAGIC) + 1) or (_MAGIC != rawdata[:len(_MAGIC)]):
        # Wrong protocol
        return (None, None)

    opcode = rawdata[len(_MAGIC):len(_MAGIC)+1]
    payload = rawdata[len(_MAGIC)+1:]

    return (opcode, payload)
[ "def", "_parse_packet", "(", "rawdata", ")", ":", "if", "(", "len", "(", "rawdata", ")", "<", "len", "(", "_MAGIC", ")", "+", "1", ")", "or", "(", "_MAGIC", "!=", "rawdata", "[", ":", "len", "(", "_MAGIC", ")", "]", ")", ":", "# Wrong protocol", "return", "(", "None", ",", "None", ")", "opcode", "=", "rawdata", "[", "len", "(", "_MAGIC", ")", ":", "len", "(", "_MAGIC", ")", "+", "1", "]", "payload", "=", "rawdata", "[", "len", "(", "_MAGIC", ")", "+", "1", ":", "]", "return", "(", "opcode", ",", "payload", ")" ]
33.090909
20.090909
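_parse_packet assumes the framing: a fixed magic prefix, a one-byte opcode, then the payload. A self-contained sketch with a placeholder magic value (the real module defines its own _MAGIC):

_MAGIC = b"-CONF"                            # placeholder; not the module's real value

def parse(raw):
    if len(raw) < len(_MAGIC) + 1 or not raw.startswith(_MAGIC):
        return (None, None)                  # wrong protocol
    return (raw[len(_MAGIC):len(_MAGIC) + 1], raw[len(_MAGIC) + 1:])

print(parse(_MAGIC + b"A" + b"payload"))     # -> (b'A', b'payload')
print(parse(b"garbage"))                     # -> (None, None)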
def comment (self, s, **args): """Write CSV comment.""" self.writeln(s=u"# %s" % s, **args)
[ "def", "comment", "(", "self", ",", "s", ",", "*", "*", "args", ")", ":", "self", ".", "writeln", "(", "s", "=", "u\"# %s\"", "%", "s", ",", "*", "*", "args", ")" ]
35
4.333333
def alignment_to_partials(alignment, missing_data=None): """ Generate a partials dictionary from a treeCl.Alignment """ partials_dict = {} for (name, sequence) in alignment.get_sequences(): datatype = 'dna' if alignment.is_dna() else 'protein' partials_dict[name] = seq_to_partials(sequence, datatype) if missing_data is not None: l = len(alignment) for name in missing_data: if name not in partials_dict: partials_dict[name] = seq_to_partials('-'*l, datatype) return partials_dict
[ "def", "alignment_to_partials", "(", "alignment", ",", "missing_data", "=", "None", ")", ":", "partials_dict", "=", "{", "}", "for", "(", "name", ",", "sequence", ")", "in", "alignment", ".", "get_sequences", "(", ")", ":", "datatype", "=", "'dna'", "if", "alignment", ".", "is_dna", "(", ")", "else", "'protein'", "partials_dict", "[", "name", "]", "=", "seq_to_partials", "(", "sequence", ",", "datatype", ")", "if", "missing_data", "is", "not", "None", ":", "l", "=", "len", "(", "alignment", ")", "for", "name", "in", "missing_data", ":", "if", "name", "not", "in", "partials_dict", ":", "partials_dict", "[", "name", "]", "=", "seq_to_partials", "(", "'-'", "*", "l", ",", "datatype", ")", "return", "partials_dict" ]
42.307692
16.153846
def __substitute_replace_pairs(self): """ Substitutes all replace pairs in the source of the stored routine. """ self._set_magic_constants() routine_source = [] i = 0 for line in self._routine_source_code_lines: self._replace['__LINE__'] = "'%d'" % (i + 1) for search, replace in self._replace.items(): tmp = re.findall(search, line, re.IGNORECASE) if tmp: line = line.replace(tmp[0], replace) routine_source.append(line) i += 1 self._routine_source_code = "\n".join(routine_source)
[ "def", "__substitute_replace_pairs", "(", "self", ")", ":", "self", ".", "_set_magic_constants", "(", ")", "routine_source", "=", "[", "]", "i", "=", "0", "for", "line", "in", "self", ".", "_routine_source_code_lines", ":", "self", ".", "_replace", "[", "'__LINE__'", "]", "=", "\"'%d'\"", "%", "(", "i", "+", "1", ")", "for", "search", ",", "replace", "in", "self", ".", "_replace", ".", "items", "(", ")", ":", "tmp", "=", "re", ".", "findall", "(", "search", ",", "line", ",", "re", ".", "IGNORECASE", ")", "if", "tmp", ":", "line", "=", "line", ".", "replace", "(", "tmp", "[", "0", "]", ",", "replace", ")", "routine_source", ".", "append", "(", "line", ")", "i", "+=", "1", "self", ".", "_routine_source_code", "=", "\"\\n\"", ".", "join", "(", "routine_source", ")" ]
35.055556
16.944444
def _name_with_flags(self, include_restricted, title=None): """Generate the name with flags.""" name = "Special: " if self.special else "" name += self.name if title: name += " - {}".format(title) if include_restricted and self.restricted: name += " (R)" name += " (BB)" if self.both_blocks else "" name += " (A)" if self.administrative else "" name += " (S)" if self.sticky else "" name += " (Deleted)" if self.deleted else "" return name
[ "def", "_name_with_flags", "(", "self", ",", "include_restricted", ",", "title", "=", "None", ")", ":", "name", "=", "\"Special: \"", "if", "self", ".", "special", "else", "\"\"", "name", "+=", "self", ".", "name", "if", "title", ":", "name", "+=", "\" - {}\"", ".", "format", "(", "title", ")", "if", "include_restricted", "and", "self", ".", "restricted", ":", "name", "+=", "\" (R)\"", "name", "+=", "\" (BB)\"", "if", "self", ".", "both_blocks", "else", "\"\"", "name", "+=", "\" (A)\"", "if", "self", ".", "administrative", "else", "\"\"", "name", "+=", "\" (S)\"", "if", "self", ".", "sticky", "else", "\"\"", "name", "+=", "\" (Deleted)\"", "if", "self", ".", "deleted", "else", "\"\"", "return", "name" ]
40.846154
11.846154
def classify_intersection(intersection, edge_nodes1, edge_nodes2): r"""Determine which curve is on the "inside of the intersection". .. note:: This is a helper used only by :meth:`.Surface.intersect`. This is intended to be a helper for forming a :class:`.CurvedPolygon` from the edge intersections of two :class:`.Surface`-s. In order to move from one intersection to another (or to the end of an edge), the interior edge must be determined at the point of intersection. The "typical" case is on the interior of both edges: .. image:: ../images/classify_intersection1.png :align: center .. testsetup:: classify-intersection1, classify-intersection2, classify-intersection3, classify-intersection4, classify-intersection5, classify-intersection6, classify-intersection7, classify-intersection8, classify-intersection9 import numpy as np import bezier from bezier import _curve_helpers from bezier._intersection_helpers import Intersection from bezier._surface_helpers import classify_intersection def hodograph(curve, s): return _curve_helpers.evaluate_hodograph( s, curve._nodes) def curvature(curve, s): nodes = curve._nodes tangent = _curve_helpers.evaluate_hodograph( s, nodes) return _curve_helpers.get_curvature( nodes, tangent, s) .. doctest:: classify-intersection1 :options: +NORMALIZE_WHITESPACE >>> nodes1 = np.asfortranarray([ ... [1.0, 1.75, 2.0], ... [0.0, 0.25, 1.0], ... ]) >>> curve1 = bezier.Curve(nodes1, degree=2) >>> nodes2 = np.asfortranarray([ ... [0.0, 1.6875, 2.0], ... [0.0, 0.0625, 0.5], ... ]) >>> curve2 = bezier.Curve(nodes2, degree=2) >>> s, t = 0.25, 0.5 >>> curve1.evaluate(s) == curve2.evaluate(t) array([[ True], [ True]]) >>> tangent1 = hodograph(curve1, s) >>> tangent1 array([[1.25], [0.75]]) >>> tangent2 = hodograph(curve2, t) >>> tangent2 array([[2. ], [0.5]]) >>> intersection = Intersection(0, s, 0, t) >>> edge_nodes1 = (nodes1, None, None) >>> edge_nodes2 = (nodes2, None, None) >>> classify_intersection(intersection, edge_nodes1, edge_nodes2) <IntersectionClassification.FIRST: 0> .. testcleanup:: classify-intersection1 import make_images make_images.classify_intersection1( s, curve1, tangent1, curve2, tangent2) We determine the interior (i.e. left) one by using the `right-hand rule`_: by embedding the tangent vectors in :math:`\mathbf{R}^3`, we compute .. _right-hand rule: https://en.wikipedia.org/wiki/Right-hand_rule .. math:: \left[\begin{array}{c} x_1'(s) \\ y_1'(s) \\ 0 \end{array}\right] \times \left[\begin{array}{c} x_2'(t) \\ y_2'(t) \\ 0 \end{array}\right] = \left[\begin{array}{c} 0 \\ 0 \\ x_1'(s) y_2'(t) - x_2'(t) y_1'(s) \end{array}\right]. If the cross product quantity :math:`B_1'(s) \times B_2'(t) = x_1'(s) y_2'(t) - x_2'(t) y_1'(s)` is positive, then the first curve is "outside" / "to the right", i.e. the second curve is interior. If the cross product is negative, the first curve is interior. When :math:`B_1'(s) \times B_2'(t) = 0`, the tangent vectors are parallel, i.e. the intersection is a point of tangency: .. image:: ../images/classify_intersection2.png :align: center .. doctest:: classify-intersection2 :options: +NORMALIZE_WHITESPACE >>> nodes1 = np.asfortranarray([ ... [1.0, 1.5, 2.0], ... [0.0, 1.0, 0.0], ... ]) >>> curve1 = bezier.Curve(nodes1, degree=2) >>> nodes2 = np.asfortranarray([ ... [0.0, 1.5, 3.0], ... [0.0, 1.0, 0.0], ... 
]) >>> curve2 = bezier.Curve(nodes2, degree=2) >>> s, t = 0.5, 0.5 >>> curve1.evaluate(s) == curve2.evaluate(t) array([[ True], [ True]]) >>> intersection = Intersection(0, s, 0, t) >>> edge_nodes1 = (nodes1, None, None) >>> edge_nodes2 = (nodes2, None, None) >>> classify_intersection(intersection, edge_nodes1, edge_nodes2) <IntersectionClassification.TANGENT_SECOND: 4> .. testcleanup:: classify-intersection2 import make_images make_images.classify_intersection2(s, curve1, curve2) Depending on the direction of the parameterizations, the interior curve may change, but we can use the (signed) `curvature`_ of each curve at that point to determine which is on the interior: .. _curvature: https://en.wikipedia.org/wiki/Curvature .. image:: ../images/classify_intersection3.png :align: center .. doctest:: classify-intersection3 :options: +NORMALIZE_WHITESPACE >>> nodes1 = np.asfortranarray([ ... [2.0, 1.5, 1.0], ... [0.0, 1.0, 0.0], ... ]) >>> curve1 = bezier.Curve(nodes1, degree=2) >>> nodes2 = np.asfortranarray([ ... [3.0, 1.5, 0.0], ... [0.0, 1.0, 0.0], ... ]) >>> curve2 = bezier.Curve(nodes2, degree=2) >>> s, t = 0.5, 0.5 >>> curve1.evaluate(s) == curve2.evaluate(t) array([[ True], [ True]]) >>> intersection = Intersection(0, s, 0, t) >>> edge_nodes1 = (nodes1, None, None) >>> edge_nodes2 = (nodes2, None, None) >>> classify_intersection(intersection, edge_nodes1, edge_nodes2) <IntersectionClassification.TANGENT_FIRST: 3> .. testcleanup:: classify-intersection3 import make_images make_images.classify_intersection3(s, curve1, curve2) When the curves are moving in opposite directions at a point of tangency, there is no side to choose. Either the point of tangency is not part of any :class:`.CurvedPolygon` intersection .. image:: ../images/classify_intersection4.png :align: center .. doctest:: classify-intersection4 :options: +NORMALIZE_WHITESPACE >>> nodes1 = np.asfortranarray([ ... [2.0, 1.5, 1.0], ... [0.0, 1.0, 0.0], ... ]) >>> curve1 = bezier.Curve(nodes1, degree=2) >>> nodes2 = np.asfortranarray([ ... [0.0, 1.5, 3.0], ... [0.0, 1.0, 0.0], ... ]) >>> curve2 = bezier.Curve(nodes2, degree=2) >>> s, t = 0.5, 0.5 >>> curve1.evaluate(s) == curve2.evaluate(t) array([[ True], [ True]]) >>> intersection = Intersection(0, s, 0, t) >>> edge_nodes1 = (nodes1, None, None) >>> edge_nodes2 = (nodes2, None, None) >>> classify_intersection(intersection, edge_nodes1, edge_nodes2) <IntersectionClassification.OPPOSED: 2> .. testcleanup:: classify-intersection4 import make_images make_images.classify_intersection4(s, curve1, curve2) or the point of tangency is a "degenerate" part of two :class:`.CurvedPolygon` intersections. It is "degenerate" because from one direction, the point should be classified as :attr:`~.IntersectionClassification.FIRST` and from another as :attr:`~.IntersectionClassification.SECOND`. .. image:: ../images/classify_intersection5.png :align: center .. doctest:: classify-intersection5 :options: +NORMALIZE_WHITESPACE >>> nodes1 = np.asfortranarray([ ... [1.0, 1.5, 2.0], ... [0.0, 1.0, 0.0], ... ]) >>> curve1 = bezier.Curve(nodes1, degree=2) >>> nodes2 = np.asfortranarray([ ... [3.0, 1.5, 0.0], ... [0.0, 1.0, 0.0], ... ]) >>> curve2 = bezier.Curve(nodes2, degree=2) >>> s, t = 0.5, 0.5 >>> curve1.evaluate(s) == curve2.evaluate(t) array([[ True], [ True]]) >>> intersection = Intersection(0, s, 0, t) >>> edge_nodes1 = (nodes1, None, None) >>> edge_nodes2 = (nodes2, None, None) >>> classify_intersection(intersection, edge_nodes1, edge_nodes2) <IntersectionClassification.TANGENT_BOTH: 6> .. 
testcleanup:: classify-intersection5 import make_images make_images.classify_intersection5(s, curve1, curve2) The :attr:`~.IntersectionClassification.TANGENT_BOTH` classification can also occur if the curves are "kissing" but share a zero width interior at the point of tangency: .. image:: ../images/classify_intersection9.png :align: center .. doctest:: classify-intersection9 :options: +NORMALIZE_WHITESPACE >>> nodes1 = np.asfortranarray([ ... [0.0, 20.0, 40.0], ... [0.0, 40.0, 0.0], ... ]) >>> curve1 = bezier.Curve(nodes1, degree=2) >>> nodes2 = np.asfortranarray([ ... [40.0, 20.0, 0.0], ... [40.0, 0.0, 40.0], ... ]) >>> curve2 = bezier.Curve(nodes2, degree=2) >>> s, t = 0.5, 0.5 >>> curve1.evaluate(s) == curve2.evaluate(t) array([[ True], [ True]]) >>> intersection = Intersection(0, s, 0, t) >>> edge_nodes1 = (nodes1, None, None) >>> edge_nodes2 = (nodes2, None, None) >>> classify_intersection(intersection, edge_nodes1, edge_nodes2) <IntersectionClassification.TANGENT_BOTH: 6> .. testcleanup:: classify-intersection9 import make_images make_images.classify_intersection9(s, curve1, curve2) However, if the `curvature`_ of each curve is identical, we don't try to distinguish further: .. image:: ../images/classify_intersection6.png :align: center .. doctest:: classify-intersection6 :options: +NORMALIZE_WHITESPACE >>> nodes1 = np.asfortranarray([ ... [-0.125 , -0.125 , 0.375 ], ... [ 0.0625, -0.0625, 0.0625], ... ]) >>> curve1 = bezier.Curve(nodes1, degree=2) >>> nodes2 = np.asfortranarray([ ... [-0.25, -0.25, 0.75], ... [ 0.25, -0.25, 0.25], ... ]) >>> curve2 = bezier.Curve(nodes2, degree=2) >>> s, t = 0.5, 0.5 >>> curve1.evaluate(s) == curve2.evaluate(t) array([[ True], [ True]]) >>> hodograph(curve1, s) array([[0.5], [0. ]]) >>> hodograph(curve2, t) array([[1.], [0.]]) >>> curvature(curve1, s) 2.0 >>> curvature(curve2, t) 2.0 >>> intersection = Intersection(0, s, 0, t) >>> edge_nodes1 = (nodes1, None, None) >>> edge_nodes2 = (nodes2, None, None) >>> classify_intersection(intersection, edge_nodes1, edge_nodes2) Traceback (most recent call last): ... NotImplementedError: Tangent curves have same curvature. .. testcleanup:: classify-intersection6 import make_images make_images.classify_intersection6(s, curve1, curve2) In addition to points of tangency, intersections that happen at the end of an edge need special handling: .. image:: ../images/classify_intersection7.png :align: center .. doctest:: classify-intersection7 :options: +NORMALIZE_WHITESPACE >>> nodes1a = np.asfortranarray([ ... [0.0, 4.5, 9.0 ], ... [0.0, 0.0, 2.25], ... ]) >>> curve1a = bezier.Curve(nodes1a, degree=2) >>> nodes2 = np.asfortranarray([ ... [11.25, 9.0, 2.75], ... [ 0.0 , 4.5, 1.0 ], ... ]) >>> curve2 = bezier.Curve(nodes2, degree=2) >>> s, t = 1.0, 0.375 >>> curve1a.evaluate(s) == curve2.evaluate(t) array([[ True], [ True]]) >>> intersection = Intersection(0, s, 0, t) >>> edge_nodes1 = (nodes1a, None, None) >>> edge_nodes2 = (nodes2, None, None) >>> classify_intersection(intersection, edge_nodes1, edge_nodes2) Traceback (most recent call last): ... ValueError: ('Intersection occurs at the end of an edge', 's', 1.0, 't', 0.375) >>> >>> nodes1b = np.asfortranarray([ ... [9.0, 4.5, 0.0], ... [2.25, 2.375, 2.5], ... 
]) >>> curve1b = bezier.Curve(nodes1b, degree=2) >>> curve1b.evaluate(0.0) == curve2.evaluate(t) array([[ True], [ True]]) >>> intersection = Intersection(1, 0.0, 0, t) >>> edge_nodes1 = (nodes1a, nodes1b, None) >>> classify_intersection(intersection, edge_nodes1, edge_nodes2) <IntersectionClassification.FIRST: 0> .. testcleanup:: classify-intersection7 import make_images make_images.classify_intersection7(s, curve1a, curve1b, curve2) As above, some intersections at the end of an edge are part of an actual intersection. However, some surfaces may just "kiss" at a corner intersection: .. image:: ../images/classify_intersection8.png :align: center .. doctest:: classify-intersection8 :options: +NORMALIZE_WHITESPACE >>> nodes1 = np.asfortranarray([ ... [0.25, 0.0, 0.0, 0.625, 0.5 , 1.0 ], ... [1.0 , 0.5, 0.0, 0.875, 0.375, 0.75], ... ]) >>> surface1 = bezier.Surface(nodes1, degree=2) >>> nodes2 = np.asfortranarray([ ... [0.0625, -0.25, -1.0, -0.5 , -1.0, -1.0], ... [0.5 , 1.0 , 1.0, 0.125, 0.5, 0.0], ... ]) >>> surface2 = bezier.Surface(nodes2, degree=2) >>> curve1, _, _ = surface1.edges >>> edge_nodes1 = [curve.nodes for curve in surface1.edges] >>> curve2, _, _ = surface2.edges >>> edge_nodes2 = [curve.nodes for curve in surface2.edges] >>> s, t = 0.5, 0.0 >>> curve1.evaluate(s) == curve2.evaluate(t) array([[ True], [ True]]) >>> intersection = Intersection(0, s, 0, t) >>> classify_intersection(intersection, edge_nodes1, edge_nodes2) <IntersectionClassification.IGNORED_CORNER: 5> .. testcleanup:: classify-intersection8 import make_images make_images.classify_intersection8( s, curve1, surface1, curve2, surface2) .. note:: This assumes the intersection occurs in :math:`\mathbf{R}^2` but doesn't check this. .. note:: This function doesn't allow wiggle room / round-off when checking endpoints, nor when checking if the cross product is near zero, nor when curvatures are compared. However, the most "correct" version of this function likely should allow for some round off. Args: intersection (.Intersection): An intersection object. edge_nodes1 (Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray]): The nodes of the three edges of the first surface being intersected. edge_nodes2 (Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray]): The nodes of the three edges of the second surface being intersected. Returns: IntersectionClassification: The "inside" curve type, based on the classification enum. Raises: ValueError: If the intersection occurs at the end of either curve involved. This is because we want to classify which curve to **move forward** on, and we can't move past the end of a segment. """ if intersection.s == 1.0 or intersection.t == 1.0: raise ValueError( "Intersection occurs at the end of an edge", "s", intersection.s, "t", intersection.t, ) nodes1 = edge_nodes1[intersection.index_first] tangent1 = _curve_helpers.evaluate_hodograph(intersection.s, nodes1) nodes2 = edge_nodes2[intersection.index_second] tangent2 = _curve_helpers.evaluate_hodograph(intersection.t, nodes2) if ignored_corner( intersection, tangent1, tangent2, edge_nodes1, edge_nodes2 ): return CLASSIFICATION_T.IGNORED_CORNER # Take the cross product of tangent vectors to determine which one # is more "inside" / "to the left". 
cross_prod = _helpers.cross_product( tangent1.ravel(order="F"), tangent2.ravel(order="F") ) if cross_prod < -ALMOST_TANGENT: return CLASSIFICATION_T.FIRST elif cross_prod > ALMOST_TANGENT: return CLASSIFICATION_T.SECOND else: # NOTE: A more robust approach would take ||tangent1|| and ||tangent2|| # into account when comparing (tangent1 x tangent2) to the # "almost zero" threshold. We (for now) avoid doing this because # normalizing the tangent vectors has a "cost" of ~6 flops each # and that cost would happen for **every** single intersection. return classify_tangent_intersection( intersection, nodes1, tangent1, nodes2, tangent2 )
[ "def", "classify_intersection", "(", "intersection", ",", "edge_nodes1", ",", "edge_nodes2", ")", ":", "if", "intersection", ".", "s", "==", "1.0", "or", "intersection", ".", "t", "==", "1.0", ":", "raise", "ValueError", "(", "\"Intersection occurs at the end of an edge\"", ",", "\"s\"", ",", "intersection", ".", "s", ",", "\"t\"", ",", "intersection", ".", "t", ",", ")", "nodes1", "=", "edge_nodes1", "[", "intersection", ".", "index_first", "]", "tangent1", "=", "_curve_helpers", ".", "evaluate_hodograph", "(", "intersection", ".", "s", ",", "nodes1", ")", "nodes2", "=", "edge_nodes2", "[", "intersection", ".", "index_second", "]", "tangent2", "=", "_curve_helpers", ".", "evaluate_hodograph", "(", "intersection", ".", "t", ",", "nodes2", ")", "if", "ignored_corner", "(", "intersection", ",", "tangent1", ",", "tangent2", ",", "edge_nodes1", ",", "edge_nodes2", ")", ":", "return", "CLASSIFICATION_T", ".", "IGNORED_CORNER", "# Take the cross product of tangent vectors to determine which one", "# is more \"inside\" / \"to the left\".", "cross_prod", "=", "_helpers", ".", "cross_product", "(", "tangent1", ".", "ravel", "(", "order", "=", "\"F\"", ")", ",", "tangent2", ".", "ravel", "(", "order", "=", "\"F\"", ")", ")", "if", "cross_prod", "<", "-", "ALMOST_TANGENT", ":", "return", "CLASSIFICATION_T", ".", "FIRST", "elif", "cross_prod", ">", "ALMOST_TANGENT", ":", "return", "CLASSIFICATION_T", ".", "SECOND", "else", ":", "# NOTE: A more robust approach would take ||tangent1|| and ||tangent2||", "# into account when comparing (tangent1 x tangent2) to the", "# \"almost zero\" threshold. We (for now) avoid doing this because", "# normalizing the tangent vectors has a \"cost\" of ~6 flops each", "# and that cost would happen for **every** single intersection.", "return", "classify_tangent_intersection", "(", "intersection", ",", "nodes1", ",", "tangent1", ",", "nodes2", ",", "tangent2", ")" ]
34.821803
19.098532
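The cross-product rule at the end of classify_intersection can be seen in isolation. The sketch below is a simplified stand-in (plain 2-tuples instead of the library's Fortran-ordered arrays, and a made-up ALMOST_TANGENT value), not the bezier package's own code:

ALMOST_TANGENT = 0.5 ** 50  # hypothetical threshold; the real constant lives in the library

def classify_by_cross_product(tangent1, tangent2):
    # 2D cross product of the tangents: positive means tangent2 points to the
    # left of tangent1, negative means to the right.
    cross = tangent1[0] * tangent2[1] - tangent1[1] * tangent2[0]
    if cross < -ALMOST_TANGENT:
        return "FIRST"    # first curve is the "inside" curve
    elif cross > ALMOST_TANGENT:
        return "SECOND"   # second curve is the "inside" curve
    return "TANGENT"      # nearly parallel tangents fall through to the curvature comparison

print(classify_by_cross_product((1.0, 0.0), (0.0, 1.0)))  # SECOND
print(classify_by_cross_product((0.0, 1.0), (1.0, 0.0)))  # FIRST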
def _create_clause_based_dep_links( orig_text, layer=LAYER_CONLL ): ''' Rewrites dependency links in the text from sentence-based linking to clause- based linking: *) words which have their parent outside-the-clause will become root nodes (will obtain link value -1), and *) words which have their parent inside-the-clause will have parent index according to word indices inside the clause; ''' sent_start_index = 0 for sent_text in orig_text.split_by( SENTENCES ): # 1) Create a mapping: from sentence-based dependency links to clause-based dependency links mapping = dict() cl_ind = sent_text.clause_indices for wid, word in enumerate(sent_text[WORDS]): firstSyntaxRel = sent_text[layer][wid][PARSER_OUT][0] parentIndex = firstSyntaxRel[1] if parentIndex != -1: if cl_ind[parentIndex] != cl_ind[wid]: # Parent of the word is outside the current clause: make root # node from the current node mapping[wid] = -1 else: # Find the beginning of the clause clause_start = cl_ind.index( cl_ind[wid] ) # Find the index of parent label in the clause j = 0 k = 0 while clause_start + j < len(cl_ind): if clause_start + j == parentIndex: break if cl_ind[clause_start + j] == cl_ind[wid]: k += 1 j += 1 assert clause_start + j < len(cl_ind), '(!) Parent index not found for: '+str(parentIndex) mapping[wid] = k else: mapping[wid] = -1 # 2) Overwrite old links with new ones for local_wid in mapping.keys(): global_wid = sent_start_index + local_wid for syntax_rel in orig_text[layer][global_wid][PARSER_OUT]: syntax_rel[1] = mapping[local_wid] # 3) Advance the index for processing the next sentence sent_start_index += len(cl_ind) return orig_text
[ "def", "_create_clause_based_dep_links", "(", "orig_text", ",", "layer", "=", "LAYER_CONLL", ")", ":", "sent_start_index", "=", "0", "for", "sent_text", "in", "orig_text", ".", "split_by", "(", "SENTENCES", ")", ":", "# 1) Create a mapping: from sentence-based dependency links to clause-based dependency links", "mapping", "=", "dict", "(", ")", "cl_ind", "=", "sent_text", ".", "clause_indices", "for", "wid", ",", "word", "in", "enumerate", "(", "sent_text", "[", "WORDS", "]", ")", ":", "firstSyntaxRel", "=", "sent_text", "[", "layer", "]", "[", "wid", "]", "[", "PARSER_OUT", "]", "[", "0", "]", "parentIndex", "=", "firstSyntaxRel", "[", "1", "]", "if", "parentIndex", "!=", "-", "1", ":", "if", "cl_ind", "[", "parentIndex", "]", "!=", "cl_ind", "[", "wid", "]", ":", "# Parent of the word is outside the current clause: make root ", "# node from the current node ", "mapping", "[", "wid", "]", "=", "-", "1", "else", ":", "# Find the beginning of the clause ", "clause_start", "=", "cl_ind", ".", "index", "(", "cl_ind", "[", "wid", "]", ")", "# Find the index of parent label in the clause", "j", "=", "0", "k", "=", "0", "while", "clause_start", "+", "j", "<", "len", "(", "cl_ind", ")", ":", "if", "clause_start", "+", "j", "==", "parentIndex", ":", "break", "if", "cl_ind", "[", "clause_start", "+", "j", "]", "==", "cl_ind", "[", "wid", "]", ":", "k", "+=", "1", "j", "+=", "1", "assert", "clause_start", "+", "j", "<", "len", "(", "cl_ind", ")", ",", "'(!) Parent index not found for: '", "+", "str", "(", "parentIndex", ")", "mapping", "[", "wid", "]", "=", "k", "else", ":", "mapping", "[", "wid", "]", "=", "-", "1", "# 2) Overwrite old links with new ones", "for", "local_wid", "in", "mapping", ".", "keys", "(", ")", ":", "global_wid", "=", "sent_start_index", "+", "local_wid", "for", "syntax_rel", "in", "orig_text", "[", "layer", "]", "[", "global_wid", "]", "[", "PARSER_OUT", "]", ":", "syntax_rel", "[", "1", "]", "=", "mapping", "[", "local_wid", "]", "# 3) Advance the index for processing the next sentence", "sent_start_index", "+=", "len", "(", "cl_ind", ")", "return", "orig_text" ]
48.608696
19.478261
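A small, self-contained re-implementation of the index remapping may make the counting loop in _create_clause_based_dep_links easier to follow; the clause indices below are invented, not estnltk output:

def clause_local_parent(wid, parent_index, clause_indices):
    # Re-express a sentence-level parent index as a clause-local index,
    # mirroring the counting loop above.
    if parent_index == -1 or clause_indices[parent_index] != clause_indices[wid]:
        return -1  # parent outside the clause -> word becomes a root node
    clause_start = clause_indices.index(clause_indices[wid])
    k = 0
    for j in range(clause_start, len(clause_indices)):
        if j == parent_index:
            return k
        if clause_indices[j] == clause_indices[wid]:
            k += 1
    raise ValueError("parent index not found in clause")

# Words 2..4 form an embedded clause (id 1); word 4's parent, word 3, is the
# second word of that clause, so its clause-local index is 1.
print(clause_local_parent(4, 3, [0, 0, 1, 1, 1, 0]))  # 1
print(clause_local_parent(1, 3, [0, 0, 1, 1, 1, 0]))  # -1 (parent sits in another clause)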
def element(self): """ :return: the :class:`Element` that contains these attributes. """ return self.adapter.wrap_node( self.impl_element, self.adapter.impl_document, self.adapter)
[ "def", "element", "(", "self", ")", ":", "return", "self", ".", "adapter", ".", "wrap_node", "(", "self", ".", "impl_element", ",", "self", ".", "adapter", ".", "impl_document", ",", "self", ".", "adapter", ")" ]
36.5
14.166667
def list_data_type(type_list): """This function takes a list of format specifiers and returns a list of data types represented by the format specifiers.""" data_type = [] for item in type_list: match = re.match(r"(\d+)(.+)", item) if not match: reps = 1 if item[0] in "FfEegG": data_type.append("REAL") elif item[0] in "Ii": data_type.append("INTEGER") else: reps = match.group(1) fmt = match.group(2) if "(" in fmt and "," in fmt: fmt = fmt[1:-1].split(",") elif "(" in fmt: fmt = [fmt[1:-1]] else: fmt = [fmt] for i in range(int(reps)): for ft in fmt: if ft[0] in "FfEegG": data_type.append("REAL") elif ft[0] in "Ii": data_type.append("INTEGER") return data_type
[ "def", "list_data_type", "(", "type_list", ")", ":", "data_type", "=", "[", "]", "for", "item", "in", "type_list", ":", "match", "=", "re", ".", "match", "(", "r\"(\\d+)(.+)\"", ",", "item", ")", "if", "not", "match", ":", "reps", "=", "1", "if", "item", "[", "0", "]", "in", "\"FfEegG\"", ":", "data_type", ".", "append", "(", "\"REAL\"", ")", "elif", "item", "[", "0", "]", "in", "\"Ii\"", ":", "data_type", ".", "append", "(", "\"INTEGER\"", ")", "else", ":", "reps", "=", "match", ".", "group", "(", "1", ")", "fmt", "=", "match", ".", "group", "(", "2", ")", "if", "\"(\"", "in", "fmt", "and", "\",\"", "in", "fmt", ":", "fmt", "=", "fmt", "[", "1", ":", "-", "1", "]", ".", "split", "(", "\",\"", ")", "elif", "\"(\"", "in", "fmt", ":", "fmt", "=", "[", "fmt", "[", "1", ":", "-", "1", "]", "]", "else", ":", "fmt", "=", "[", "fmt", "]", "for", "i", "in", "range", "(", "int", "(", "reps", ")", ")", ":", "for", "ft", "in", "fmt", ":", "if", "ft", "[", "0", "]", "in", "\"FfEegG\"", ":", "data_type", ".", "append", "(", "\"REAL\"", ")", "elif", "ft", "[", "0", "]", "in", "\"Ii\"", ":", "data_type", ".", "append", "(", "\"INTEGER\"", ")", "return", "data_type" ]
34.785714
9.178571
def build_result(data): """Create a dictionary with the contents of result.json""" more = {} for key, value in data.items(): if key != 'elements': newnode = value else: newnode = {} for el in value: nkey, nvalue = process_node(el) newnode[nkey] = nvalue more[key] = newnode return more
[ "def", "build_result", "(", "data", ")", ":", "more", "=", "{", "}", "for", "key", ",", "value", "in", "data", ".", "items", "(", ")", ":", "if", "key", "!=", "'elements'", ":", "newnode", "=", "value", "else", ":", "newnode", "=", "{", "}", "for", "el", "in", "value", ":", "nkey", ",", "nvalue", "=", "process_node", "(", "el", ")", "newnode", "[", "nkey", "]", "=", "nvalue", "more", "[", "key", "]", "=", "newnode", "return", "more" ]
25.466667
16.933333
def minion_mods( opts, context=None, utils=None, whitelist=None, initial_load=False, loaded_base_name=None, notify=False, static_modules=None, proxy=None): ''' Load execution modules Returns a dictionary of execution modules appropriate for the current system by evaluating the __virtual__() function in each module. :param dict opts: The Salt options dictionary :param dict context: A Salt context that should be made present inside generated modules in __context__ :param dict utils: Utility functions which should be made available to Salt modules in __utils__. See `utils_dirs` in salt.config for additional information about configuration. :param list whitelist: A list of modules which should be whitelisted. :param bool initial_load: Deprecated flag! Unused. :param str loaded_base_name: A string marker for the loaded base name. :param bool notify: Flag indicating that an event should be fired upon completion of module loading. .. code-block:: python import salt.config import salt.loader __opts__ = salt.config.minion_config('/etc/salt/minion') __grains__ = salt.loader.grains(__opts__) __opts__['grains'] = __grains__ __utils__ = salt.loader.utils(__opts__) __salt__ = salt.loader.minion_mods(__opts__, utils=__utils__) __salt__['test.ping']() ''' # TODO Publish documentation for module whitelisting if not whitelist: whitelist = opts.get('whitelist_modules', None) ret = LazyLoader( _module_dirs(opts, 'modules', 'module'), opts, tag='module', pack={'__context__': context, '__utils__': utils, '__proxy__': proxy}, whitelist=whitelist, loaded_base_name=loaded_base_name, static_modules=static_modules, ) ret.pack['__salt__'] = ret # Load any provider overrides from the configuration file providers option # Note: Providers can be pkg, service, user or group - not to be confused # with cloud providers. providers = opts.get('providers', False) if providers and isinstance(providers, dict): for mod in providers: # sometimes providers opts is not to diverge modules but # for other configuration try: funcs = raw_mod(opts, providers[mod], ret) except TypeError: break else: if funcs: for func in funcs: f_key = '{0}{1}'.format(mod, func[func.rindex('.'):]) ret[f_key] = funcs[func] if notify: evt = salt.utils.event.get_event('minion', opts=opts, listen=False) evt.fire_event({'complete': True}, tag=salt.defaults.events.MINION_MOD_COMPLETE) return ret
[ "def", "minion_mods", "(", "opts", ",", "context", "=", "None", ",", "utils", "=", "None", ",", "whitelist", "=", "None", ",", "initial_load", "=", "False", ",", "loaded_base_name", "=", "None", ",", "notify", "=", "False", ",", "static_modules", "=", "None", ",", "proxy", "=", "None", ")", ":", "# TODO Publish documentation for module whitelisting", "if", "not", "whitelist", ":", "whitelist", "=", "opts", ".", "get", "(", "'whitelist_modules'", ",", "None", ")", "ret", "=", "LazyLoader", "(", "_module_dirs", "(", "opts", ",", "'modules'", ",", "'module'", ")", ",", "opts", ",", "tag", "=", "'module'", ",", "pack", "=", "{", "'__context__'", ":", "context", ",", "'__utils__'", ":", "utils", ",", "'__proxy__'", ":", "proxy", "}", ",", "whitelist", "=", "whitelist", ",", "loaded_base_name", "=", "loaded_base_name", ",", "static_modules", "=", "static_modules", ",", ")", "ret", ".", "pack", "[", "'__salt__'", "]", "=", "ret", "# Load any provider overrides from the configuration file providers option", "# Note: Providers can be pkg, service, user or group - not to be confused", "# with cloud providers.", "providers", "=", "opts", ".", "get", "(", "'providers'", ",", "False", ")", "if", "providers", "and", "isinstance", "(", "providers", ",", "dict", ")", ":", "for", "mod", "in", "providers", ":", "# sometimes providers opts is not to diverge modules but", "# for other configuration", "try", ":", "funcs", "=", "raw_mod", "(", "opts", ",", "providers", "[", "mod", "]", ",", "ret", ")", "except", "TypeError", ":", "break", "else", ":", "if", "funcs", ":", "for", "func", "in", "funcs", ":", "f_key", "=", "'{0}{1}'", ".", "format", "(", "mod", ",", "func", "[", "func", ".", "rindex", "(", "'.'", ")", ":", "]", ")", "ret", "[", "f_key", "]", "=", "funcs", "[", "func", "]", "if", "notify", ":", "evt", "=", "salt", ".", "utils", ".", "event", ".", "get_event", "(", "'minion'", ",", "opts", "=", "opts", ",", "listen", "=", "False", ")", "evt", ".", "fire_event", "(", "{", "'complete'", ":", "True", "}", ",", "tag", "=", "salt", ".", "defaults", ".", "events", ".", "MINION_MOD_COMPLETE", ")", "return", "ret" ]
35.698795
22.277108
def start(self): ''' Start the actual master. If sub-classed, don't **ever** forget to run: super(YourSubClass, self).start() NOTE: Run any required code before calling `super()`. ''' super(SaltAPI, self).start() if check_user(self.config['user']): log.info('The salt-api is starting up') self.api.run()
[ "def", "start", "(", "self", ")", ":", "super", "(", "SaltAPI", ",", "self", ")", ".", "start", "(", ")", "if", "check_user", "(", "self", ".", "config", "[", "'user'", "]", ")", ":", "log", ".", "info", "(", "'The salt-api is starting up'", ")", "self", ".", "api", ".", "run", "(", ")" ]
27.5
20.071429
def sign(self, cert, pkey, digest_type=None, data=None, flags=Flags.BINARY): """ Adds another signer to an already signed message @param cert - signer's certificate @param pkey - signer's private key @param digest_type - message digest to use as DigestType object (if None - default for key would be used) @param data - data to sign (if detached and Flags.REUSE_DIGEST is not specified) @param flags - ORed combination of Flags constants """ if not pkey.cansign: raise ValueError("Specified keypair has no private part") if cert.pubkey != pkey: raise ValueError("Certificate doesn't match public key") if libcrypto.CMS_add1_signer(self.ptr, cert.cert, pkey.key, digest_type.digest, flags) is None: raise CMSError("adding signer") if flags & Flags.REUSE_DIGEST == 0: if data is not None: bio = Membio(data) biodata = bio.bio else: biodata = None res = libcrypto.CMS_final(self.ptr, biodata, None, flags) if res <= 0: raise CMSError("Cannot finalize CMS")
[ "def", "sign", "(", "self", ",", "cert", ",", "pkey", ",", "digest_type", "=", "None", ",", "data", "=", "None", ",", "flags", "=", "Flags", ".", "BINARY", ")", ":", "if", "not", "pkey", ".", "cansign", ":", "raise", "ValueError", "(", "\"Specified keypair has no private part\"", ")", "if", "cert", ".", "pubkey", "!=", "pkey", ":", "raise", "ValueError", "(", "\"Certificate doesn't match public key\"", ")", "if", "libcrypto", ".", "CMS_add1_signer", "(", "self", ".", "ptr", ",", "cert", ".", "cert", ",", "pkey", ".", "key", ",", "digest_type", ".", "digest", ",", "flags", ")", "is", "None", ":", "raise", "CMSError", "(", "\"adding signer\"", ")", "if", "flags", "&", "Flags", ".", "REUSE_DIGEST", "==", "0", ":", "if", "data", "is", "not", "None", ":", "bio", "=", "Membio", "(", "data", ")", "biodata", "=", "bio", ".", "bio", "else", ":", "biodata", "=", "None", "res", "=", "libcrypto", ".", "CMS_final", "(", "self", ".", "ptr", ",", "biodata", ",", "None", ",", "flags", ")", "if", "res", "<=", "0", ":", "raise", "CMSError", "(", "\"Cannot finalize CMS\"", ")" ]
47
15.888889
def deprecated_for(replace_message): """ Decorate a deprecated function, with info about what to use instead, like: @deprecated_for("toBytes()") def toAscii(arg): ... """ def decorator(to_wrap): @functools.wraps(to_wrap) def wrapper(*args, **kwargs): warnings.warn( "%s is deprecated in favor of %s" % (to_wrap.__name__, replace_message), category=DeprecationWarning, stacklevel=2) return to_wrap(*args, **kwargs) return wrapper return decorator
[ "def", "deprecated_for", "(", "replace_message", ")", ":", "def", "decorator", "(", "to_wrap", ")", ":", "@", "functools", ".", "wraps", "(", "to_wrap", ")", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "warnings", ".", "warn", "(", "\"%s is deprecated in favor of %s\"", "%", "(", "to_wrap", ".", "__name__", ",", "replace_message", ")", ",", "category", "=", "DeprecationWarning", ",", "stacklevel", "=", "2", ")", "return", "to_wrap", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "wrapper", "return", "decorator" ]
31.166667
15.5
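Assuming the deprecated_for decorator above is in scope, a minimal usage sketch (the function names echo the docstring's toAscii/toBytes example) looks like this:

import warnings

def to_bytes(arg):
    return bytes(arg, "utf-8")

@deprecated_for("to_bytes()")
def to_ascii(arg):           # hypothetical legacy name kept as a deprecated alias
    return to_bytes(arg)

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    to_ascii("hi")
    print(caught[0].message)  # "to_ascii is deprecated in favor of to_bytes()"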
def setup(self, app): ''' Set up properties from the parent app on the command ''' self.logger = app.logger self.shell.logger = self.logger if not self.command_name: raise EmptyCommandNameException() self.app = app self.arguments_declaration = self.arguments self.arguments = app.arguments if self.use_subconfig: _init_config(self) else: self.config = self.app.config
[ "def", "setup", "(", "self", ",", "app", ")", ":", "self", ".", "logger", "=", "app", ".", "logger", "self", ".", "shell", ".", "logger", "=", "self", ".", "logger", "if", "not", "self", ".", "command_name", ":", "raise", "EmptyCommandNameException", "(", ")", "self", ".", "app", "=", "app", "self", ".", "arguments_declaration", "=", "self", ".", "arguments", "self", ".", "arguments", "=", "app", ".", "arguments", "if", "self", ".", "use_subconfig", ":", "_init_config", "(", "self", ")", "else", ":", "self", ".", "config", "=", "self", ".", "app", ".", "config" ]
26.222222
17.333333
def solution(events, slots, objective_function=None, solver=None, **kwargs): """Compute a schedule in solution form Parameters ---------- events : list or tuple of :py:class:`resources.Event` instances slots : list or tuple of :py:class:`resources.Slot` instances solver : pulp.solver a pulp solver objective_function: callable from lp_problem.objective_functions kwargs : keyword arguments arguments for the objective function Returns ------- list A list of tuples giving the event and slot index (for the given events and slots lists) for all scheduled items. Example ------- For a solution where * event 0 is scheduled in slot 1 * event 1 is scheduled in slot 4 * event 2 is scheduled in slot 5 the resulting list would be:: [(0, 1), (1, 4), (2, 5)] """ shape = Shape(len(events), len(slots)) problem = pulp.LpProblem() X = lp.utils.variables(shape) beta = pulp.LpVariable("upper_bound") for constraint in lp.constraints.all_constraints( events, slots, X, beta, 'lpsum' ): problem += constraint.condition if objective_function is not None: problem += objective_function(events=events, slots=slots, X=X, beta=beta, **kwargs) status = problem.solve(solver=solver) if status == 1: return [item for item, variable in X.items() if variable.value() > 0] else: raise ValueError('No valid solution found')
[ "def", "solution", "(", "events", ",", "slots", ",", "objective_function", "=", "None", ",", "solver", "=", "None", ",", "*", "*", "kwargs", ")", ":", "shape", "=", "Shape", "(", "len", "(", "events", ")", ",", "len", "(", "slots", ")", ")", "problem", "=", "pulp", ".", "LpProblem", "(", ")", "X", "=", "lp", ".", "utils", ".", "variables", "(", "shape", ")", "beta", "=", "pulp", ".", "LpVariable", "(", "\"upper_bound\"", ")", "for", "constraint", "in", "lp", ".", "constraints", ".", "all_constraints", "(", "events", ",", "slots", ",", "X", ",", "beta", ",", "'lpsum'", ")", ":", "problem", "+=", "constraint", ".", "condition", "if", "objective_function", "is", "not", "None", ":", "problem", "+=", "objective_function", "(", "events", "=", "events", ",", "slots", "=", "slots", ",", "X", "=", "X", ",", "beta", "=", "beta", ",", "*", "*", "kwargs", ")", "status", "=", "problem", ".", "solve", "(", "solver", "=", "solver", ")", "if", "status", "==", "1", ":", "return", "[", "item", "for", "item", ",", "variable", "in", "X", ".", "items", "(", ")", "if", "variable", ".", "value", "(", ")", ">", "0", "]", "else", ":", "raise", "ValueError", "(", "'No valid solution found'", ")" ]
28.962963
18.425926
def strong(node): """ A bolded section """ o = nodes.strong() for n in MarkDown(node): o += n return o
[ "def", "strong", "(", "node", ")", ":", "o", "=", "nodes", ".", "strong", "(", ")", "for", "n", "in", "MarkDown", "(", "node", ")", ":", "o", "+=", "n", "return", "o" ]
15.875
15.875
def explore(args): """Create mapping of sequences of two clusters """ logger.info("reading sequeces") data = load_data(args.json) logger.info("get sequences from json") #get_sequences_from_cluster() c1, c2 = args.names.split(",") seqs, names = get_sequences_from_cluster(c1, c2, data[0]) loci = get_precursors_from_cluster(c1, c2, data[0]) logger.info("map all sequences to all loci") print("%s" % (loci)) map_to_precursors(seqs, names, loci, os.path.join(args.out, "map.tsv"), args) #map_sequences_w_bowtie(sequences, precursors) logger.info("plot sequences on loci") #get_matrix_position() #plot_sequences() logger.info("Done")
[ "def", "explore", "(", "args", ")", ":", "logger", ".", "info", "(", "\"reading sequeces\"", ")", "data", "=", "load_data", "(", "args", ".", "json", ")", "logger", ".", "info", "(", "\"get sequences from json\"", ")", "#get_sequences_from_cluster()", "c1", ",", "c2", "=", "args", ".", "names", ".", "split", "(", "\",\"", ")", "seqs", ",", "names", "=", "get_sequences_from_cluster", "(", "c1", ",", "c2", ",", "data", "[", "0", "]", ")", "loci", "=", "get_precursors_from_cluster", "(", "c1", ",", "c2", ",", "data", "[", "0", "]", ")", "logger", ".", "info", "(", "\"map all sequences to all loci\"", ")", "print", "(", "\"%s\"", "%", "(", "loci", ")", ")", "map_to_precursors", "(", "seqs", ",", "names", ",", "loci", ",", "os", ".", "path", ".", "join", "(", "args", ".", "out", ",", "\"map.tsv\"", ")", ",", "args", ")", "#map_sequences_w_bowtie(sequences, precursors)", "logger", ".", "info", "(", "\"plot sequences on loci\"", ")", "#get_matrix_position()", "#plot_sequences()", "logger", ".", "info", "(", "\"Done\"", ")" ]
37.777778
11.833333
def get_rng(obj=None): """ Get a good RNG seeded with time, pid and the object. Args: obj: some object to use to generate random seed. Returns: np.random.RandomState: the RNG. """ seed = (id(obj) + os.getpid() + int(datetime.now().strftime("%Y%m%d%H%M%S%f"))) % 4294967295 if _RNG_SEED is not None: seed = _RNG_SEED return np.random.RandomState(seed)
[ "def", "get_rng", "(", "obj", "=", "None", ")", ":", "seed", "=", "(", "id", "(", "obj", ")", "+", "os", ".", "getpid", "(", ")", "+", "int", "(", "datetime", ".", "now", "(", ")", ".", "strftime", "(", "\"%Y%m%d%H%M%S%f\"", ")", ")", ")", "%", "4294967295", "if", "_RNG_SEED", "is", "not", "None", ":", "seed", "=", "_RNG_SEED", "return", "np", ".", "random", ".", "RandomState", "(", "seed", ")" ]
29
15.428571
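A minimal usage sketch, assuming get_rng and its module-level dependencies (numpy, os, datetime, _RNG_SEED) are importable:

rng = get_rng("training-pipeline")     # any object works; only id(obj) feeds the seed
print(rng.uniform(0.0, 1.0, size=3))   # a np.random.RandomState, so the usual API applies

# The seed mixes id(obj), the process id and the current timestamp, so repeated calls
# give independent streams unless the module-level _RNG_SEED override is set.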
def chunked_join(iterable, int1, int2, str1, str2, func): """Chunk and join.""" chunks = list(chunked(iterable, int1)) logging.debug(chunks) groups = [list(chunked(chunk, int2)) for chunk in chunks] logging.debug(groups) return str1.join([ str2.join([func(''.join(chunk)) for chunk in chunks]) for chunks in groups ])
[ "def", "chunked_join", "(", "iterable", ",", "int1", ",", "int2", ",", "str1", ",", "str2", ",", "func", ")", ":", "chunks", "=", "list", "(", "chunked", "(", "iterable", ",", "int1", ")", ")", "logging", ".", "debug", "(", "chunks", ")", "groups", "=", "[", "list", "(", "chunked", "(", "chunk", ",", "int2", ")", ")", "for", "chunk", "in", "chunks", "]", "logging", ".", "debug", "(", "groups", ")", "return", "str1", ".", "join", "(", "[", "str2", ".", "join", "(", "[", "func", "(", "''", ".", "join", "(", "chunk", ")", ")", "for", "chunk", "in", "chunks", "]", ")", "for", "chunks", "in", "groups", "]", ")" ]
35.2
15.5
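The record does not show where chunked comes from (it behaves like more_itertools.chunked), so the sketch below uses a local helper with the same behaviour to illustrate the grouping order:

def chunked(seq, n):
    # Local stand-in for the chunked() helper the record relies on; yields lists of size n.
    seq = list(seq)
    return [seq[i:i + n] for i in range(0, len(seq), n)]

def chunked_join_demo(iterable, int1, int2, str1, str2, func):
    # Same grouping logic as chunked_join above, minus the logging calls.
    chunks = chunked(iterable, int1)
    groups = [chunked(chunk, int2) for chunk in chunks]
    return str1.join(
        str2.join(func(''.join(sub)) for sub in group)
        for group in groups
    )

print(chunked_join_demo("abcdefgh", 4, 2, " | ", "-", str.upper))  # AB-CD | EF-GH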
def get_files(path, ext=[], include=True): """Walk all subfolders of the given folder and return a generator object. :param str path: The folder to process. :param list ext: A list of file extensions. :param bool include: If True, ext is treated as an inclusion list; otherwise it is an exclusion list. :returns: A generator object. """ has_ext = len(ext)>0 for p, d, fs in os.walk(path): for f in fs: if has_ext: in_ext = False for name in ext: if f.endswith(name): in_ext = True break if (include and in_ext) or \ (not include and not in_ext): yield os.path.join(p,f) else: yield os.path.join(p, f)
[ "def", "get_files", "(", "path", ",", "ext", "=", "[", "]", ",", "include", "=", "True", ")", ":", "has_ext", "=", "len", "(", "ext", ")", ">", "0", "for", "p", ",", "d", ",", "fs", "in", "os", ".", "walk", "(", "path", ")", ":", "for", "f", "in", "fs", ":", "if", "has_ext", ":", "in_ext", "=", "False", "for", "name", "in", "ext", ":", "if", "f", ".", "endswith", "(", "name", ")", ":", "in_ext", "=", "True", "break", "if", "(", "include", "and", "in_ext", ")", "or", "(", "not", "include", "and", "not", "in_ext", ")", ":", "yield", "os", ".", "path", ".", "join", "(", "p", ",", "f", ")", "else", ":", "yield", "os", ".", "path", ".", "join", "(", "p", ",", "f", ")" ]
29.416667
11.083333
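A usage sketch under the assumption that get_files and its os import are available; "./docs" is a hypothetical directory:

# Yield only Python and text files under ./docs
for path in get_files("./docs", ext=[".py", ".txt"], include=True):
    print(path)

# Yield everything except compiled byte-code
for path in get_files("./docs", ext=[".pyc"], include=False):
    print(path)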
def iter_logical_lines(self, blob): """Returns an iterator of (start_line, stop_line, indent) for logical lines """ indent_stack = [] contents = [] line_number_start = None for token in self.iter_tokens(blob): token_type, token_text, token_start = token[0:3] if token_type == tokenize.INDENT: indent_stack.append(token_text) if token_type == tokenize.DEDENT: indent_stack.pop() if token_type in self.SKIP_TOKENS: continue contents.append(token_text) if line_number_start is None: line_number_start = token_start[0] elif token_type in (tokenize.NEWLINE, tokenize.ENDMARKER): yield self.translate_logical_line( line_number_start, token_start[0] + (1 if token_type is tokenize.NEWLINE else -1), [_f for _f in contents if _f], indent_stack, endmarker=token_type == tokenize.ENDMARKER) contents = [] line_number_start = None
[ "def", "iter_logical_lines", "(", "self", ",", "blob", ")", ":", "indent_stack", "=", "[", "]", "contents", "=", "[", "]", "line_number_start", "=", "None", "for", "token", "in", "self", ".", "iter_tokens", "(", "blob", ")", ":", "token_type", ",", "token_text", ",", "token_start", "=", "token", "[", "0", ":", "3", "]", "if", "token_type", "==", "tokenize", ".", "INDENT", ":", "indent_stack", ".", "append", "(", "token_text", ")", "if", "token_type", "==", "tokenize", ".", "DEDENT", ":", "indent_stack", ".", "pop", "(", ")", "if", "token_type", "in", "self", ".", "SKIP_TOKENS", ":", "continue", "contents", ".", "append", "(", "token_text", ")", "if", "line_number_start", "is", "None", ":", "line_number_start", "=", "token_start", "[", "0", "]", "elif", "token_type", "in", "(", "tokenize", ".", "NEWLINE", ",", "tokenize", ".", "ENDMARKER", ")", ":", "yield", "self", ".", "translate_logical_line", "(", "line_number_start", ",", "token_start", "[", "0", "]", "+", "(", "1", "if", "token_type", "is", "tokenize", ".", "NEWLINE", "else", "-", "1", ")", ",", "[", "_f", "for", "_f", "in", "contents", "if", "_f", "]", ",", "indent_stack", ",", "endmarker", "=", "token_type", "==", "tokenize", ".", "ENDMARKER", ")", "contents", "=", "[", "]", "line_number_start", "=", "None" ]
38.92
10.32
def payload_body(req): """ A generator that will include the sha256 signature of the request's body in the JWT payload. This is only done if the request could have a body: if the method is POST or PUT. >>> auth = JWTAuth('secret') >>> auth.add_field('body', payload_body) """ to_hash = req.body if type(req.body) is bytes else req.body.encode('utf-8') if req.method in ('POST', 'PUT'): return { 'hash': hashlib.sha256(to_hash).hexdigest(), 'alg': 'sha256', }
[ "def", "payload_body", "(", "req", ")", ":", "to_hash", "=", "req", ".", "body", "if", "type", "(", "req", ".", "body", ")", "is", "bytes", "else", "req", ".", "body", ".", "encode", "(", "'utf-8'", ")", "if", "req", ".", "method", "in", "(", "'POST'", ",", "'PUT'", ")", ":", "return", "{", "'hash'", ":", "hashlib", ".", "sha256", "(", "to_hash", ")", ".", "hexdigest", "(", ")", ",", "'alg'", ":", "'sha256'", ",", "}" ]
33.625
19.125
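Beyond the JWTAuth registration shown in the docstring, the return value itself can be inspected with a minimal stand-in request object; SimpleNamespace is only used here to fake the .method and .body attributes the function reads:

from types import SimpleNamespace

req = SimpleNamespace(method="POST", body='{"name": "example"}')
print(payload_body(req))
# {'hash': '<sha256 hex digest of the body>', 'alg': 'sha256'}
# For GET/DELETE requests the function returns None, so no body hash is added.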
def unparse(self, indent_step = 4, max_linelen = 72) : "returns an XML string description of this Introspection tree." out = io.StringIO() def to_string(obj, indent) : tag_name = obj.tag_name attrs = [] for attrname in obj.tag_attrs : attr = getattr(obj, attrname) if attr != None : if isinstance(attr, enum.Enum) : attr = attr.value elif isinstance(attr, Type) : attr = unparse_signature(attr) elif not isinstance(attr, str) : raise TypeError("unexpected attribute type %s for %s" % (type(attr).__name__, repr(attr))) #end if attrs.append("%s=%s" % (attrname, quote_xml_attr(attr))) #end if #end for has_elts = \ ( sum ( len(getattr(obj, attrname)) for attrname in tuple(obj.tag_elts.keys()) + ((), ("annotations",)) [not isinstance(obj, Introspection.Annotation)] ) != 0 ) out.write(" " * indent + "<" + tag_name) if ( max_linelen != None and indent + len(tag_name) + sum((len(s) + 1) for s in attrs) + 2 + int(has_elts) > max_linelen ) : out.write("\n") for attr in attrs : out.write(" " * (indent + indent_step)) out.write(attr) out.write("\n") #end for out.write(" " * indent) else : for attr in attrs : out.write(" ") out.write(attr) #end for #end if if not has_elts : out.write("/") #end if out.write(">\n") if has_elts : for attrname in sorted(obj.tag_elts.keys()) + ["annotations"] : for elt in getattr(obj, attrname) : to_string(elt, indent + indent_step) #end for #end for out.write(" " * indent + "</" + tag_name + ">\n") #end if #end to_string #begin unparse out.write(DBUS.INTROSPECT_1_0_XML_DOCTYPE_DECL_NODE) out.write("<node") if self.name != None : out.write(" name=%s" % quote_xml_attr(self.name)) #end if out.write(">\n") for elt in self.interfaces : to_string(elt, indent_step) #end for for elt in self.nodes : to_string(elt, indent_step) #end for out.write("</node>\n") return \ out.getvalue()
[ "def", "unparse", "(", "self", ",", "indent_step", "=", "4", ",", "max_linelen", "=", "72", ")", ":", "out", "=", "io", ".", "StringIO", "(", ")", "def", "to_string", "(", "obj", ",", "indent", ")", ":", "tag_name", "=", "obj", ".", "tag_name", "attrs", "=", "[", "]", "for", "attrname", "in", "obj", ".", "tag_attrs", ":", "attr", "=", "getattr", "(", "obj", ",", "attrname", ")", "if", "attr", "!=", "None", ":", "if", "isinstance", "(", "attr", ",", "enum", ".", "Enum", ")", ":", "attr", "=", "attr", ".", "value", "elif", "isinstance", "(", "attr", ",", "Type", ")", ":", "attr", "=", "unparse_signature", "(", "attr", ")", "elif", "not", "isinstance", "(", "attr", ",", "str", ")", ":", "raise", "TypeError", "(", "\"unexpected attribute type %s for %s\"", "%", "(", "type", "(", "attr", ")", ".", "__name__", ",", "repr", "(", "attr", ")", ")", ")", "#end if", "attrs", ".", "append", "(", "\"%s=%s\"", "%", "(", "attrname", ",", "quote_xml_attr", "(", "attr", ")", ")", ")", "#end if", "#end for", "has_elts", "=", "(", "sum", "(", "len", "(", "getattr", "(", "obj", ",", "attrname", ")", ")", "for", "attrname", "in", "tuple", "(", "obj", ".", "tag_elts", ".", "keys", "(", ")", ")", "+", "(", "(", ")", ",", "(", "\"annotations\"", ",", ")", ")", "[", "not", "isinstance", "(", "obj", ",", "Introspection", ".", "Annotation", ")", "]", ")", "!=", "0", ")", "out", ".", "write", "(", "\" \"", "*", "indent", "+", "\"<\"", "+", "tag_name", ")", "if", "(", "max_linelen", "!=", "None", "and", "indent", "+", "len", "(", "tag_name", ")", "+", "sum", "(", "(", "len", "(", "s", ")", "+", "1", ")", "for", "s", "in", "attrs", ")", "+", "2", "+", "int", "(", "has_elts", ")", ">", "max_linelen", ")", ":", "out", ".", "write", "(", "\"\\n\"", ")", "for", "attr", "in", "attrs", ":", "out", ".", "write", "(", "\" \"", "*", "(", "indent", "+", "indent_step", ")", ")", "out", ".", "write", "(", "attr", ")", "out", ".", "write", "(", "\"\\n\"", ")", "#end for", "out", ".", "write", "(", "\" \"", "*", "indent", ")", "else", ":", "for", "attr", "in", "attrs", ":", "out", ".", "write", "(", "\" \"", ")", "out", ".", "write", "(", "attr", ")", "#end for", "#end if", "if", "not", "has_elts", ":", "out", ".", "write", "(", "\"/\"", ")", "#end if", "out", ".", "write", "(", "\">\\n\"", ")", "if", "has_elts", ":", "for", "attrname", "in", "sorted", "(", "obj", ".", "tag_elts", ".", "keys", "(", ")", ")", "+", "[", "\"annotations\"", "]", ":", "for", "elt", "in", "getattr", "(", "obj", ",", "attrname", ")", ":", "to_string", "(", "elt", ",", "indent", "+", "indent_step", ")", "#end for", "#end for", "out", ".", "write", "(", "\" \"", "*", "indent", "+", "\"</\"", "+", "tag_name", "+", "\">\\n\"", ")", "#end if", "#end to_string", "#begin unparse", "out", ".", "write", "(", "DBUS", ".", "INTROSPECT_1_0_XML_DOCTYPE_DECL_NODE", ")", "out", ".", "write", "(", "\"<node\"", ")", "if", "self", ".", "name", "!=", "None", ":", "out", ".", "write", "(", "\" name=%s\"", "%", "quote_xml_attr", "(", "self", ".", "name", ")", ")", "#end if", "out", ".", "write", "(", "\">\\n\"", ")", "for", "elt", "in", "self", ".", "interfaces", ":", "to_string", "(", "elt", ",", "indent_step", ")", "#end for", "for", "elt", "in", "self", ".", "nodes", ":", "to_string", "(", "elt", ",", "indent_step", ")", "#end for", "out", ".", "write", "(", "\"</node>\\n\"", ")", "return", "out", ".", "getvalue", "(", ")" ]
34.478723
15.946809
def collapse_all(self): """collapse all messages in thread""" for MT in self.messagetrees(): MT.collapse(MT.root) self.focus_selected_message()
[ "def", "collapse_all", "(", "self", ")", ":", "for", "MT", "in", "self", ".", "messagetrees", "(", ")", ":", "MT", ".", "collapse", "(", "MT", ".", "root", ")", "self", ".", "focus_selected_message", "(", ")" ]
35
6
def from_ini(cls, folder, ini_file='fpp.ini', ichrone='mist', recalc=False, refit_trap=False, **kwargs): """ To enable simple usage, initializes a FPPCalculation from a .ini file By default, a file called ``fpp.ini`` will be looked for in the current folder. Also present must be a ``star.ini`` file that contains the observed properties of the target star. ``fpp.ini`` must be of the following form:: name = k2oi ra = 11:30:14.510 dec = +07:35:18.21 period = 32.988 #days rprs = 0.0534 #Rp/Rstar photfile = lc_k2oi.csv [constraints] maxrad = 10 #exclusion radius [arcsec] secthresh = 0.001 #maximum allowed secondary signal depth #This variable defines contrast curves #ccfiles = Keck_J.cc, Lick_J.cc Photfile must be a text file with columns ``(days_from_midtransit, flux, flux_err)``. Both whitespace- and comma-delimited will be tried, using ``np.loadtxt``. Photfile need not be there if there is a pickled :class:`TransitSignal` saved in the same directory as ``ini_file``, named ``trsig.pkl`` (or another name as defined by ``trsig`` keyword in ``.ini`` file). ``star.ini`` should look something like the following:: B = 15.005, 0.06 V = 13.496, 0.05 g = 14.223, 0.05 r = 12.858, 0.04 i = 11.661, 0.08 J = 9.763, 0.03 H = 9.135, 0.03 K = 8.899, 0.02 W1 = 8.769, 0.023 W2 = 8.668, 0.02 W3 = 8.552, 0.025 Kepler = 12.473 #Teff = 3503, 80 #feh = 0.09, 0.09 #logg = 4.89, 0.1 Any star properties can be defined; if errors are included then they will be used in the :class:`isochrones.StarModel` MCMC fit. Spectroscopic parameters (``Teff, feh, logg``) are optional. If included, then they will also be included in :class:`isochrones.StarModel` fit. A magnitude for the band in which the transit signal is observed (e.g., ``Kepler``) is required, though need not have associated uncertainty. :param folder: Folder to find configuration files. :param ini_file: Input configuration file. :param star_ini_file: Input config file for :class:`isochrones.StarModel` fits. :param recalc: Whether to re-calculate :class:`PopulationSet`, if a ``popset.h5`` file is already present :param **kwargs: Keyword arguments passed to :class:`PopulationSet`. Creates: * ``trsig.pkl``: the pickled :class:`vespa.TransitSignal` object. * ``starfield.h5``: the TRILEGAL field star simulation * ``starmodel.h5``: the :class:`isochrones.StarModel` fit * ``popset.h5``: the :class:`vespa.PopulationSet` object representing the model population simulations. Raises ------ RuntimeError : If single, double, and triple starmodels are not computed, then raises with admonition to run `starfit --all`. AttributeError : If `trsig.pkl` not present in folder, and `photfile` is not defined in config file. """ # Check if all starmodel fits are done. 
# If not, tell user to run 'starfit --all' config = ConfigObj(os.path.join(folder, ini_file)) # Load required entries from ini_file try: name = config['name'] ra, dec = config['ra'], config['dec'] period = float(config['period']) rprs = float(config['rprs']) except KeyError as err: raise KeyError('Missing required element of ini file: {}'.format(err)) try: cadence = float(config['cadence']) except KeyError: logging.warning('Cadence not provided in fpp.ini; defaulting to Kepler cadence.') logging.warning('If this is not a Kepler target, please set cadence (in days).') cadence = 1626./86400 # Default to Kepler cadence def fullpath(filename): if os.path.isabs(filename): return filename else: return os.path.join(folder, filename) # Non-required entries with default values popset_file = fullpath(config.get('popset', 'popset.h5')) starfield_file = fullpath(config.get('starfield', 'starfield.h5')) trsig_file = fullpath(config.get('trsig', 'trsig.pkl')) # Check for StarModel fits starmodel_basename = config.get('starmodel_basename', '{}_starmodel'.format(ichrone)) single_starmodel_file = os.path.join(folder,'{}_single.h5'.format(starmodel_basename)) binary_starmodel_file = os.path.join(folder,'{}_binary.h5'.format(starmodel_basename)) triple_starmodel_file = os.path.join(folder,'{}_triple.h5'.format(starmodel_basename)) try: single_starmodel = StarModel.load_hdf(single_starmodel_file) binary_starmodel = StarModel.load_hdf(binary_starmodel_file) triple_starmodel = StarModel.load_hdf(triple_starmodel_file) except Exception as e: print(e) raise RuntimeError('Cannot load StarModels. ' + 'Please run `starfit --all {}`.'.format(folder)) # Create (or load) TransitSignal if os.path.exists(trsig_file): logging.info('Loading transit signal from {}...'.format(trsig_file)) with open(trsig_file, 'rb') as f: trsig = pickle.load(f) else: try: photfile = fullpath(config['photfile']) except KeyError: raise AttributeError('If transit pickle file (trsig.pkl) ' + 'not present, "photfile" must be' + 'defined.') trsig = TransitSignal.from_ascii(photfile, P=period, name=name) if not trsig.hasMCMC or refit_trap: logging.info('Fitting transitsignal with MCMC...') trsig.MCMC() trsig.save(trsig_file) # Create (or load) PopulationSet do_only = DEFAULT_MODELS if os.path.exists(popset_file): if recalc: os.remove(popset_file) else: with pd.HDFStore(popset_file) as store: do_only = [m for m in DEFAULT_MODELS if m not in store] # Check that properties of saved population match requested try: popset = PopulationSet.load_hdf(popset_file) for pop in popset.poplist: if pop.cadence != cadence: raise ValueError('Requested cadence ({}) '.format(cadence) + 'does not match stored {})! Set recalc=True.'.format(pop.cadence)) except: raise if do_only: logging.info('Generating {} models for PopulationSet...'.format(do_only)) else: logging.info('Populations ({}) already generated.'.format(DEFAULT_MODELS)) popset = PopulationSet(period=period, cadence=cadence, mags=single_starmodel.mags, ra=ra, dec=dec, trilegal_filename=starfield_file, # Maybe change parameter name? 
starmodel=single_starmodel, binary_starmodel=binary_starmodel, triple_starmodel=triple_starmodel, rprs=rprs, do_only=do_only, savefile=popset_file, **kwargs) fpp = cls(trsig, popset, folder=folder) ############# # Apply constraints # Exclusion radius maxrad = float(config['constraints']['maxrad']) fpp.set_maxrad(maxrad) if 'secthresh' in config['constraints']: secthresh = float(config['constraints']['secthresh']) if not np.isnan(secthresh): fpp.apply_secthresh(secthresh) # Odd-even constraint diff = 3 * np.max(trsig.depthfit[1]) fpp.constrain_oddeven(diff) #apply contrast curve constraints if present if 'ccfiles' in config['constraints']: ccfiles = config['constraints']['ccfiles'] if isinstance(ccfiles, string_types): ccfiles = [ccfiles] for ccfile in ccfiles: if not os.path.isabs(ccfile): ccfile = os.path.join(folder, ccfile) m = re.search('(\w+)_(\w+)\.cc',os.path.basename(ccfile)) if not m: logging.warning('Invalid CC filename ({}); '.format(ccfile) + 'skipping.') continue else: band = m.group(2) inst = m.group(1) name = '{} {}-band'.format(inst, band) cc = ContrastCurveFromFile(ccfile, band, name=name) fpp.apply_cc(cc) #apply "velocity contrast curve" if present if 'vcc' in config['constraints']: dv = float(config['constraints']['vcc'][0]) dmag = float(config['constraints']['vcc'][1]) vcc = VelocityContrastCurve(dv, dmag) fpp.apply_vcc(vcc) return fpp
[ "def", "from_ini", "(", "cls", ",", "folder", ",", "ini_file", "=", "'fpp.ini'", ",", "ichrone", "=", "'mist'", ",", "recalc", "=", "False", ",", "refit_trap", "=", "False", ",", "*", "*", "kwargs", ")", ":", "# Check if all starmodel fits are done.", "# If not, tell user to run 'starfit --all'", "config", "=", "ConfigObj", "(", "os", ".", "path", ".", "join", "(", "folder", ",", "ini_file", ")", ")", "# Load required entries from ini_file", "try", ":", "name", "=", "config", "[", "'name'", "]", "ra", ",", "dec", "=", "config", "[", "'ra'", "]", ",", "config", "[", "'dec'", "]", "period", "=", "float", "(", "config", "[", "'period'", "]", ")", "rprs", "=", "float", "(", "config", "[", "'rprs'", "]", ")", "except", "KeyError", "as", "err", ":", "raise", "KeyError", "(", "'Missing required element of ini file: {}'", ".", "format", "(", "err", ")", ")", "try", ":", "cadence", "=", "float", "(", "config", "[", "'cadence'", "]", ")", "except", "KeyError", ":", "logging", ".", "warning", "(", "'Cadence not provided in fpp.ini; defaulting to Kepler cadence.'", ")", "logging", ".", "warning", "(", "'If this is not a Kepler target, please set cadence (in days).'", ")", "cadence", "=", "1626.", "/", "86400", "# Default to Kepler cadence", "def", "fullpath", "(", "filename", ")", ":", "if", "os", ".", "path", ".", "isabs", "(", "filename", ")", ":", "return", "filename", "else", ":", "return", "os", ".", "path", ".", "join", "(", "folder", ",", "filename", ")", "# Non-required entries with default values", "popset_file", "=", "fullpath", "(", "config", ".", "get", "(", "'popset'", ",", "'popset.h5'", ")", ")", "starfield_file", "=", "fullpath", "(", "config", ".", "get", "(", "'starfield'", ",", "'starfield.h5'", ")", ")", "trsig_file", "=", "fullpath", "(", "config", ".", "get", "(", "'trsig'", ",", "'trsig.pkl'", ")", ")", "# Check for StarModel fits", "starmodel_basename", "=", "config", ".", "get", "(", "'starmodel_basename'", ",", "'{}_starmodel'", ".", "format", "(", "ichrone", ")", ")", "single_starmodel_file", "=", "os", ".", "path", ".", "join", "(", "folder", ",", "'{}_single.h5'", ".", "format", "(", "starmodel_basename", ")", ")", "binary_starmodel_file", "=", "os", ".", "path", ".", "join", "(", "folder", ",", "'{}_binary.h5'", ".", "format", "(", "starmodel_basename", ")", ")", "triple_starmodel_file", "=", "os", ".", "path", ".", "join", "(", "folder", ",", "'{}_triple.h5'", ".", "format", "(", "starmodel_basename", ")", ")", "try", ":", "single_starmodel", "=", "StarModel", ".", "load_hdf", "(", "single_starmodel_file", ")", "binary_starmodel", "=", "StarModel", ".", "load_hdf", "(", "binary_starmodel_file", ")", "triple_starmodel", "=", "StarModel", ".", "load_hdf", "(", "triple_starmodel_file", ")", "except", "Exception", "as", "e", ":", "print", "(", "e", ")", "raise", "RuntimeError", "(", "'Cannot load StarModels. 
'", "+", "'Please run `starfit --all {}`.'", ".", "format", "(", "folder", ")", ")", "# Create (or load) TransitSignal", "if", "os", ".", "path", ".", "exists", "(", "trsig_file", ")", ":", "logging", ".", "info", "(", "'Loading transit signal from {}...'", ".", "format", "(", "trsig_file", ")", ")", "with", "open", "(", "trsig_file", ",", "'rb'", ")", "as", "f", ":", "trsig", "=", "pickle", ".", "load", "(", "f", ")", "else", ":", "try", ":", "photfile", "=", "fullpath", "(", "config", "[", "'photfile'", "]", ")", "except", "KeyError", ":", "raise", "AttributeError", "(", "'If transit pickle file (trsig.pkl) '", "+", "'not present, \"photfile\" must be'", "+", "'defined.'", ")", "trsig", "=", "TransitSignal", ".", "from_ascii", "(", "photfile", ",", "P", "=", "period", ",", "name", "=", "name", ")", "if", "not", "trsig", ".", "hasMCMC", "or", "refit_trap", ":", "logging", ".", "info", "(", "'Fitting transitsignal with MCMC...'", ")", "trsig", ".", "MCMC", "(", ")", "trsig", ".", "save", "(", "trsig_file", ")", "# Create (or load) PopulationSet", "do_only", "=", "DEFAULT_MODELS", "if", "os", ".", "path", ".", "exists", "(", "popset_file", ")", ":", "if", "recalc", ":", "os", ".", "remove", "(", "popset_file", ")", "else", ":", "with", "pd", ".", "HDFStore", "(", "popset_file", ")", "as", "store", ":", "do_only", "=", "[", "m", "for", "m", "in", "DEFAULT_MODELS", "if", "m", "not", "in", "store", "]", "# Check that properties of saved population match requested", "try", ":", "popset", "=", "PopulationSet", ".", "load_hdf", "(", "popset_file", ")", "for", "pop", "in", "popset", ".", "poplist", ":", "if", "pop", ".", "cadence", "!=", "cadence", ":", "raise", "ValueError", "(", "'Requested cadence ({}) '", ".", "format", "(", "cadence", ")", "+", "'does not match stored {})! 
Set recalc=True.'", ".", "format", "(", "pop", ".", "cadence", ")", ")", "except", ":", "raise", "if", "do_only", ":", "logging", ".", "info", "(", "'Generating {} models for PopulationSet...'", ".", "format", "(", "do_only", ")", ")", "else", ":", "logging", ".", "info", "(", "'Populations ({}) already generated.'", ".", "format", "(", "DEFAULT_MODELS", ")", ")", "popset", "=", "PopulationSet", "(", "period", "=", "period", ",", "cadence", "=", "cadence", ",", "mags", "=", "single_starmodel", ".", "mags", ",", "ra", "=", "ra", ",", "dec", "=", "dec", ",", "trilegal_filename", "=", "starfield_file", ",", "# Maybe change parameter name?", "starmodel", "=", "single_starmodel", ",", "binary_starmodel", "=", "binary_starmodel", ",", "triple_starmodel", "=", "triple_starmodel", ",", "rprs", "=", "rprs", ",", "do_only", "=", "do_only", ",", "savefile", "=", "popset_file", ",", "*", "*", "kwargs", ")", "fpp", "=", "cls", "(", "trsig", ",", "popset", ",", "folder", "=", "folder", ")", "#############", "# Apply constraints", "# Exclusion radius", "maxrad", "=", "float", "(", "config", "[", "'constraints'", "]", "[", "'maxrad'", "]", ")", "fpp", ".", "set_maxrad", "(", "maxrad", ")", "if", "'secthresh'", "in", "config", "[", "'constraints'", "]", ":", "secthresh", "=", "float", "(", "config", "[", "'constraints'", "]", "[", "'secthresh'", "]", ")", "if", "not", "np", ".", "isnan", "(", "secthresh", ")", ":", "fpp", ".", "apply_secthresh", "(", "secthresh", ")", "# Odd-even constraint", "diff", "=", "3", "*", "np", ".", "max", "(", "trsig", ".", "depthfit", "[", "1", "]", ")", "fpp", ".", "constrain_oddeven", "(", "diff", ")", "#apply contrast curve constraints if present", "if", "'ccfiles'", "in", "config", "[", "'constraints'", "]", ":", "ccfiles", "=", "config", "[", "'constraints'", "]", "[", "'ccfiles'", "]", "if", "isinstance", "(", "ccfiles", ",", "string_types", ")", ":", "ccfiles", "=", "[", "ccfiles", "]", "for", "ccfile", "in", "ccfiles", ":", "if", "not", "os", ".", "path", ".", "isabs", "(", "ccfile", ")", ":", "ccfile", "=", "os", ".", "path", ".", "join", "(", "folder", ",", "ccfile", ")", "m", "=", "re", ".", "search", "(", "'(\\w+)_(\\w+)\\.cc'", ",", "os", ".", "path", ".", "basename", "(", "ccfile", ")", ")", "if", "not", "m", ":", "logging", ".", "warning", "(", "'Invalid CC filename ({}); '", ".", "format", "(", "ccfile", ")", "+", "'skipping.'", ")", "continue", "else", ":", "band", "=", "m", ".", "group", "(", "2", ")", "inst", "=", "m", ".", "group", "(", "1", ")", "name", "=", "'{} {}-band'", ".", "format", "(", "inst", ",", "band", ")", "cc", "=", "ContrastCurveFromFile", "(", "ccfile", ",", "band", ",", "name", "=", "name", ")", "fpp", ".", "apply_cc", "(", "cc", ")", "#apply \"velocity contrast curve\" if present", "if", "'vcc'", "in", "config", "[", "'constraints'", "]", ":", "dv", "=", "float", "(", "config", "[", "'constraints'", "]", "[", "'vcc'", "]", "[", "0", "]", ")", "dmag", "=", "float", "(", "config", "[", "'constraints'", "]", "[", "'vcc'", "]", "[", "1", "]", ")", "vcc", "=", "VelocityContrastCurve", "(", "dv", ",", "dmag", ")", "fpp", ".", "apply_vcc", "(", "vcc", ")", "return", "fpp" ]
38.691057
22.186992
def parse(self, uri, defaults=None): """Parse the URI. uri is the uri to parse. defaults is a scheme-dependent list of values to use if there is no value for that part in the supplied URI. The return value is a tuple of scheme-dependent length. """ return tuple([self.scheme_of(uri)] + list(self.parser_for(uri)(defaults).parse(uri)))
[ "def", "parse", "(", "self", ",", "uri", ",", "defaults", "=", "None", ")", ":", "return", "tuple", "(", "[", "self", ".", "scheme_of", "(", "uri", ")", "]", "+", "list", "(", "self", ".", "parser_for", "(", "uri", ")", "(", "defaults", ")", ".", "parse", "(", "uri", ")", ")", ")" ]
38.3
21.1
def get_stacked_rnn(config: RNNConfig, prefix: str, parallel_inputs: bool = False, layers: Optional[Iterable[int]] = None) -> mx.rnn.SequentialRNNCell: """ Returns (stacked) RNN cell given parameters. :param config: rnn configuration. :param prefix: Symbol prefix for RNN. :param parallel_inputs: Support parallel inputs for the stacked RNN cells. :param layers: Specify which layers to create as a list of layer indexes. :return: RNN cell. """ rnn = mx.rnn.SequentialRNNCell() if not parallel_inputs else SequentialRNNCellParallelInput() if not layers: layers = range(config.num_layers) for layer_idx in layers: # fhieber: the 'l' in the prefix does NOT stand for 'layer' but for the direction 'l' as in mx.rnn.rnn_cell::517 # this ensures parameter name compatibility of training w/ FusedRNN and decoding with 'unfused' RNN. cell_prefix = "%sl%d_" % (prefix, layer_idx) if config.cell_type == C.LSTM_TYPE: if config.dropout_recurrent > 0.0: cell = RecurrentDropoutLSTMCell(num_hidden=config.num_hidden, prefix=cell_prefix, forget_bias=config.forget_bias, dropout=config.dropout_recurrent) else: cell = mx.rnn.LSTMCell(num_hidden=config.num_hidden, prefix=cell_prefix, forget_bias=config.forget_bias) elif config.cell_type == C.LNLSTM_TYPE: cell = LayerNormLSTMCell(num_hidden=config.num_hidden, prefix=cell_prefix, forget_bias=config.forget_bias) elif config.cell_type == C.LNGLSTM_TYPE: cell = LayerNormPerGateLSTMCell(num_hidden=config.num_hidden, prefix=cell_prefix, forget_bias=config.forget_bias) elif config.cell_type == C.GRU_TYPE: cell = mx.rnn.GRUCell(num_hidden=config.num_hidden, prefix=cell_prefix) elif config.cell_type == C.LNGRU_TYPE: cell = LayerNormGRUCell(num_hidden=config.num_hidden, prefix=cell_prefix) elif config.cell_type == C.LNGGRU_TYPE: cell = LayerNormPerGateGRUCell(num_hidden=config.num_hidden, prefix=cell_prefix) else: raise NotImplementedError() if config.dropout_inputs > 0 or config.dropout_states > 0: cell = VariationalDropoutCell(cell, dropout_inputs=config.dropout_inputs, dropout_states=config.dropout_states) if config.lhuc: cell = LHUCCell(cell, config.num_hidden, config.dtype) # layer_idx is 0 based, whereas first_residual_layer is 1-based if config.residual and layer_idx + 1 >= config.first_residual_layer: cell = mx.rnn.ResidualCell(cell) if not parallel_inputs else ResidualCellParallelInput(cell) elif parallel_inputs: cell = ParallelInputCell(cell) rnn.add(cell) return rnn
[ "def", "get_stacked_rnn", "(", "config", ":", "RNNConfig", ",", "prefix", ":", "str", ",", "parallel_inputs", ":", "bool", "=", "False", ",", "layers", ":", "Optional", "[", "Iterable", "[", "int", "]", "]", "=", "None", ")", "->", "mx", ".", "rnn", ".", "SequentialRNNCell", ":", "rnn", "=", "mx", ".", "rnn", ".", "SequentialRNNCell", "(", ")", "if", "not", "parallel_inputs", "else", "SequentialRNNCellParallelInput", "(", ")", "if", "not", "layers", ":", "layers", "=", "range", "(", "config", ".", "num_layers", ")", "for", "layer_idx", "in", "layers", ":", "# fhieber: the 'l' in the prefix does NOT stand for 'layer' but for the direction 'l' as in mx.rnn.rnn_cell::517", "# this ensures parameter name compatibility of training w/ FusedRNN and decoding with 'unfused' RNN.", "cell_prefix", "=", "\"%sl%d_\"", "%", "(", "prefix", ",", "layer_idx", ")", "if", "config", ".", "cell_type", "==", "C", ".", "LSTM_TYPE", ":", "if", "config", ".", "dropout_recurrent", ">", "0.0", ":", "cell", "=", "RecurrentDropoutLSTMCell", "(", "num_hidden", "=", "config", ".", "num_hidden", ",", "prefix", "=", "cell_prefix", ",", "forget_bias", "=", "config", ".", "forget_bias", ",", "dropout", "=", "config", ".", "dropout_recurrent", ")", "else", ":", "cell", "=", "mx", ".", "rnn", ".", "LSTMCell", "(", "num_hidden", "=", "config", ".", "num_hidden", ",", "prefix", "=", "cell_prefix", ",", "forget_bias", "=", "config", ".", "forget_bias", ")", "elif", "config", ".", "cell_type", "==", "C", ".", "LNLSTM_TYPE", ":", "cell", "=", "LayerNormLSTMCell", "(", "num_hidden", "=", "config", ".", "num_hidden", ",", "prefix", "=", "cell_prefix", ",", "forget_bias", "=", "config", ".", "forget_bias", ")", "elif", "config", ".", "cell_type", "==", "C", ".", "LNGLSTM_TYPE", ":", "cell", "=", "LayerNormPerGateLSTMCell", "(", "num_hidden", "=", "config", ".", "num_hidden", ",", "prefix", "=", "cell_prefix", ",", "forget_bias", "=", "config", ".", "forget_bias", ")", "elif", "config", ".", "cell_type", "==", "C", ".", "GRU_TYPE", ":", "cell", "=", "mx", ".", "rnn", ".", "GRUCell", "(", "num_hidden", "=", "config", ".", "num_hidden", ",", "prefix", "=", "cell_prefix", ")", "elif", "config", ".", "cell_type", "==", "C", ".", "LNGRU_TYPE", ":", "cell", "=", "LayerNormGRUCell", "(", "num_hidden", "=", "config", ".", "num_hidden", ",", "prefix", "=", "cell_prefix", ")", "elif", "config", ".", "cell_type", "==", "C", ".", "LNGGRU_TYPE", ":", "cell", "=", "LayerNormPerGateGRUCell", "(", "num_hidden", "=", "config", ".", "num_hidden", ",", "prefix", "=", "cell_prefix", ")", "else", ":", "raise", "NotImplementedError", "(", ")", "if", "config", ".", "dropout_inputs", ">", "0", "or", "config", ".", "dropout_states", ">", "0", ":", "cell", "=", "VariationalDropoutCell", "(", "cell", ",", "dropout_inputs", "=", "config", ".", "dropout_inputs", ",", "dropout_states", "=", "config", ".", "dropout_states", ")", "if", "config", ".", "lhuc", ":", "cell", "=", "LHUCCell", "(", "cell", ",", "config", ".", "num_hidden", ",", "config", ".", "dtype", ")", "# layer_idx is 0 based, whereas first_residual_layer is 1-based", "if", "config", ".", "residual", "and", "layer_idx", "+", "1", ">=", "config", ".", "first_residual_layer", ":", "cell", "=", "mx", ".", "rnn", ".", "ResidualCell", "(", "cell", ")", "if", "not", "parallel_inputs", "else", "ResidualCellParallelInput", "(", "cell", ")", "elif", "parallel_inputs", ":", "cell", "=", "ParallelInputCell", "(", "cell", ")", "rnn", ".", "add", "(", "cell", ")", "return", "rnn" ]
50.913793
29.258621
def _derX(self,x,y): ''' Returns the first derivative of the function with respect to X at each value in (x,y). Only called internally by HARKinterpolator2D._derX. ''' m = len(x) temp = np.zeros((m,self.funcCount)) for j in range(self.funcCount): temp[:,j] = self.functions[j](x,y) temp[np.isnan(temp)] = np.inf i = np.argmin(temp,axis=1) dfdx = np.zeros_like(x) for j in range(self.funcCount): c = i == j dfdx[c] = self.functions[j].derivativeX(x[c],y[c]) return dfdx
[ "def", "_derX", "(", "self", ",", "x", ",", "y", ")", ":", "m", "=", "len", "(", "x", ")", "temp", "=", "np", ".", "zeros", "(", "(", "m", ",", "self", ".", "funcCount", ")", ")", "for", "j", "in", "range", "(", "self", ".", "funcCount", ")", ":", "temp", "[", ":", ",", "j", "]", "=", "self", ".", "functions", "[", "j", "]", "(", "x", ",", "y", ")", "temp", "[", "np", ".", "isnan", "(", "temp", ")", "]", "=", "np", ".", "inf", "i", "=", "np", ".", "argmin", "(", "temp", ",", "axis", "=", "1", ")", "dfdx", "=", "np", ".", "zeros_like", "(", "x", ")", "for", "j", "in", "range", "(", "self", ".", "funcCount", ")", ":", "c", "=", "i", "==", "j", "dfdx", "[", "c", "]", "=", "self", ".", "functions", "[", "j", "]", ".", "derivativeX", "(", "x", "[", "c", "]", ",", "y", "[", "c", "]", ")", "return", "dfdx" ]
36.625
16.5
def run(self, args): """Flashes the device connected to the J-Link. Args: self (FlashCommand): the ``FlashCommand`` instance args (Namespace): the arguments passed on the command-line Returns: ``None`` """ kwargs = {} kwargs['path'] = args.file[0] kwargs['addr'] = args.addr kwargs['on_progress'] = pylink.util.flash_progress_callback jlink = self.create_jlink(args) _ = jlink.flash_file(**kwargs) print('Flashed device successfully.')
[ "def", "run", "(", "self", ",", "args", ")", ":", "kwargs", "=", "{", "}", "kwargs", "[", "'path'", "]", "=", "args", ".", "file", "[", "0", "]", "kwargs", "[", "'addr'", "]", "=", "args", ".", "addr", "kwargs", "[", "'on_progress'", "]", "=", "pylink", ".", "util", ".", "flash_progress_callback", "jlink", "=", "self", ".", "create_jlink", "(", "args", ")", "_", "=", "jlink", ".", "flash_file", "(", "*", "*", "kwargs", ")", "print", "(", "'Flashed device successfully.'", ")" ]
29.944444
18.111111
def create_app(settings): """Create a new Flask application""" app = Flask(__name__) # Import settings from file for name in dir(settings): value = getattr(settings, name) if not (name.startswith('_') or isinstance(value, ModuleType) or isinstance(value, FunctionType)): app.config[name] = value # Bootstrapping if 'INSTALLED_APPS' in app.config: app.installed_apps = app.config.get('INSTALLED_APPS', []) # Extensions Funnel(app) Mobility(app) # Register blueprints for app_path in app.installed_apps: app.register_blueprint( getattr(__import__('{0}.views'.format(app_path), fromlist=['blueprint']), 'blueprint')) # Register error handlers register_error_handlers(app) @app.context_processor def context_processor(): return dict(config=app.config) @app.teardown_request def teardown_request(exception=None): # Remove the database session if it exists if hasattr(app, 'db_session'): app.db_session.close() return app
[ "def", "create_app", "(", "settings", ")", ":", "app", "=", "Flask", "(", "__name__", ")", "# Import settings from file", "for", "name", "in", "dir", "(", "settings", ")", ":", "value", "=", "getattr", "(", "settings", ",", "name", ")", "if", "not", "(", "name", ".", "startswith", "(", "'_'", ")", "or", "isinstance", "(", "value", ",", "ModuleType", ")", "or", "isinstance", "(", "value", ",", "FunctionType", ")", ")", ":", "app", ".", "config", "[", "name", "]", "=", "value", "# Bootstrapping", "if", "'INSTALLED_APPS'", "in", "app", ".", "config", ":", "app", ".", "installed_apps", "=", "app", ".", "config", ".", "get", "(", "'INSTALLED_APPS'", ",", "[", "]", ")", "# Extensions", "Funnel", "(", "app", ")", "Mobility", "(", "app", ")", "# Register blueprints", "for", "app_path", "in", "app", ".", "installed_apps", ":", "app", ".", "register_blueprint", "(", "getattr", "(", "__import__", "(", "'{0}.views'", ".", "format", "(", "app_path", ")", ",", "fromlist", "=", "[", "'blueprint'", "]", ")", ",", "'blueprint'", ")", ")", "# Register error handlers", "register_error_handlers", "(", "app", ")", "@", "app", ".", "context_processor", "def", "context_processor", "(", ")", ":", "return", "dict", "(", "config", "=", "app", ".", "config", ")", "@", "app", ".", "teardown_request", "def", "teardown_request", "(", "exception", "=", "None", ")", ":", "# Remove the database session if it exists", "if", "hasattr", "(", "app", ",", "'db_session'", ")", ":", "app", ".", "db_session", ".", "close", "(", ")", "return", "app" ]
27.875
17.725
def start(self, threaded=True): """Start the data feed. :param threaded: If True, run in a separate thread. """ self.running = True if threaded: self._thread = Thread(target=self._run) self._thread.start() else: self._run()
[ "def", "start", "(", "self", ",", "threaded", "=", "True", ")", ":", "self", ".", "running", "=", "True", "if", "threaded", ":", "self", ".", "_thread", "=", "Thread", "(", "target", "=", "self", ".", "_run", ")", "self", ".", "_thread", ".", "start", "(", ")", "else", ":", "self", ".", "_run", "(", ")" ]
27.090909
14.909091
def with_color_stripped(f): """ A function decorator for applying to `len` or imitators thereof that strips ANSI color sequences from a string before passing it on. If any color sequences are not followed by a reset sequence, an `UnterminatedColorError` is raised. """ @wraps(f) def colored_len(s): s2 = re.sub( COLOR_BEGIN_RGX + '(.*?)' + COLOR_END_RGX, lambda m: re.sub(COLOR_BEGIN_RGX, '', m.group(1)), s, ) if re.search(COLOR_BEGIN_RGX, s2): raise UnterminatedColorError(s) return f(re.sub(COLOR_END_RGX, '', s2)) return colored_len
[ "def", "with_color_stripped", "(", "f", ")", ":", "@", "wraps", "(", "f", ")", "def", "colored_len", "(", "s", ")", ":", "s2", "=", "re", ".", "sub", "(", "COLOR_BEGIN_RGX", "+", "'(.*?)'", "+", "COLOR_END_RGX", ",", "lambda", "m", ":", "re", ".", "sub", "(", "COLOR_BEGIN_RGX", ",", "''", ",", "m", ".", "group", "(", "1", ")", ")", ",", "s", ",", ")", "if", "re", ".", "search", "(", "COLOR_BEGIN_RGX", ",", "s2", ")", ":", "raise", "UnterminatedColorError", "(", "s", ")", "return", "f", "(", "re", ".", "sub", "(", "COLOR_END_RGX", ",", "''", ",", "s2", ")", ")", "return", "colored_len" ]
35.333333
18.777778
def check_longitude(self, ds): ''' Check variable(s) that define longitude and are defined correctly according to CF. CF §4.2 Variables representing longitude must always explicitly include the units attribute; there is no default value. The recommended unit of longitude is degrees_east. Also acceptable are degree_east, degree_E, degrees_E, degreeE, and degreesE. Optionally, the longitude type may be indicated additionally by providing the standard_name attribute with the value longitude, and/or the axis attribute with the value X. - Four checks per longitude variable - (H) longitude has units attribute - (M) longitude has an allowed units attribute - (L) longitude uses degrees_east (if not in rotated pole) - (M) longitude defines either standard_name or axis :param netCDF4.Dataset ds: An open netCDF dataset :rtype: list :return: List of results ''' # TODO we already have a check_latitude... I'm sure we can make DRYer ret_val = [] allowed_lon_units = [ 'degrees_east', 'degree_east', 'degree_e', 'degrees_e', 'degreee', 'degreese' ] # Determine the grid mappings in this dataset grid_mapping = [] grid_mapping_variables = cfutil.get_grid_mapping_variables(ds) for name in grid_mapping_variables: variable = ds.variables[name] grid_mapping_name = getattr(variable, 'grid_mapping_name', None) if grid_mapping_name: grid_mapping.append(grid_mapping_name) longitude_variables = cfutil.get_longitude_variables(ds) for longitude in longitude_variables: variable = ds.variables[longitude] units = getattr(variable, 'units', None) units_is_string = isinstance(units, basestring) standard_name = getattr(variable, 'standard_name', None) axis = getattr(variable, 'axis', None) # NOTE see docstring--should below be 4.1 or 4.2? # Check that longitude defines units valid_longitude = TestCtx(BaseCheck.HIGH, self.section_titles['4.1']) valid_longitude.assert_true(units is not None, "longitude variable '{}' must define units".format(longitude)) ret_val.append(valid_longitude.to_result()) # Check that longitude uses allowed units allowed_units = TestCtx(BaseCheck.MEDIUM, self.section_titles['4.1']) if standard_name == 'grid_longitude': e_n_units = cfutil.VALID_LAT_UNITS | cfutil.VALID_LON_UNITS # check that the units aren't in east and north degrees units, # but are convertible to angular units allowed_units.assert_true(units not in e_n_units and Unit(units) == Unit('degree'), "Grid longitude variable '{}' should use degree equivalent units without east or north components. " "Current units are {}".format(longitude, units)) else: allowed_units.assert_true(units_is_string and units.lower() in allowed_lon_units, "longitude variable '{}' should define valid units for longitude" "".format(longitude)) ret_val.append(allowed_units.to_result()) # Check that longitude uses degrees_east if standard_name == 'longitude' and units != 'degrees_east': # This is only a recommendation and we won't penalize but we # will include a recommended action. msg = ("CF recommends longitude variable '{}' to use units degrees_east" "".format(longitude)) recommended_units = Result(BaseCheck.LOW, (1, 1), self.section_titles['4.1'], [msg]) ret_val.append(recommended_units) x_variables = ds.get_variables_by_attributes(axis='X') # Check that longitude defines either standard_name or axis definition = TestCtx(BaseCheck.MEDIUM, self.section_titles['4.1']) definition.assert_true(standard_name == 'longitude' or axis == 'Y' or x_variables != [], "longitude variable '{}' should define standard_name='longitude' or axis='X'" "".format(longitude)) ret_val.append(definition.to_result()) return ret_val
[ "def", "check_longitude", "(", "self", ",", "ds", ")", ":", "# TODO we already have a check_latitude... I'm sure we can make DRYer", "ret_val", "=", "[", "]", "allowed_lon_units", "=", "[", "'degrees_east'", ",", "'degree_east'", ",", "'degree_e'", ",", "'degrees_e'", ",", "'degreee'", ",", "'degreese'", "]", "# Determine the grid mappings in this dataset", "grid_mapping", "=", "[", "]", "grid_mapping_variables", "=", "cfutil", ".", "get_grid_mapping_variables", "(", "ds", ")", "for", "name", "in", "grid_mapping_variables", ":", "variable", "=", "ds", ".", "variables", "[", "name", "]", "grid_mapping_name", "=", "getattr", "(", "variable", ",", "'grid_mapping_name'", ",", "None", ")", "if", "grid_mapping_name", ":", "grid_mapping", ".", "append", "(", "grid_mapping_name", ")", "longitude_variables", "=", "cfutil", ".", "get_longitude_variables", "(", "ds", ")", "for", "longitude", "in", "longitude_variables", ":", "variable", "=", "ds", ".", "variables", "[", "longitude", "]", "units", "=", "getattr", "(", "variable", ",", "'units'", ",", "None", ")", "units_is_string", "=", "isinstance", "(", "units", ",", "basestring", ")", "standard_name", "=", "getattr", "(", "variable", ",", "'standard_name'", ",", "None", ")", "axis", "=", "getattr", "(", "variable", ",", "'axis'", ",", "None", ")", "# NOTE see docstring--should below be 4.1 or 4.2?", "# Check that longitude defines units", "valid_longitude", "=", "TestCtx", "(", "BaseCheck", ".", "HIGH", ",", "self", ".", "section_titles", "[", "'4.1'", "]", ")", "valid_longitude", ".", "assert_true", "(", "units", "is", "not", "None", ",", "\"longitude variable '{}' must define units\"", ".", "format", "(", "longitude", ")", ")", "ret_val", ".", "append", "(", "valid_longitude", ".", "to_result", "(", ")", ")", "# Check that longitude uses allowed units", "allowed_units", "=", "TestCtx", "(", "BaseCheck", ".", "MEDIUM", ",", "self", ".", "section_titles", "[", "'4.1'", "]", ")", "if", "standard_name", "==", "'grid_longitude'", ":", "e_n_units", "=", "cfutil", ".", "VALID_LAT_UNITS", "|", "cfutil", ".", "VALID_LON_UNITS", "# check that the units aren't in east and north degrees units,", "# but are convertible to angular units", "allowed_units", ".", "assert_true", "(", "units", "not", "in", "e_n_units", "and", "Unit", "(", "units", ")", "==", "Unit", "(", "'degree'", ")", ",", "\"Grid longitude variable '{}' should use degree equivalent units without east or north components. 
\"", "\"Current units are {}\"", ".", "format", "(", "longitude", ",", "units", ")", ")", "else", ":", "allowed_units", ".", "assert_true", "(", "units_is_string", "and", "units", ".", "lower", "(", ")", "in", "allowed_lon_units", ",", "\"longitude variable '{}' should define valid units for longitude\"", "\"\"", ".", "format", "(", "longitude", ")", ")", "ret_val", ".", "append", "(", "allowed_units", ".", "to_result", "(", ")", ")", "# Check that longitude uses degrees_east", "if", "standard_name", "==", "'longitude'", "and", "units", "!=", "'degrees_east'", ":", "# This is only a recommendation and we won't penalize but we", "# will include a recommended action.", "msg", "=", "(", "\"CF recommends longitude variable '{}' to use units degrees_east\"", "\"\"", ".", "format", "(", "longitude", ")", ")", "recommended_units", "=", "Result", "(", "BaseCheck", ".", "LOW", ",", "(", "1", ",", "1", ")", ",", "self", ".", "section_titles", "[", "'4.1'", "]", ",", "[", "msg", "]", ")", "ret_val", ".", "append", "(", "recommended_units", ")", "x_variables", "=", "ds", ".", "get_variables_by_attributes", "(", "axis", "=", "'X'", ")", "# Check that longitude defines either standard_name or axis", "definition", "=", "TestCtx", "(", "BaseCheck", ".", "MEDIUM", ",", "self", ".", "section_titles", "[", "'4.1'", "]", ")", "definition", ".", "assert_true", "(", "standard_name", "==", "'longitude'", "or", "axis", "==", "'Y'", "or", "x_variables", "!=", "[", "]", ",", "\"longitude variable '{}' should define standard_name='longitude' or axis='X'\"", "\"\"", ".", "format", "(", "longitude", ")", ")", "ret_val", ".", "append", "(", "definition", ".", "to_result", "(", ")", ")", "return", "ret_val" ]
49.1875
26.625
def get_mac_address_table_output_has_more(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_mac_address_table = ET.Element("get_mac_address_table") config = get_mac_address_table output = ET.SubElement(get_mac_address_table, "output") has_more = ET.SubElement(output, "has-more") has_more.text = kwargs.pop('has_more') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "get_mac_address_table_output_has_more", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "get_mac_address_table", "=", "ET", ".", "Element", "(", "\"get_mac_address_table\"", ")", "config", "=", "get_mac_address_table", "output", "=", "ET", ".", "SubElement", "(", "get_mac_address_table", ",", "\"output\"", ")", "has_more", "=", "ET", ".", "SubElement", "(", "output", ",", "\"has-more\"", ")", "has_more", ".", "text", "=", "kwargs", ".", "pop", "(", "'has_more'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
40.833333
13.083333
def search_extension(self, limit=100, offset=0, **kw): """Search the list of available extensions.""" response = self.request(E.searchExtensionRequest( E.limit(limit), E.offset(offset), E.withDescription(int(kw.get('with_description', 0))), E.withPrice(int(kw.get('with_price', 0))), E.withUsageCount(int(kw.get('with_usage_count', 0))), )) return response.as_models(Extension)
[ "def", "search_extension", "(", "self", ",", "limit", "=", "100", ",", "offset", "=", "0", ",", "*", "*", "kw", ")", ":", "response", "=", "self", ".", "request", "(", "E", ".", "searchExtensionRequest", "(", "E", ".", "limit", "(", "limit", ")", ",", "E", ".", "offset", "(", "offset", ")", ",", "E", ".", "withDescription", "(", "int", "(", "kw", ".", "get", "(", "'with_description'", ",", "0", ")", ")", ")", ",", "E", ".", "withPrice", "(", "int", "(", "kw", ".", "get", "(", "'with_price'", ",", "0", ")", ")", ")", ",", "E", ".", "withUsageCount", "(", "int", "(", "kw", ".", "get", "(", "'with_usage_count'", ",", "0", ")", ")", ")", ",", ")", ")", "return", "response", ".", "as_models", "(", "Extension", ")" ]
41.818182
17.636364
def message_to_dict(msg): """Convert an email message into a dictionary. This function transforms an `email.message.Message` object into a dictionary. Headers are stored as key:value pairs while the body of the message is stored inside `body` key. Body may have two other keys inside, 'plain', for plain body messages and 'html', for HTML encoded messages. The returned dictionary has the type `requests.structures.CaseInsensitiveDict` due to same headers with different case formats can appear in the same message. :param msg: email message of type `email.message.Message` :returns : dictionary of type `requests.structures.CaseInsensitiveDict` :raises ParseError: when an error occurs transforming the message to a dictionary """ def parse_headers(msg): headers = {} for header, value in msg.items(): hv = [] for text, charset in email.header.decode_header(value): if type(text) == bytes: charset = charset if charset else 'utf-8' try: text = text.decode(charset, errors='surrogateescape') except (UnicodeError, LookupError): # Try again with a 7bit encoding text = text.decode('ascii', errors='surrogateescape') hv.append(text) v = ' '.join(hv) headers[header] = v if v else None return headers def parse_payload(msg): body = {} if not msg.is_multipart(): payload = decode_payload(msg) subtype = msg.get_content_subtype() body[subtype] = [payload] else: # Include all the attached texts if it is multipart # Ignores binary parts by default for part in email.iterators.typed_subpart_iterator(msg): payload = decode_payload(part) subtype = part.get_content_subtype() body.setdefault(subtype, []).append(payload) return {k: '\n'.join(v) for k, v in body.items()} def decode_payload(msg_or_part): charset = msg_or_part.get_content_charset('utf-8') payload = msg_or_part.get_payload(decode=True) try: payload = payload.decode(charset, errors='surrogateescape') except (UnicodeError, LookupError): # Try again with a 7bit encoding payload = payload.decode('ascii', errors='surrogateescape') return payload # The function starts here message = requests.structures.CaseInsensitiveDict() if isinstance(msg, mailbox.mboxMessage): message['unixfrom'] = msg.get_from() else: message['unixfrom'] = None try: for k, v in parse_headers(msg).items(): message[k] = v message['body'] = parse_payload(msg) except UnicodeError as e: raise ParseError(cause=str(e)) return message
[ "def", "message_to_dict", "(", "msg", ")", ":", "def", "parse_headers", "(", "msg", ")", ":", "headers", "=", "{", "}", "for", "header", ",", "value", "in", "msg", ".", "items", "(", ")", ":", "hv", "=", "[", "]", "for", "text", ",", "charset", "in", "email", ".", "header", ".", "decode_header", "(", "value", ")", ":", "if", "type", "(", "text", ")", "==", "bytes", ":", "charset", "=", "charset", "if", "charset", "else", "'utf-8'", "try", ":", "text", "=", "text", ".", "decode", "(", "charset", ",", "errors", "=", "'surrogateescape'", ")", "except", "(", "UnicodeError", ",", "LookupError", ")", ":", "# Try again with a 7bit encoding", "text", "=", "text", ".", "decode", "(", "'ascii'", ",", "errors", "=", "'surrogateescape'", ")", "hv", ".", "append", "(", "text", ")", "v", "=", "' '", ".", "join", "(", "hv", ")", "headers", "[", "header", "]", "=", "v", "if", "v", "else", "None", "return", "headers", "def", "parse_payload", "(", "msg", ")", ":", "body", "=", "{", "}", "if", "not", "msg", ".", "is_multipart", "(", ")", ":", "payload", "=", "decode_payload", "(", "msg", ")", "subtype", "=", "msg", ".", "get_content_subtype", "(", ")", "body", "[", "subtype", "]", "=", "[", "payload", "]", "else", ":", "# Include all the attached texts if it is multipart", "# Ignores binary parts by default", "for", "part", "in", "email", ".", "iterators", ".", "typed_subpart_iterator", "(", "msg", ")", ":", "payload", "=", "decode_payload", "(", "part", ")", "subtype", "=", "part", ".", "get_content_subtype", "(", ")", "body", ".", "setdefault", "(", "subtype", ",", "[", "]", ")", ".", "append", "(", "payload", ")", "return", "{", "k", ":", "'\\n'", ".", "join", "(", "v", ")", "for", "k", ",", "v", "in", "body", ".", "items", "(", ")", "}", "def", "decode_payload", "(", "msg_or_part", ")", ":", "charset", "=", "msg_or_part", ".", "get_content_charset", "(", "'utf-8'", ")", "payload", "=", "msg_or_part", ".", "get_payload", "(", "decode", "=", "True", ")", "try", ":", "payload", "=", "payload", ".", "decode", "(", "charset", ",", "errors", "=", "'surrogateescape'", ")", "except", "(", "UnicodeError", ",", "LookupError", ")", ":", "# Try again with a 7bit encoding", "payload", "=", "payload", ".", "decode", "(", "'ascii'", ",", "errors", "=", "'surrogateescape'", ")", "return", "payload", "# The function starts here", "message", "=", "requests", ".", "structures", ".", "CaseInsensitiveDict", "(", ")", "if", "isinstance", "(", "msg", ",", "mailbox", ".", "mboxMessage", ")", ":", "message", "[", "'unixfrom'", "]", "=", "msg", ".", "get_from", "(", ")", "else", ":", "message", "[", "'unixfrom'", "]", "=", "None", "try", ":", "for", "k", ",", "v", "in", "parse_headers", "(", "msg", ")", ".", "items", "(", ")", ":", "message", "[", "k", "]", "=", "v", "message", "[", "'body'", "]", "=", "parse_payload", "(", "msg", ")", "except", "UnicodeError", "as", "e", ":", "raise", "ParseError", "(", "cause", "=", "str", "(", "e", ")", ")", "return", "message" ]
34.247059
21.129412
def Start(self): """Issue a request to list the directory.""" self.CallClient( server_stubs.PlistQuery, request=self.args.request, next_state="Receive")
[ "def", "Start", "(", "self", ")", ":", "self", ".", "CallClient", "(", "server_stubs", ".", "PlistQuery", ",", "request", "=", "self", ".", "args", ".", "request", ",", "next_state", "=", "\"Receive\"", ")" ]
29.833333
11.5
def on_key_press(self, symbol, modifiers): """ Pyglet specific key press callback. Forwards and translates the events to the example """ self.example.key_event(symbol, self.keys.ACTION_PRESS)
[ "def", "on_key_press", "(", "self", ",", "symbol", ",", "modifiers", ")", ":", "self", ".", "example", ".", "key_event", "(", "symbol", ",", "self", ".", "keys", ".", "ACTION_PRESS", ")" ]
38.5
7.833333
def are_diphtong(tokenA, tokenB): """ Check (naively) whether the two tokens can form a diphtong. This would be a sequence of vowels of which no more than one is syllabic. Vowel sequences connected with a tie bar would already be handled in tokenise_word, so are not checked for here. Users who want more sophisticated diphtong detection should instead write their own function and do something like:: tokenise(string, diphtong=False, merge=user_func) Helper for tokenise(string, ..). """ is_short = lambda token: '◌̯'[1] in token subtokens = [] for char in tokenA+tokenB: if ipa.is_vowel(char): subtokens.append(char) elif ipa.is_diacritic(char) or ipa.is_length(char): if subtokens: subtokens[-1] += char else: break else: break else: if len([x for x in subtokens if not is_short(x)]) < 2: return True return False
[ "def", "are_diphtong", "(", "tokenA", ",", "tokenB", ")", ":", "is_short", "=", "lambda", "token", ":", "'◌̯'[1]", " ", "i", "n", "to", "en", "subtokens", "=", "[", "]", "for", "char", "in", "tokenA", "+", "tokenB", ":", "if", "ipa", ".", "is_vowel", "(", "char", ")", ":", "subtokens", ".", "append", "(", "char", ")", "elif", "ipa", ".", "is_diacritic", "(", "char", ")", "or", "ipa", ".", "is_length", "(", "char", ")", ":", "if", "subtokens", ":", "subtokens", "[", "-", "1", "]", "+=", "char", "else", ":", "break", "else", ":", "break", "else", ":", "if", "len", "(", "[", "x", "for", "x", "in", "subtokens", "if", "not", "is_short", "(", "x", ")", "]", ")", "<", "2", ":", "return", "True", "return", "False" ]
26.15625
23.09375
def find_modules(import_path, include_packages=False, recursive=False): """Finds all the modules below a package. This can be useful to automatically import all views / controllers so that their metaclasses / function decorators have a chance to register themselves on the application. Packages are not returned unless `include_packages` is `True`. This can also recursively list modules but in that case it will import all the packages to get the correct load path of that module. :param import_path: the dotted name for the package to find child modules. :param include_packages: set to `True` if packages should be returned, too. :param recursive: set to `True` if recursion should happen. :return: generator """ module = import_string(import_path) path = getattr(module, "__path__", None) if path is None: raise ValueError("%r is not a package" % import_path) basename = module.__name__ + "." for _importer, modname, ispkg in pkgutil.iter_modules(path): modname = basename + modname if ispkg: if include_packages: yield modname if recursive: for item in find_modules(modname, include_packages, True): yield item else: yield modname
[ "def", "find_modules", "(", "import_path", ",", "include_packages", "=", "False", ",", "recursive", "=", "False", ")", ":", "module", "=", "import_string", "(", "import_path", ")", "path", "=", "getattr", "(", "module", ",", "\"__path__\"", ",", "None", ")", "if", "path", "is", "None", ":", "raise", "ValueError", "(", "\"%r is not a package\"", "%", "import_path", ")", "basename", "=", "module", ".", "__name__", "+", "\".\"", "for", "_importer", ",", "modname", ",", "ispkg", "in", "pkgutil", ".", "iter_modules", "(", "path", ")", ":", "modname", "=", "basename", "+", "modname", "if", "ispkg", ":", "if", "include_packages", ":", "yield", "modname", "if", "recursive", ":", "for", "item", "in", "find_modules", "(", "modname", ",", "include_packages", ",", "True", ")", ":", "yield", "item", "else", ":", "yield", "modname" ]
43.266667
20.766667
def f_preset_parameter(self, param_name, *args, **kwargs): """Presets parameter value before a parameter is added. Can be called before parameters are added to the Trajectory in order to change the values that are stored into the parameter on creation. After creation of a parameter, the instance of the parameter is called with `param.f_set(*args,**kwargs)` with `*args`, and `**kwargs` provided by the user with `f_preset_parameter`. Before an experiment is carried out it is checked if all parameters that were marked were also preset. :param param_name: The full name (!) of the parameter that is to be changed after its creation. :param args: Arguments that will be used for changing the parameter's data :param kwargs: Keyword arguments that will be used for changing the parameter's data Example: >>> traj.f_preset_parameter('groupA.param1', data=44) >>> traj.f_add_parameter('groupA.param1', data=11) >>> traj.parameters.groupA.param1 44 """ if not param_name.startswith('parameters.'): param_name = 'parameters.' + param_name self._preset(param_name, args, kwargs)
[ "def", "f_preset_parameter", "(", "self", ",", "param_name", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "not", "param_name", ".", "startswith", "(", "'parameters.'", ")", ":", "param_name", "=", "'parameters.'", "+", "param_name", "self", ".", "_preset", "(", "param_name", ",", "args", ",", "kwargs", ")" ]
32.894737
28.842105
def clone(self, **kwargs): ''' Clone this context, and return the ChildContextDict ''' child = ChildContextDict(parent=self, threadsafe=self._threadsafe, overrides=kwargs) return child
[ "def", "clone", "(", "self", ",", "*", "*", "kwargs", ")", ":", "child", "=", "ChildContextDict", "(", "parent", "=", "self", ",", "threadsafe", "=", "self", ".", "_threadsafe", ",", "overrides", "=", "kwargs", ")", "return", "child" ]
36.5
27.166667
def add_docs(self, docs): """docs is a list of fields that are a dictionary of name:value for a record.""" return self.query( 'solr', '<add>{}</add>'.format( ''.join([self._format_add(fields) for fields in docs]) ), do_post=True, )
[ "def", "add_docs", "(", "self", ",", "docs", ")", ":", "return", "self", ".", "query", "(", "'solr'", ",", "'<add>{}</add>'", ".", "format", "(", "''", ".", "join", "(", "[", "self", ".", "_format_add", "(", "fields", ")", "for", "fields", "in", "docs", "]", ")", ")", ",", "do_post", "=", "True", ",", ")" ]
34.555556
17.444444
def create_auto_scaling_group(AutoScalingGroupName=None, LaunchConfigurationName=None, InstanceId=None, MinSize=None, MaxSize=None, DesiredCapacity=None, DefaultCooldown=None, AvailabilityZones=None, LoadBalancerNames=None, TargetGroupARNs=None, HealthCheckType=None, HealthCheckGracePeriod=None, PlacementGroup=None, VPCZoneIdentifier=None, TerminationPolicies=None, NewInstancesProtectedFromScaleIn=None, Tags=None): """ Creates an Auto Scaling group with the specified name and attributes. If you exceed your maximum limit of Auto Scaling groups, which by default is 20 per region, the call fails. For information about viewing and updating this limit, see DescribeAccountLimits . For more information, see Auto Scaling Groups in the Auto Scaling User Guide . See also: AWS API Documentation Examples This example creates an Auto Scaling group. Expected Output: This example creates an Auto Scaling group and attaches the specified Classic Load Balancer. Expected Output: This example creates an Auto Scaling group and attaches the specified target group. Expected Output: :example: response = client.create_auto_scaling_group( AutoScalingGroupName='string', LaunchConfigurationName='string', InstanceId='string', MinSize=123, MaxSize=123, DesiredCapacity=123, DefaultCooldown=123, AvailabilityZones=[ 'string', ], LoadBalancerNames=[ 'string', ], TargetGroupARNs=[ 'string', ], HealthCheckType='string', HealthCheckGracePeriod=123, PlacementGroup='string', VPCZoneIdentifier='string', TerminationPolicies=[ 'string', ], NewInstancesProtectedFromScaleIn=True|False, Tags=[ { 'ResourceId': 'string', 'ResourceType': 'string', 'Key': 'string', 'Value': 'string', 'PropagateAtLaunch': True|False }, ] ) :type AutoScalingGroupName: string :param AutoScalingGroupName: [REQUIRED] The name of the group. This name must be unique within the scope of your AWS account. :type LaunchConfigurationName: string :param LaunchConfigurationName: The name of the launch configuration. Alternatively, specify an EC2 instance instead of a launch configuration. :type InstanceId: string :param InstanceId: The ID of the instance used to create a launch configuration for the group. Alternatively, specify a launch configuration instead of an EC2 instance. When you specify an ID of an instance, Auto Scaling creates a new launch configuration and associates it with the group. This launch configuration derives its attributes from the specified instance, with the exception of the block device mapping. For more information, see Create an Auto Scaling Group Using an EC2 Instance in the Auto Scaling User Guide . :type MinSize: integer :param MinSize: [REQUIRED] The minimum size of the group. :type MaxSize: integer :param MaxSize: [REQUIRED] The maximum size of the group. :type DesiredCapacity: integer :param DesiredCapacity: The number of EC2 instances that should be running in the group. This number must be greater than or equal to the minimum size of the group and less than or equal to the maximum size of the group. If you do not specify a desired capacity, the default is the minimum size of the group. :type DefaultCooldown: integer :param DefaultCooldown: The amount of time, in seconds, after a scaling activity completes before another scaling activity can start. The default is 300. For more information, see Auto Scaling Cooldowns in the Auto Scaling User Guide . :type AvailabilityZones: list :param AvailabilityZones: One or more Availability Zones for the group. This parameter is optional if you specify one or more subnets. (string) -- :type LoadBalancerNames: list :param LoadBalancerNames: One or more Classic Load Balancers. To specify an Application Load Balancer, use TargetGroupARNs instead. For more information, see Using a Load Balancer With an Auto Scaling Group in the Auto Scaling User Guide . (string) -- :type TargetGroupARNs: list :param TargetGroupARNs: The Amazon Resource Names (ARN) of the target groups. (string) -- :type HealthCheckType: string :param HealthCheckType: The service to use for the health checks. The valid values are EC2 and ELB . By default, health checks use Amazon EC2 instance status checks to determine the health of an instance. For more information, see Health Checks in the Auto Scaling User Guide . :type HealthCheckGracePeriod: integer :param HealthCheckGracePeriod: The amount of time, in seconds, that Auto Scaling waits before checking the health status of an EC2 instance that has come into service. During this time, any health check failures for the instance are ignored. The default is 0. This parameter is required if you are adding an ELB health check. For more information, see Health Checks in the Auto Scaling User Guide . :type PlacementGroup: string :param PlacementGroup: The name of the placement group into which you'll launch your instances, if any. For more information, see Placement Groups in the Amazon Elastic Compute Cloud User Guide . :type VPCZoneIdentifier: string :param VPCZoneIdentifier: A comma-separated list of subnet identifiers for your virtual private cloud (VPC). If you specify subnets and Availability Zones with this call, ensure that the subnets' Availability Zones match the Availability Zones specified. For more information, see Launching Auto Scaling Instances in a VPC in the Auto Scaling User Guide . :type TerminationPolicies: list :param TerminationPolicies: One or more termination policies used to select the instance to terminate. These policies are executed in the order that they are listed. For more information, see Controlling Which Instances Auto Scaling Terminates During Scale In in the Auto Scaling User Guide . (string) -- :type NewInstancesProtectedFromScaleIn: boolean :param NewInstancesProtectedFromScaleIn: Indicates whether newly launched instances are protected from termination by Auto Scaling when scaling in. :type Tags: list :param Tags: One or more tags. For more information, see Tagging Auto Scaling Groups and Instances in the Auto Scaling User Guide . (dict) --Describes a tag for an Auto Scaling group. ResourceId (string) --The name of the group. ResourceType (string) --The type of resource. The only supported value is auto-scaling-group . Key (string) -- [REQUIRED]The tag key. Value (string) --The tag value. PropagateAtLaunch (boolean) --Determines whether the tag is added to new instances as they are launched in the group. :return: response = client.create_auto_scaling_group( AutoScalingGroupName='my-auto-scaling-group', LaunchConfigurationName='my-launch-config', MaxSize=3, MinSize=1, VPCZoneIdentifier='subnet-4176792c', ) print(response) """ pass
[ "def", "create_auto_scaling_group", "(", "AutoScalingGroupName", "=", "None", ",", "LaunchConfigurationName", "=", "None", ",", "InstanceId", "=", "None", ",", "MinSize", "=", "None", ",", "MaxSize", "=", "None", ",", "DesiredCapacity", "=", "None", ",", "DefaultCooldown", "=", "None", ",", "AvailabilityZones", "=", "None", ",", "LoadBalancerNames", "=", "None", ",", "TargetGroupARNs", "=", "None", ",", "HealthCheckType", "=", "None", ",", "HealthCheckGracePeriod", "=", "None", ",", "PlacementGroup", "=", "None", ",", "VPCZoneIdentifier", "=", "None", ",", "TerminationPolicies", "=", "None", ",", "NewInstancesProtectedFromScaleIn", "=", "None", ",", "Tags", "=", "None", ")", ":", "pass" ]
48.980519
38.967532
def _mask_feature_data(feature_data, mask, mask_type): """ Masks values of data feature with a given mask of given mask type. The masking is done by assigning `numpy.nan` value. :param feature_data: Data array which will be masked :type feature_data: numpy.ndarray :param mask: Mask array :type mask: numpy.ndarray :param mask_type: Feature type of mask :type mask_type: FeatureType :return: Masked data array :rtype: numpy.ndarray """ if mask_type.is_spatial() and feature_data.shape[1: 3] != mask.shape[-3: -1]: raise ValueError('Spatial dimensions of interpolation and mask feature do not match: ' '{} {}'.format(feature_data.shape, mask.shape)) if mask_type.is_time_dependent() and feature_data.shape[0] != mask.shape[0]: raise ValueError('Time dimension of interpolation and mask feature do not match: ' '{} {}'.format(feature_data.shape, mask.shape)) # This allows masking each channel differently but causes some complications while masking with label if mask.shape[-1] != feature_data.shape[-1]: mask = mask[..., 0] if mask_type is FeatureType.MASK: feature_data[mask, ...] = np.nan elif mask_type is FeatureType.MASK_TIMELESS: feature_data[:, mask, ...] = np.nan elif mask_type is FeatureType.LABEL: np.swapaxes(feature_data, 1, 3) feature_data[mask, ..., :, :] = np.nan np.swapaxes(feature_data, 1, 3) return feature_data
[ "def", "_mask_feature_data", "(", "feature_data", ",", "mask", ",", "mask_type", ")", ":", "if", "mask_type", ".", "is_spatial", "(", ")", "and", "feature_data", ".", "shape", "[", "1", ":", "3", "]", "!=", "mask", ".", "shape", "[", "-", "3", ":", "-", "1", "]", ":", "raise", "ValueError", "(", "'Spatial dimensions of interpolation and mask feature do not match: '", "'{} {}'", ".", "format", "(", "feature_data", ".", "shape", ",", "mask", ".", "shape", ")", ")", "if", "mask_type", ".", "is_time_dependent", "(", ")", "and", "feature_data", ".", "shape", "[", "0", "]", "!=", "mask", ".", "shape", "[", "0", "]", ":", "raise", "ValueError", "(", "'Time dimension of interpolation and mask feature do not match: '", "'{} {}'", ".", "format", "(", "feature_data", ".", "shape", ",", "mask", ".", "shape", ")", ")", "# This allows masking each channel differently but causes some complications while masking with label", "if", "mask", ".", "shape", "[", "-", "1", "]", "!=", "feature_data", ".", "shape", "[", "-", "1", "]", ":", "mask", "=", "mask", "[", "...", ",", "0", "]", "if", "mask_type", "is", "FeatureType", ".", "MASK", ":", "feature_data", "[", "mask", ",", "...", "]", "=", "np", ".", "nan", "elif", "mask_type", "is", "FeatureType", ".", "MASK_TIMELESS", ":", "feature_data", "[", ":", ",", "mask", ",", "...", "]", "=", "np", ".", "nan", "elif", "mask_type", "is", "FeatureType", ".", "LABEL", ":", "np", ".", "swapaxes", "(", "feature_data", ",", "1", ",", "3", ")", "feature_data", "[", "mask", ",", "...", ",", ":", ",", ":", "]", "=", "np", ".", "nan", "np", ".", "swapaxes", "(", "feature_data", ",", "1", ",", "3", ")", "return", "feature_data" ]
42.315789
21.894737
def set_cognitive_process(self, grade_id): """Sets the cognitive process. arg: grade_id (osid.id.Id): the new cognitive process raise: InvalidArgument - ``grade_id`` is invalid raise: NoAccess - ``grade_id`` cannot be modified raise: NullArgument - ``grade_id`` is ``null`` *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for osid.resource.ResourceForm.set_avatar_template if self.get_cognitive_process_metadata().is_read_only(): raise errors.NoAccess() if not self._is_valid_id(grade_id): raise errors.InvalidArgument() self._my_map['cognitiveProcessId'] = str(grade_id)
[ "def", "set_cognitive_process", "(", "self", ",", "grade_id", ")", ":", "# Implemented from template for osid.resource.ResourceForm.set_avatar_template", "if", "self", ".", "get_cognitive_process_metadata", "(", ")", ".", "is_read_only", "(", ")", ":", "raise", "errors", ".", "NoAccess", "(", ")", "if", "not", "self", ".", "_is_valid_id", "(", "grade_id", ")", ":", "raise", "errors", ".", "InvalidArgument", "(", ")", "self", ".", "_my_map", "[", "'cognitiveProcessId'", "]", "=", "str", "(", "grade_id", ")" ]
45
17.5625
def _no_auto_update_getter(self): """:class:`bool`. Boolean controlling whether the :meth:`start_update` method is automatically called by the :meth:`update` method Examples -------- You can disable the automatic update via >>> with data.no_auto_update: ... data.update(time=1) ... data.start_update() To permanently disable the automatic update, simply set >>> data.no_auto_update = True >>> data.update(time=1) >>> data.no_auto_update = False # reenable automatical update""" if getattr(self, '_no_auto_update', None) is not None: return self._no_auto_update else: self._no_auto_update = utils._TempBool() return self._no_auto_update
[ "def", "_no_auto_update_getter", "(", "self", ")", ":", "if", "getattr", "(", "self", ",", "'_no_auto_update'", ",", "None", ")", "is", "not", "None", ":", "return", "self", ".", "_no_auto_update", "else", ":", "self", ".", "_no_auto_update", "=", "utils", ".", "_TempBool", "(", ")", "return", "self", ".", "_no_auto_update" ]
31.608696
17.565217
def _get_all_volumes_paths(conn): ''' Extract the path and backing stores path of all volumes. :param conn: libvirt connection to use ''' volumes = [vol for l in [obj.listAllVolumes() for obj in conn.listAllStoragePools()] for vol in l] return {vol.path(): [path.text for path in ElementTree.fromstring(vol.XMLDesc()).findall('.//backingStore/path')] for vol in volumes if _is_valid_volume(vol)}
[ "def", "_get_all_volumes_paths", "(", "conn", ")", ":", "volumes", "=", "[", "vol", "for", "l", "in", "[", "obj", ".", "listAllVolumes", "(", ")", "for", "obj", "in", "conn", ".", "listAllStoragePools", "(", ")", "]", "for", "vol", "in", "l", "]", "return", "{", "vol", ".", "path", "(", ")", ":", "[", "path", ".", "text", "for", "path", "in", "ElementTree", ".", "fromstring", "(", "vol", ".", "XMLDesc", "(", ")", ")", ".", "findall", "(", "'.//backingStore/path'", ")", "]", "for", "vol", "in", "volumes", "if", "_is_valid_volume", "(", "vol", ")", "}" ]
47.111111
32.222222
def command(self, func): """ Decorator to add a command function to the registry. :param func: command function. """ command = Command(func) self._commands[func.__name__] = command return func
[ "def", "command", "(", "self", ",", "func", ")", ":", "command", "=", "Command", "(", "func", ")", "self", ".", "_commands", "[", "func", ".", "__name__", "]", "=", "command", "return", "func" ]
24.1
15.5
def recompute(self, quiet=False, **kwargs): """ Re-compute a previously computed model. You might want to do this if the kernel parameters change and the kernel is labeled as ``dirty``. :param quiet: (optional) If ``True``, return false when the computation fails. Otherwise, throw an error if something goes wrong. (default: ``False``) """ if not self.computed: if not (hasattr(self, "_x") and hasattr(self, "_yerr2")): raise RuntimeError("You need to compute the model first") try: # Update the model making sure that we store the original # ordering of the points. self.compute(self._x, np.sqrt(self._yerr2), **kwargs) except (ValueError, LinAlgError): if quiet: return False raise return True
[ "def", "recompute", "(", "self", ",", "quiet", "=", "False", ",", "*", "*", "kwargs", ")", ":", "if", "not", "self", ".", "computed", ":", "if", "not", "(", "hasattr", "(", "self", ",", "\"_x\"", ")", "and", "hasattr", "(", "self", ",", "\"_yerr2\"", ")", ")", ":", "raise", "RuntimeError", "(", "\"You need to compute the model first\"", ")", "try", ":", "# Update the model making sure that we store the original", "# ordering of the points.", "self", ".", "compute", "(", "self", ".", "_x", ",", "np", ".", "sqrt", "(", "self", ".", "_yerr2", ")", ",", "*", "*", "kwargs", ")", "except", "(", "ValueError", ",", "LinAlgError", ")", ":", "if", "quiet", ":", "return", "False", "raise", "return", "True" ]
41.318182
20.863636
def __intermediate_htmode(self, radio): """ only for mac80211 driver """ protocol = radio.pop('protocol') channel_width = radio.pop('channel_width') # allow overriding htmode if 'htmode' in radio: return radio['htmode'] if protocol == '802.11n': return 'HT{0}'.format(channel_width) elif protocol == '802.11ac': return 'VHT{0}'.format(channel_width) # disables n return 'NONE'
[ "def", "__intermediate_htmode", "(", "self", ",", "radio", ")", ":", "protocol", "=", "radio", ".", "pop", "(", "'protocol'", ")", "channel_width", "=", "radio", ".", "pop", "(", "'channel_width'", ")", "# allow overriding htmode", "if", "'htmode'", "in", "radio", ":", "return", "radio", "[", "'htmode'", "]", "if", "protocol", "==", "'802.11n'", ":", "return", "'HT{0}'", ".", "format", "(", "channel_width", ")", "elif", "protocol", "==", "'802.11ac'", ":", "return", "'VHT{0}'", ".", "format", "(", "channel_width", ")", "# disables n", "return", "'NONE'" ]
32.4
7.333333
def get_repos(self, since=github.GithubObject.NotSet): """ :calls: `GET /repositories <http://developer.github.com/v3/repos/#list-all-public-repositories>`_ :param since: integer :rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Repository.Repository` """ assert since is github.GithubObject.NotSet or isinstance(since, (int, long)), since url_parameters = dict() if since is not github.GithubObject.NotSet: url_parameters["since"] = since return github.PaginatedList.PaginatedList( github.Repository.Repository, self.__requester, "/repositories", url_parameters )
[ "def", "get_repos", "(", "self", ",", "since", "=", "github", ".", "GithubObject", ".", "NotSet", ")", ":", "assert", "since", "is", "github", ".", "GithubObject", ".", "NotSet", "or", "isinstance", "(", "since", ",", "(", "int", ",", "long", ")", ")", ",", "since", "url_parameters", "=", "dict", "(", ")", "if", "since", "is", "not", "github", ".", "GithubObject", ".", "NotSet", ":", "url_parameters", "[", "\"since\"", "]", "=", "since", "return", "github", ".", "PaginatedList", ".", "PaginatedList", "(", "github", ".", "Repository", ".", "Repository", ",", "self", ".", "__requester", ",", "\"/repositories\"", ",", "url_parameters", ")" ]
44.3125
18.9375
def _series_col_letter(self, series): """ The letter of the Excel worksheet column in which the data for a series appears. """ column_number = 1 + series.categories.depth + series.index return self._column_reference(column_number)
[ "def", "_series_col_letter", "(", "self", ",", "series", ")", ":", "column_number", "=", "1", "+", "series", ".", "categories", ".", "depth", "+", "series", ".", "index", "return", "self", ".", "_column_reference", "(", "column_number", ")" ]
38.857143
12.857143
def table(contents, heading=True, colw=None, cwunit='dxa', tblw=0, twunit='auto', borders={}, celstyle=None): """ Return a table element based on specified parameters @param list contents: A list of lists describing contents. Every item in the list can be a string or a valid XML element itself. It can also be a list. In that case all the listed elements will be merged into the cell. @param bool heading: Tells whether first line should be treated as heading or not @param list colw: list of integer column widths specified in wunitS. @param str cwunit: Unit used for column width: 'pct' : fiftieths of a percent 'dxa' : twentieths of a point 'nil' : no width 'auto' : automagically determined @param int tblw: Table width @param str twunit: Unit used for table width. Same possible values as cwunit. @param dict borders: Dictionary defining table border. Supported keys are: 'top', 'left', 'bottom', 'right', 'insideH', 'insideV', 'all'. When specified, the 'all' key has precedence over others. Each key must define a dict of border attributes: color : The color of the border, in hex or 'auto' space : The space, measured in points sz : The size of the border, in eighths of a point val : The style of the border, see http://www.schemacentral.com/sc/ooxml/t-w_ST_Border.htm @param list celstyle: Specify the style for each colum, list of dicts. supported keys: 'align' : specify the alignment, see paragraph documentation. @return lxml.etree: Generated XML etree element """ table = makeelement('tbl') columns = len(contents[0]) # Table properties tableprops = makeelement('tblPr') tablestyle = makeelement('tblStyle', attributes={'val': ''}) tableprops.append(tablestyle) tablewidth = makeelement( 'tblW', attributes={'w': str(tblw), 'type': str(twunit)}) tableprops.append(tablewidth) if len(borders.keys()): tableborders = makeelement('tblBorders') for b in ['top', 'left', 'bottom', 'right', 'insideH', 'insideV']: if b in borders.keys() or 'all' in borders.keys(): k = 'all' if 'all' in borders.keys() else b attrs = {} for a in borders[k].keys(): attrs[a] = unicode(borders[k][a]) borderelem = makeelement(b, attributes=attrs) tableborders.append(borderelem) tableprops.append(tableborders) tablelook = makeelement('tblLook', attributes={'val': '0400'}) tableprops.append(tablelook) table.append(tableprops) # Table Grid tablegrid = makeelement('tblGrid') for i in range(columns): attrs = {'w': str(colw[i]) if colw else '2390'} tablegrid.append(makeelement('gridCol', attributes=attrs)) table.append(tablegrid) # Heading Row row = makeelement('tr') rowprops = makeelement('trPr') cnfStyle = makeelement('cnfStyle', attributes={'val': '000000100000'}) rowprops.append(cnfStyle) row.append(rowprops) if heading: i = 0 for heading in contents[0]: cell = makeelement('tc') # Cell properties cellprops = makeelement('tcPr') if colw: wattr = {'w': str(colw[i]), 'type': cwunit} else: wattr = {'w': '0', 'type': 'auto'} cellwidth = makeelement('tcW', attributes=wattr) cellstyle = makeelement('shd', attributes={'val': 'clear', 'color': 'auto', 'fill': 'FFFFFF', 'themeFill': 'text2', 'themeFillTint': '99'}) cellprops.append(cellwidth) cellprops.append(cellstyle) cell.append(cellprops) # Paragraph (Content) if not isinstance(heading, (list, tuple)): heading = [heading] for h in heading: if isinstance(h, etree._Element): cell.append(h) else: cell.append(paragraph(h, jc='center')) row.append(cell) i += 1 table.append(row) # Contents Rows for contentrow in contents[1 if heading else 0:]: row = makeelement('tr') i = 0 for content in contentrow: cell = makeelement('tc') # Properties cellprops = makeelement('tcPr') if colw: wattr = {'w': str(colw[i]), 'type': cwunit} else: wattr = {'w': '0', 'type': 'auto'} cellwidth = makeelement('tcW', attributes=wattr) cellprops.append(cellwidth) cell.append(cellprops) # Paragraph (Content) if not isinstance(content, (list, tuple)): content = [content] for c in content: if isinstance(c, etree._Element): cell.append(c) else: if celstyle and 'align' in celstyle[i].keys(): align = celstyle[i]['align'] else: align = 'left' cell.append(paragraph(c, jc=align)) row.append(cell) i += 1 table.append(row) return table
[ "def", "table", "(", "contents", ",", "heading", "=", "True", ",", "colw", "=", "None", ",", "cwunit", "=", "'dxa'", ",", "tblw", "=", "0", ",", "twunit", "=", "'auto'", ",", "borders", "=", "{", "}", ",", "celstyle", "=", "None", ")", ":", "table", "=", "makeelement", "(", "'tbl'", ")", "columns", "=", "len", "(", "contents", "[", "0", "]", ")", "# Table properties", "tableprops", "=", "makeelement", "(", "'tblPr'", ")", "tablestyle", "=", "makeelement", "(", "'tblStyle'", ",", "attributes", "=", "{", "'val'", ":", "''", "}", ")", "tableprops", ".", "append", "(", "tablestyle", ")", "tablewidth", "=", "makeelement", "(", "'tblW'", ",", "attributes", "=", "{", "'w'", ":", "str", "(", "tblw", ")", ",", "'type'", ":", "str", "(", "twunit", ")", "}", ")", "tableprops", ".", "append", "(", "tablewidth", ")", "if", "len", "(", "borders", ".", "keys", "(", ")", ")", ":", "tableborders", "=", "makeelement", "(", "'tblBorders'", ")", "for", "b", "in", "[", "'top'", ",", "'left'", ",", "'bottom'", ",", "'right'", ",", "'insideH'", ",", "'insideV'", "]", ":", "if", "b", "in", "borders", ".", "keys", "(", ")", "or", "'all'", "in", "borders", ".", "keys", "(", ")", ":", "k", "=", "'all'", "if", "'all'", "in", "borders", ".", "keys", "(", ")", "else", "b", "attrs", "=", "{", "}", "for", "a", "in", "borders", "[", "k", "]", ".", "keys", "(", ")", ":", "attrs", "[", "a", "]", "=", "unicode", "(", "borders", "[", "k", "]", "[", "a", "]", ")", "borderelem", "=", "makeelement", "(", "b", ",", "attributes", "=", "attrs", ")", "tableborders", ".", "append", "(", "borderelem", ")", "tableprops", ".", "append", "(", "tableborders", ")", "tablelook", "=", "makeelement", "(", "'tblLook'", ",", "attributes", "=", "{", "'val'", ":", "'0400'", "}", ")", "tableprops", ".", "append", "(", "tablelook", ")", "table", ".", "append", "(", "tableprops", ")", "# Table Grid", "tablegrid", "=", "makeelement", "(", "'tblGrid'", ")", "for", "i", "in", "range", "(", "columns", ")", ":", "attrs", "=", "{", "'w'", ":", "str", "(", "colw", "[", "i", "]", ")", "if", "colw", "else", "'2390'", "}", "tablegrid", ".", "append", "(", "makeelement", "(", "'gridCol'", ",", "attributes", "=", "attrs", ")", ")", "table", ".", "append", "(", "tablegrid", ")", "# Heading Row", "row", "=", "makeelement", "(", "'tr'", ")", "rowprops", "=", "makeelement", "(", "'trPr'", ")", "cnfStyle", "=", "makeelement", "(", "'cnfStyle'", ",", "attributes", "=", "{", "'val'", ":", "'000000100000'", "}", ")", "rowprops", ".", "append", "(", "cnfStyle", ")", "row", ".", "append", "(", "rowprops", ")", "if", "heading", ":", "i", "=", "0", "for", "heading", "in", "contents", "[", "0", "]", ":", "cell", "=", "makeelement", "(", "'tc'", ")", "# Cell properties", "cellprops", "=", "makeelement", "(", "'tcPr'", ")", "if", "colw", ":", "wattr", "=", "{", "'w'", ":", "str", "(", "colw", "[", "i", "]", ")", ",", "'type'", ":", "cwunit", "}", "else", ":", "wattr", "=", "{", "'w'", ":", "'0'", ",", "'type'", ":", "'auto'", "}", "cellwidth", "=", "makeelement", "(", "'tcW'", ",", "attributes", "=", "wattr", ")", "cellstyle", "=", "makeelement", "(", "'shd'", ",", "attributes", "=", "{", "'val'", ":", "'clear'", ",", "'color'", ":", "'auto'", ",", "'fill'", ":", "'FFFFFF'", ",", "'themeFill'", ":", "'text2'", ",", "'themeFillTint'", ":", "'99'", "}", ")", "cellprops", ".", "append", "(", "cellwidth", ")", "cellprops", ".", "append", "(", "cellstyle", ")", "cell", ".", "append", "(", "cellprops", ")", "# Paragraph (Content)", "if", "not", "isinstance", "(", "heading", ",", 
"(", "list", ",", "tuple", ")", ")", ":", "heading", "=", "[", "heading", "]", "for", "h", "in", "heading", ":", "if", "isinstance", "(", "h", ",", "etree", ".", "_Element", ")", ":", "cell", ".", "append", "(", "h", ")", "else", ":", "cell", ".", "append", "(", "paragraph", "(", "h", ",", "jc", "=", "'center'", ")", ")", "row", ".", "append", "(", "cell", ")", "i", "+=", "1", "table", ".", "append", "(", "row", ")", "# Contents Rows", "for", "contentrow", "in", "contents", "[", "1", "if", "heading", "else", "0", ":", "]", ":", "row", "=", "makeelement", "(", "'tr'", ")", "i", "=", "0", "for", "content", "in", "contentrow", ":", "cell", "=", "makeelement", "(", "'tc'", ")", "# Properties", "cellprops", "=", "makeelement", "(", "'tcPr'", ")", "if", "colw", ":", "wattr", "=", "{", "'w'", ":", "str", "(", "colw", "[", "i", "]", ")", ",", "'type'", ":", "cwunit", "}", "else", ":", "wattr", "=", "{", "'w'", ":", "'0'", ",", "'type'", ":", "'auto'", "}", "cellwidth", "=", "makeelement", "(", "'tcW'", ",", "attributes", "=", "wattr", ")", "cellprops", ".", "append", "(", "cellwidth", ")", "cell", ".", "append", "(", "cellprops", ")", "# Paragraph (Content)", "if", "not", "isinstance", "(", "content", ",", "(", "list", ",", "tuple", ")", ")", ":", "content", "=", "[", "content", "]", "for", "c", "in", "content", ":", "if", "isinstance", "(", "c", ",", "etree", ".", "_Element", ")", ":", "cell", ".", "append", "(", "c", ")", "else", ":", "if", "celstyle", "and", "'align'", "in", "celstyle", "[", "i", "]", ".", "keys", "(", ")", ":", "align", "=", "celstyle", "[", "i", "]", "[", "'align'", "]", "else", ":", "align", "=", "'left'", "cell", ".", "append", "(", "paragraph", "(", "c", ",", "jc", "=", "align", ")", ")", "row", ".", "append", "(", "cell", ")", "i", "+=", "1", "table", ".", "append", "(", "row", ")", "return", "table" ]
44.296296
16.311111
def CheckForCopyright(filename, lines, error): """Logs an error if no Copyright message appears at the top of the file.""" # We'll say it should occur by line 10. Don't forget there's a # dummy line at the front. for line in range(1, min(len(lines), 11)): if re.search(r'Copyright', lines[line], re.I): break else: # means no copyright line was found error(filename, 0, 'legal/copyright', 5, 'No copyright message found. ' 'You should have a line: "Copyright [year] <Copyright Owner>"')
[ "def", "CheckForCopyright", "(", "filename", ",", "lines", ",", "error", ")", ":", "# We'll say it should occur by line 10. Don't forget there's a", "# dummy line at the front.", "for", "line", "in", "range", "(", "1", ",", "min", "(", "len", "(", "lines", ")", ",", "11", ")", ")", ":", "if", "re", ".", "search", "(", "r'Copyright'", ",", "lines", "[", "line", "]", ",", "re", ".", "I", ")", ":", "break", "else", ":", "# means no copyright line was found", "error", "(", "filename", ",", "0", ",", "'legal/copyright'", ",", "5", ",", "'No copyright message found. '", "'You should have a line: \"Copyright [year] <Copyright Owner>\"'", ")" ]
48.909091
15
def load_labware_by_name(self, name: str) -> Labware: """ Specify the presence of a piece of labware on the module. :param name: The name of the labware object. :returns: The initialized and loaded labware object. """ lw = load(name, self._geometry.location) return self.load_labware(lw)
[ "def", "load_labware_by_name", "(", "self", ",", "name", ":", "str", ")", "->", "Labware", ":", "lw", "=", "load", "(", "name", ",", "self", ".", "_geometry", ".", "location", ")", "return", "self", ".", "load_labware", "(", "lw", ")" ]
41.125
12.125
def read(self, filename): ''' Read a file content. :param string filename: The storage root-relative filename :raises FileNotFound: If the file does not exists ''' if not self.backend.exists(filename): raise FileNotFound(filename) return self.backend.read(filename)
[ "def", "read", "(", "self", ",", "filename", ")", ":", "if", "not", "self", ".", "backend", ".", "exists", "(", "filename", ")", ":", "raise", "FileNotFound", "(", "filename", ")", "return", "self", ".", "backend", ".", "read", "(", "filename", ")" ]
32.5
17.5
def build(self, construct): """Build a single construct in CLIPS. The Python equivalent of the CLIPS build command. """ if lib.EnvBuild(self._env, construct.encode()) != 1: raise CLIPSError(self._env)
[ "def", "build", "(", "self", ",", "construct", ")", ":", "if", "lib", ".", "EnvBuild", "(", "self", ".", "_env", ",", "construct", ".", "encode", "(", ")", ")", "!=", "1", ":", "raise", "CLIPSError", "(", "self", ".", "_env", ")" ]
29.875
16.375
def diff_parameters(old_params, new_params): """Compares the old vs. new parameters and returns a "diff". If there are no changes, we return an empty list. Args: old_params(dict): old parameters new_params(dict): new parameters Returns: list: A list of differences """ [changes, diff] = diff_dictionaries(old_params, new_params) if changes == 0: return [] return diff
[ "def", "diff_parameters", "(", "old_params", ",", "new_params", ")", ":", "[", "changes", ",", "diff", "]", "=", "diff_dictionaries", "(", "old_params", ",", "new_params", ")", "if", "changes", "==", "0", ":", "return", "[", "]", "return", "diff" ]
26.0625
18.3125
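A short usage sketch for diff_parameters above. The shape of individual diff entries comes from diff_dictionaries(), which is not shown here, so only the documented "no changes yields an empty list" behaviour is illustrated; the parameter dicts are hypothetical.
old = {'InstanceType': 't3.micro', 'DesiredCapacity': '2'}

print(diff_parameters(old, dict(old)))   # [] - per the docstring, identical parameters report no diff

new = dict(old, DesiredCapacity='4')
print(diff_parameters(old, new))         # non-empty list describing the changed DesiredCapacity key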
def reload_module(self, module_name): """Reloads the specified module without changing its ordering. 1. Calls stop(reloading=True) on the module 2. Reloads the Module object into .loaded_modules 3. Calls start(reloading=True) on the new object If called with a module name that is not currently loaded, it will load it. Returns True if the module was successfully reloaded, otherwise False. """ module = self.loaded_modules.get(module_name) if module: module.stop(reloading=True) else: _log.info("Reload loading new module module '%s'", module_name) success = self.load_module(module_name) if success: _log.info("Successfully (re)loaded module '%s'.", module_name) elif module: _log.error("Unable to reload module '%s', reusing existing.", module_name) else: _log.error("Failed to load module '%s'.", module_name) return False self.loaded_modules[module_name].start(reloading=True) return success
[ "def", "reload_module", "(", "self", ",", "module_name", ")", ":", "module", "=", "self", ".", "loaded_modules", ".", "get", "(", "module_name", ")", "if", "module", ":", "module", ".", "stop", "(", "reloading", "=", "True", ")", "else", ":", "_log", ".", "info", "(", "\"Reload loading new module module '%s'\"", ",", "module_name", ")", "success", "=", "self", ".", "load_module", "(", "module_name", ")", "if", "success", ":", "_log", ".", "info", "(", "\"Successfully (re)loaded module '%s'.\"", ",", "module_name", ")", "elif", "module", ":", "_log", ".", "error", "(", "\"Unable to reload module '%s', reusing existing.\"", ",", "module_name", ")", "else", ":", "_log", ".", "error", "(", "\"Failed to load module '%s'.\"", ",", "module_name", ")", "return", "False", "self", ".", "loaded_modules", "[", "module_name", "]", ".", "start", "(", "reloading", "=", "True", ")", "return", "success" ]
40.285714
19.892857
def _split_addr(addr): """ Splits a str of IP address and port pair into (host, port). Example:: >>> _split_addr('127.0.0.1:6653') ('127.0.0.1', 6653) >>> _split_addr('[::1]:6653') ('::1', 6653) Raises ValueError if invalid format. :param addr: A pair of IP address and port. :return: IP address and port """ e = ValueError('Invalid IP address and port pair: "%s"' % addr) pair = addr.rsplit(':', 1) if len(pair) != 2: raise e addr, port = pair if addr.startswith('[') and addr.endswith(']'): addr = addr.lstrip('[').rstrip(']') if not ip.valid_ipv6(addr): raise e elif not ip.valid_ipv4(addr): raise e return addr, int(port, 0)
[ "def", "_split_addr", "(", "addr", ")", ":", "e", "=", "ValueError", "(", "'Invalid IP address and port pair: \"%s\"'", "%", "addr", ")", "pair", "=", "addr", ".", "rsplit", "(", "':'", ",", "1", ")", "if", "len", "(", "pair", ")", "!=", "2", ":", "raise", "e", "addr", ",", "port", "=", "pair", "if", "addr", ".", "startswith", "(", "'['", ")", "and", "addr", ".", "endswith", "(", "']'", ")", ":", "addr", "=", "addr", ".", "lstrip", "(", "'['", ")", ".", "rstrip", "(", "']'", ")", "if", "not", "ip", ".", "valid_ipv6", "(", "addr", ")", ":", "raise", "e", "elif", "not", "ip", ".", "valid_ipv4", "(", "addr", ")", ":", "raise", "e", "return", "addr", ",", "int", "(", "port", ",", "0", ")" ]
24.566667
18.033333
def is_tracking_shield_displayed(self): """Tracking Protection shield. Returns: bool: True or False if the Tracking Shield is displayed. """ with self.selenium.context(self.selenium.CONTEXT_CHROME): if self.window.firefox_version >= 63: # Bug 1471713, 1476218 el = self.root.find_element(*self._tracking_protection_shield_locator) return el.get_attribute("active") is not None el = self.root.find_element(By.ID, "tracking-protection-icon") return bool(el.get_attribute("state"))
[ "def", "is_tracking_shield_displayed", "(", "self", ")", ":", "with", "self", ".", "selenium", ".", "context", "(", "self", ".", "selenium", ".", "CONTEXT_CHROME", ")", ":", "if", "self", ".", "window", ".", "firefox_version", ">=", "63", ":", "# Bug 1471713, 1476218", "el", "=", "self", ".", "root", ".", "find_element", "(", "*", "self", ".", "_tracking_protection_shield_locator", ")", "return", "el", ".", "get_attribute", "(", "\"active\"", ")", "is", "not", "None", "el", "=", "self", ".", "root", ".", "find_element", "(", "By", ".", "ID", ",", "\"tracking-protection-icon\"", ")", "return", "bool", "(", "el", ".", "get_attribute", "(", "\"state\"", ")", ")" ]
44.692308
23.230769
def parse_entry(self, name): """ Parse query entry name, just like: { 'User[]:user' } 'User[]:user' is an entry name. :param name: :return: """ # calculate schema mode # if ':name' or '' or '[]:name' or '[]' found, it'll be treated as a multiple Schema query alias = name if ':' in name: name, alias = name.split(':') if name.endswith('[]'): need_list = True name = name[:-2] else: need_list = False return alias, name, need_list
[ "def", "parse_entry", "(", "self", ",", "name", ")", ":", "# calculate schema mode", "# if ':name' or '' or '[]:name' or '[]' found, it'll be treated as a multiple Schema query", "alias", "=", "name", "if", "':'", "in", "name", ":", "name", ",", "alias", "=", "name", ".", "split", "(", "':'", ")", "if", "name", ".", "endswith", "(", "'[]'", ")", ":", "need_list", "=", "True", "name", "=", "name", "[", ":", "-", "2", "]", "else", ":", "need_list", "=", "False", "return", "alias", ",", "name", ",", "need_list" ]
24.375
17.958333
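Illustrative results for the parse_entry record above, derived directly from its parsing logic. The method never reads self, so `q` stands for an instance of the (unnamed) class that defines it.
print(q.parse_entry('User[]:user'))   # ('user', 'User', True)   - list query with alias
print(q.parse_entry('User:user'))     # ('user', 'User', False)  - single item with alias
print(q.parse_entry('User[]'))        # ('User[]', 'User', True) - alias defaults to the raw name
print(q.parse_entry('User'))          # ('User', 'User', False)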
def convert_to_one_hot(y): """ Converts y into a one-hot representation. Parameters ---------- y : list A list containing continuous integer values. Returns ------- one_hot : numpy.ndarray A numpy.ndarray object, which is one-hot representation of y. """ max_value = max(y) min_value = min(y) length = len(y) one_hot = numpy.zeros((length, (max_value - min_value + 1))) one_hot[numpy.arange(length), y] = 1 return one_hot
[ "def", "convert_to_one_hot", "(", "y", ")", ":", "max_value", "=", "max", "(", "y", ")", "min_value", "=", "min", "(", "y", ")", "length", "=", "len", "(", "y", ")", "one_hot", "=", "numpy", ".", "zeros", "(", "(", "length", ",", "(", "max_value", "-", "min_value", "+", "1", ")", ")", ")", "one_hot", "[", "numpy", ".", "arange", "(", "length", ")", ",", "y", "]", "=", "1", "return", "one_hot" ]
22.666667
20.47619
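A worked example for convert_to_one_hot above; the function itself is assumed to be in scope along with its numpy dependency.
import numpy

y = [0, 2, 1, 0]
print(convert_to_one_hot(y))
# [[1. 0. 0.]
#  [0. 0. 1.]
#  [0. 1. 0.]
#  [1. 0. 0.]]
# Note: the column index is y itself, so labels are expected to start at 0;
# min_value only influences the width of the matrix.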
def _validatePullParams(MaxObjectCount, context): """ Validate the input parameters for the PullInstances, PullInstancesWithPath, and PullInstancePaths requests. MaxObjectCount: Must be integer type and ge 0 context: Must not be None and length ge 2 """ if (not isinstance(MaxObjectCount, six.integer_types) or MaxObjectCount < 0): raise ValueError( _format("MaxObjectCount parameter must be integer >= 0 but is " "{0!A}", MaxObjectCount)) if context is None or len(context) < 2: raise ValueError( _format("Pull... Context parameter must be valid tuple {0!A}", context))
[ "def", "_validatePullParams", "(", "MaxObjectCount", ",", "context", ")", ":", "if", "(", "not", "isinstance", "(", "MaxObjectCount", ",", "six", ".", "integer_types", ")", "or", "MaxObjectCount", "<", "0", ")", ":", "raise", "ValueError", "(", "_format", "(", "\"MaxObjectCount parameter must be integer >= 0 but is \"", "\"{0!A}\"", ",", "MaxObjectCount", ")", ")", "if", "context", "is", "None", "or", "len", "(", "context", ")", "<", "2", ":", "raise", "ValueError", "(", "_format", "(", "\"Pull... Context parameter must be valid tuple {0!A}\"", ",", "context", ")", ")" ]
38.611111
16.611111
def parse(self, fo): """ Convert Improbizer output to motifs Parameters ---------- fo : file-like File object containing Improbizer output. Returns ------- motifs : list List of Motif instances. """ motifs = [] p = re.compile(r'\d+\s+@\s+\d+\.\d+\s+sd\s+\d+\.\d+\s+(\w+)$') line = fo.readline() while line and line.find("Color") == -1: m = p.search(line) if m: pwm_data = {} for i in range(4): vals = [x.strip() for x in fo.readline().strip().split(" ") if x] pwm_data[vals[0].upper()] = vals[1:] pwm = [] for i in range(len(pwm_data["A"])): pwm.append([float(pwm_data[x][i]) for x in ["A","C","G","T"]]) motifs.append(Motif(pwm)) motifs[-1].id = "%s_%s" % (self.name, m.group(1)) line = fo.readline() return motifs
[ "def", "parse", "(", "self", ",", "fo", ")", ":", "motifs", "=", "[", "]", "p", "=", "re", ".", "compile", "(", "r'\\d+\\s+@\\s+\\d+\\.\\d+\\s+sd\\s+\\d+\\.\\d+\\s+(\\w+)$'", ")", "line", "=", "fo", ".", "readline", "(", ")", "while", "line", "and", "line", ".", "find", "(", "\"Color\"", ")", "==", "-", "1", ":", "m", "=", "p", ".", "search", "(", "line", ")", "if", "m", ":", "pwm_data", "=", "{", "}", "for", "i", "in", "range", "(", "4", ")", ":", "vals", "=", "[", "x", ".", "strip", "(", ")", "for", "x", "in", "fo", ".", "readline", "(", ")", ".", "strip", "(", ")", ".", "split", "(", "\" \"", ")", "if", "x", "]", "pwm_data", "[", "vals", "[", "0", "]", ".", "upper", "(", ")", "]", "=", "vals", "[", "1", ":", "]", "pwm", "=", "[", "]", "for", "i", "in", "range", "(", "len", "(", "pwm_data", "[", "\"A\"", "]", ")", ")", ":", "pwm", ".", "append", "(", "[", "float", "(", "pwm_data", "[", "x", "]", "[", "i", "]", ")", "for", "x", "in", "[", "\"A\"", ",", "\"C\"", ",", "\"G\"", ",", "\"T\"", "]", "]", ")", "motifs", ".", "append", "(", "Motif", "(", "pwm", ")", ")", "motifs", "[", "-", "1", "]", ".", "id", "=", "\"%s_%s\"", "%", "(", "self", ".", "name", ",", "m", ".", "group", "(", "1", ")", ")", "line", "=", "fo", ".", "readline", "(", ")", "return", "motifs" ]
31.242424
18.757576
def format_field(self, value, format_spec): """Override :meth:`string.Formatter.format_field` to have our default format_spec for :class:`datetime.datetime` objects, and to let None yield an empty string rather than ``None``.""" if isinstance(value, datetime) and not format_spec: return super().format_field(value, '%Y-%m-%d_%H-%M-%S') if value is None: return '' return super().format_field(value, format_spec)
[ "def", "format_field", "(", "self", ",", "value", ",", "format_spec", ")", ":", "if", "isinstance", "(", "value", ",", "datetime", ")", "and", "not", "format_spec", ":", "return", "super", "(", ")", ".", "format_field", "(", "value", ",", "'%Y-%m-%d_%H-%M-%S'", ")", "if", "value", "is", "None", ":", "return", "''", "return", "super", "(", ")", ".", "format_field", "(", "value", ",", "format_spec", ")" ]
53.111111
14.777778
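A small usage sketch for the format_field override above. SlugFormatter is a hypothetical string.Formatter subclass whose format_field is the method shown; the field names are illustrative.
from datetime import datetime

fmt = SlugFormatter()  # hypothetical Formatter subclass using the override above
print(fmt.format('{stamp} {label}', stamp=datetime(2021, 5, 4, 13, 30, 0), label=None))
# -> '2021-05-04_13-30-00 '  (datetime gets the default spec, None becomes '')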
def check_cache(self, vts, counter): """Manually checks the artifact cache (usually immediately before compilation.) Returns true if the cache was hit successfully, indicating that no compilation is necessary. """ if not self.artifact_cache_reads_enabled(): return False cached_vts, _, _ = self.check_artifact_cache([vts]) if not cached_vts: self.context.log.debug('Missed cache during double check for {}' .format(vts.target.address.spec)) return False assert cached_vts == [vts], ( 'Cache returned unexpected target: {} vs {}'.format(cached_vts, [vts]) ) self.context.log.info('Hit cache during double check for {}'.format(vts.target.address.spec)) counter() return True
[ "def", "check_cache", "(", "self", ",", "vts", ",", "counter", ")", ":", "if", "not", "self", ".", "artifact_cache_reads_enabled", "(", ")", ":", "return", "False", "cached_vts", ",", "_", ",", "_", "=", "self", ".", "check_artifact_cache", "(", "[", "vts", "]", ")", "if", "not", "cached_vts", ":", "self", ".", "context", ".", "log", ".", "debug", "(", "'Missed cache during double check for {}'", ".", "format", "(", "vts", ".", "target", ".", "address", ".", "spec", ")", ")", "return", "False", "assert", "cached_vts", "==", "[", "vts", "]", ",", "(", "'Cache returned unexpected target: {} vs {}'", ".", "format", "(", "cached_vts", ",", "[", "vts", "]", ")", ")", "self", ".", "context", ".", "log", ".", "info", "(", "'Hit cache during double check for {}'", ".", "format", "(", "vts", ".", "target", ".", "address", ".", "spec", ")", ")", "counter", "(", ")", "return", "True" ]
40.666667
22.333333
def parse_darknet_ann_list_to_cls_box(annotations): """Parse darknet annotation format into two lists for class and bounding box. Given an input list of [[class, x, y, w, h], ...], returns two lists: [class ...] and [[x, y, w, h], ...]. Parameters ------------ annotations : list of list A list of classes and bounding boxes of images e.g. [[class, x, y, w, h], ...] Returns ------- list of int List of class labels. list of list of 4 numbers List of bounding boxes. """ class_list = [] bbox_list = [] for ann in annotations: class_list.append(ann[0]) bbox_list.append(ann[1:]) return class_list, bbox_list
[ "def", "parse_darknet_ann_list_to_cls_box", "(", "annotations", ")", ":", "class_list", "=", "[", "]", "bbox_list", "=", "[", "]", "for", "ann", "in", "annotations", ":", "class_list", ".", "append", "(", "ann", "[", "0", "]", ")", "bbox_list", ".", "append", "(", "ann", "[", "1", ":", "]", ")", "return", "class_list", ",", "bbox_list" ]
26.8
22.8
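A worked example for parse_darknet_ann_list_to_cls_box above; the annotation values are illustrative relative coordinates.
annotations = [
    [0, 0.48, 0.52, 0.25, 0.40],   # [class, x, y, w, h] in darknet's relative units
    [2, 0.10, 0.75, 0.05, 0.08],
]
classes, boxes = parse_darknet_ann_list_to_cls_box(annotations)
print(classes)   # [0, 2]
print(boxes)     # [[0.48, 0.52, 0.25, 0.4], [0.1, 0.75, 0.05, 0.08]]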
def from_global_moment_and_saxis(cls, global_moment, saxis): """ Convenience method to initialize Magmom from a given global magnetic moment, i.e. magnetic moment with saxis=(0,0,1), and provided saxis. This method is useful if you do not know the components of your magnetic moment in the frame of your desired saxis. :param global_moment: :param saxis: desired saxis :return: """ magmom = Magmom(global_moment) return cls(magmom.get_moment(saxis=saxis), saxis=saxis)
[ "def", "from_global_moment_and_saxis", "(", "cls", ",", "global_moment", ",", "saxis", ")", ":", "magmom", "=", "Magmom", "(", "global_moment", ")", "return", "cls", "(", "magmom", ".", "get_moment", "(", "saxis", "=", "saxis", ")", ",", "saxis", "=", "saxis", ")" ]
37.4
17.4
def subseq(self, start_offset=0, end_offset=None): """ Return a subset of the sequence starting at start_offset (defaulting to the beginning) and ending at end_offset (None representing the end, which is the default) Raises ValueError if duration_64 is missing on any element """ from sebastian.core import DURATION_64 def subseq_iter(start_offset, end_offset): cur_offset = 0 for point in self._elements: try: cur_offset += point[DURATION_64] except KeyError: raise ValueError("HSeq.subseq requires all points to have a %s attribute" % DURATION_64) #Skip until start if cur_offset < start_offset: continue #Yield points start_offset <= point < end_offset if end_offset is None or cur_offset < end_offset: yield point else: raise StopIteration return HSeq(subseq_iter(start_offset, end_offset))
[ "def", "subseq", "(", "self", ",", "start_offset", "=", "0", ",", "end_offset", "=", "None", ")", ":", "from", "sebastian", ".", "core", "import", "DURATION_64", "def", "subseq_iter", "(", "start_offset", ",", "end_offset", ")", ":", "cur_offset", "=", "0", "for", "point", "in", "self", ".", "_elements", ":", "try", ":", "cur_offset", "+=", "point", "[", "DURATION_64", "]", "except", "KeyError", ":", "raise", "ValueError", "(", "\"HSeq.subseq requires all points to have a %s attribute\"", "%", "DURATION_64", ")", "#Skip until start", "if", "cur_offset", "<", "start_offset", ":", "continue", "#Yield points start_offset <= point < end_offset", "if", "end_offset", "is", "None", "or", "cur_offset", "<", "end_offset", ":", "yield", "point", "else", ":", "raise", "StopIteration", "return", "HSeq", "(", "subseq_iter", "(", "start_offset", ",", "end_offset", ")", ")" ]
41.346154
16.730769
def ok_for_running(self, cmd_obj, name, nargs): """We separate some of the common debugger command checks here: whether it makes sense to run the command in this execution state, if the command has the right number of arguments and so on. """ if hasattr(cmd_obj, 'execution_set'): if not (self.core.execution_status in cmd_obj.execution_set): part1 = ("Command '%s' is not available for execution status:" % name) mess = Mmisc.wrapped_lines(part1, self.core.execution_status, self.debugger.settings['width']) self.errmsg(mess) return False pass if self.frame is None and cmd_obj.need_stack: self.intf[-1].errmsg("Command '%s' needs an execution stack." % name) return False if nargs < cmd_obj.min_args: self.errmsg(("Command '%s' needs at least %d argument(s); " + "got %d.") % (name, cmd_obj.min_args, nargs)) return False elif cmd_obj.max_args is not None and nargs > cmd_obj.max_args: self.errmsg(("Command '%s' can take at most %d argument(s);" + " got %d.") % (name, cmd_obj.max_args, nargs)) return False return True
[ "def", "ok_for_running", "(", "self", ",", "cmd_obj", ",", "name", ",", "nargs", ")", ":", "if", "hasattr", "(", "cmd_obj", ",", "'execution_set'", ")", ":", "if", "not", "(", "self", ".", "core", ".", "execution_status", "in", "cmd_obj", ".", "execution_set", ")", ":", "part1", "=", "(", "\"Command '%s' is not available for execution status:\"", "%", "name", ")", "mess", "=", "Mmisc", ".", "wrapped_lines", "(", "part1", ",", "self", ".", "core", ".", "execution_status", ",", "self", ".", "debugger", ".", "settings", "[", "'width'", "]", ")", "self", ".", "errmsg", "(", "mess", ")", "return", "False", "pass", "if", "self", ".", "frame", "is", "None", "and", "cmd_obj", ".", "need_stack", ":", "self", ".", "intf", "[", "-", "1", "]", ".", "errmsg", "(", "\"Command '%s' needs an execution stack.\"", "%", "name", ")", "return", "False", "if", "nargs", "<", "cmd_obj", ".", "min_args", ":", "self", ".", "errmsg", "(", "(", "\"Command '%s' needs at least %d argument(s); \"", "+", "\"got %d.\"", ")", "%", "(", "name", ",", "cmd_obj", ".", "min_args", ",", "nargs", ")", ")", "return", "False", "elif", "cmd_obj", ".", "max_args", "is", "not", "None", "and", "nargs", ">", "cmd_obj", ".", "max_args", ":", "self", ".", "errmsg", "(", "(", "\"Command '%s' can take at most %d argument(s);\"", "+", "\" got %d.\"", ")", "%", "(", "name", ",", "cmd_obj", ".", "max_args", ",", "nargs", ")", ")", "return", "False", "return", "True" ]
49.333333
17.533333
def _add_step(self, step): """Add a step to the workflow. Args: step (Step): a step from the steps library. """ self._closed() self.has_workflow_step = self.has_workflow_step or step.is_workflow self.wf_steps[step.name_in_workflow] = step
[ "def", "_add_step", "(", "self", ",", "step", ")", ":", "self", ".", "_closed", "(", ")", "self", ".", "has_workflow_step", "=", "self", ".", "has_workflow_step", "or", "step", ".", "is_workflow", "self", ".", "wf_steps", "[", "step", ".", "name_in_workflow", "]", "=", "step" ]
29.1
20
def linkify(self, timeperiods, contacts, services, hosts): """Create link between objects:: * escalation -> host * escalation -> service * escalation -> timeperiods * escalation -> contact :param timeperiods: timeperiods to link :type timeperiods: alignak.objects.timeperiod.Timeperiods :param contacts: contacts to link :type contacts: alignak.objects.contact.Contacts :param services: services to link :type services: alignak.objects.service.Services :param hosts: hosts to link :type hosts: alignak.objects.host.Hosts :return: None """ self.linkify_with_timeperiods(timeperiods, 'escalation_period') self.linkify_with_contacts(contacts) self.linkify_es_by_s(services) self.linkify_es_by_h(hosts)
[ "def", "linkify", "(", "self", ",", "timeperiods", ",", "contacts", ",", "services", ",", "hosts", ")", ":", "self", ".", "linkify_with_timeperiods", "(", "timeperiods", ",", "'escalation_period'", ")", "self", ".", "linkify_with_contacts", "(", "contacts", ")", "self", ".", "linkify_es_by_s", "(", "services", ")", "self", ".", "linkify_es_by_h", "(", "hosts", ")" ]
37.954545
12.181818
def LAHF(cpu): """ Loads status flags into AH register. Moves the low byte of the EFLAGS register (which includes status flags SF, ZF, AF, PF, and CF) to the AH register. Reserved bits 1, 3, and 5 of the EFLAGS register are set in the AH register:: AH = EFLAGS(SF:ZF:0:AF:0:PF:1:CF); :param cpu: current CPU. :param dest: destination operand. :param src: source operand. """ used_regs = (cpu.SF, cpu.ZF, cpu.AF, cpu.PF, cpu.CF) is_expression = any(issymbolic(x) for x in used_regs) def make_flag(val, offset): if is_expression: return Operators.ITEBV(8, val, BitVecConstant(8, 1 << offset), BitVecConstant(8, 0)) else: return val << offset cpu.AH = (make_flag(cpu.SF, 7) | make_flag(cpu.ZF, 6) | make_flag(0, 5) | make_flag(cpu.AF, 4) | make_flag(0, 3) | make_flag(cpu.PF, 2) | make_flag(1, 1) | make_flag(cpu.CF, 0))
[ "def", "LAHF", "(", "cpu", ")", ":", "used_regs", "=", "(", "cpu", ".", "SF", ",", "cpu", ".", "ZF", ",", "cpu", ".", "AF", ",", "cpu", ".", "PF", ",", "cpu", ".", "CF", ")", "is_expression", "=", "any", "(", "issymbolic", "(", "x", ")", "for", "x", "in", "used_regs", ")", "def", "make_flag", "(", "val", ",", "offset", ")", ":", "if", "is_expression", ":", "return", "Operators", ".", "ITEBV", "(", "8", ",", "val", ",", "BitVecConstant", "(", "8", ",", "1", "<<", "offset", ")", ",", "BitVecConstant", "(", "8", ",", "0", ")", ")", "else", ":", "return", "val", "<<", "offset", "cpu", ".", "AH", "=", "(", "make_flag", "(", "cpu", ".", "SF", ",", "7", ")", "|", "make_flag", "(", "cpu", ".", "ZF", ",", "6", ")", "|", "make_flag", "(", "0", ",", "5", ")", "|", "make_flag", "(", "cpu", ".", "AF", ",", "4", ")", "|", "make_flag", "(", "0", ",", "3", ")", "|", "make_flag", "(", "cpu", ".", "PF", ",", "2", ")", "|", "make_flag", "(", "1", ",", "1", ")", "|", "make_flag", "(", "cpu", ".", "CF", ",", "0", ")", ")" ]
35.515152
15.333333
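A plain-integer illustration of the AH packing performed by the LAHF record above, following the bit layout SF:ZF:0:AF:0:PF:1:CF from its docstring; the flag values are arbitrary examples and the emulator's symbolic types are not involved.
SF, ZF, AF, PF, CF = 1, 0, 1, 0, 1
AH = (SF << 7) | (ZF << 6) | (0 << 5) | (AF << 4) | (0 << 3) | (PF << 2) | (1 << 1) | (CF << 0)
print(bin(AH), hex(AH))   # 0b10010011 0x93 - bit 1 is always set, bits 3 and 5 always clear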
def verify_pss_padding(hash_algorithm, salt_length, key_length, message, signature): """ Verifies the PSS padding on an encoded message :param hash_algorithm: The string name of the hash algorithm to use: "sha1", "sha224", "sha256", "sha384", "sha512" :param salt_length: The length of the salt as an integer - typically the same as the length of the output from the hash_algorithm :param key_length: The length of the RSA key, in bits :param message: A byte string of the message to pad :param signature: The signature to verify :return: A boolean indicating if the signature is invalid """ if _backend != 'winlegacy' and sys.platform != 'darwin': raise SystemError(pretty_message( ''' Pure-python RSA PSS signature padding verification code is only for Windows XP/2003 and OS X ''' )) if not isinstance(message, byte_cls): raise TypeError(pretty_message( ''' message must be a byte string, not %s ''', type_name(message) )) if not isinstance(signature, byte_cls): raise TypeError(pretty_message( ''' signature must be a byte string, not %s ''', type_name(signature) )) if not isinstance(salt_length, int_types): raise TypeError(pretty_message( ''' salt_length must be an integer, not %s ''', type_name(salt_length) )) if salt_length < 0: raise ValueError(pretty_message( ''' salt_length must be 0 or more - is %s ''', repr(salt_length) )) if hash_algorithm not in set(['sha1', 'sha224', 'sha256', 'sha384', 'sha512']): raise ValueError(pretty_message( ''' hash_algorithm must be one of "sha1", "sha224", "sha256", "sha384", "sha512", not %s ''', repr(hash_algorithm) )) hash_func = getattr(hashlib, hash_algorithm) em_bits = key_length - 1 em_len = int(math.ceil(em_bits / 8)) message_digest = hash_func(message).digest() hash_length = len(message_digest) if em_len < hash_length + salt_length + 2: return False if signature[-1:] != b'\xBC': return False zero_bits = (8 * em_len) - em_bits masked_db_length = em_len - hash_length - 1 masked_db = signature[0:masked_db_length] first_byte = ord(masked_db[0:1]) bits_that_should_be_zero = first_byte >> (8 - zero_bits) if bits_that_should_be_zero != 0: return False m_prime_digest = signature[masked_db_length:masked_db_length + hash_length] db_mask = _mgf1(hash_algorithm, m_prime_digest, em_len - hash_length - 1) left_bit_mask = ('0' * zero_bits) + ('1' * (8 - zero_bits)) left_int_mask = int(left_bit_mask, 2) if left_int_mask != 255: db_mask = chr_cls(left_int_mask & ord(db_mask[0:1])) + db_mask[1:] db = int_to_bytes(int_from_bytes(masked_db) ^ int_from_bytes(db_mask)) if len(db) < len(masked_db): db = (b'\x00' * (len(masked_db) - len(db))) + db zero_length = em_len - hash_length - salt_length - 2 zero_string = b'\x00' * zero_length if not constant_compare(db[0:zero_length], zero_string): return False if db[zero_length:zero_length + 1] != b'\x01': return False salt = db[0 - salt_length:] m_prime = (b'\x00' * 8) + message_digest + salt h_prime = hash_func(m_prime).digest() return constant_compare(m_prime_digest, h_prime)
[ "def", "verify_pss_padding", "(", "hash_algorithm", ",", "salt_length", ",", "key_length", ",", "message", ",", "signature", ")", ":", "if", "_backend", "!=", "'winlegacy'", "and", "sys", ".", "platform", "!=", "'darwin'", ":", "raise", "SystemError", "(", "pretty_message", "(", "'''\n Pure-python RSA PSS signature padding verification code is only for\n Windows XP/2003 and OS X\n '''", ")", ")", "if", "not", "isinstance", "(", "message", ",", "byte_cls", ")", ":", "raise", "TypeError", "(", "pretty_message", "(", "'''\n message must be a byte string, not %s\n '''", ",", "type_name", "(", "message", ")", ")", ")", "if", "not", "isinstance", "(", "signature", ",", "byte_cls", ")", ":", "raise", "TypeError", "(", "pretty_message", "(", "'''\n signature must be a byte string, not %s\n '''", ",", "type_name", "(", "signature", ")", ")", ")", "if", "not", "isinstance", "(", "salt_length", ",", "int_types", ")", ":", "raise", "TypeError", "(", "pretty_message", "(", "'''\n salt_length must be an integer, not %s\n '''", ",", "type_name", "(", "salt_length", ")", ")", ")", "if", "salt_length", "<", "0", ":", "raise", "ValueError", "(", "pretty_message", "(", "'''\n salt_length must be 0 or more - is %s\n '''", ",", "repr", "(", "salt_length", ")", ")", ")", "if", "hash_algorithm", "not", "in", "set", "(", "[", "'sha1'", ",", "'sha224'", ",", "'sha256'", ",", "'sha384'", ",", "'sha512'", "]", ")", ":", "raise", "ValueError", "(", "pretty_message", "(", "'''\n hash_algorithm must be one of \"sha1\", \"sha224\", \"sha256\", \"sha384\",\n \"sha512\", not %s\n '''", ",", "repr", "(", "hash_algorithm", ")", ")", ")", "hash_func", "=", "getattr", "(", "hashlib", ",", "hash_algorithm", ")", "em_bits", "=", "key_length", "-", "1", "em_len", "=", "int", "(", "math", ".", "ceil", "(", "em_bits", "/", "8", ")", ")", "message_digest", "=", "hash_func", "(", "message", ")", ".", "digest", "(", ")", "hash_length", "=", "len", "(", "message_digest", ")", "if", "em_len", "<", "hash_length", "+", "salt_length", "+", "2", ":", "return", "False", "if", "signature", "[", "-", "1", ":", "]", "!=", "b'\\xBC'", ":", "return", "False", "zero_bits", "=", "(", "8", "*", "em_len", ")", "-", "em_bits", "masked_db_length", "=", "em_len", "-", "hash_length", "-", "1", "masked_db", "=", "signature", "[", "0", ":", "masked_db_length", "]", "first_byte", "=", "ord", "(", "masked_db", "[", "0", ":", "1", "]", ")", "bits_that_should_be_zero", "=", "first_byte", ">>", "(", "8", "-", "zero_bits", ")", "if", "bits_that_should_be_zero", "!=", "0", ":", "return", "False", "m_prime_digest", "=", "signature", "[", "masked_db_length", ":", "masked_db_length", "+", "hash_length", "]", "db_mask", "=", "_mgf1", "(", "hash_algorithm", ",", "m_prime_digest", ",", "em_len", "-", "hash_length", "-", "1", ")", "left_bit_mask", "=", "(", "'0'", "*", "zero_bits", ")", "+", "(", "'1'", "*", "(", "8", "-", "zero_bits", ")", ")", "left_int_mask", "=", "int", "(", "left_bit_mask", ",", "2", ")", "if", "left_int_mask", "!=", "255", ":", "db_mask", "=", "chr_cls", "(", "left_int_mask", "&", "ord", "(", "db_mask", "[", "0", ":", "1", "]", ")", ")", "+", "db_mask", "[", "1", ":", "]", "db", "=", "int_to_bytes", "(", "int_from_bytes", "(", "masked_db", ")", "^", "int_from_bytes", "(", "db_mask", ")", ")", "if", "len", "(", "db", ")", "<", "len", "(", "masked_db", ")", ":", "db", "=", "(", "b'\\x00'", "*", "(", "len", "(", "masked_db", ")", "-", "len", "(", "db", ")", ")", ")", "+", "db", "zero_length", "=", "em_len", "-", "hash_length", "-", "salt_length", 
"-", "2", "zero_string", "=", "b'\\x00'", "*", "zero_length", "if", "not", "constant_compare", "(", "db", "[", "0", ":", "zero_length", "]", ",", "zero_string", ")", ":", "return", "False", "if", "db", "[", "zero_length", ":", "zero_length", "+", "1", "]", "!=", "b'\\x01'", ":", "return", "False", "salt", "=", "db", "[", "0", "-", "salt_length", ":", "]", "m_prime", "=", "(", "b'\\x00'", "*", "8", ")", "+", "message_digest", "+", "salt", "h_prime", "=", "hash_func", "(", "m_prime", ")", ".", "digest", "(", ")", "return", "constant_compare", "(", "m_prime_digest", ",", "h_prime", ")" ]
28.173228
21.527559
def setup_logging(): """Function to configure log handlers. .. important:: Configuration, if needed, should be applied before invoking this decorator, as starting the subscriber process for logging will configure the root logger for the child process based on the state of :obj:`bigchaindb.config` at the moment this decorator is invoked. """ logging_configs = DEFAULT_LOGGING_CONFIG new_logging_configs = bigchaindb.config['log'] if 'file' in new_logging_configs: filename = new_logging_configs['file'] logging_configs['handlers']['file']['filename'] = filename if 'error_file' in new_logging_configs: error_filename = new_logging_configs['error_file'] logging_configs['handlers']['errors']['filename'] = error_filename if 'level_console' in new_logging_configs: level = _normalize_log_level(new_logging_configs['level_console']) logging_configs['handlers']['console']['level'] = level if 'level_logfile' in new_logging_configs: level = _normalize_log_level(new_logging_configs['level_logfile']) logging_configs['handlers']['file']['level'] = level if 'fmt_console' in new_logging_configs: fmt = new_logging_configs['fmt_console'] logging_configs['formatters']['console']['format'] = fmt if 'fmt_logfile' in new_logging_configs: fmt = new_logging_configs['fmt_logfile'] logging_configs['formatters']['file']['format'] = fmt if 'datefmt_console' in new_logging_configs: fmt = new_logging_configs['datefmt_console'] logging_configs['formatters']['console']['datefmt'] = fmt if 'datefmt_logfile' in new_logging_configs: fmt = new_logging_configs['datefmt_logfile'] logging_configs['formatters']['file']['datefmt'] = fmt log_levels = new_logging_configs.get('granular_levels', {}) for logger_name, level in log_levels.items(): level = _normalize_log_level(level) try: logging_configs['loggers'][logger_name]['level'] = level except KeyError: logging_configs['loggers'][logger_name] = {'level': level} set_logging_config(logging_configs)
[ "def", "setup_logging", "(", ")", ":", "logging_configs", "=", "DEFAULT_LOGGING_CONFIG", "new_logging_configs", "=", "bigchaindb", ".", "config", "[", "'log'", "]", "if", "'file'", "in", "new_logging_configs", ":", "filename", "=", "new_logging_configs", "[", "'file'", "]", "logging_configs", "[", "'handlers'", "]", "[", "'file'", "]", "[", "'filename'", "]", "=", "filename", "if", "'error_file'", "in", "new_logging_configs", ":", "error_filename", "=", "new_logging_configs", "[", "'error_file'", "]", "logging_configs", "[", "'handlers'", "]", "[", "'errors'", "]", "[", "'filename'", "]", "=", "error_filename", "if", "'level_console'", "in", "new_logging_configs", ":", "level", "=", "_normalize_log_level", "(", "new_logging_configs", "[", "'level_console'", "]", ")", "logging_configs", "[", "'handlers'", "]", "[", "'console'", "]", "[", "'level'", "]", "=", "level", "if", "'level_logfile'", "in", "new_logging_configs", ":", "level", "=", "_normalize_log_level", "(", "new_logging_configs", "[", "'level_logfile'", "]", ")", "logging_configs", "[", "'handlers'", "]", "[", "'file'", "]", "[", "'level'", "]", "=", "level", "if", "'fmt_console'", "in", "new_logging_configs", ":", "fmt", "=", "new_logging_configs", "[", "'fmt_console'", "]", "logging_configs", "[", "'formatters'", "]", "[", "'console'", "]", "[", "'format'", "]", "=", "fmt", "if", "'fmt_logfile'", "in", "new_logging_configs", ":", "fmt", "=", "new_logging_configs", "[", "'fmt_logfile'", "]", "logging_configs", "[", "'formatters'", "]", "[", "'file'", "]", "[", "'format'", "]", "=", "fmt", "if", "'datefmt_console'", "in", "new_logging_configs", ":", "fmt", "=", "new_logging_configs", "[", "'datefmt_console'", "]", "logging_configs", "[", "'formatters'", "]", "[", "'console'", "]", "[", "'datefmt'", "]", "=", "fmt", "if", "'datefmt_logfile'", "in", "new_logging_configs", ":", "fmt", "=", "new_logging_configs", "[", "'datefmt_logfile'", "]", "logging_configs", "[", "'formatters'", "]", "[", "'file'", "]", "[", "'datefmt'", "]", "=", "fmt", "log_levels", "=", "new_logging_configs", ".", "get", "(", "'granular_levels'", ",", "{", "}", ")", "for", "logger_name", ",", "level", "in", "log_levels", ".", "items", "(", ")", ":", "level", "=", "_normalize_log_level", "(", "level", ")", "try", ":", "logging_configs", "[", "'loggers'", "]", "[", "logger_name", "]", "[", "'level'", "]", "=", "level", "except", "KeyError", ":", "logging_configs", "[", "'loggers'", "]", "[", "logger_name", "]", "=", "{", "'level'", ":", "level", "}", "set_logging_config", "(", "logging_configs", ")" ]
37.413793
22.482759
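A sketch of the 'log' section consumed by setup_logging above. The keys are exactly those the function looks for; the paths, levels, and format strings are illustrative, and running this assumes the bigchaindb module and its DEFAULT_LOGGING_CONFIG are importable.
# Illustrative 'log' section; every key below is one the function reads.
bigchaindb.config['log'] = {
    'file': '/var/log/bigchaindb/bigchaindb.log',           # -> handlers.file.filename
    'error_file': '/var/log/bigchaindb/errors.log',         # -> handlers.errors.filename
    'level_console': 'info',                                 # -> handlers.console.level
    'level_logfile': 'debug',                                # -> handlers.file.level
    'fmt_console': '%(levelname)s: %(message)s',             # -> formatters.console.format
    'fmt_logfile': '%(asctime)s %(levelname)s %(message)s',  # -> formatters.file.format
    'datefmt_console': '%H:%M:%S',                           # -> formatters.console.datefmt
    'datefmt_logfile': '%Y-%m-%d %H:%M:%S',                  # -> formatters.file.datefmt
    'granular_levels': {'bigchaindb.core': 'warning'},       # per-logger overrides
}
setup_logging()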
def ignore_failed_logs_action(self, request, queryset): """Set FAILED trigger logs in queryset to IGNORED.""" count = _ignore_failed_logs(queryset) self.message_user( request, _('{count} failed trigger logs marked as ignored.').format(count=count), )
[ "def", "ignore_failed_logs_action", "(", "self", ",", "request", ",", "queryset", ")", ":", "count", "=", "_ignore_failed_logs", "(", "queryset", ")", "self", ".", "message_user", "(", "request", ",", "_", "(", "'{count} failed trigger logs marked as ignored.'", ")", ".", "format", "(", "count", "=", "count", ")", ",", ")" ]
42.857143
18.428571
def parse_option(self, option, block_name, *values): """ Parse app path values for option. """ # treat arguments as part of the program name (support spaces in name) values = [x.replace(' ', '\\ ') if not x.startswith(os.sep) else x for x in [str(v) for v in values]] if option == 'close': option = 'start_' + option key = option.split('_', 1)[0] self.paths[key] = set(common.extract_app_paths(values))
[ "def", "parse_option", "(", "self", ",", "option", ",", "block_name", ",", "*", "values", ")", ":", "# treat arguments as part of the program name (support spaces in name)", "values", "=", "[", "x", ".", "replace", "(", "' '", ",", "'\\\\ '", ")", "if", "not", "x", ".", "startswith", "(", "os", ".", "sep", ")", "else", "x", "for", "x", "in", "[", "str", "(", "v", ")", "for", "v", "in", "values", "]", "]", "if", "option", "==", "'close'", ":", "option", "=", "'start_'", "+", "option", "key", "=", "option", ".", "split", "(", "'_'", ",", "1", ")", "[", "0", "]", "self", ".", "paths", "[", "key", "]", "=", "set", "(", "common", ".", "extract_app_paths", "(", "values", ")", ")" ]
37.153846
19.615385
def __check_looks_like_uri(self, uri): """Checks whether the URI looks like a raw URI on GitHub: - 'https://raw.githubusercontent.com/github/hubot/master/README.md' - 'https://github.com/github/hubot/raw/master/README.md' :param uri: URI of the file """ if uri.split('/')[2] == 'raw.githubusercontent.com': return True elif uri.split('/')[2] == 'github.com': if uri.split('/')[5] == 'raw': return True else: raise GithubFileNotFound('URI %s is not a valid link to a raw file in Github' % uri)
[ "def", "__check_looks_like_uri", "(", "self", ",", "uri", ")", ":", "if", "uri", ".", "split", "(", "'/'", ")", "[", "2", "]", "==", "'raw.githubusercontent.com'", ":", "return", "True", "elif", "uri", ".", "split", "(", "'/'", ")", "[", "2", "]", "==", "'github.com'", ":", "if", "uri", ".", "split", "(", "'/'", ")", "[", "5", "]", "==", "'raw'", ":", "return", "True", "else", ":", "raise", "GithubFileNotFound", "(", "'URI %s is not a valid link to a raw file in Github'", "%", "uri", ")" ]
39.2
19.2
def market_value(self): """ [float] Market value """ return sum(account.market_value for account in six.itervalues(self._accounts))
[ "def", "market_value", "(", "self", ")", ":", "return", "sum", "(", "account", ".", "market_value", "for", "account", "in", "six", ".", "itervalues", "(", "self", ".", "_accounts", ")", ")" ]
29.8
17
def js_to_url_function(converter): """Get the JavaScript converter function from a rule.""" if hasattr(converter, 'js_to_url_function'): data = converter.js_to_url_function() else: for cls in getmro(type(converter)): if cls in js_to_url_functions: data = js_to_url_functions[cls](converter) break else: return 'encodeURIComponent' return '(function(value) { %s })' % data
[ "def", "js_to_url_function", "(", "converter", ")", ":", "if", "hasattr", "(", "converter", ",", "'js_to_url_function'", ")", ":", "data", "=", "converter", ".", "js_to_url_function", "(", ")", "else", ":", "for", "cls", "in", "getmro", "(", "type", "(", "converter", ")", ")", ":", "if", "cls", "in", "js_to_url_functions", ":", "data", "=", "js_to_url_functions", "[", "cls", "]", "(", "converter", ")", "break", "else", ":", "return", "'encodeURIComponent'", "return", "'(function(value) { %s })'", "%", "data" ]
38
10.333333
def walk_code(co, _prefix=''): """ Traverse a code object, finding all consts which are also code objects. Yields pairs of (name, code object). """ name = _prefix + co.co_name yield name, co yield from chain.from_iterable( walk_code(c, _prefix=_extend_name(name, co)) for c in co.co_consts if isinstance(c, CodeType) )
[ "def", "walk_code", "(", "co", ",", "_prefix", "=", "''", ")", ":", "name", "=", "_prefix", "+", "co", ".", "co_name", "yield", "name", ",", "co", "yield", "from", "chain", ".", "from_iterable", "(", "walk_code", "(", "c", ",", "_prefix", "=", "_extend_name", "(", "name", ",", "co", ")", ")", "for", "c", "in", "co", ".", "co_consts", "if", "isinstance", "(", "c", ",", "CodeType", ")", ")" ]
27.923077
14.230769
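A usage sketch for walk_code above, compiling a small source string and printing the names it yields. It assumes walk_code and its _extend_name helper are importable from the same module; the exact separator _extend_name uses between parent and child names is not shown above, so the printed prefixes are only described, not spelled out.
import textwrap

src = textwrap.dedent('''
    def outer():
        def inner():
            pass
        return inner

    class Thing:
        def method(self):
            pass
''')
module_code = compile(src, '<example>', 'exec')

for name, code_obj in walk_code(module_code):
    print(name, code_obj.co_name)
# Visits the module code object plus outer, inner, Thing and method,
# with each name prefixed by its enclosing scopes via _extend_name().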
def d_acquisition_function(self, x): """ Returns the gradient of the acquisition function at x. """ x = np.atleast_2d(x) if self.transform=='softplus': fval = -self.acq.acquisition_function(x)[:,0] scale = 1./(np.log1p(np.exp(fval))*(1.+np.exp(-fval))) elif self.transform=='none': fval = -self.acq.acquisition_function(x)[:,0] scale = 1./fval else: scale = 1. if self.X_batch is None: _, grad_acq_x = self.acq.acquisition_function_withGradients(x) return scale*grad_acq_x else: _, grad_acq_x = self.acq.acquisition_function_withGradients(x) return scale*grad_acq_x - self._d_hammer_function(x, self.X_batch, self.r_x0, self.s_x0)
[ "def", "d_acquisition_function", "(", "self", ",", "x", ")", ":", "x", "=", "np", ".", "atleast_2d", "(", "x", ")", "if", "self", ".", "transform", "==", "'softplus'", ":", "fval", "=", "-", "self", ".", "acq", ".", "acquisition_function", "(", "x", ")", "[", ":", ",", "0", "]", "scale", "=", "1.", "/", "(", "np", ".", "log1p", "(", "np", ".", "exp", "(", "fval", ")", ")", "*", "(", "1.", "+", "np", ".", "exp", "(", "-", "fval", ")", ")", ")", "elif", "self", ".", "transform", "==", "'none'", ":", "fval", "=", "-", "self", ".", "acq", ".", "acquisition_function", "(", "x", ")", "[", ":", ",", "0", "]", "scale", "=", "1.", "/", "fval", "else", ":", "scale", "=", "1.", "if", "self", ".", "X_batch", "is", "None", ":", "_", ",", "grad_acq_x", "=", "self", ".", "acq", ".", "acquisition_function_withGradients", "(", "x", ")", "return", "scale", "*", "grad_acq_x", "else", ":", "_", ",", "grad_acq_x", "=", "self", ".", "acq", ".", "acquisition_function_withGradients", "(", "x", ")", "return", "scale", "*", "grad_acq_x", "-", "self", ".", "_d_hammer_function", "(", "x", ",", "self", ".", "X_batch", ",", "self", ".", "r_x0", ",", "self", ".", "s_x0", ")" ]
37.761905
19.571429
def plot(x, y, z, ax=None, **kwargs): r""" Plot iso-probability mass function, converted to sigmas. Parameters ---------- x, y, z : numpy arrays Same as arguments to :func:`matplotlib.pyplot.contour` ax: axes object, optional :class:`matplotlib.axes._subplots.AxesSubplot` to plot the contours onto. If unsupplied, then :func:`matplotlib.pyplot.gca()` is used to get the last axis used, or create a new one. colors: color scheme, optional :class:`matplotlib.colors.LinearSegmentedColormap` Color scheme to plot with. Recommend plotting in reverse (Default: :class:`matplotlib.pyplot.cm.Reds_r`) smooth: float, optional Percentage by which to smooth the contours. (Default: no smoothing) contour_line_levels: List[float], optional Contour lines to be plotted. (Default: [1, 2, 3]) linewidths: float, optional Thickness of contour lines. (Default: 0.3) contour_color_levels: List[float], optional Contour color levels. (Default: `numpy.arange(0, contour_line_levels[-1] + 1, fineness)`) fineness: float, optional Spacing of contour color levels. (Default: 0.5) lines: bool, optional Whether to plot the sigma-based contour lines. (Default: True) rasterize_contours: bool, optional Rasterize the contours while keeping the lines, text etc in vector format. Useful for reducing file size bloat and making printing easier when you have dense contours. (Default: False) Returns ------- cbar: color bar :class:`matplotlib.contour.QuadContourSet` Colors to create a global colour bar """ if ax is None: ax = matplotlib.pyplot.gca() # Get inputs colors = kwargs.pop('colors', matplotlib.pyplot.cm.Reds_r) smooth = kwargs.pop('smooth', False) linewidths = kwargs.pop('linewidths', 0.3) contour_line_levels = kwargs.pop('contour_line_levels', [1, 2, 3]) fineness = kwargs.pop('fineness', 0.5) default_color_levels = numpy.arange(0, contour_line_levels[-1] + 1, fineness) contour_color_levels = kwargs.pop('contour_color_levels', default_color_levels) rasterize_contours = kwargs.pop('rasterize_contours', False) lines = kwargs.pop('lines', True) if kwargs: raise TypeError('Unexpected **kwargs: %r' % kwargs) # Convert to sigmas z = numpy.sqrt(2) * scipy.special.erfinv(1 - z) # Gaussian filter if desired the sigmas by a factor of smooth% if smooth: sigma = smooth*numpy.array(z.shape)/100.0 z = scipy.ndimage.gaussian_filter(z, sigma=sigma, order=0) # Plot the filled contours onto the axis ax cbar = ax.contourf(x, y, z, cmap=colors, levels=contour_color_levels) # Rasterize contours (the rest of the figure stays in vector format) if rasterize_contours: for c in cbar.collections: c.set_rasterized(True) # Remove those annoying white lines for c in cbar.collections: c.set_edgecolor("face") # Plot some sigma-based contour lines if lines: ax.contour(x, y, z, colors='k', linewidths=linewidths, levels=contour_line_levels) # Return the contours for use as a colourbar later return cbar
[ "def", "plot", "(", "x", ",", "y", ",", "z", ",", "ax", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "ax", "is", "None", ":", "ax", "=", "matplotlib", ".", "pyplot", ".", "gca", "(", ")", "# Get inputs", "colors", "=", "kwargs", ".", "pop", "(", "'colors'", ",", "matplotlib", ".", "pyplot", ".", "cm", ".", "Reds_r", ")", "smooth", "=", "kwargs", ".", "pop", "(", "'smooth'", ",", "False", ")", "linewidths", "=", "kwargs", ".", "pop", "(", "'linewidths'", ",", "0.3", ")", "contour_line_levels", "=", "kwargs", ".", "pop", "(", "'contour_line_levels'", ",", "[", "1", ",", "2", ",", "3", "]", ")", "fineness", "=", "kwargs", ".", "pop", "(", "'fineness'", ",", "0.5", ")", "default_color_levels", "=", "numpy", ".", "arange", "(", "0", ",", "contour_line_levels", "[", "-", "1", "]", "+", "1", ",", "fineness", ")", "contour_color_levels", "=", "kwargs", ".", "pop", "(", "'contour_color_levels'", ",", "default_color_levels", ")", "rasterize_contours", "=", "kwargs", ".", "pop", "(", "'rasterize_contours'", ",", "False", ")", "lines", "=", "kwargs", ".", "pop", "(", "'lines'", ",", "True", ")", "if", "kwargs", ":", "raise", "TypeError", "(", "'Unexpected **kwargs: %r'", "%", "kwargs", ")", "# Convert to sigmas", "z", "=", "numpy", ".", "sqrt", "(", "2", ")", "*", "scipy", ".", "special", ".", "erfinv", "(", "1", "-", "z", ")", "# Gaussian filter if desired the sigmas by a factor of smooth%", "if", "smooth", ":", "sigma", "=", "smooth", "*", "numpy", ".", "array", "(", "z", ".", "shape", ")", "/", "100.0", "z", "=", "scipy", ".", "ndimage", ".", "gaussian_filter", "(", "z", ",", "sigma", "=", "sigma", ",", "order", "=", "0", ")", "# Plot the filled contours onto the axis ax", "cbar", "=", "ax", ".", "contourf", "(", "x", ",", "y", ",", "z", ",", "cmap", "=", "colors", ",", "levels", "=", "contour_color_levels", ")", "# Rasterize contours (the rest of the figure stays in vector format)", "if", "rasterize_contours", ":", "for", "c", "in", "cbar", ".", "collections", ":", "c", ".", "set_rasterized", "(", "True", ")", "# Remove those annoying white lines", "for", "c", "in", "cbar", ".", "collections", ":", "c", ".", "set_edgecolor", "(", "\"face\"", ")", "# Plot some sigma-based contour lines", "if", "lines", ":", "ax", ".", "contour", "(", "x", ",", "y", ",", "z", ",", "colors", "=", "'k'", ",", "linewidths", "=", "linewidths", ",", "levels", "=", "contour_line_levels", ")", "# Return the contours for use as a colourbar later", "return", "cbar" ]
32.62
21.43
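A usage sketch for the plot record above. The z surface here is a synthetic stand-in for an iso-probability-mass array, chosen only so the sigma conversion stays finite across the grid; how z is computed in practice depends on the caller, and the function is assumed importable alongside its numpy, scipy and matplotlib dependencies.
import numpy
import matplotlib.pyplot as plt

x = numpy.linspace(-3, 3, 200)
y = numpy.linspace(-3, 3, 200)
X, Y = numpy.meshgrid(x, y)
# Synthetic surface: close to 1 at the peak and falling towards 0 in the tails,
# so sqrt(2) * erfinv(1 - z) remains finite everywhere on the grid.
Z = numpy.exp(-(X**2 + Y**2) / 2)

fig, ax = plt.subplots()
cbar = plot(X, Y, Z, ax=ax, smooth=1, contour_line_levels=[1, 2])
fig.colorbar(cbar, label='sigma')
plt.show()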