Column schema:
    text: string (lengths 89 to 104k)
    code_tokens: list
    avg_line_len: float64 (range 7.91 to 980)
    score: float64 (range 0 to 630)
def get_qseq_dir(fc_dir):
    """Retrieve the qseq directory within Solexa flowcell output.
    """
    machine_bc = os.path.join(fc_dir, "Data", "Intensities", "BaseCalls")
    if os.path.exists(machine_bc):
        return machine_bc
    # otherwise assume we are in the qseq directory
    # XXX What other cases can we end up with here?
    else:
        return fc_dir
[ "def", "get_qseq_dir", "(", "fc_dir", ")", ":", "machine_bc", "=", "os", ".", "path", ".", "join", "(", "fc_dir", ",", "\"Data\"", ",", "\"Intensities\"", ",", "\"BaseCalls\"", ")", "if", "os", ".", "path", ".", "exists", "(", "machine_bc", ")", ":", "return", "machine_bc", "# otherwise assume we are in the qseq directory", "# XXX What other cases can we end up with here?", "else", ":", "return", "fc_dir" ]
avg_line_len: 36.1
score: 14.1
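A minimal usage sketch for the record above; the flowcell path is hypothetical and `os` is assumed to be imported in the function's module:

    # hypothetical flowcell directory, not part of the record
    fc_dir = "/data/runs/flowcell_A"
    qseq_dir = get_qseq_dir(fc_dir)
    # -> fc_dir + "/Data/Intensities/BaseCalls" if that layout exists,
    #    otherwise fc_dir itself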
def app0(self):
    """
    First APP0 marker in image markers.
    """
    for m in self._markers:
        if m.marker_code == JPEG_MARKER_CODE.APP0:
            return m
    raise KeyError('no APP0 marker in image')
[ "def", "app0", "(", "self", ")", ":", "for", "m", "in", "self", ".", "_markers", ":", "if", "m", ".", "marker_code", "==", "JPEG_MARKER_CODE", ".", "APP0", ":", "return", "m", "raise", "KeyError", "(", "'no APP0 marker in image'", ")" ]
avg_line_len: 29.75
score: 9.5
def parse_plotFingerprint(self):
    """Find plotFingerprint output. Both --outQualityMetrics and --outRawCounts"""
    self.deeptools_plotFingerprintOutQualityMetrics = dict()
    for f in self.find_log_files('deeptools/plotFingerprintOutQualityMetrics'):
        parsed_data = self.parsePlotFingerprintOutQualityMetrics(f)
        for k, v in parsed_data.items():
            if k in self.deeptools_plotFingerprintOutQualityMetrics:
                log.warning("Replacing duplicate sample {}.".format(k))
            # Values are fractions - convert to percentages for consistency with other MultiQC output
            self.deeptools_plotFingerprintOutQualityMetrics[k] = {i: float(j) * 100.0 for i, j in v.items()}
        if len(parsed_data) > 0:
            self.add_data_source(f, section='plotFingerprint')

    self.deeptools_plotFingerprintOutRawCounts = dict()
    for f in self.find_log_files('deeptools/plotFingerprintOutRawCounts'):
        parsed_data = self.parsePlotFingerprintOutRawCounts(f)
        for k, v in parsed_data.items():
            if k in self.deeptools_plotFingerprintOutRawCounts:
                log.warning("Replacing duplicate sample {}.".format(k))
            self.deeptools_plotFingerprintOutRawCounts[k] = v
        if len(parsed_data) > 0:
            self.add_data_source(f, section='plotFingerprint')

    if len(self.deeptools_plotFingerprintOutRawCounts) > 0:
        self.add_section(
            name="Fingerprint plot",
            anchor="deeptools_fingerprint",
            description="Signal fingerprint according to plotFingerprint",
            plot=linegraph.plot(
                self.deeptools_plotFingerprintOutRawCounts,
                {
                    'id': 'deeptools_fingerprint_plot',
                    'title': 'deepTools: Fingerprint plot',
                    'xmin': 0.0,
                    'xmax': 1.0,
                    'ymin': 0.0,
                    'ymax': 1.0,
                    'xlab': 'rank',
                    'ylab': 'Fraction w.r.t. bin with highest coverage'
                }
            ))

    if len(self.deeptools_plotFingerprintOutQualityMetrics) > 0:
        self.add_section(
            name="Fingerprint quality metrics",
            anchor="plotFingerprint",
            description="Various quality metrics returned by plotFingerprint",
            plot=linegraph.plot(
                self.deeptools_plotFingerprintOutQualityMetrics,
                {
                    'id': 'plotFingerprint_quality_metrics',
                    'title': 'deepTools: Fingerprint quality metrics',
                    'stacking': None,
                    'ymin': 0,
                    'ymax': 100,
                    'yLabelFormat': '{value}%',
                    'ylab': 'Percentage of fragments',
                    'categories': True,
                    'tt_label': '<strong>{point.x}</strong>: {point.y:.2f}%'
                }
            ))

    return len(self.deeptools_plotFingerprintOutQualityMetrics), len(self.deeptools_plotFingerprintOutRawCounts)
[ "def", "parse_plotFingerprint", "(", "self", ")", ":", "self", ".", "deeptools_plotFingerprintOutQualityMetrics", "=", "dict", "(", ")", "for", "f", "in", "self", ".", "find_log_files", "(", "'deeptools/plotFingerprintOutQualityMetrics'", ")", ":", "parsed_data", "=", "self", ".", "parsePlotFingerprintOutQualityMetrics", "(", "f", ")", "for", "k", ",", "v", "in", "parsed_data", ".", "items", "(", ")", ":", "if", "k", "in", "self", ".", "deeptools_plotFingerprintOutQualityMetrics", ":", "log", ".", "warning", "(", "\"Replacing duplicate sample {}.\"", ".", "format", "(", "k", ")", ")", "# Values are fractions - convert to percentages for consistency with other MultiQC output", "self", ".", "deeptools_plotFingerprintOutQualityMetrics", "[", "k", "]", "=", "{", "i", ":", "float", "(", "j", ")", "*", "100.0", "for", "i", ",", "j", "in", "v", ".", "items", "(", ")", "}", "if", "len", "(", "parsed_data", ")", ">", "0", ":", "self", ".", "add_data_source", "(", "f", ",", "section", "=", "'plotFingerprint'", ")", "self", ".", "deeptools_plotFingerprintOutRawCounts", "=", "dict", "(", ")", "for", "f", "in", "self", ".", "find_log_files", "(", "'deeptools/plotFingerprintOutRawCounts'", ")", ":", "parsed_data", "=", "self", ".", "parsePlotFingerprintOutRawCounts", "(", "f", ")", "for", "k", ",", "v", "in", "parsed_data", ".", "items", "(", ")", ":", "if", "k", "in", "self", ".", "deeptools_plotFingerprintOutRawCounts", ":", "log", ".", "warning", "(", "\"Replacing duplicate sample {}.\"", ".", "format", "(", "k", ")", ")", "self", ".", "deeptools_plotFingerprintOutRawCounts", "[", "k", "]", "=", "v", "if", "len", "(", "parsed_data", ")", ">", "0", ":", "self", ".", "add_data_source", "(", "f", ",", "section", "=", "'plotFingerprint'", ")", "if", "len", "(", "self", ".", "deeptools_plotFingerprintOutRawCounts", ")", ">", "0", ":", "self", ".", "add_section", "(", "name", "=", "\"Fingerprint plot\"", ",", "anchor", "=", "\"deeptools_fingerprint\"", ",", "description", "=", "\"Signal fingerprint according to plotFingerprint\"", ",", "plot", "=", "linegraph", ".", "plot", "(", "self", ".", "deeptools_plotFingerprintOutRawCounts", ",", "{", "'id'", ":", "'deeptools_fingerprint_plot'", ",", "'title'", ":", "'deepTools: Fingerprint plot'", ",", "'xmin'", ":", "0.0", ",", "'xmax'", ":", "1.0", ",", "'ymin'", ":", "0.0", ",", "'ymax'", ":", "1.0", ",", "'xlab'", ":", "'rank'", ",", "'ylab'", ":", "'Fraction w.r.t. bin with highest coverage'", "}", ")", ")", "if", "len", "(", "self", ".", "deeptools_plotFingerprintOutQualityMetrics", ")", ">", "0", ":", "self", ".", "add_section", "(", "name", "=", "\"Fingerprint quality metrics\"", ",", "anchor", "=", "\"plotFingerprint\"", ",", "description", "=", "\"Various quality metrics returned by plotFingerprint\"", ",", "plot", "=", "linegraph", ".", "plot", "(", "self", ".", "deeptools_plotFingerprintOutQualityMetrics", ",", "{", "'id'", ":", "'plotFingerprint_quality_metrics'", ",", "'title'", ":", "'deepTools: Fingerprint quality metrics'", ",", "'stacking'", ":", "None", ",", "'ymin'", ":", "0", ",", "'ymax'", ":", "100", ",", "'yLabelFormat'", ":", "'{value}%'", ",", "'ylab'", ":", "'Percentage of fragments'", ",", "'categories'", ":", "True", ",", "'tt_label'", ":", "'<strong>{point.x}</strong>: {point.y:.2f}%'", "}", ")", ")", "return", "len", "(", "self", ".", "deeptools_plotFingerprintOutQualityMetrics", ")", ",", "len", "(", "self", ".", "deeptools_plotFingerprintOutRawCounts", ")" ]
avg_line_len: 56.746032
score: 25.603175
def _validate_message(self, message):
    """
    Is C{message} a valid direct child of this action?

    @param message: Either a C{WrittenAction} or a C{WrittenMessage}.

    @raise WrongTask: If C{message} has a C{task_uuid} that differs from the
        action's C{task_uuid}.
    @raise WrongTaskLevel: If C{message} has a C{task_level} that means
        it's not a direct child.
    """
    if message.task_uuid != self.task_uuid:
        raise WrongTask(self, message)
    if not message.task_level.parent() == self.task_level:
        raise WrongTaskLevel(self, message)
[ "def", "_validate_message", "(", "self", ",", "message", ")", ":", "if", "message", ".", "task_uuid", "!=", "self", ".", "task_uuid", ":", "raise", "WrongTask", "(", "self", ",", "message", ")", "if", "not", "message", ".", "task_level", ".", "parent", "(", ")", "==", "self", ".", "task_level", ":", "raise", "WrongTaskLevel", "(", "self", ",", "message", ")" ]
avg_line_len: 40.866667
score: 17.133333
def pk_prom(word):
    '''Return the number of stressed light syllables.'''
    LIGHT = r'[ieaAoO]{1}[\.]*(u|y)(\.|$)'

    # # if the word is not monosyllabic, lop off the final syllable, which is
    # # extrametrical
    # if '.' in word:
    #     word = word[:word.rindex('.')]

    # gather the indices of syllable boundaries
    delimiters = [0, ] + [i for i, char in enumerate(word) if char == '.']

    if len(delimiters) % 2 != 0:
        delimiters.append(len(word))

    stressed = []

    # gather the indices of stressed positions
    for i, d in enumerate(delimiters):
        if i % 2 == 0:
            stressed.extend(range(d + 1, delimiters[i + 1]))

    # find the number of stressed light syllables
    heavies = re.finditer(LIGHT, word)
    violations = sum(1 for m in heavies if m.start(1) in stressed)

    return violations
[ "def", "pk_prom", "(", "word", ")", ":", "LIGHT", "=", "r'[ieaAoO]{1}[\\.]*(u|y)(\\.|$)'", "# # if the word is not monosyllabic, lop off the final syllable, which is", "# # extrametrical", "# if '.' in word:", "# word = word[:word.rindex('.')]", "# gather the indices of syllable boundaries", "delimiters", "=", "[", "0", ",", "]", "+", "[", "i", "for", "i", ",", "char", "in", "enumerate", "(", "word", ")", "if", "char", "==", "'.'", "]", "if", "len", "(", "delimiters", ")", "%", "2", "!=", "0", ":", "delimiters", ".", "append", "(", "len", "(", "word", ")", ")", "stressed", "=", "[", "]", "# gather the indices of stressed positions", "for", "i", ",", "d", "in", "enumerate", "(", "delimiters", ")", ":", "if", "i", "%", "2", "==", "0", ":", "stressed", ".", "extend", "(", "range", "(", "d", "+", "1", ",", "delimiters", "[", "i", "+", "1", "]", ")", ")", "# find the number of stressed light syllables", "heavies", "=", "re", ".", "finditer", "(", "LIGHT", ",", "word", ")", "violations", "=", "sum", "(", "1", "for", "m", "in", "heavies", "if", "m", ".", "start", "(", "1", ")", "in", "stressed", ")", "return", "violations" ]
avg_line_len: 30.407407
score: 21.222222
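A hand-checked invocation of pk_prom above, assuming `re` is imported in its module; the word is an illustrative syllabified string, not taken from the record:

    # 'kau.lo': delimiters -> [0, 3], stressed indices -> [1, 2];
    # the LIGHT pattern matches with its (u|y) group starting at index 2
    print(pk_prom('kau.lo'))  # 1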
def namedb_get_names_by_sender( cur, sender, current_block ):
    """
    Given a sender pubkey script, find all the non-expired non-revoked names owned by it.
    Return an empty list if the sender owns no names.
    """
    unexpired_query, unexpired_args = namedb_select_where_unexpired_names( current_block )

    query = "SELECT name_records.name FROM name_records JOIN namespaces ON name_records.namespace_id = namespaces.namespace_id " + \
            "WHERE name_records.sender = ? AND name_records.revoked = 0 AND " + unexpired_query + ";"

    args = (sender,) + unexpired_args

    name_rows = namedb_query_execute( cur, query, args )

    names = []
    for name_row in name_rows:
        names.append( name_row['name'] )

    return names
[ "def", "namedb_get_names_by_sender", "(", "cur", ",", "sender", ",", "current_block", ")", ":", "unexpired_query", ",", "unexpired_args", "=", "namedb_select_where_unexpired_names", "(", "current_block", ")", "query", "=", "\"SELECT name_records.name FROM name_records JOIN namespaces ON name_records.namespace_id = namespaces.namespace_id \"", "+", "\"WHERE name_records.sender = ? AND name_records.revoked = 0 AND \"", "+", "unexpired_query", "+", "\";\"", "args", "=", "(", "sender", ",", ")", "+", "unexpired_args", "name_rows", "=", "namedb_query_execute", "(", "cur", ",", "query", ",", "args", ")", "names", "=", "[", "]", "for", "name_row", "in", "name_rows", ":", "names", ".", "append", "(", "name_row", "[", "'name'", "]", ")", "return", "names" ]
avg_line_len: 36.2
score: 29.8
def show_help(command_name: str = None, raw_args: str = '') -> Response:
    """ Prints the basic command help to the console """

    response = Response()
    cmds = fetch()

    if command_name and command_name in cmds:
        parser, result = parse.get_parser(
            cmds[command_name],
            parse.explode_line(raw_args),
            dict()
        )

        if parser is not None:
            out = parser.format_help()
            return response.notify(
                kind='INFO',
                code='COMMAND_DESCRIPTION'
            ).kernel(
                commands=out
            ).console(
                out,
                whitespace=1
            ).response

    environ.log_header('Available Commands')
    response.consume(print_module_help())

    return response.fail(
        code='NO_SUCH_COMMAND',
        message='Failed to show command help for "{}"'.format(command_name)
    ).console(
        """
        For more information on the various commands, enter help on the
        specific command:

            help [COMMAND]
        """,
        whitespace_bottom=1
    ).response
[ "def", "show_help", "(", "command_name", ":", "str", "=", "None", ",", "raw_args", ":", "str", "=", "''", ")", "->", "Response", ":", "response", "=", "Response", "(", ")", "cmds", "=", "fetch", "(", ")", "if", "command_name", "and", "command_name", "in", "cmds", ":", "parser", ",", "result", "=", "parse", ".", "get_parser", "(", "cmds", "[", "command_name", "]", ",", "parse", ".", "explode_line", "(", "raw_args", ")", ",", "dict", "(", ")", ")", "if", "parser", "is", "not", "None", ":", "out", "=", "parser", ".", "format_help", "(", ")", "return", "response", ".", "notify", "(", "kind", "=", "'INFO'", ",", "code", "=", "'COMMAND_DESCRIPTION'", ")", ".", "kernel", "(", "commands", "=", "out", ")", ".", "console", "(", "out", ",", "whitespace", "=", "1", ")", ".", "response", "environ", ".", "log_header", "(", "'Available Commands'", ")", "response", ".", "consume", "(", "print_module_help", "(", ")", ")", "return", "response", ".", "fail", "(", "code", "=", "'NO_SUCH_COMMAND'", ",", "message", "=", "'Failed to show command help for \"{}\"'", ".", "format", "(", "command_name", ")", ")", ".", "console", "(", "\"\"\"\n For more information on the various commands, enter help on the\n specific command:\n\n help [COMMAND]\n \"\"\"", ",", "whitespace_bottom", "=", "1", ")", ".", "response" ]
avg_line_len: 27.175
score: 17.45
def editor_js_initialization(selector, **extra_settings):
    """ Return script tag with initialization code. """
    init_template = loader.get_template(
        settings.MARKDOWN_EDITOR_INIT_TEMPLATE)
    options = dict(
        previewParserPath=reverse('django_markdown_preview'),
        **settings.MARKDOWN_EDITOR_SETTINGS)
    options.update(extra_settings)
    ctx = dict(
        selector=selector,
        extra_settings=simplejson.dumps(options)
    )
    return init_template.render(ctx)
[ "def", "editor_js_initialization", "(", "selector", ",", "*", "*", "extra_settings", ")", ":", "init_template", "=", "loader", ".", "get_template", "(", "settings", ".", "MARKDOWN_EDITOR_INIT_TEMPLATE", ")", "options", "=", "dict", "(", "previewParserPath", "=", "reverse", "(", "'django_markdown_preview'", ")", ",", "*", "*", "settings", ".", "MARKDOWN_EDITOR_SETTINGS", ")", "options", ".", "update", "(", "extra_settings", ")", "ctx", "=", "dict", "(", "selector", "=", "selector", ",", "extra_settings", "=", "simplejson", ".", "dumps", "(", "options", ")", ")", "return", "init_template", ".", "render", "(", "ctx", ")" ]
avg_line_len: 32.466667
score: 16.133333
def send_photo(self, peer: Peer, photo: str, caption: str = None, reply: int = None,
               on_success: callable = None, reply_markup: botapi.ReplyMarkup = None):
    """
    Send photo to peer.

    :param peer: Peer to send message to.
    :param photo: File path to photo to send.
    :param caption: Caption for photo
    :param reply: Message object or message_id to reply to.
    :param on_success: Callback to call when call is complete.

    :type reply: int or Message
    """
    if isinstance(reply, Message):
        reply = reply.id

    photo = botapi.InputFile('photo', botapi.InputFileInfo(photo, open(photo, 'rb'),
                                                           get_mimetype(photo)))

    botapi.send_photo(chat_id=peer.id, photo=photo, caption=caption,
                      reply_to_message_id=reply, on_success=on_success,
                      reply_markup=reply_markup, **self.request_args).run()
[ "def", "send_photo", "(", "self", ",", "peer", ":", "Peer", ",", "photo", ":", "str", ",", "caption", ":", "str", "=", "None", ",", "reply", ":", "int", "=", "None", ",", "on_success", ":", "callable", "=", "None", ",", "reply_markup", ":", "botapi", ".", "ReplyMarkup", "=", "None", ")", ":", "if", "isinstance", "(", "reply", ",", "Message", ")", ":", "reply", "=", "reply", ".", "id", "photo", "=", "botapi", ".", "InputFile", "(", "'photo'", ",", "botapi", ".", "InputFileInfo", "(", "photo", ",", "open", "(", "photo", ",", "'rb'", ")", ",", "get_mimetype", "(", "photo", ")", ")", ")", "botapi", ".", "send_photo", "(", "chat_id", "=", "peer", ".", "id", ",", "photo", "=", "photo", ",", "caption", "=", "caption", ",", "reply_to_message_id", "=", "reply", ",", "on_success", "=", "on_success", ",", "reply_markup", "=", "reply_markup", ",", "*", "*", "self", ".", "request_args", ")", ".", "run", "(", ")" ]
avg_line_len: 46.842105
score: 25.894737
def _parse(self, source, accept_encoded_idn, only_icann=False):
    """ PSL parser core """
    publicsuffix = set()
    maxlabel = 0
    section_is_icann = None
    if isinstance(source, decodablestr):
        source = source.splitlines()
    ln = 0
    for line in source:
        ln += 1
        if only_icann:
            ul = u(line).rstrip()
            if ul == "// ===BEGIN ICANN DOMAINS===":
                section_is_icann = True
                continue
            elif ul == "// ===END ICANN DOMAINS===":
                section_is_icann = False
                continue
            if not section_is_icann:
                continue
        s = u(line).lower().split(" ")[0].rstrip()
        if s == "" or s.startswith("//"):
            continue
        maxlabel = max(maxlabel, s.count(".") + 1)
        publicsuffix.add(s)
        if accept_encoded_idn:
            e = encode_idn(s.lstrip("!"))
            if s[0] == "!":
                publicsuffix.add("!" + e)
            else:
                publicsuffix.add(e)
    self._publicsuffix = frozenset(publicsuffix)
    self._maxlabel = maxlabel
[ "def", "_parse", "(", "self", ",", "source", ",", "accept_encoded_idn", ",", "only_icann", "=", "False", ")", ":", "publicsuffix", "=", "set", "(", ")", "maxlabel", "=", "0", "section_is_icann", "=", "None", "if", "isinstance", "(", "source", ",", "decodablestr", ")", ":", "source", "=", "source", ".", "splitlines", "(", ")", "ln", "=", "0", "for", "line", "in", "source", ":", "ln", "+=", "1", "if", "only_icann", ":", "ul", "=", "u", "(", "line", ")", ".", "rstrip", "(", ")", "if", "ul", "==", "\"// ===BEGIN ICANN DOMAINS===\"", ":", "section_is_icann", "=", "True", "continue", "elif", "ul", "==", "\"// ===END ICANN DOMAINS===\"", ":", "section_is_icann", "=", "False", "continue", "if", "not", "section_is_icann", ":", "continue", "s", "=", "u", "(", "line", ")", ".", "lower", "(", ")", ".", "split", "(", "\" \"", ")", "[", "0", "]", ".", "rstrip", "(", ")", "if", "s", "==", "\"\"", "or", "s", ".", "startswith", "(", "\"//\"", ")", ":", "continue", "maxlabel", "=", "max", "(", "maxlabel", ",", "s", ".", "count", "(", "\".\"", ")", "+", "1", ")", "publicsuffix", ".", "add", "(", "s", ")", "if", "accept_encoded_idn", ":", "e", "=", "encode_idn", "(", "s", ".", "lstrip", "(", "\"!\"", ")", ")", "if", "s", "[", "0", "]", "==", "\"!\"", ":", "publicsuffix", ".", "add", "(", "\"!\"", "+", "e", ")", "else", ":", "publicsuffix", ".", "add", "(", "e", ")", "self", ".", "_publicsuffix", "=", "frozenset", "(", "publicsuffix", ")", "self", ".", "_maxlabel", "=", "maxlabel" ]
avg_line_len: 31.051282
score: 14.923077
def _slice(expr, start=None, stop=None, step=None):
    """
    Slice substrings from each element in the sequence or scalar

    :param expr: string sequence or scalar expression to slice
    :param start: int or None
    :param stop: int or None
    :param step: int or None
    :return: sliced
    """

    return _string_op(expr, Slice, _start=start, _end=stop, _step=step)
[ "def", "_slice", "(", "expr", ",", "start", "=", "None", ",", "stop", "=", "None", ",", "step", "=", "None", ")", ":", "return", "_string_op", "(", "expr", ",", "Slice", ",", "_start", "=", "start", ",", "_end", "=", "stop", ",", "_step", "=", "step", ")" ]
avg_line_len: 26.666667
score: 18.833333
def com_google_fonts_check_version_bump(ttFont,
                                        api_gfonts_ttFont,
                                        github_gfonts_ttFont):
    """Version number has increased since previous release on Google Fonts?"""
    v_number = ttFont["head"].fontRevision
    api_gfonts_v_number = api_gfonts_ttFont["head"].fontRevision
    github_gfonts_v_number = github_gfonts_ttFont["head"].fontRevision
    failed = False

    if v_number == api_gfonts_v_number:
        failed = True
        yield FAIL, ("Version number {} is equal to"
                     " version on Google Fonts.").format(v_number)

    if v_number < api_gfonts_v_number:
        failed = True
        yield FAIL, ("Version number {} is less than"
                     " version on Google Fonts ({})."
                     "").format(v_number, api_gfonts_v_number)

    if v_number == github_gfonts_v_number:
        failed = True
        yield FAIL, ("Version number {} is equal to"
                     " version on Google Fonts GitHub repo."
                     "").format(v_number)

    if v_number < github_gfonts_v_number:
        failed = True
        yield FAIL, ("Version number {} is less than"
                     " version on Google Fonts GitHub repo ({})."
                     "").format(v_number, github_gfonts_v_number)

    if not failed:
        yield PASS, ("Version number {} is greater than"
                     " version on Google Fonts GitHub ({})"
                     " and production servers ({})."
                     "").format(v_number,
                                github_gfonts_v_number,
                                api_gfonts_v_number)
[ "def", "com_google_fonts_check_version_bump", "(", "ttFont", ",", "api_gfonts_ttFont", ",", "github_gfonts_ttFont", ")", ":", "v_number", "=", "ttFont", "[", "\"head\"", "]", ".", "fontRevision", "api_gfonts_v_number", "=", "api_gfonts_ttFont", "[", "\"head\"", "]", ".", "fontRevision", "github_gfonts_v_number", "=", "github_gfonts_ttFont", "[", "\"head\"", "]", ".", "fontRevision", "failed", "=", "False", "if", "v_number", "==", "api_gfonts_v_number", ":", "failed", "=", "True", "yield", "FAIL", ",", "(", "\"Version number {} is equal to\"", "\" version on Google Fonts.\"", ")", ".", "format", "(", "v_number", ")", "if", "v_number", "<", "api_gfonts_v_number", ":", "failed", "=", "True", "yield", "FAIL", ",", "(", "\"Version number {} is less than\"", "\" version on Google Fonts ({}).\"", "\"\"", ")", ".", "format", "(", "v_number", ",", "api_gfonts_v_number", ")", "if", "v_number", "==", "github_gfonts_v_number", ":", "failed", "=", "True", "yield", "FAIL", ",", "(", "\"Version number {} is equal to\"", "\" version on Google Fonts GitHub repo.\"", "\"\"", ")", ".", "format", "(", "v_number", ")", "if", "v_number", "<", "github_gfonts_v_number", ":", "failed", "=", "True", "yield", "FAIL", ",", "(", "\"Version number {} is less than\"", "\" version on Google Fonts GitHub repo ({}).\"", "\"\"", ")", ".", "format", "(", "v_number", ",", "github_gfonts_v_number", ")", "if", "not", "failed", ":", "yield", "PASS", ",", "(", "\"Version number {} is greater than\"", "\" version on Google Fonts GitHub ({})\"", "\" and production servers ({}).\"", "\"\"", ")", ".", "format", "(", "v_number", ",", "github_gfonts_v_number", ",", "api_gfonts_v_number", ")" ]
avg_line_len: 38.731707
score: 15.414634
def load(self, record_key, secret_key=''):
    '''
        a method to retrieve byte data of an S3 record

    :param record_key: string with name of record
    :param secret_key: [optional] string used to decrypt data
    :return: byte data for record body
    '''

    title = '%s.load' % self.__class__.__name__

    # validate inputs
    input_fields = {
        'record_key': record_key,
        'secret_key': secret_key
    }
    for key, value in input_fields.items():
        if value:
            object_title = '%s(%s=%s)' % (title, key, str(value))
            self.fields.validate(value, '.%s' % key, object_title)

    # retrieve record data from s3
    record_data, record_metadata = self.s3.read_record(self.bucket_name, record_key)

    # validate secret key
    error_msg = '%s(secret_key="...") required to decrypt record "%s"' % (title, record_key)
    if 'encryption' in record_metadata['metadata'].keys():
        if record_metadata['metadata']['encryption'] == 'lab512':
            if not secret_key:
                raise Exception(error_msg)
        else:
            self.s3.iam.printer('[WARNING]: %s uses unrecognized encryption method. Decryption skipped.' % record_key)
            secret_key = ''

    # decrypt (if necessary)
    if secret_key:
        from labpack.encryption import cryptolab
        record_data = cryptolab.decrypt(record_data, secret_key)

    return record_data
[ "def", "load", "(", "self", ",", "record_key", ",", "secret_key", "=", "''", ")", ":", "title", "=", "'%s.load'", "%", "self", ".", "__class__", ".", "__name__", "# validate inputs", "input_fields", "=", "{", "'record_key'", ":", "record_key", ",", "'secret_key'", ":", "secret_key", "}", "for", "key", ",", "value", "in", "input_fields", ".", "items", "(", ")", ":", "if", "value", ":", "object_title", "=", "'%s(%s=%s)'", "%", "(", "title", ",", "key", ",", "str", "(", "value", ")", ")", "self", ".", "fields", ".", "validate", "(", "value", ",", "'.%s'", "%", "key", ",", "object_title", ")", "# retrieve record data from s3", "record_data", ",", "record_metadata", "=", "self", ".", "s3", ".", "read_record", "(", "self", ".", "bucket_name", ",", "record_key", ")", "# validate secret key", "error_msg", "=", "'%s(secret_key=\"...\") required to decrypt record \"%s\"'", "%", "(", "title", ",", "record_key", ")", "if", "'encryption'", "in", "record_metadata", "[", "'metadata'", "]", ".", "keys", "(", ")", ":", "if", "record_metadata", "[", "'metadata'", "]", "[", "'encryption'", "]", "==", "'lab512'", ":", "if", "not", "secret_key", ":", "raise", "Exception", "(", "error_msg", ")", "else", ":", "self", ".", "s3", ".", "iam", ".", "printer", "(", "'[WARNING]: %s uses unrecognized encryption method. Decryption skipped.'", "%", "record_key", ")", "secret_key", "=", "''", "# decrypt (if necessary)", "if", "secret_key", ":", "from", "labpack", ".", "encryption", "import", "cryptolab", "record_data", "=", "cryptolab", ".", "decrypt", "(", "record_data", ",", "secret_key", ")", "return", "record_data" ]
avg_line_len: 37.439024
score: 23.04878
def unordered_pair_eq(pair1, pair2):
    '''Performs pairwise unordered equality.

    ``pair1`` == ``pair2`` if and only if
    ``frozenset(pair1)`` == ``frozenset(pair2)``.
    '''
    (x1, y1), (x2, y2) = pair1, pair2
    return (x1 == x2 and y1 == y2) or (x1 == y2 and y1 == x2)
[ "def", "unordered_pair_eq", "(", "pair1", ",", "pair2", ")", ":", "(", "x1", ",", "y1", ")", ",", "(", "x2", ",", "y2", ")", "=", "pair1", ",", "pair2", "return", "(", "x1", "==", "x2", "and", "y1", "==", "y2", ")", "or", "(", "x1", "==", "y2", "and", "y1", "==", "x2", ")" ]
avg_line_len: 34.375
score: 14.375
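A quick doctest-style illustration of the equality semantics described in the docstring above:

    >>> unordered_pair_eq((1, 2), (2, 1))
    True
    >>> unordered_pair_eq((1, 2), (1, 3))
    False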
def slurpChompedLines(file, expand=False):
    r"""Return ``file`` as a list of chomped lines. See `slurpLines`."""
    f = _normalizeToFile(file, "r", expand)
    try:
        return list(chompLines(f))
    finally:
        f.close()
[ "def", "slurpChompedLines", "(", "file", ",", "expand", "=", "False", ")", ":", "f", "=", "_normalizeToFile", "(", "file", ",", "\"r\"", ",", "expand", ")", "try", ":", "return", "list", "(", "chompLines", "(", "f", ")", ")", "finally", ":", "f", ".", "close", "(", ")" ]
avg_line_len: 41.8
score: 5.2
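`_normalizeToFile` and `chompLines` are defined elsewhere in that module and are not part of this record; a minimal sketch of the behavior `chompLines` is assumed to have (a lazy generator stripping trailing line terminators):

    def chompLines(lines):
        # hypothetical stand-in: strip trailing CR/LF characters from each line
        for line in lines:
            yield line.rstrip('\r\n')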
def Blaster(inputfile, databases, db_path, out_path='.', min_cov=0.6,
            threshold=0.9, blast='blastn', cut_off=True):
    '''
    BLAST wrapper method that takes a simple input and produces an overview
    list of the hits to templates, and their alignments

    Usage
        >>> import os, subprocess, collections
        >>> from Bio.Blast import NCBIXML
        >>> from Bio import SeqIO
        >>> from string import maketrans
        >>> inputfile = 'test.fsa'
        >>> databases = ['enterobacteriaceae']
        >>> db_path = '/path/to/databases/plasmidfinder/'
        >>> Blaster(inputfile, databases, db_path)
    '''
    min_cov = 100 * float(min_cov)
    threshold = 100 * float(threshold)

    # For alignment
    gene_align_query = dict()   # will contain the sequence alignment lines
    gene_align_homo = dict()    # will contain the sequence alignment homolog string
    gene_align_sbjct = dict()   # will contain the sequence alignment allele string
    results = dict()            # will contain the results

    for db in databases:
        # Adding the path to the database and output
        db_file = "%s/%s.fsa" % (db_path, db)
        os.system("mkdir -p %s/tmp" % (out_path))
        os.system("chmod 775 %s/tmp" % (out_path))
        out_file = "%s/tmp/out_%s.xml" % (out_path, db)

        # Running blast
        cmd = "%s -subject %s -query %s -out %s -outfmt '5' -perc_identity %s -dust 'no'" % (blast, db_file, inputfile, out_file, threshold)
        process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        out, err = process.communicate()

        # Getting the results
        result_handle = open(out_file)
        blast_records = NCBIXML.parse(result_handle)

        # Declaring variables for saving the results
        gene_results = dict()  # will contain the results for each gene

        # For finding the best hits
        best_hsp = dict()

        # Keeping track of gene split
        gene_split = collections.defaultdict(dict)

        # Making the dicts for sequence outputs
        gene_align_query[db] = dict()
        gene_align_homo[db] = dict()
        gene_align_sbjct[db] = dict()

        # Parsing over the hits and only keeping the best
        for blast_record in blast_records:
            query = blast_record.query
            blast_record.alignments.sort(key=lambda align: -max((len(hsp.query) * (int(hsp.identities) / float(len(hsp.query))) for hsp in align.hsps)))
            for alignment in blast_record.alignments:
                # Setting the e-value as 1 and bit as 0 to get the best HSP fragment
                best_e_value = 1
                best_bit = 0
                for hsp in alignment.hsps:
                    if hsp.expect < best_e_value or hsp.bits > best_bit:
                        best_e_value = hsp.expect
                        best_bit = hsp.bits
                        tmp = alignment.title.split(" ")
                        sbjct_header = tmp[1]
                        bit = hsp.bits
                        sbjct_length = alignment.length
                        sbjct_start = hsp.sbjct_start
                        sbjct_end = hsp.sbjct_end
                        gaps = hsp.gaps
                        query_string = str(hsp.query)
                        homo_string = str(hsp.match)
                        sbjct_string = str(hsp.sbjct)
                        contig_name = query.replace(">", "")
                        query_start = hsp.query_start
                        query_end = hsp.query_end
                        HSP_length = len(query_string)
                        perc_ident = int(hsp.identities) / float(HSP_length) * 100
                        strand = 0
                        coverage = ((int(HSP_length) - int(gaps)) / float(sbjct_length))
                        perc_coverage = ((int(HSP_length) - int(gaps)) / float(sbjct_length)) * 100

                        if int(HSP_length) == int(sbjct_length):
                            cal_score = perc_ident * coverage * 100
                        else:
                            cal_score = perc_ident * coverage

                        hit_id = "%s:%s..%s:%s:%f" % (contig_name, query_start, query_end, sbjct_header, cal_score)

                        # If the hit is on the other strand
                        if sbjct_start > sbjct_end:
                            tmp = sbjct_start
                            sbjct_start = sbjct_end
                            sbjct_end = tmp
                            query_string = reverse_complement(query_string)
                            homo_string = homo_string[::-1]
                            sbjct_string = reverse_complement(sbjct_string)
                            strand = 1

                        if cut_off == True:
                            if perc_coverage > 20:
                                best_hsp = {'evalue': hsp.expect, 'sbjct_header': sbjct_header,
                                            'bit': bit, 'perc_ident': perc_ident,
                                            'sbjct_length': sbjct_length, 'sbjct_start': sbjct_start,
                                            'sbjct_end': sbjct_end, 'gaps': gaps,
                                            'query_string': query_string, 'homo_string': homo_string,
                                            'sbjct_string': sbjct_string, 'contig_name': contig_name,
                                            'query_start': query_start, 'query_end': query_end,
                                            'HSP_length': HSP_length, 'coverage': coverage,
                                            'cal_score': cal_score, 'hit_id': hit_id,
                                            'strand': strand, 'perc_coverage': perc_coverage}
                        else:
                            best_hsp = {'evalue': hsp.expect, 'sbjct_header': sbjct_header,
                                        'bit': bit, 'perc_ident': perc_ident,
                                        'sbjct_length': sbjct_length, 'sbjct_start': sbjct_start,
                                        'sbjct_end': sbjct_end, 'gaps': gaps,
                                        'query_string': query_string, 'homo_string': homo_string,
                                        'sbjct_string': sbjct_string, 'contig_name': contig_name,
                                        'query_start': query_start, 'query_end': query_end,
                                        'HSP_length': HSP_length, 'coverage': coverage,
                                        'cal_score': cal_score, 'hit_id': hit_id,
                                        'strand': strand, 'perc_coverage': perc_coverage}

                # Saving the result if any
                if best_hsp:
                    save = 1

                    # If there are other gene alignments they are compared
                    if gene_results:
                        tmp_gene_split = gene_split
                        tmp_results = gene_results
                        # Compare the hit results
                        save, gene_split, gene_results = compare_results(save, best_hsp, tmp_results, tmp_gene_split)

                    # If the hit is not overlapping with other hit sequences it is kept
                    if save == 1:
                        gene_results[hit_id] = best_hsp
                    else:
                        pass

        # If the hit does not cover the entire database reference the missing sequence data are extracted
        for hit_id in list(gene_results):
            hit = gene_results[hit_id]

            # Calculate possible split gene coverage
            perc_coverage = hit['perc_coverage']
            if hit['sbjct_header'] in gene_split and len(gene_split[hit['sbjct_header']]) > 1:
                # Calculate new length
                new_length = calculate_new_length(gene_split, gene_results, hit)
                hit['split_length'] = new_length
                # Calculate new coverage
                perc_coverage = new_length / float(hit['sbjct_length']) * 100

            # If the hit is above the minimum length threshold it is kept
            if perc_coverage >= min_cov:
                if hit['coverage'] == 1:
                    gene_align_query[db][hit_id] = hit['query_string']
                    gene_align_homo[db][hit_id] = hit['homo_string']
                    gene_align_sbjct[db][hit_id] = hit['sbjct_string']
                elif hit['coverage'] != 1:
                    # Getting the whole database sequence
                    for seq_record in SeqIO.parse(db_file, "fasta"):
                        if seq_record.description == hit['sbjct_header']:
                            gene_align_sbjct[db][hit_id] = str(seq_record.seq)
                            break

                    # Getting the whole contig to extract extra query sequence
                    contig = ''
                    for seq_record in SeqIO.parse(inputfile, "fasta"):
                        if seq_record.description == hit['contig_name']:
                            contig = str(seq_record.seq)
                            break

                    # Extract extra sequence from query
                    query_seq, homo_seq = get_query_align(hit, contig)

                    # Saving the new alignment sequences
                    gene_align_query[db][hit_id] = query_seq
                    gene_align_homo[db][hit_id] = homo_seq
            else:
                del gene_results[hit_id]
                if hit['sbjct_header'] in gene_split:
                    del gene_split[hit['sbjct_header']]

        # Save the database result
        if gene_results:
            results[db] = gene_results
        else:
            results[db] = "No hit found"

    return (results, gene_align_query, gene_align_homo, gene_align_sbjct)
[ "def", "Blaster", "(", "inputfile", ",", "databases", ",", "db_path", ",", "out_path", "=", "'.'", ",", "min_cov", "=", "0.6", ",", "threshold", "=", "0.9", ",", "blast", "=", "'blastn'", ",", "cut_off", "=", "True", ")", ":", "min_cov", "=", "100", "*", "float", "(", "min_cov", ")", "threshold", "=", "100", "*", "float", "(", "threshold", ")", "# For alignment", "gene_align_query", "=", "dict", "(", ")", "#will contain the sequence alignment lines", "gene_align_homo", "=", "dict", "(", ")", "#will contain the sequence alignment homolog string", "gene_align_sbjct", "=", "dict", "(", ")", "#will contain the sequence alignment allele string", "results", "=", "dict", "(", ")", "#will contain the results", "for", "db", "in", "databases", ":", "# Adding the path to the database and output", "db_file", "=", "\"%s/%s.fsa\"", "%", "(", "db_path", ",", "db", ")", "os", ".", "system", "(", "\"mkdir -p %s/tmp\"", "%", "(", "out_path", ")", ")", "os", ".", "system", "(", "\"chmod 775 %s/tmp\"", "%", "(", "out_path", ")", ")", "out_file", "=", "\"%s/tmp/out_%s.xml\"", "%", "(", "out_path", ",", "db", ")", "# Running blast", "cmd", "=", "\"%s -subject %s -query %s -out %s -outfmt '5' -perc_identity %s -dust 'no'\"", "%", "(", "blast", ",", "db_file", ",", "inputfile", ",", "out_file", ",", "threshold", ")", "process", "=", "subprocess", ".", "Popen", "(", "cmd", ",", "shell", "=", "True", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "PIPE", ")", "out", ",", "err", "=", "process", ".", "communicate", "(", ")", "# Getting the results", "result_handle", "=", "open", "(", "out_file", ")", "blast_records", "=", "NCBIXML", ".", "parse", "(", "result_handle", ")", "# Declaring variables for saving the results", "gene_results", "=", "dict", "(", ")", "#will contain the results for each gene", "# For finding the best hits", "best_hsp", "=", "dict", "(", ")", "# Keeping track of gene split", "gene_split", "=", "collections", ".", "defaultdict", "(", "dict", ")", "# Making the dicts for sequence outputs", "gene_align_query", "[", "db", "]", "=", "dict", "(", ")", "gene_align_homo", "[", "db", "]", "=", "dict", "(", ")", "gene_align_sbjct", "[", "db", "]", "=", "dict", "(", ")", "# Parsing over the hits and only keeping the best", "for", "blast_record", "in", "blast_records", ":", "query", "=", "blast_record", ".", "query", "blast_record", ".", "alignments", ".", "sort", "(", "key", "=", "lambda", "align", ":", "-", "max", "(", "(", "len", "(", "hsp", ".", "query", ")", "*", "(", "int", "(", "hsp", ".", "identities", ")", "/", "float", "(", "len", "(", "hsp", ".", "query", ")", ")", ")", "for", "hsp", "in", "align", ".", "hsps", ")", ")", ")", "for", "alignment", "in", "blast_record", ".", "alignments", ":", "# Setting the e-value as 1 and bit as 0 to get the best HSP fragment", "best_e_value", "=", "1", "best_bit", "=", "0", "for", "hsp", "in", "alignment", ".", "hsps", ":", "if", "hsp", ".", "expect", "<", "best_e_value", "or", "hsp", ".", "bits", ">", "best_bit", ":", "best_e_value", "=", "hsp", ".", "expect", "best_bit", "=", "hsp", ".", "bits", "tmp", "=", "alignment", ".", "title", ".", "split", "(", "\" \"", ")", "sbjct_header", "=", "tmp", "[", "1", "]", "bit", "=", "hsp", ".", "bits", "sbjct_length", "=", "alignment", ".", "length", "sbjct_start", "=", "hsp", ".", "sbjct_start", "sbjct_end", "=", "hsp", ".", "sbjct_end", "gaps", "=", "hsp", ".", "gaps", "query_string", "=", "str", "(", "hsp", ".", "query", ")", "homo_string", "=", "str", "(", "hsp", ".", "match", 
")", "sbjct_string", "=", "str", "(", "hsp", ".", "sbjct", ")", "contig_name", "=", "query", ".", "replace", "(", "\">\"", ",", "\"\"", ")", "query_start", "=", "hsp", ".", "query_start", "query_end", "=", "hsp", ".", "query_end", "HSP_length", "=", "len", "(", "query_string", ")", "perc_ident", "=", "int", "(", "hsp", ".", "identities", ")", "/", "float", "(", "HSP_length", ")", "*", "100", "strand", "=", "0", "coverage", "=", "(", "(", "int", "(", "HSP_length", ")", "-", "int", "(", "gaps", ")", ")", "/", "float", "(", "sbjct_length", ")", ")", "perc_coverage", "=", "(", "(", "int", "(", "HSP_length", ")", "-", "int", "(", "gaps", ")", ")", "/", "float", "(", "sbjct_length", ")", ")", "*", "100", "if", "int", "(", "HSP_length", ")", "==", "int", "(", "sbjct_length", ")", ":", "cal_score", "=", "perc_ident", "*", "coverage", "*", "100", "else", ":", "cal_score", "=", "perc_ident", "*", "coverage", "hit_id", "=", "\"%s:%s..%s:%s:%f\"", "%", "(", "contig_name", ",", "query_start", ",", "query_end", ",", "sbjct_header", ",", "cal_score", ")", "# If the hit is on the other strand", "if", "sbjct_start", ">", "sbjct_end", ":", "tmp", "=", "sbjct_start", "sbjct_start", "=", "sbjct_end", "sbjct_end", "=", "tmp", "query_string", "=", "reverse_complement", "(", "query_string", ")", "homo_string", "=", "homo_string", "[", ":", ":", "-", "1", "]", "sbjct_string", "=", "reverse_complement", "(", "sbjct_string", ")", "strand", "=", "1", "if", "cut_off", "==", "True", ":", "if", "perc_coverage", ">", "20", ":", "best_hsp", "=", "{", "'evalue'", ":", "hsp", ".", "expect", ",", "'sbjct_header'", ":", "sbjct_header", ",", "'bit'", ":", "bit", ",", "'perc_ident'", ":", "perc_ident", ",", "'sbjct_length'", ":", "sbjct_length", ",", "'sbjct_start'", ":", "sbjct_start", ",", "'sbjct_end'", ":", "sbjct_end", ",", "'gaps'", ":", "gaps", ",", "'query_string'", ":", "query_string", ",", "'homo_string'", ":", "homo_string", ",", "'sbjct_string'", ":", "sbjct_string", ",", "'contig_name'", ":", "contig_name", ",", "'query_start'", ":", "query_start", ",", "'query_end'", ":", "query_end", ",", "'HSP_length'", ":", "HSP_length", ",", "'coverage'", ":", "coverage", ",", "'cal_score'", ":", "cal_score", ",", "'hit_id'", ":", "hit_id", ",", "'strand'", ":", "strand", ",", "'perc_coverage'", ":", "perc_coverage", "}", "else", ":", "best_hsp", "=", "{", "'evalue'", ":", "hsp", ".", "expect", ",", "'sbjct_header'", ":", "sbjct_header", ",", "'bit'", ":", "bit", ",", "'perc_ident'", ":", "perc_ident", ",", "'sbjct_length'", ":", "sbjct_length", ",", "'sbjct_start'", ":", "sbjct_start", ",", "'sbjct_end'", ":", "sbjct_end", ",", "'gaps'", ":", "gaps", ",", "'query_string'", ":", "query_string", ",", "'homo_string'", ":", "homo_string", ",", "'sbjct_string'", ":", "sbjct_string", ",", "'contig_name'", ":", "contig_name", ",", "'query_start'", ":", "query_start", ",", "'query_end'", ":", "query_end", ",", "'HSP_length'", ":", "HSP_length", ",", "'coverage'", ":", "coverage", ",", "'cal_score'", ":", "cal_score", ",", "'hit_id'", ":", "hit_id", ",", "'strand'", ":", "strand", ",", "'perc_coverage'", ":", "perc_coverage", "}", "# Saving the result if any", "if", "best_hsp", ":", "save", "=", "1", "# If there are other gene alignments they are compared", "if", "gene_results", ":", "tmp_gene_split", "=", "gene_split", "tmp_results", "=", "gene_results", "# Compare the hit results", "save", ",", "gene_split", ",", "gene_results", "=", "compare_results", "(", "save", ",", "best_hsp", ",", "tmp_results", ",", "tmp_gene_split", ")", "# 
If the hit is not overlapping with other hit seqeunces it is kept", "if", "save", "==", "1", ":", "gene_results", "[", "hit_id", "]", "=", "best_hsp", "else", ":", "pass", "# If the hit does not cover the entire database reference the missing seqence data are extracted", "for", "hit_id", "in", "list", "(", "gene_results", ")", ":", "hit", "=", "gene_results", "[", "hit_id", "]", "# Calculate possible split gene coverage", "perc_coverage", "=", "hit", "[", "'perc_coverage'", "]", "if", "hit", "[", "'sbjct_header'", "]", "in", "gene_split", "and", "len", "(", "gene_split", "[", "hit", "[", "'sbjct_header'", "]", "]", ")", ">", "1", ":", "# Calculate new length", "new_length", "=", "calculate_new_length", "(", "gene_split", ",", "gene_results", ",", "hit", ")", "hit", "[", "'split_length'", "]", "=", "new_length", "# Calculate new coverage", "perc_coverage", "=", "new_length", "/", "float", "(", "hit", "[", "'sbjct_length'", "]", ")", "*", "100", "# If the hit is above the minimum length threshold it is kept", "if", "perc_coverage", ">=", "min_cov", ":", "if", "hit", "[", "'coverage'", "]", "==", "1", ":", "gene_align_query", "[", "db", "]", "[", "hit_id", "]", "=", "hit", "[", "'query_string'", "]", "gene_align_homo", "[", "db", "]", "[", "hit_id", "]", "=", "hit", "[", "'homo_string'", "]", "gene_align_sbjct", "[", "db", "]", "[", "hit_id", "]", "=", "hit", "[", "'sbjct_string'", "]", "elif", "hit", "[", "'coverage'", "]", "!=", "1", ":", "# Getting the whole database sequence", "for", "seq_record", "in", "SeqIO", ".", "parse", "(", "db_file", ",", "\"fasta\"", ")", ":", "if", "seq_record", ".", "description", "==", "hit", "[", "'sbjct_header'", "]", ":", "gene_align_sbjct", "[", "db", "]", "[", "hit_id", "]", "=", "str", "(", "seq_record", ".", "seq", ")", "break", "# Getting the whole contig to extract extra query seqeunce", "contig", "=", "''", "for", "seq_record", "in", "SeqIO", ".", "parse", "(", "inputfile", ",", "\"fasta\"", ")", ":", "if", "seq_record", ".", "description", "==", "hit", "[", "'contig_name'", "]", ":", "contig", "=", "str", "(", "seq_record", ".", "seq", ")", "break", "# Extract extra sequence from query", "query_seq", ",", "homo_seq", "=", "get_query_align", "(", "hit", ",", "contig", ")", "# Saving the new alignment sequences", "gene_align_query", "[", "db", "]", "[", "hit_id", "]", "=", "query_seq", "gene_align_homo", "[", "db", "]", "[", "hit_id", "]", "=", "homo_seq", "else", ":", "del", "gene_results", "[", "hit_id", "]", "if", "hit", "[", "'sbjct_header'", "]", "in", "gene_split", ":", "del", "gene_split", "[", "hit", "[", "'sbjct_header'", "]", "]", "# Save the database result", "if", "gene_results", ":", "results", "[", "db", "]", "=", "gene_results", "else", ":", "results", "[", "db", "]", "=", "\"No hit found\"", "return", "(", "results", ",", "gene_align_query", ",", "gene_align_homo", ",", "gene_align_sbjct", ")" ]
avg_line_len: 47.05102
score: 21.428571
def _parse_epsilon(line, lines):
    """Parse Energy [eV] Re_eps_xx Im_eps_xx Re_eps_zz Im_eps_zz"""
    split_line = line.split()
    energy = float(split_line[0])
    re_eps_xx = float(split_line[1])
    im_eps_xx = float(split_line[2])
    re_eps_zz = float(split_line[3])
    im_eps_zz = float(split_line[4])

    return {"energy": energy, "re_eps_xx": re_eps_xx, "im_eps_xx": im_eps_xx,
            "re_eps_zz": re_eps_zz, "im_eps_zz": im_eps_zz}
[ "def", "_parse_epsilon", "(", "line", ",", "lines", ")", ":", "split_line", "=", "line", ".", "split", "(", ")", "energy", "=", "float", "(", "split_line", "[", "0", "]", ")", "re_eps_xx", "=", "float", "(", "split_line", "[", "1", "]", ")", "im_eps_xx", "=", "float", "(", "split_line", "[", "2", "]", ")", "re_eps_zz", "=", "float", "(", "split_line", "[", "3", "]", ")", "im_eps_zz", "=", "float", "(", "split_line", "[", "4", "]", ")", "return", "{", "\"energy\"", ":", "energy", ",", "\"re_eps_xx\"", ":", "re_eps_xx", ",", "\"im_eps_xx\"", ":", "im_eps_xx", ",", "\"re_eps_zz\"", ":", "re_eps_zz", ",", "\"im_eps_zz\"", ":", "im_eps_zz", "}" ]
avg_line_len: 34.846154
score: 17.538462
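The parser above splits on whitespace and keeps the first five fields; a worked call with made-up numbers (the second argument is unused by this body):

    row = _parse_epsilon("2.5 1.10 0.20 1.30 0.40", [])
    # -> {'energy': 2.5, 're_eps_xx': 1.1, 'im_eps_xx': 0.2,
    #     're_eps_zz': 1.3, 'im_eps_zz': 0.4}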
def data(self, index, role):
    """
    Get the data for the header.

    This is used when a header has levels.
    """
    if not index.isValid() or \
       index.row() >= self._shape[0] or \
       index.column() >= self._shape[1]:
        return None
    row, col = ((index.row(), index.column()) if self.axis == 0
                else (index.column(), index.row()))
    if role != Qt.DisplayRole:
        return None
    if self.axis == 0 and self._shape[0] <= 1:
        return None
    header = self.model.header(self.axis, col, row)

    # Don't perform any conversion on strings
    # because it leads to differences between
    # the data present in the dataframe and
    # what is shown by Spyder
    if not is_type_text_string(header):
        header = to_text_string(header)

    return header
[ "def", "data", "(", "self", ",", "index", ",", "role", ")", ":", "if", "not", "index", ".", "isValid", "(", ")", "or", "index", ".", "row", "(", ")", ">=", "self", ".", "_shape", "[", "0", "]", "or", "index", ".", "column", "(", ")", ">=", "self", ".", "_shape", "[", "1", "]", ":", "return", "None", "row", ",", "col", "=", "(", "(", "index", ".", "row", "(", ")", ",", "index", ".", "column", "(", ")", ")", "if", "self", ".", "axis", "==", "0", "else", "(", "index", ".", "column", "(", ")", ",", "index", ".", "row", "(", ")", ")", ")", "if", "role", "!=", "Qt", ".", "DisplayRole", ":", "return", "None", "if", "self", ".", "axis", "==", "0", "and", "self", ".", "_shape", "[", "0", "]", "<=", "1", ":", "return", "None", "header", "=", "self", ".", "model", ".", "header", "(", "self", ".", "axis", ",", "col", ",", "row", ")", "# Don't perform any conversion on strings\r", "# because it leads to differences between\r", "# the data present in the dataframe and\r", "# what is shown by Spyder\r", "if", "not", "is_type_text_string", "(", "header", ")", ":", "header", "=", "to_text_string", "(", "header", ")", "return", "header" ]
avg_line_len: 33.222222
score: 13.962963
def sensor_values(self):
    """ Returns the values of all sensors for this cluster """
    self.update_instance_sensors(opt="all")
    return {
        "light": self.lux,
        "water": self.soil_moisture,
        "humidity": self.humidity,
        "temperature": self.temp
    }
[ "def", "sensor_values", "(", "self", ")", ":", "self", ".", "update_instance_sensors", "(", "opt", "=", "\"all\"", ")", "return", "{", "\"light\"", ":", "self", ".", "lux", ",", "\"water\"", ":", "self", ".", "soil_moisture", ",", "\"humidity\"", ":", "self", ".", "humidity", ",", "\"temperature\"", ":", "self", ".", "temp", "}" ]
avg_line_len: 30
score: 10
def NetFxSdkDir(self):
    """
    Microsoft .NET Framework SDK directory.
    """
    sdkdir = ''  # ensure a defined value if NetFxSdkVersion is empty
    for ver in self.NetFxSdkVersion:
        loc = os.path.join(self.ri.netfx_sdk, ver)
        sdkdir = self.ri.lookup(loc, 'kitsinstallationfolder')
        if sdkdir:
            break
    return sdkdir or ''
[ "def", "NetFxSdkDir", "(", "self", ")", ":", "for", "ver", "in", "self", ".", "NetFxSdkVersion", ":", "loc", "=", "os", ".", "path", ".", "join", "(", "self", ".", "ri", ".", "netfx_sdk", ",", "ver", ")", "sdkdir", "=", "self", ".", "ri", ".", "lookup", "(", "loc", ",", "'kitsinstallationfolder'", ")", "if", "sdkdir", ":", "break", "return", "sdkdir", "or", "''" ]
avg_line_len: 32.1
score: 11.5
def _get_type(self, s):
    """ Converts a string from Scratch to its proper type in Python.
    Expects a string with its delimiting quotes in place.
    Returns either a string, int or float. """
    # TODO: what if the number is bigger than an int or float?
    if s.startswith('"') and s.endswith('"'):
        return s[1:-1]
    elif s.find('.') != -1:
        return float(s)
    else:
        return int(s)
[ "def", "_get_type", "(", "self", ",", "s", ")", ":", "# TODO: what if the number is bigger than an int or float?", "if", "s", ".", "startswith", "(", "'\"'", ")", "and", "s", ".", "endswith", "(", "'\"'", ")", ":", "return", "s", "[", "1", ":", "-", "1", "]", "elif", "s", ".", "find", "(", "'.'", ")", "!=", "-", "1", ":", "return", "float", "(", "s", ")", "else", ":", "return", "int", "(", "s", ")" ]
avg_line_len: 35.538462
score: 16.923077
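Illustrative conversions for the method above, shown as doctest-style calls on some instance `obj` of the surrounding class:

    >>> obj._get_type('"hello"')
    'hello'
    >>> obj._get_type('3.14')
    3.14
    >>> obj._get_type('42')
    42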
def original_query_sequence_length(self):
    """Similar to get_query_sequence_length, but it also includes
    hard clipped bases. If there is no cigar, then default to trying
    the sequence.

    :return: the length of the query before any clipping
    :rtype: int
    """
    if not self.is_aligned() or not self.entries.cigar:
        return self.query_sequence_length  # take the naive approach
    # we are here with something aligned so take the more intelligent cigar approach
    return sum([x[0] for x in self.cigar_array if re.match('[HMIS=X]', x[1])])
[ "def", "original_query_sequence_length", "(", "self", ")", ":", "if", "not", "self", ".", "is_aligned", "(", ")", "or", "not", "self", ".", "entries", ".", "cigar", ":", "return", "self", ".", "query_sequence_length", "# take the naive approach", "# we are here with something aligned so take more intelligent cigar apporach", "return", "sum", "(", "[", "x", "[", "0", "]", "for", "x", "in", "self", ".", "cigar_array", "if", "re", ".", "match", "(", "'[HMIS=X]'", ",", "x", "[", "1", "]", ")", "]", ")" ]
avg_line_len: 45.666667
score: 19.833333
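The summation assumes `self.cigar_array` holds `(length, op)` pairs; a self-contained sketch of the same arithmetic over a hypothetical array:

    import re

    # hypothetical cigar array: hard clip, match, deletion, soft clip
    cigar_array = [(5, 'H'), (20, 'M'), (2, 'D'), (3, 'S')]
    # clips, matches, insertions and (mis)matches count toward the original
    # query length; the deletion run ('D') does not
    print(sum(x[0] for x in cigar_array if re.match('[HMIS=X]', x[1])))  # 28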
def _clean_salt_variables(params, variable_prefix="__"):
    '''
    Pops out variables from params which start with `variable_prefix`.
    '''
    list(list(map(params.pop, [k for k in params if k.startswith(variable_prefix)])))
    return params
[ "def", "_clean_salt_variables", "(", "params", ",", "variable_prefix", "=", "\"__\"", ")", ":", "list", "(", "list", "(", "map", "(", "params", ".", "pop", ",", "[", "k", "for", "k", "in", "params", "if", "k", ".", "startswith", "(", "variable_prefix", ")", "]", ")", ")", ")", "return", "params" ]
avg_line_len: 40.5
score: 30.166667
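A runnable check of the prefix-stripping behavior (note the function mutates and returns the same dict):

    params = {'name': 'web1', '__pub_fun': 'state.apply', '__env__': 'base'}
    print(_clean_salt_variables(params))  # {'name': 'web1'}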
def from_bytes(self, raw):
    '''Return a Null header object reconstructed from raw bytes, or an
    Exception if we can't resurrect the packet.'''
    if len(raw) < 4:
        raise NotEnoughDataError("Not enough bytes ({}) to reconstruct a Null object".format(len(raw)))
    fields = struct.unpack('=I', raw[:4])
    self._af = fields[0]
    return raw[4:]
[ "def", "from_bytes", "(", "self", ",", "raw", ")", ":", "if", "len", "(", "raw", ")", "<", "4", ":", "raise", "NotEnoughDataError", "(", "\"Not enough bytes ({}) to reconstruct a Null object\"", ".", "format", "(", "len", "(", "raw", ")", ")", ")", "fields", "=", "struct", ".", "unpack", "(", "'=I'", ",", "raw", "[", ":", "4", "]", ")", "self", ".", "_af", "=", "fields", "[", "0", "]", "return", "raw", "[", "4", ":", "]" ]
avg_line_len: 47.5
score: 22.5
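A round-trip sketch using the same `struct` format; the class name in the comment is a guess, since only the method appears in the record:

    import struct

    raw = struct.pack('=I', 2) + b'payload'  # AF value 2, then the payload
    # hdr = Null(); rest = hdr.from_bytes(raw)  # 'Null' is an assumed class name
    # after the call: hdr._af == 2 and rest == b'payload'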
def create_interface_method_ref(self, class_: str, if_method: str,
                                descriptor: str) -> InterfaceMethodRef:
    """
    Creates a new :class:`ConstantInterfaceMethodRef`, adding it to the
    pool and returning it.

    :param class_: The name of the class to which `if_method` belongs.
    :param if_method: The name of the interface method.
    :param descriptor: The descriptor for `if_method`.
    """
    self.append((
        11,
        self.create_class(class_).index,
        self.create_name_and_type(if_method, descriptor).index
    ))
    return self.get(self.raw_count - 1)
[ "def", "create_interface_method_ref", "(", "self", ",", "class_", ":", "str", ",", "if_method", ":", "str", ",", "descriptor", ":", "str", ")", "->", "InterfaceMethodRef", ":", "self", ".", "append", "(", "(", "11", ",", "self", ".", "create_class", "(", "class_", ")", ".", "index", ",", "self", ".", "create_name_and_type", "(", "if_method", ",", "descriptor", ")", ".", "index", ")", ")", "return", "self", ".", "get", "(", "self", ".", "raw_count", "-", "1", ")" ]
avg_line_len: 41.125
score: 20.25
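A hypothetical call against a constant pool `pool`, matching the signature above (`pool` and the chosen class are illustrative, not from the record):

    ref = pool.create_interface_method_ref(
        'java/util/List',  # class the interface method belongs to
        'size',            # interface method name
        '()I'              # JVM descriptor: no arguments, returns int
    )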
def multi_ping(dest_addrs, timeout, retry=0, ignore_lookup_errors=False):
    """
    Combine send and receive measurement into single function.

    This offers a retry mechanism: Overall timeout time is divided by number
    of retries. Additional ICMPecho packets are sent to those addresses from
    which we have not received answers, yet.

    The retry mechanism is useful, because individual ICMP packets may get
    lost.

    If 'retry' is set to 0 then only a single packet is sent to each address.

    If 'ignore_lookup_errors' is set then any issues with resolving target
    names or looking up their address information will silently be ignored.
    Those targets simply appear in the 'no_results' return list.
    """
    retry = int(retry)
    if retry < 0:
        retry = 0
    timeout = float(timeout)
    if timeout < 0.1:
        raise MultiPingError("Timeout < 0.1 seconds not allowed")
    retry_timeout = float(timeout) / (retry + 1)
    if retry_timeout < 0.1:
        raise MultiPingError("Time between ping retries < 0.1 seconds")

    mp = MultiPing(dest_addrs, ignore_lookup_errors=ignore_lookup_errors)

    results = {}
    retry_count = 0
    while retry_count <= retry:
        # Send a batch of pings
        mp.send()
        single_results, no_results = mp.receive(retry_timeout)
        # Add the results from the last sending of pings to the overall results
        results.update(single_results)
        if not no_results:
            # No addresses left? We are done.
            break
        retry_count += 1

    return results, no_results
[ "def", "multi_ping", "(", "dest_addrs", ",", "timeout", ",", "retry", "=", "0", ",", "ignore_lookup_errors", "=", "False", ")", ":", "retry", "=", "int", "(", "retry", ")", "if", "retry", "<", "0", ":", "retry", "=", "0", "timeout", "=", "float", "(", "timeout", ")", "if", "timeout", "<", "0.1", ":", "raise", "MultiPingError", "(", "\"Timeout < 0.1 seconds not allowed\"", ")", "retry_timeout", "=", "float", "(", "timeout", ")", "/", "(", "retry", "+", "1", ")", "if", "retry_timeout", "<", "0.1", ":", "raise", "MultiPingError", "(", "\"Time between ping retries < 0.1 seconds\"", ")", "mp", "=", "MultiPing", "(", "dest_addrs", ",", "ignore_lookup_errors", "=", "ignore_lookup_errors", ")", "results", "=", "{", "}", "retry_count", "=", "0", "while", "retry_count", "<=", "retry", ":", "# Send a batch of pings", "mp", ".", "send", "(", ")", "single_results", ",", "no_results", "=", "mp", ".", "receive", "(", "retry_timeout", ")", "# Add the results from the last sending of pings to the overall results", "results", ".", "update", "(", "single_results", ")", "if", "not", "no_results", ":", "# No addresses left? We are done.", "break", "retry_count", "+=", "1", "return", "results", ",", "no_results" ]
avg_line_len: 32.87234
score: 24.829787
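Typical usage of the helper above; the addresses are placeholders (TEST-NET-1), and sending raw ICMP normally requires elevated privileges:

    responses, no_responses = multi_ping(
        ["192.0.2.1", "192.0.2.2"],  # placeholder targets
        timeout=2.0,                 # overall budget, split across retries
        retry=3,                     # four sending rounds of 0.5 s each
    )
    for addr, rtt in responses.items():
        print(addr, rtt)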
def SetEncoding(sval):
    """Sets the encoding variable according to the text passed

    :param sval: text specification for the desired model
    """
    global encoding
    s = sval.lower()
    if s == "additive":
        encoding = Encoding.Additive
    elif s == "dominant":
        encoding = Encoding.Dominant
    elif s == "recessive":
        encoding = Encoding.Recessive
    elif s == "genotype":
        encoding = Encoding.Genotype
    elif s == "raw":
        encoding = Encoding.Raw
    else:
        raise InvalidSelection("Invalid encoding, %s, selected" % (sval))
[ "def", "SetEncoding", "(", "sval", ")", ":", "global", "encoding", "s", "=", "sval", ".", "lower", "(", ")", "if", "s", "==", "\"additive\"", ":", "encoding", "=", "Encoding", ".", "Additive", "elif", "s", "==", "\"dominant\"", ":", "encoding", "=", "Encoding", ".", "Dominant", "elif", "s", "==", "\"recessive\"", ":", "encoding", "=", "Encoding", ".", "Recessive", "elif", "s", "==", "\"genotype\"", ":", "encoding", "=", "Encoding", ".", "Genotype", "elif", "s", "==", "\"raw\"", ":", "encoding", "=", "Encoding", ".", "Raw", "else", ":", "raise", "InvalidSelection", "(", "\"Invalid encoding, %s, selected\"", "%", "(", "sval", ")", ")" ]
avg_line_len: 29.578947
score: 15.105263
def DefaultSelector():
    """ This function serves as a first call for DefaultSelector to
    detect if the select module is being monkey-patched incorrectly
    by eventlet, greenlet, and preserve proper behavior. """
    global _DEFAULT_SELECTOR
    if _DEFAULT_SELECTOR is None:
        if platform.python_implementation() == 'Jython':  # Platform-specific: Jython
            _DEFAULT_SELECTOR = JythonSelectSelector
        elif _can_allocate('kqueue'):
            _DEFAULT_SELECTOR = KqueueSelector
        elif _can_allocate('devpoll'):
            _DEFAULT_SELECTOR = DevpollSelector
        elif _can_allocate('epoll'):
            _DEFAULT_SELECTOR = EpollSelector
        elif _can_allocate('poll'):
            _DEFAULT_SELECTOR = PollSelector
        elif hasattr(select, 'select'):
            _DEFAULT_SELECTOR = SelectSelector
        else:  # Platform-specific: AppEngine
            raise RuntimeError('Platform does not have a selector.')
    return _DEFAULT_SELECTOR()
[ "def", "DefaultSelector", "(", ")", ":", "global", "_DEFAULT_SELECTOR", "if", "_DEFAULT_SELECTOR", "is", "None", ":", "if", "platform", ".", "python_implementation", "(", ")", "==", "'Jython'", ":", "# Platform-specific: Jython", "_DEFAULT_SELECTOR", "=", "JythonSelectSelector", "elif", "_can_allocate", "(", "'kqueue'", ")", ":", "_DEFAULT_SELECTOR", "=", "KqueueSelector", "elif", "_can_allocate", "(", "'devpoll'", ")", ":", "_DEFAULT_SELECTOR", "=", "DevpollSelector", "elif", "_can_allocate", "(", "'epoll'", ")", ":", "_DEFAULT_SELECTOR", "=", "EpollSelector", "elif", "_can_allocate", "(", "'poll'", ")", ":", "_DEFAULT_SELECTOR", "=", "PollSelector", "elif", "hasattr", "(", "select", ",", "'select'", ")", ":", "_DEFAULT_SELECTOR", "=", "SelectSelector", "else", ":", "# Platform-specific: AppEngine", "raise", "RuntimeError", "(", "'Platform does not have a selector.'", ")", "return", "_DEFAULT_SELECTOR", "(", ")" ]
avg_line_len: 46.190476
score: 9.857143
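The returned selector is assumed to follow the stdlib `selectors`-style interface; a hedged usage sketch (`EVENT_READ` is assumed to come from the same module):

    import socket

    sel = DefaultSelector()
    sock = socket.socket()
    sock.setblocking(False)
    sel.register(sock, EVENT_READ)    # watch the socket for readability
    events = sel.select(timeout=1.0)  # list of (key, events) pairs, stdlib-style
    sel.close()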
def instantiate(self, value_of_n):
    """Instantiates the template"""
    template = Cheetah.Template.Template(
        self.content,
        searchList={'n': value_of_n}
    )
    template.random_string = random_string
    return str(template)
[ "def", "instantiate", "(", "self", ",", "value_of_n", ")", ":", "template", "=", "Cheetah", ".", "Template", ".", "Template", "(", "self", ".", "content", ",", "searchList", "=", "{", "'n'", ":", "value_of_n", "}", ")", "template", ".", "random_string", "=", "random_string", "return", "str", "(", "template", ")" ]
avg_line_len: 33.25
score: 9.375
def plot_fracs(self, Q=None, ax=None, fignum=None):
    """
    Plot fractions of Eigenvalues sorted in descending order.
    """
    from ..plotting import Tango
    Tango.reset()
    col = Tango.nextMedium()
    if ax is None:
        fig = pylab.figure(fignum)
        ax = fig.add_subplot(111)
    if Q is None:
        Q = self.Q
    ticks = numpy.arange(Q)
    bar = ax.bar(ticks - .4, self.fracs[:Q], color=col)
    ax.set_xticks(ticks, map(lambda x: r"${}$".format(x), ticks + 1))
    ax.set_ylabel("Eigenvalue fraction")
    ax.set_xlabel("PC")
    ax.set_ylim(0, ax.get_ylim()[1])
    ax.set_xlim(ticks.min() - .5, ticks.max() + .5)
    try:
        pylab.tight_layout()
    except:
        pass
    return bar
[ "def", "plot_fracs", "(", "self", ",", "Q", "=", "None", ",", "ax", "=", "None", ",", "fignum", "=", "None", ")", ":", "from", ".", ".", "plotting", "import", "Tango", "Tango", ".", "reset", "(", ")", "col", "=", "Tango", ".", "nextMedium", "(", ")", "if", "ax", "is", "None", ":", "fig", "=", "pylab", ".", "figure", "(", "fignum", ")", "ax", "=", "fig", ".", "add_subplot", "(", "111", ")", "if", "Q", "is", "None", ":", "Q", "=", "self", ".", "Q", "ticks", "=", "numpy", ".", "arange", "(", "Q", ")", "bar", "=", "ax", ".", "bar", "(", "ticks", "-", ".4", ",", "self", ".", "fracs", "[", ":", "Q", "]", ",", "color", "=", "col", ")", "ax", ".", "set_xticks", "(", "ticks", ",", "map", "(", "lambda", "x", ":", "r\"${}$\"", ".", "format", "(", "x", ")", ",", "ticks", "+", "1", ")", ")", "ax", ".", "set_ylabel", "(", "\"Eigenvalue fraction\"", ")", "ax", ".", "set_xlabel", "(", "\"PC\"", ")", "ax", ".", "set_ylim", "(", "0", ",", "ax", ".", "get_ylim", "(", ")", "[", "1", "]", ")", "ax", ".", "set_xlim", "(", "ticks", ".", "min", "(", ")", "-", ".5", ",", "ticks", ".", "max", "(", ")", "+", ".5", ")", "try", ":", "pylab", ".", "tight_layout", "(", ")", "except", ":", "pass", "return", "bar" ]
32.875
13.625
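Usage note: a hedged driver for plot_fracs; 'pca' stands for any object exposing .fracs and .Q as the method assumes (the class name is hypothetical):

import pylab
pca = PCAResult(data)  # hypothetical: anything with .fracs, .Q and this method
pca.plot_fracs(Q=5)    # bar chart of the top-5 eigenvalue fractions
pylab.show()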
def count_lightning(datain, time_step):
    """**Count lightning strikes detected within a defined time_step**

    Generate time intervals according to the time_step defined and count
    lightning strikes in these intervals. Statistics are also calculated
    for lightning detection errors and the number of stations and added
    to an output dataframe. Time stamps in the output dataframe correspond
    to the center of the time periods in which lightning strikes are counted.

    :parameter datain: dataframe (lightning data)
    :parameter time_step: integer (time step in minutes)

    :Example:

    >>> count_lightning(LN_data, time_step)
    """
    if 1440 % time_step == 0:  # check that time_step divides 1 day evenly
        i = 0
        # run for loop for all time steps in one day
        for time_interval in gen_time_intervals(extract_date(datain['datetime'].iloc[0]),
                                                (extract_date(datain['datetime'].iloc[0])+timedelta(days=1)),
                                                timedelta(minutes=time_step)):
            # select data in given time_interval
            tmp_LN_data = datain.loc[(datain['datetime'] >= time_interval) &
                                     (datain['datetime'] < time_interval + timedelta(minutes=time_step))]
            # calculate stats
            stats_err = gen_stats(tmp_LN_data['err'])
            stats_sta = gen_stats(tmp_LN_data['#sta'])
            d = {'count': stats_err['count'],
                 'err_mean': stats_err['mean'], 'err_std': stats_err['std'],
                 'err_min': stats_err['min'], 'err_max': stats_err['max'],
                 '#sta_mean': stats_sta['mean'], '#sta_std': stats_sta['std'],
                 '#sta_min': stats_sta['min'], '#sta_max': stats_sta['max']}
            col_names = [k for k in d.keys()]
            df_index = time_interval+timedelta(minutes=(time_step/2))
            temp_LN_count = pd.DataFrame(d, index=[df_index], columns=col_names)
            # add data to existing df
            if i >= 1:
                LN_count = LN_count.append(temp_LN_count)
            else:
                LN_count = temp_LN_count
            i = i + 1
        return LN_count
    else:
        print("time_step {0} does not divide 1 day (1440 min) evenly".format(time_step))
[ "def", "count_lightning", "(", "datain", ",", "time_step", ")", ":", "if", "(", "1440", "%", "time_step", "==", "0", ")", ":", "# check if time_step is multiple of 1 day", "i", "=", "0", "# run for loop for all time steps in one day", "for", "time_interval", "in", "gen_time_intervals", "(", "extract_date", "(", "datain", "[", "'datetime'", "]", ".", "iloc", "[", "0", "]", ")", ",", "(", "extract_date", "(", "datain", "[", "'datetime'", "]", ".", "iloc", "[", "0", "]", ")", "+", "timedelta", "(", "days", "=", "1", ")", ")", ",", "timedelta", "(", "minutes", "=", "time_step", ")", ")", ":", "# select data in given time_interval", "tmp_LN_data", "=", "datain", ".", "loc", "[", "(", "datain", "[", "'datetime'", "]", ">=", "time_interval", ")", "&", "(", "datain", "[", "'datetime'", "]", "<", "time_interval", "+", "timedelta", "(", "minutes", "=", "time_step", ")", ")", "]", "# calculate stats", "stats_err", "=", "gen_stats", "(", "tmp_LN_data", "[", "'err'", "]", ")", "stats_sta", "=", "gen_stats", "(", "tmp_LN_data", "[", "'#sta'", "]", ")", "d", "=", "{", "'count'", ":", "stats_err", "[", "'count'", "]", ",", "'err_mean'", ":", "stats_err", "[", "'mean'", "]", ",", "'err_std'", ":", "stats_err", "[", "'std'", "]", ",", "'err_min'", ":", "stats_err", "[", "'min'", "]", ",", "'err_max'", ":", "stats_err", "[", "'max'", "]", ",", "'#sta_mean'", ":", "stats_sta", "[", "'mean'", "]", ",", "'#sta_std'", ":", "stats_sta", "[", "'std'", "]", ",", "'#sta_min'", ":", "stats_sta", "[", "'min'", "]", ",", "'#sta_max'", ":", "stats_sta", "[", "'max'", "]", "}", "col_names", "=", "[", "k", "for", "k", "in", "d", ".", "keys", "(", ")", "]", "df_index", "=", "time_interval", "+", "timedelta", "(", "minutes", "=", "(", "time_step", "/", "2", ")", ")", "temp_LN_count", "=", "pd", ".", "DataFrame", "(", "d", ",", "index", "=", "[", "df_index", "]", ",", "columns", "=", "col_names", ")", "# add data to existing df", "if", "(", "i", ">=", "1", ")", ":", "LN_count", "=", "LN_count", ".", "append", "(", "temp_LN_count", ")", "else", ":", "LN_count", "=", "temp_LN_count", "i", "=", "i", "+", "1", "return", "LN_count", "else", ":", "print", "(", "\"time_step {0} multiple of 1 day (1400 min)\"", ".", "format", "(", "time_step", ")", ")" ]
46.882353
20.098039
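Usage note: a small driver under stated assumptions (a dataframe with 'datetime', 'err' and '#sta' columns; gen_stats, gen_time_intervals and extract_date in scope as the function expects):

import pandas as pd

# hypothetical minute-resolution lightning records covering one day
LN_data = pd.DataFrame({
    'datetime': pd.date_range('2019-06-01', periods=1440, freq='min'),
    'err': 0.5,
    '#sta': 6})
counts = count_lightning(LN_data, time_step=60)  # 24 hourly bins
print(counts[['count', 'err_mean']].head())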
def _get_attributes(self, path):
    """
    :param path: filepath within fast5
    :return: dictionary of attributes found at ``path``
    :rtype: dict
    """
    path_grp = self.handle[path]
    path_attr = path_grp.attrs
    return dict(path_attr)
[ "def", "_get_attributes", "(", "self", ",", "path", ")", ":", "path_grp", "=", "self", ".", "handle", "[", "path", "]", "path_attr", "=", "path_grp", ".", "attrs", "return", "dict", "(", "path_attr", ")" ]
30.444444
7.777778
def generate_password_hash(password, method='pbkdf2:sha1', salt_length=8):
    """Hash a password with the given method and salt with a string of
    the given length.  The format of the string returned includes the method
    that was used so that :func:`check_password_hash` can check the hash.

    The format for the hashed string looks like this::

        method$salt$hash

    This method can **not** generate unsalted passwords but it is possible
    to set the method to plain to enforce plaintext passwords.  If a salt
    is used, hmac is used internally to salt the password.

    If PBKDF2 is wanted it can be enabled by setting the method to
    ``pbkdf2:method:iterations`` where iterations is optional::

        pbkdf2:sha1:2000$salt$hash
        pbkdf2:sha1$salt$hash

    :param password: the password to hash
    :param method: the hash method to use (one that hashlib supports), can
                   optionally be in the format ``pbkdf2:<method>[:iterations]``
                   to enable PBKDF2.
    :param salt_length: the length of the salt in letters
    """
    salt = method != 'plain' and gen_salt(salt_length) or ''
    h, actual_method = _hash_internal(method, salt, password)
    return '%s$%s$%s' % (actual_method, salt, h)
[ "def", "generate_password_hash", "(", "password", ",", "method", "=", "'pbkdf2:sha1'", ",", "salt_length", "=", "8", ")", ":", "salt", "=", "method", "!=", "'plain'", "and", "gen_salt", "(", "salt_length", ")", "or", "''", "h", ",", "actual_method", "=", "_hash_internal", "(", "method", ",", "salt", ",", "password", ")", "return", "'%s$%s$%s'", "%", "(", "actual_method", ",", "salt", ",", "h", ")" ]
44.142857
23.857143
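Usage note: the round trip the docstring describes, using werkzeug's documented counterpart check_password_hash:

from werkzeug.security import generate_password_hash, check_password_hash

pw_hash = generate_password_hash('secret', method='pbkdf2:sha1:2000')
# pw_hash looks like 'pbkdf2:sha1:2000$<salt>$<hexdigest>'
assert check_password_hash(pw_hash, 'secret')
assert not check_password_hash(pw_hash, 'wrong')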
def sample_orbit(self, Npts=100, primary=None, trailing=True, timespan=None, useTrueAnomaly=True):
    """
    Returns a nested list of xyz positions along the osculating orbit of the particle.
    If primary is not passed, returns xyz positions along the Jacobi osculating orbit
    (with mu = G*Minc, where Minc is the total mass from index 0 to the particle's index, inclusive).

    Parameters
    ----------
    Npts : int, optional
        Number of points along the orbit to return (default: 100)
    primary : rebound.Particle, optional
        Primary to use for the osculating orbit (default: Jacobi center of mass)
    trailing : bool, optional
        Whether to return points stepping backwards in time (True) or forwards (False).
        (default: True)
    timespan : float, optional
        Return points (for the osculating orbit) from the current position to timespan
        (forwards or backwards in time depending on trailing keyword). Defaults to the
        orbital period for bound orbits, and to the rough time it takes the orbit to
        move by the current distance from the primary for a hyperbolic orbit.
        Implementation currently only supports this option if useTrueAnomaly=False.
    useTrueAnomaly : bool, optional
        Will sample equally spaced points in true anomaly if True, otherwise in mean
        anomaly. Latter might be better for hyperbolic orbits, where true anomaly can
        stay near the limiting value for a long time, and then switch abruptly at
        pericenter. (Default: True)
    """
    pts = []
    if primary is None:
        primary = self.jacobi_com
    o = self.calculate_orbit(primary=primary)
    if timespan is None:
        if o.a < 0.:  # hyperbolic orbit
            timespan = 2*math.pi*o.d/o.v  # rough time to cross display box
        else:
            timespan = o.P

    lim_phase = abs(o.n)*timespan  # n is negative for hyperbolic orbits

    if trailing is True:
        lim_phase *= -1  # sample phase backwards from current value
    phase = [lim_phase*i/(Npts-1) for i in range(Npts)]

    for i, ph in enumerate(phase):
        if useTrueAnomaly is True:
            newp = Particle(a=o.a, f=o.f+ph, inc=o.inc, omega=o.omega, Omega=o.Omega,
                            e=o.e, m=self.m, primary=primary, simulation=self._sim.contents)
        else:
            newp = Particle(a=o.a, M=o.M+ph, inc=o.inc, omega=o.omega, Omega=o.Omega,
                            e=o.e, m=self.m, primary=primary, simulation=self._sim.contents)
        pts.append(newp.xyz)
    return pts
[ "def", "sample_orbit", "(", "self", ",", "Npts", "=", "100", ",", "primary", "=", "None", ",", "trailing", "=", "True", ",", "timespan", "=", "None", ",", "useTrueAnomaly", "=", "True", ")", ":", "pts", "=", "[", "]", "if", "primary", "is", "None", ":", "primary", "=", "self", ".", "jacobi_com", "o", "=", "self", ".", "calculate_orbit", "(", "primary", "=", "primary", ")", "if", "timespan", "is", "None", ":", "if", "o", ".", "a", "<", "0.", ":", "# hyperbolic orbit", "timespan", "=", "2", "*", "math", ".", "pi", "*", "o", ".", "d", "/", "o", ".", "v", "# rough time to cross display box", "else", ":", "timespan", "=", "o", ".", "P", "lim_phase", "=", "abs", "(", "o", ".", "n", ")", "*", "timespan", "# n is negative for hyperbolic orbits", "if", "trailing", "is", "True", ":", "lim_phase", "*=", "-", "1", "# sample phase backwards from current value", "phase", "=", "[", "lim_phase", "*", "i", "/", "(", "Npts", "-", "1", ")", "for", "i", "in", "range", "(", "Npts", ")", "]", "for", "i", ",", "ph", "in", "enumerate", "(", "phase", ")", ":", "if", "useTrueAnomaly", "is", "True", ":", "newp", "=", "Particle", "(", "a", "=", "o", ".", "a", ",", "f", "=", "o", ".", "f", "+", "ph", ",", "inc", "=", "o", ".", "inc", ",", "omega", "=", "o", ".", "omega", ",", "Omega", "=", "o", ".", "Omega", ",", "e", "=", "o", ".", "e", ",", "m", "=", "self", ".", "m", ",", "primary", "=", "primary", ",", "simulation", "=", "self", ".", "_sim", ".", "contents", ")", "else", ":", "newp", "=", "Particle", "(", "a", "=", "o", ".", "a", ",", "M", "=", "o", ".", "M", "+", "ph", ",", "inc", "=", "o", ".", "inc", ",", "omega", "=", "o", ".", "omega", ",", "Omega", "=", "o", ".", "Omega", ",", "e", "=", "o", ".", "e", ",", "m", "=", "self", ".", "m", ",", "primary", "=", "primary", ",", "simulation", "=", "self", ".", "_sim", ".", "contents", ")", "pts", ".", "append", "(", "newp", ".", "xyz", ")", "return", "pts" ]
55.212766
36.957447
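Usage note: a hedged sketch against the rebound package this Particle method belongs to (masses and orbital elements are arbitrary examples):

import rebound

sim = rebound.Simulation()
sim.add(m=1.0)                   # central star
sim.add(m=1e-3, a=1.0, e=0.1)    # planet
pts = sim.particles[1].sample_orbit(Npts=50)
print(len(pts), pts[0])          # 50 xyz triples along the osculating orbit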
def request(self, url: str, method: str = 'GET', *,
            callback=None,
            encoding: typing.Optional[str] = None,
            headers: dict = None,
            metadata: dict = None,
            request_config: dict = None,
            request_session=None,
            **kwargs):
    """Init a Request class for crawling html"""
    headers = headers or {}
    metadata = metadata or {}
    request_config = request_config or {}
    request_session = request_session or self.request_session

    headers.update(self.headers.copy())
    request_config.update(self.request_config.copy())
    kwargs.update(self.kwargs.copy())

    return Request(url=url,
                   method=method,
                   callback=callback,
                   encoding=encoding,
                   headers=headers,
                   metadata=metadata,
                   request_config=request_config,
                   request_session=request_session,
                   **kwargs)
[ "def", "request", "(", "self", ",", "url", ":", "str", ",", "method", ":", "str", "=", "'GET'", ",", "*", ",", "callback", "=", "None", ",", "encoding", ":", "typing", ".", "Optional", "[", "str", "]", "=", "None", ",", "headers", ":", "dict", "=", "None", ",", "metadata", ":", "dict", "=", "None", ",", "request_config", ":", "dict", "=", "None", ",", "request_session", "=", "None", ",", "*", "*", "kwargs", ")", ":", "headers", "=", "headers", "or", "{", "}", "metadata", "=", "metadata", "or", "{", "}", "request_config", "=", "request_config", "or", "{", "}", "request_session", "=", "request_session", "or", "self", ".", "request_session", "headers", ".", "update", "(", "self", ".", "headers", ".", "copy", "(", ")", ")", "request_config", ".", "update", "(", "self", ".", "request_config", ".", "copy", "(", ")", ")", "kwargs", ".", "update", "(", "self", ".", "kwargs", ".", "copy", "(", ")", ")", "return", "Request", "(", "url", "=", "url", ",", "method", "=", "method", ",", "callback", "=", "callback", ",", "encoding", "=", "encoding", ",", "headers", "=", "headers", ",", "metadata", "=", "metadata", ",", "request_config", "=", "request_config", ",", "request_session", "=", "request_session", ",", "*", "*", "kwargs", ")" ]
33
12.225806
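Usage note: a sketch of calling request from inside a spider-style subclass (the callback and URL are illustrative; the subclass is assumed to define the headers, request_config and kwargs attributes the method reads):

async def parse(self, response):
    # returns a configured Request merging class-level defaults
    yield self.request(url='https://example.com/page/2',
                       metadata={'page': 2},
                       callback=self.parse)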
def equalizer(self, frequency, width_q, gain_db):
    '''Apply a two-pole peaking equalisation (EQ) filter to boost or
    reduce around a given frequency.
    This effect can be applied multiple times to produce complex EQ curves.

    Parameters
    ----------
    frequency : float
        The filter's central frequency in Hz.
    width_q : float
        The filter's width as a Q-factor.
    gain_db : float
        The filter's gain in dB.

    See Also
    --------
    bass, treble
    '''
    if not is_number(frequency) or frequency <= 0:
        raise ValueError("frequency must be a positive number.")

    if not is_number(width_q) or width_q <= 0:
        raise ValueError("width_q must be a positive number.")

    if not is_number(gain_db):
        raise ValueError("gain_db must be a number.")

    effect_args = [
        'equalizer',
        '{:f}'.format(frequency),
        '{:f}q'.format(width_q),
        '{:f}'.format(gain_db)
    ]

    self.effects.extend(effect_args)
    self.effects_log.append('equalizer')

    return self
[ "def", "equalizer", "(", "self", ",", "frequency", ",", "width_q", ",", "gain_db", ")", ":", "if", "not", "is_number", "(", "frequency", ")", "or", "frequency", "<=", "0", ":", "raise", "ValueError", "(", "\"frequency must be a positive number.\"", ")", "if", "not", "is_number", "(", "width_q", ")", "or", "width_q", "<=", "0", ":", "raise", "ValueError", "(", "\"width_q must be a positive number.\"", ")", "if", "not", "is_number", "(", "gain_db", ")", ":", "raise", "ValueError", "(", "\"gain_db must be a number.\"", ")", "effect_args", "=", "[", "'equalizer'", ",", "'{:f}'", ".", "format", "(", "frequency", ")", ",", "'{:f}q'", ".", "format", "(", "width_q", ")", ",", "'{:f}'", ".", "format", "(", "gain_db", ")", "]", "self", ".", "effects", ".", "extend", "(", "effect_args", ")", "self", ".", "effects_log", ".", "append", "(", "'equalizer'", ")", "return", "self" ]
30.675676
19.756757
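Usage note: because the method returns self, calls chain in the usual pysox builder style (file names below are placeholders):

import sox

tfm = sox.Transformer()
tfm.equalizer(frequency=1000.0, width_q=0.707, gain_db=-6.0) \
   .equalizer(frequency=8000.0, width_q=1.0, gain_db=3.0)
tfm.build('input.wav', 'output.wav')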
def download(date_array, tag, sat_id='', data_path=None, user=None,
             password=None, baseline='all', delta='none', options='all',
             file_fmt='ascii'):
    """Routine to download SuperMAG data

    Parameters
    -----------
    date_array : np.array
        Array of datetime objects
    tag : string
        String denoting the type of file to load, accepted values are
        'indices', 'all', 'stations', and '' (for only magnetometer data)
    sat_id : string
        Not used (default='')
    data_path : string or NoneType
        Data path to save downloaded files to (default=None)
    user : string or NoneType
        SuperMAG requires user registration (default=None)
    password : string or NoneType
        Not used; SuperMAG does not require a password (default=None)
    file_fmt : string
        File format options: 'ascii' and 'csv'. (default='ascii')
    baseline : string
        Baseline to remove from magnetometer data. Options are 'all',
        'yearly', and 'none'. (default='all')
    delta : string
        Remove a value from the magnetometer data. Options are 'none',
        'start', and 'median'. (default='none')
    options : string or NoneType
        Additional parameter options for magnetometer data. Includes 'mlt'
        (MLat and MLT), 'decl' (IGRF declination), 'sza' (Solar Zenith
        Angle), 'all', and None. (default='all')

    Returns
    -------

    """
    import sys
    import requests

    global platform, name

    max_stations = 470

    if user is None:
        raise ValueError('SuperMAG requires user registration')

    remoteaccess = {'method': 'http', 'host': 'supermag.jhuapl.edu',
                    'path': 'mag/lib/services',
                    'user': 'user={:s}'.format(user),
                    'service': 'service=', 'options': 'options='}
    remotefmt = "{method}://{host}/{path}/??{user}&{service}&{filefmt}&{start}"

    # Set the tag information
    if tag == "indices":
        tag = "all"

    if tag != "stations":
        remotefmt += "&{interval}&{stations}&{delta}&{baseline}&{options}"

    # Determine whether station or magnetometer data is requested
    remoteaccess['service'] += tag if tag == "stations" else "mag"

    # Add request for file type
    file_fmt = file_fmt.lower()
    if file_fmt not in ['ascii', 'csv']:
        estr = "unknown file format [{:s}], using 'ascii'".format(file_fmt)
        print("WARNING: {:s}".format(estr))
        file_fmt = 'ascii'
    remoteaccess['filefmt'] = 'fmt={:s}'.format(file_fmt)

    # If indices are requested, add them now.
    if tag not in [None, 'stations']:
        remoteaccess['options'] += "+envelope"

    # Add other download options (for non-station files)
    if tag != "stations":
        if options is not None:
            options = options.lower()
            if options == 'all':
                remoteaccess['options'] += "+mlt+sza+decl"
            else:
                remoteaccess['options'] += "+{:s}".format(options)

        # Add requests for baseline subtraction
        baseline = baseline.lower()
        if baseline not in ['all', 'yearly', 'none']:
            estr = "unknown baseline [{:s}], using 'all'".format(baseline)
            print("WARNING: {:s}".format(estr))
            baseline = 'all'
        remoteaccess['baseline'] = "baseline={:s}".format(baseline)

        delta = delta.lower()
        if delta not in ['none', 'median', 'start']:
            estr = "unknown delta [{:s}], using 'none'".format(delta)
            print("WARNING: {:s}".format(estr))
            delta = 'none'
        remoteaccess['delta'] = 'delta={:s}'.format(delta)

        # Set the time information and format
        remoteaccess['interval'] = "interval=23:59"
        sfmt = "%Y-%m-%dT00:00:00.000"
        tag_str = "_" if tag is None else "_all_"
        ffmt = "{:s}_{:s}{:s}%Y%m%d.{:s}".format(platform, name, tag_str,
                                                 "txt" if file_fmt == "ascii"
                                                 else file_fmt)
        start_str = "start="
    else:
        # Set the time format
        sfmt = "%Y"
        ffmt = "{:s}_{:s}_{:s}_%Y.{:s}".format(platform, name, tag,
                                               "txt" if file_fmt == "ascii"
                                               else file_fmt)
        start_str = "year="

    # Cycle through all of the dates, formatting them to achieve a unique set
    # of times to download data
    date_fmts = list(set([dd.strftime(sfmt) for dd in date_array]))

    # Now that the unique dates are known, construct the file names
    name_fmts = [None for dd in date_fmts]
    for dd in date_array:
        i = date_fmts.index(dd.strftime(sfmt))
        name_fmts[i] = dd.strftime(ffmt)

    if None in name_fmts:
        raise ValueError("unable to construct all unique file names")

    # Cycle through all of the unique dates.  Station lists are yearly and
    # magnetometer data is daily
    station_year = None
    istr = 'SuperMAG {:s}'.format(tag if tag == "stations" else "data")
    for i, date in enumerate(date_fmts):
        print("Downloading {:s} for {:s}".format(istr, date.split("T")[0]))
        sys.stdout.flush()
        nreq = 1

        # Add the start time and download period to query
        remoteaccess['start'] = "{:s}{:s}".format(start_str, date)

        if tag != "stations":
            # Station lists are for each year, see if this year is loaded
            current_date = pds.datetime.strptime(date, "%Y-%m-%dT%H:%M:%S.000")

            if current_date.year != station_year:
                # Get all of the stations for this time
                smag_stat = pysat.Instrument(platform=platform, name=name,
                                             tag='stations')
                # try to load data
                smag_stat.load(date=current_date)
                if smag_stat.empty:
                    # no data
                    etime = current_date + pds.DateOffset(days=1)
                    smag_stat.download(start=current_date, stop=etime,
                                       user=user, password=password,
                                       file_fmt=file_fmt)
                    smag_stat.load(date=current_date)
                    if smag_stat.empty:
                        # no data
                        estr = "unable to format station query for "
                        estr += "[{:d}]".format(current_date.year)
                        raise ValueError(estr)

                # Format a string of the station names
                if smag_stat.data.IAGA.shape[0] > max_stations:
                    station_year = current_date.year
                    nreq = int(np.ceil(smag_stat.data.IAGA.shape[0] /
                                       float(max_stations)))

        out = list()
        for ireq in range(nreq):
            if tag != "stations":
                if station_year is None:
                    raise RuntimeError("unable to load station data")

                stat_str = ",".join(smag_stat.data.IAGA[ireq*max_stations:
                                                        (ireq+1)*max_stations])
                remoteaccess['stations'] = "stations={:s}".format(stat_str)

            # Format the query
            url = remotefmt.format(**remoteaccess)

            # Set up a request
            try:
                # print (url)
                result = requests.post(url)
                result.encoding = 'ISO-8859-1'
                # handle strings differently for python 2/3
                if sys.version_info.major == 2:
                    out.append(str(result.text.encode('ascii', 'replace')))
                else:
                    out.append(result.text)
            except Exception:
                raise RuntimeError("unable to connect to [{:s}]".format(url))

            # Test the result
            if "requested URL was rejected" in out[-1]:
                estr = "Requested url was rejected:\n{:s}".format(url)
                raise RuntimeError(estr)

        # Build the output file name (both original branches were identical,
        # so they are collapsed into one assignment)
        fname = path.join(data_path, name_fmts[i])

        # If more than one data pass was needed, append the files
        if len(out) > 1:
            out_data = append_data(out, file_fmt, tag)
        else:
            out_data = out[0]

        # Save the file data
        with open(fname, "w") as local_file:
            local_file.write(out_data)
            local_file.close()
        del out_data

    return
[ "def", "download", "(", "date_array", ",", "tag", ",", "sat_id", "=", "''", ",", "data_path", "=", "None", ",", "user", "=", "None", ",", "password", "=", "None", ",", "baseline", "=", "'all'", ",", "delta", "=", "'none'", ",", "options", "=", "'all'", ",", "file_fmt", "=", "'ascii'", ")", ":", "import", "sys", "import", "requests", "global", "platform", ",", "name", "max_stations", "=", "470", "if", "user", "is", "None", ":", "raise", "ValueError", "(", "'SuperMAG requires user registration'", ")", "remoteaccess", "=", "{", "'method'", ":", "'http'", ",", "'host'", ":", "'supermag.jhuapl.edu'", ",", "'path'", ":", "'mag/lib/services'", ",", "'user'", ":", "'user={:s}'", ".", "format", "(", "user", ")", ",", "'service'", ":", "'service='", ",", "'options'", ":", "'options='", "}", "remotefmt", "=", "\"{method}://{host}/{path}/??{user}&{service}&{filefmt}&{start}\"", "# Set the tag information", "if", "tag", "==", "\"indices\"", ":", "tag", "=", "\"all\"", "if", "tag", "!=", "\"stations\"", ":", "remotefmt", "+=", "\"&{interval}&{stations}&{delta}&{baseline}&{options}\"", "# Determine whether station or magnetometer data is requested", "remoteaccess", "[", "'service'", "]", "+=", "tag", "if", "tag", "==", "\"stations\"", "else", "\"mag\"", "# Add request for file type", "file_fmt", "=", "file_fmt", ".", "lower", "(", ")", "if", "not", "file_fmt", "in", "[", "'ascii'", ",", "'csv'", "]", ":", "estr", "=", "\"unknown file format [{:s}], using 'ascii'\"", ".", "format", "(", "file_fmt", ")", "print", "(", "\"WARNING: {:s}\"", ".", "format", "(", "estr", ")", ")", "file_fmt", "=", "'ascii'", "remoteaccess", "[", "'filefmt'", "]", "=", "'fmt={:s}'", ".", "format", "(", "file_fmt", ")", "# If indices are requested, add them now.", "if", "not", "tag", "in", "[", "None", ",", "'stations'", "]", ":", "remoteaccess", "[", "'options'", "]", "+=", "\"+envelope\"", "# Add other download options (for non-station files)", "if", "tag", "!=", "\"stations\"", ":", "if", "options", "is", "not", "None", ":", "options", "=", "options", ".", "lower", "(", ")", "if", "options", "is", "'all'", ":", "remoteaccess", "[", "'options'", "]", "+=", "\"+mlt+sza+decl\"", "else", ":", "remoteaccess", "[", "'options'", "]", "+=", "\"+{:s}\"", ".", "format", "(", "options", ")", "# Add requests for baseline substraction", "baseline", "=", "baseline", ".", "lower", "(", ")", "if", "not", "baseline", "in", "[", "'all'", ",", "'yearly'", ",", "'none'", "]", ":", "estr", "=", "\"unknown baseline [{:s}], using 'all'\"", ".", "format", "(", "baseline", ")", "print", "(", "\"WARNING: {:s}\"", ".", "format", "(", "estr", ")", ")", "baseline", "=", "'all'", "remoteaccess", "[", "'baseline'", "]", "=", "\"baseline={:s}\"", ".", "format", "(", "baseline", ")", "delta", "=", "delta", ".", "lower", "(", ")", "if", "not", "delta", "in", "[", "'none'", ",", "'median'", ",", "'start'", "]", ":", "estr", "=", "\"unknown delta [{:s}], using 'none'\"", ".", "format", "(", "delta", ")", "print", "(", "\"WARNING: {:s}\"", ".", "format", "(", "estr", ")", ")", "delta", "=", "'none'", "remoteaccess", "[", "'delta'", "]", "=", "'delta={:s}'", ".", "format", "(", "delta", ")", "# Set the time information and format", "remoteaccess", "[", "'interval'", "]", "=", "\"interval=23:59\"", "sfmt", "=", "\"%Y-%m-%dT00:00:00.000\"", "tag_str", "=", "\"_\"", "if", "tag", "is", "None", "else", "\"_all_\"", "ffmt", "=", "\"{:s}_{:s}{:s}%Y%m%d.{:s}\"", ".", "format", "(", "platform", ",", "name", ",", "tag_str", ",", "\"txt\"", "if", "file_fmt", "==", 
"\"ascii\"", "else", "file_fmt", ")", "start_str", "=", "\"start=\"", "else", ":", "# Set the time format", "sfmt", "=", "\"%Y\"", "ffmt", "=", "\"{:s}_{:s}_{:s}_%Y.{:s}\"", ".", "format", "(", "platform", ",", "name", ",", "tag", ",", "\"txt\"", "if", "file_fmt", "==", "\"ascii\"", "else", "file_fmt", ")", "start_str", "=", "\"year=\"", "# Cycle through all of the dates, formatting them to achieve a unique set", "# of times to download data", "date_fmts", "=", "list", "(", "set", "(", "[", "dd", ".", "strftime", "(", "sfmt", ")", "for", "dd", "in", "date_array", "]", ")", ")", "# Now that the unique dates are known, construct the file names", "name_fmts", "=", "[", "None", "for", "dd", "in", "date_fmts", "]", "for", "dd", "in", "date_array", ":", "i", "=", "date_fmts", ".", "index", "(", "dd", ".", "strftime", "(", "sfmt", ")", ")", "name_fmts", "[", "i", "]", "=", "dd", ".", "strftime", "(", "ffmt", ")", "if", "None", "in", "name_fmts", ":", "raise", "ValueError", "(", "\"unable to construct all unique file names\"", ")", "# Cycle through all of the unique dates. Stations lists are yearly and", "# magnetometer data is daily", "station_year", "=", "None", "istr", "=", "'SuperMAG {:s}'", ".", "format", "(", "tag", "if", "tag", "==", "\"stations\"", "else", "\"data\"", ")", "for", "i", ",", "date", "in", "enumerate", "(", "date_fmts", ")", ":", "print", "(", "\"Downloading {:s} for {:s}\"", ".", "format", "(", "istr", ",", "date", ".", "split", "(", "\"T\"", ")", "[", "0", "]", ")", ")", "sys", ".", "stdout", ".", "flush", "(", ")", "nreq", "=", "1", "# Add the start time and download period to query", "remoteaccess", "[", "'start'", "]", "=", "\"{:s}{:s}\"", ".", "format", "(", "start_str", ",", "date", ")", "if", "tag", "!=", "\"stations\"", ":", "# Station lists are for each year, see if this year is loaded", "current_date", "=", "pds", ".", "datetime", ".", "strptime", "(", "date", ",", "\"%Y-%m-%dT%H:%M:%S.000\"", ")", "if", "current_date", ".", "year", "!=", "station_year", ":", "# Get all of the stations for this time", "smag_stat", "=", "pysat", ".", "Instrument", "(", "platform", "=", "platform", ",", "name", "=", "name", ",", "tag", "=", "'stations'", ")", "# try to load data", "smag_stat", ".", "load", "(", "date", "=", "current_date", ")", "if", "smag_stat", ".", "empty", ":", "# no data", "etime", "=", "current_date", "+", "pds", ".", "DateOffset", "(", "days", "=", "1", ")", "smag_stat", ".", "download", "(", "start", "=", "current_date", ",", "stop", "=", "etime", ",", "user", "=", "user", ",", "password", "=", "password", ",", "file_fmt", "=", "file_fmt", ")", "smag_stat", ".", "load", "(", "date", "=", "current_date", ")", "if", "smag_stat", ".", "empty", ":", "# no data", "estr", "=", "\"unable to format station query for \"", "estr", "+=", "\"[{:d}]\"", ".", "format", "(", "current_date", ".", "year", ")", "raise", "ValueError", "(", "estr", ")", "# Format a string of the station names", "if", "smag_stat", ".", "data", ".", "IAGA", ".", "shape", "[", "0", "]", ">", "max_stations", ":", "station_year", "=", "current_date", ".", "year", "nreq", "=", "int", "(", "np", ".", "ceil", "(", "smag_stat", ".", "data", ".", "IAGA", ".", "shape", "[", "0", "]", "/", "float", "(", "max_stations", ")", ")", ")", "out", "=", "list", "(", ")", "for", "ireq", "in", "range", "(", "nreq", ")", ":", "if", "tag", "!=", "\"stations\"", ":", "if", "station_year", "is", "None", ":", "raise", "RuntimeError", "(", "\"unable to load station data\"", ")", "stat_str", "=", "\",\"", ".", "join", "(", 
"smag_stat", ".", "data", ".", "IAGA", "[", "ireq", "*", "max_stations", ":", "(", "ireq", "+", "1", ")", "*", "max_stations", "]", ")", "remoteaccess", "[", "'stations'", "]", "=", "\"stations={:s}\"", ".", "format", "(", "stat_str", ")", "# Format the query", "url", "=", "remotefmt", ".", "format", "(", "*", "*", "remoteaccess", ")", "# Set up a request", "try", ":", "# print (url)", "result", "=", "requests", ".", "post", "(", "url", ")", "result", ".", "encoding", "=", "'ISO-8859-1'", "# handle strings differently for python 2/3", "if", "sys", ".", "version_info", ".", "major", "==", "2", ":", "out", ".", "append", "(", "str", "(", "result", ".", "text", ".", "encode", "(", "'ascii'", ",", "'replace'", ")", ")", ")", "else", ":", "out", ".", "append", "(", "result", ".", "text", ")", "except", ":", "raise", "RuntimeError", "(", "\"unable to connect to [{:s}]\"", ".", "format", "(", "url", ")", ")", "# Test the result", "if", "\"requested URL was rejected\"", "in", "out", "[", "-", "1", "]", ":", "estr", "=", "\"Requested url was rejected:\\n{:s}\"", ".", "format", "(", "url", ")", "raise", "RuntimeError", "(", "estr", ")", "# Build the output file name", "if", "tag", "is", "''", ":", "fname", "=", "path", ".", "join", "(", "data_path", ",", "name_fmts", "[", "i", "]", ")", "else", ":", "fname", "=", "path", ".", "join", "(", "data_path", ",", "name_fmts", "[", "i", "]", ")", "# If more than one data pass was needed, append the files", "if", "len", "(", "out", ")", ">", "1", ":", "out_data", "=", "append_data", "(", "out", ",", "file_fmt", ",", "tag", ")", "else", ":", "out_data", "=", "out", "[", "0", "]", "# Save the file data", "with", "open", "(", "fname", ",", "\"w\"", ")", "as", "local_file", ":", "local_file", ".", "write", "(", "out_data", ")", "local_file", ".", "close", "(", ")", "del", "out_data", "return" ]
38.677419
21.069124
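Usage note: a hedged driver for the download routine (paths and credentials are placeholders; pds is the pandas alias the function itself uses):

import numpy as np
import pandas as pds

dates = np.array([pds.datetime(2019, 6, 1), pds.datetime(2019, 6, 2)])
download(dates, tag='all', data_path='/tmp/supermag',
         user='registered_user', baseline='yearly', file_fmt='csv')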
def _do_layout(self):
    """Adds widgets to the aui manager and controls the layout"""
    # Set background color for the toolbar via the manager
    ap = self._mgr.GetArtProvider()
    ap.SetColour(aui.AUI_DOCKART_BACKGROUND_GRADIENT_COLOUR,
                 get_color(config["background_color"]))

    # Add the toolbars to the manager
    self._mgr.AddPane(self.main_toolbar, aui.AuiPaneInfo().
                      Name("main_window_toolbar").
                      Caption(_("Main toolbar")).
                      ToolbarPane().Top().Row(0))
    self._mgr.AddPane(self.find_toolbar, aui.AuiPaneInfo().
                      Name("find_toolbar").Caption(_("Find toolbar")).
                      ToolbarPane().Top().Row(0))
    self._mgr.AddPane(self.attributes_toolbar, aui.AuiPaneInfo().
                      Name("attributes_toolbar").
                      Caption(_("Format toolbar")).
                      ToolbarPane().Top().Row(1))
    self._mgr.AddPane(self.macro_toolbar, aui.AuiPaneInfo().
                      Name("macro_toolbar").Caption(_("Macro toolbar")).
                      Gripper(True).ToolbarPane().Top().Row(1))
    self._mgr.AddPane(self.widget_toolbar, aui.AuiPaneInfo().
                      Name("widget_toolbar").Caption(_("Widget toolbar")).
                      Gripper(True).ToolbarPane().Top().Row(1))
    self._mgr.AddPane(self.entry_line_panel, aui.AuiPaneInfo().
                      Name("entry_line_panel").Caption(_("Entry line")).
                      Gripper(False).CenterPane().Top().Row(2).
                      BestSize(400, 30).PaneBorder(False))
    self._mgr.AddPane(self.table_list_panel, aui.AuiPaneInfo().
                      Name("table_list_panel").Caption(_("Table")).
                      CenterPane().Left().BestSize(50, 300))
    self._mgr.AddPane(self.macro_panel, aui.AuiPaneInfo().
                      Name("macro_panel").Caption(_("Macro panel")).
                      Gripper(False).CenterPane().Right().
                      BestSize(200, 200))

    # Load perspective from config
    window_layout = config["window_layout"]

    if window_layout:
        self._mgr.LoadPerspective(window_layout)

    # Add the main grid
    self._mgr.AddPane(self.grid, aui.AuiPaneInfo().
                      Name("grid").Caption(_("Main grid")).CentrePane())

    # Tell the manager to 'commit' all the changes just made
    self._mgr.Update()

    self._mgr.GetPane("attributes_toolbar")
    self._mgr.Update()

    self._set_menu_toggles()

    # Set initial size to config value
    self.SetInitialSize(config["window_size"])
    self.SetMinSize((10, 10))

    # TODO: Set window position fix --> different positions for
    #       different window managers prevent this
    self.SetPosition(config["window_position"])
[ "def", "_do_layout", "(", "self", ")", ":", "# Set background color for the toolbar via the manager", "ap", "=", "self", ".", "_mgr", ".", "GetArtProvider", "(", ")", "ap", ".", "SetColour", "(", "aui", ".", "AUI_DOCKART_BACKGROUND_GRADIENT_COLOUR", ",", "get_color", "(", "config", "[", "\"background_color\"", "]", ")", ")", "# Add the toolbars to the manager", "self", ".", "_mgr", ".", "AddPane", "(", "self", ".", "main_toolbar", ",", "aui", ".", "AuiPaneInfo", "(", ")", ".", "Name", "(", "\"main_window_toolbar\"", ")", ".", "Caption", "(", "_", "(", "\"Main toolbar\"", ")", ")", ".", "ToolbarPane", "(", ")", ".", "Top", "(", ")", ".", "Row", "(", "0", ")", ")", "self", ".", "_mgr", ".", "AddPane", "(", "self", ".", "find_toolbar", ",", "aui", ".", "AuiPaneInfo", "(", ")", ".", "Name", "(", "\"find_toolbar\"", ")", ".", "Caption", "(", "_", "(", "\"Find toolbar\"", ")", ")", ".", "ToolbarPane", "(", ")", ".", "Top", "(", ")", ".", "Row", "(", "0", ")", ")", "self", ".", "_mgr", ".", "AddPane", "(", "self", ".", "attributes_toolbar", ",", "aui", ".", "AuiPaneInfo", "(", ")", ".", "Name", "(", "\"attributes_toolbar\"", ")", ".", "Caption", "(", "_", "(", "\"Format toolbar\"", ")", ")", ".", "ToolbarPane", "(", ")", ".", "Top", "(", ")", ".", "Row", "(", "1", ")", ")", "self", ".", "_mgr", ".", "AddPane", "(", "self", ".", "macro_toolbar", ",", "aui", ".", "AuiPaneInfo", "(", ")", ".", "Name", "(", "\"macro_toolbar\"", ")", ".", "Caption", "(", "_", "(", "\"Macro toolbar\"", ")", ")", ".", "Gripper", "(", "True", ")", ".", "ToolbarPane", "(", ")", ".", "Top", "(", ")", ".", "Row", "(", "1", ")", ")", "self", ".", "_mgr", ".", "AddPane", "(", "self", ".", "widget_toolbar", ",", "aui", ".", "AuiPaneInfo", "(", ")", ".", "Name", "(", "\"widget_toolbar\"", ")", ".", "Caption", "(", "_", "(", "\"Widget toolbar\"", ")", ")", ".", "Gripper", "(", "True", ")", ".", "ToolbarPane", "(", ")", ".", "Top", "(", ")", ".", "Row", "(", "1", ")", ")", "self", ".", "_mgr", ".", "AddPane", "(", "self", ".", "entry_line_panel", ",", "aui", ".", "AuiPaneInfo", "(", ")", ".", "Name", "(", "\"entry_line_panel\"", ")", ".", "Caption", "(", "_", "(", "\"Entry line\"", ")", ")", ".", "Gripper", "(", "False", ")", ".", "CenterPane", "(", ")", ".", "Top", "(", ")", ".", "Row", "(", "2", ")", ".", "BestSize", "(", "400", ",", "30", ")", ".", "PaneBorder", "(", "False", ")", ")", "self", ".", "_mgr", ".", "AddPane", "(", "self", ".", "table_list_panel", ",", "aui", ".", "AuiPaneInfo", "(", ")", ".", "Name", "(", "\"table_list_panel\"", ")", ".", "Caption", "(", "_", "(", "\"Table\"", ")", ")", ".", "CenterPane", "(", ")", ".", "Left", "(", ")", ".", "BestSize", "(", "50", ",", "300", ")", ")", "self", ".", "_mgr", ".", "AddPane", "(", "self", ".", "macro_panel", ",", "aui", ".", "AuiPaneInfo", "(", ")", ".", "Name", "(", "\"macro_panel\"", ")", ".", "Caption", "(", "_", "(", "\"Macro panel\"", ")", ")", ".", "Gripper", "(", "False", ")", ".", "CenterPane", "(", ")", ".", "Right", "(", ")", ".", "BestSize", "(", "200", ",", "200", ")", ")", "# Load perspective from config", "window_layout", "=", "config", "[", "\"window_layout\"", "]", "if", "window_layout", ":", "self", ".", "_mgr", ".", "LoadPerspective", "(", "window_layout", ")", "# Add the main grid", "self", ".", "_mgr", ".", "AddPane", "(", "self", ".", "grid", ",", "aui", ".", "AuiPaneInfo", "(", ")", ".", "Name", "(", "\"grid\"", ")", ".", "Caption", "(", "_", "(", "\"Main grid\"", ")", ")", ".", "CentrePane", "(", ")", ")", "# Tell the manager to 'commit' all 
the changes just made", "self", ".", "_mgr", ".", "Update", "(", ")", "self", ".", "_mgr", ".", "GetPane", "(", "\"attributes_toolbar\"", ")", "self", ".", "_mgr", ".", "Update", "(", ")", "self", ".", "_set_menu_toggles", "(", ")", "# Set initial size to config value", "self", ".", "SetInitialSize", "(", "config", "[", "\"window_size\"", "]", ")", "self", ".", "SetMinSize", "(", "(", "10", ",", "10", ")", ")", "# TODO: Set window position fix --> different positions for", "# different window managers prevent this", "self", ".", "SetPosition", "(", "config", "[", "\"window_position\"", "]", ")" ]
41.352113
23.84507
def create_connection(self, session=None):
    """
    Create connection in the Connection table, according to whether it uses
    proxy, TCP, UNIX sockets, SSL. Connection ID will be randomly generated.

    :param session: Session of the SQL Alchemy ORM (automatically generated
                    with decorator).
    """
    connection = Connection(conn_id=self.db_conn_id)
    uri = self._generate_connection_uri()
    self.log.info("Creating connection %s", self.db_conn_id)
    connection.parse_from_uri(uri)
    session.add(connection)
    session.commit()
[ "def", "create_connection", "(", "self", ",", "session", "=", "None", ")", ":", "connection", "=", "Connection", "(", "conn_id", "=", "self", ".", "db_conn_id", ")", "uri", "=", "self", ".", "_generate_connection_uri", "(", ")", "self", ".", "log", ".", "info", "(", "\"Creating connection %s\"", ",", "self", ".", "db_conn_id", ")", "connection", ".", "parse_from_uri", "(", "uri", ")", "session", ".", "add", "(", "connection", ")", "session", ".", "commit", "(", ")" ]
42.857143
17.285714
def _annotate_crossmatch_with_value_added_parameters(
        self,
        crossmatchDict,
        catalogueName,
        searchPara,
        search_name):
    """*annotate each crossmatch with physical parameters such as distances etc*

    **Key Arguments:**
        - ``crossmatchDict`` -- the crossmatch dictionary
        - ``catalogueName`` -- the name of the catalogue the crossmatch results from
        - ``searchPara`` -- the search parameters for this individual search as lifted from the search algorithm in the sherlock settings file
        - ``search_name`` -- the name of the search as given in the sherlock settings file

    **Return:**
        - ``crossmatchDict`` -- the annotated crossmatch dictionary

    .. todo ::

        - update key arguments values and definitions with defaults
        - update return values and definitions
        - update usage examples and text
        - update docstring text
        - check sublime snippet exists
        - clip any useful text to docs mindmap
        - regenerate the docs and check rendering of this docstring
    """
    self.log.debug(
        'starting the ``_annotate_crossmatch_with_value_added_parameters`` method')

    redshift = None
    z = None
    scale = None
    distance = None
    distance_modulus = None
    major_axis_arcsec = None
    direct_distance = None
    direct_distance_scale = None
    direct_distance_modulus = None

    # IF THERE'S A REDSHIFT, CALCULATE PHYSICAL PARAMETERS
    if 'z' in crossmatchDict:
        # THE CATALOGUE HAS A REDSHIFT COLUMN
        redshift = crossmatchDict['z']
    elif 'photoZ' in crossmatchDict:
        redshift = crossmatchDict['photoZ']
    if redshift and redshift > 0.0:
        # CALCULATE DISTANCE MODULUS, ETC
        c = converter(log=self.log)
        dists = c.redshift_to_distance(
            z=redshift,
            WM=0.3,
            WV=0.7,
            H0=70.0
        )
        if dists:
            z = dists['z']
            scale = dists["da_scale"]
            distance = dists["dl_mpc"]
            distance_modulus = dists["dmod"]

    # ADD MAJOR AXIS VALUE
    if "or within semi major axis" in searchPara and searchPara["or within semi major axis"] == True and "semiMajor" in crossmatchDict and crossmatchDict["semiMajor"]:
        major_axis_arcsec = crossmatchDict[
            "semiMajor"] * self.colMaps[catalogueName]["semiMajorToArcsec"]
    if "semiMajor" in crossmatchDict:
        del crossmatchDict["semiMajor"]

    # ADD DISTANCE VALUES
    if "distance" in crossmatchDict and crossmatchDict["distance"]:
        direct_distance = crossmatchDict["distance"]
        direct_distance_scale = direct_distance / 206.264806
        direct_distance_modulus = 5 * \
            math.log10(direct_distance * 1e6) - 5

    # crossmatchDict['z'] = z
    crossmatchDict['scale'] = scale
    crossmatchDict['distance'] = distance
    crossmatchDict['distance_modulus'] = distance_modulus
    crossmatchDict['major_axis_arcsec'] = major_axis_arcsec
    crossmatchDict['direct_distance'] = direct_distance
    crossmatchDict['direct_distance_scale'] = direct_distance_scale
    crossmatchDict['direct_distance_modulus'] = direct_distance_modulus

    crossmatchDict['catalogue_object_type'] = self.colMaps[
        catalogueName]["object_type"]

    crossmatchDict["search_name"] = search_name
    crossmatchDict["raDeg"] = crossmatchDict["ra"]
    crossmatchDict["decDeg"] = crossmatchDict["dec"]
    del crossmatchDict["ra"]
    del crossmatchDict["dec"]
    crossmatchDict["original_search_radius_arcsec"] = searchPara[
        "angular radius arcsec"]

    physical_separation_kpc = None
    # CALCULATE MOST ACCURATE PHYSICAL SEPARATION
    if crossmatchDict["direct_distance_scale"]:
        physical_separation_kpc = crossmatchDict[
            "direct_distance_scale"] * crossmatchDict["separationArcsec"]
    elif crossmatchDict["scale"]:
        physical_separation_kpc = crossmatchDict[
            "scale"] * crossmatchDict["separationArcsec"]

    crossmatchDict["physical_separation_kpc"] = physical_separation_kpc

    self.log.debug(
        'completed the ``_annotate_crossmatch_with_value_added_parameters`` method')
    return crossmatchDict
[ "def", "_annotate_crossmatch_with_value_added_parameters", "(", "self", ",", "crossmatchDict", ",", "catalogueName", ",", "searchPara", ",", "search_name", ")", ":", "self", ".", "log", ".", "debug", "(", "'starting the ``_annotate_crossmatch_with_value_added_parameters`` method'", ")", "redshift", "=", "None", "z", "=", "None", "scale", "=", "None", "distance", "=", "None", "distance_modulus", "=", "None", "major_axis_arcsec", "=", "None", "direct_distance", "=", "None", "direct_distance_scale", "=", "None", "direct_distance_modulus", "=", "None", "# IF THERE'S A REDSHIFT, CALCULATE PHYSICAL PARAMETERS", "if", "'z'", "in", "crossmatchDict", ":", "# THE CATALOGUE HAS A REDSHIFT COLUMN", "redshift", "=", "crossmatchDict", "[", "'z'", "]", "elif", "'photoZ'", "in", "crossmatchDict", ":", "redshift", "=", "crossmatchDict", "[", "'photoZ'", "]", "if", "redshift", "and", "redshift", ">", "0.0", ":", "# CALCULATE DISTANCE MODULUS, ETC", "c", "=", "converter", "(", "log", "=", "self", ".", "log", ")", "dists", "=", "c", ".", "redshift_to_distance", "(", "z", "=", "redshift", ",", "WM", "=", "0.3", ",", "WV", "=", "0.7", ",", "H0", "=", "70.0", ")", "if", "dists", ":", "z", "=", "dists", "[", "'z'", "]", "scale", "=", "dists", "[", "\"da_scale\"", "]", "distance", "=", "dists", "[", "\"dl_mpc\"", "]", "distance_modulus", "=", "dists", "[", "\"dmod\"", "]", "# ADD MAJOR AXIS VALUE", "if", "\"or within semi major axis\"", "in", "searchPara", "and", "searchPara", "[", "\"or within semi major axis\"", "]", "==", "True", "and", "\"semiMajor\"", "in", "crossmatchDict", "and", "crossmatchDict", "[", "\"semiMajor\"", "]", ":", "major_axis_arcsec", "=", "crossmatchDict", "[", "\"semiMajor\"", "]", "*", "self", ".", "colMaps", "[", "catalogueName", "]", "[", "\"semiMajorToArcsec\"", "]", "if", "\"semiMajor\"", "in", "crossmatchDict", ":", "del", "crossmatchDict", "[", "\"semiMajor\"", "]", "# ADD DISTANCE VALUES", "if", "\"distance\"", "in", "crossmatchDict", "and", "crossmatchDict", "[", "\"distance\"", "]", ":", "direct_distance", "=", "crossmatchDict", "[", "\"distance\"", "]", "direct_distance_scale", "=", "direct_distance", "/", "206.264806", "direct_distance_modulus", "=", "5", "*", "math", ".", "log10", "(", "direct_distance", "*", "1e6", ")", "-", "5", "# crossmatchDict['z'] = z", "crossmatchDict", "[", "'scale'", "]", "=", "scale", "crossmatchDict", "[", "'distance'", "]", "=", "distance", "crossmatchDict", "[", "'distance_modulus'", "]", "=", "distance_modulus", "crossmatchDict", "[", "'major_axis_arcsec'", "]", "=", "major_axis_arcsec", "crossmatchDict", "[", "'direct_distance'", "]", "=", "direct_distance", "crossmatchDict", "[", "'direct_distance_scale'", "]", "=", "direct_distance_scale", "crossmatchDict", "[", "'direct_distance_modulus'", "]", "=", "direct_distance_modulus", "crossmatchDict", "[", "'catalogue_object_type'", "]", "=", "self", ".", "colMaps", "[", "catalogueName", "]", "[", "\"object_type\"", "]", "crossmatchDict", "[", "\"search_name\"", "]", "=", "search_name", "crossmatchDict", "[", "\"raDeg\"", "]", "=", "crossmatchDict", "[", "\"ra\"", "]", "crossmatchDict", "[", "\"decDeg\"", "]", "=", "crossmatchDict", "[", "\"dec\"", "]", "del", "crossmatchDict", "[", "\"ra\"", "]", "del", "crossmatchDict", "[", "\"dec\"", "]", "crossmatchDict", "[", "\"original_search_radius_arcsec\"", "]", "=", "searchPara", "[", "\"angular radius arcsec\"", "]", "physical_separation_kpc", "=", "None", "# CALCULATE MOST ACCURATE PHYSICAL SEPARATION", "if", "crossmatchDict", "[", 
"\"direct_distance_scale\"", "]", ":", "physical_separation_kpc", "=", "crossmatchDict", "[", "\"direct_distance_scale\"", "]", "*", "crossmatchDict", "[", "\"separationArcsec\"", "]", "elif", "crossmatchDict", "[", "\"scale\"", "]", ":", "physical_separation_kpc", "=", "crossmatchDict", "[", "\"scale\"", "]", "*", "crossmatchDict", "[", "\"separationArcsec\"", "]", "crossmatchDict", "[", "\"physical_separation_kpc\"", "]", "=", "physical_separation_kpc", "self", ".", "log", ".", "debug", "(", "'completed the ``_annotate_crossmatch_with_value_added_parameters`` method'", ")", "return", "crossmatchDict" ]
41.574074
19.981481
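Worked check of the distance relations used above, for a direct distance of 10 Mpc:

import math

direct_distance = 10.0                               # Mpc
scale = direct_distance / 206.264806                 # ~0.0485 kpc per arcsec
modulus = 5 * math.log10(direct_distance * 1e6) - 5  # 5*log10(1e7) - 5 = 30.0 mag
separation_kpc = scale * 20.0                        # ~0.97 kpc at 20 arcsec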
def iter_orgs(self, number=-1, etag=None):
    """Iterate over organizations the user is member of

    :param int number: (optional), number of organizations to return.
        Default: -1 returns all available organizations
    :param str etag: (optional), ETag from a previous request to the same
        endpoint
    :returns: list of :class:`Organization <github3.orgs.Organization>`\ s
    """
    # Import here, because a toplevel import causes an import loop
    from .orgs import Organization
    url = self._build_url('orgs', base_url=self._api)
    return self._iter(int(number), url, Organization, etag=etag)
[ "def", "iter_orgs", "(", "self", ",", "number", "=", "-", "1", ",", "etag", "=", "None", ")", ":", "# Import here, because a toplevel import causes an import loop", "from", ".", "orgs", "import", "Organization", "url", "=", "self", ".", "_build_url", "(", "'orgs'", ",", "base_url", "=", "self", ".", "_api", ")", "return", "self", ".", "_iter", "(", "int", "(", "number", ")", ",", "url", ",", "Organization", ",", "etag", "=", "etag", ")" ]
49.538462
19.846154
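Usage note: a sketch against the legacy github3.py 0.x API this method comes from (credentials are placeholders):

import github3

gh = github3.login(token='...')   # or github3.login('user', password='...')
me = gh.user()
for org in me.iter_orgs(number=10):
    print(org.login)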
def new_scheduler(self,
                  tasks,
                  root_subject_types,
                  build_root,
                  work_dir,
                  local_store_dir,
                  ignore_patterns,
                  execution_options,
                  construct_directory_digest,
                  construct_snapshot,
                  construct_file_content,
                  construct_files_content,
                  construct_process_result,
                  type_address,
                  type_path_globs,
                  type_directory_digest,
                  type_snapshot,
                  type_merge_snapshots_request,
                  type_files_content,
                  type_dir,
                  type_file,
                  type_link,
                  type_process_request,
                  type_process_result,
                  type_generator,
                  type_url_to_fetch):
    """Create and return an ExternContext and native Scheduler."""

    def func(fn):
        return Function(self.context.to_key(fn))

    def ti(type_obj):
        return TypeId(self.context.to_id(type_obj))

    scheduler = self.lib.scheduler_create(
        tasks,
        # Constructors/functions.
        func(construct_directory_digest),
        func(construct_snapshot),
        func(construct_file_content),
        func(construct_files_content),
        func(construct_process_result),
        # Types.
        ti(type_address),
        ti(type_path_globs),
        ti(type_directory_digest),
        ti(type_snapshot),
        ti(type_merge_snapshots_request),
        ti(type_files_content),
        ti(type_dir),
        ti(type_file),
        ti(type_link),
        ti(type_process_request),
        ti(type_process_result),
        ti(type_generator),
        ti(type_url_to_fetch),
        ti(text_type),
        ti(binary_type),
        # Project tree.
        self.context.utf8_buf(build_root),
        self.context.utf8_buf(work_dir),
        self.context.utf8_buf(local_store_dir),
        self.context.utf8_buf_buf(ignore_patterns),
        self.to_ids_buf(root_subject_types),
        # Remote execution config.
        self.context.utf8_buf_buf(execution_options.remote_store_server),
        # We can't currently pass Options to the rust side, so we pass empty strings for None.
        self.context.utf8_buf(execution_options.remote_execution_server or ""),
        self.context.utf8_buf(execution_options.remote_execution_process_cache_namespace or ""),
        self.context.utf8_buf(execution_options.remote_instance_name or ""),
        self.context.utf8_buf(execution_options.remote_ca_certs_path or ""),
        self.context.utf8_buf(execution_options.remote_oauth_bearer_token_path or ""),
        execution_options.remote_store_thread_count,
        execution_options.remote_store_chunk_bytes,
        execution_options.remote_store_chunk_upload_timeout_seconds,
        execution_options.remote_store_rpc_retries,
        execution_options.process_execution_parallelism,
        execution_options.process_execution_cleanup_local_dirs,
    )
    return self.gc(scheduler, self.lib.scheduler_destroy)
[ "def", "new_scheduler", "(", "self", ",", "tasks", ",", "root_subject_types", ",", "build_root", ",", "work_dir", ",", "local_store_dir", ",", "ignore_patterns", ",", "execution_options", ",", "construct_directory_digest", ",", "construct_snapshot", ",", "construct_file_content", ",", "construct_files_content", ",", "construct_process_result", ",", "type_address", ",", "type_path_globs", ",", "type_directory_digest", ",", "type_snapshot", ",", "type_merge_snapshots_request", ",", "type_files_content", ",", "type_dir", ",", "type_file", ",", "type_link", ",", "type_process_request", ",", "type_process_result", ",", "type_generator", ",", "type_url_to_fetch", ")", ":", "def", "func", "(", "fn", ")", ":", "return", "Function", "(", "self", ".", "context", ".", "to_key", "(", "fn", ")", ")", "def", "ti", "(", "type_obj", ")", ":", "return", "TypeId", "(", "self", ".", "context", ".", "to_id", "(", "type_obj", ")", ")", "scheduler", "=", "self", ".", "lib", ".", "scheduler_create", "(", "tasks", ",", "# Constructors/functions.", "func", "(", "construct_directory_digest", ")", ",", "func", "(", "construct_snapshot", ")", ",", "func", "(", "construct_file_content", ")", ",", "func", "(", "construct_files_content", ")", ",", "func", "(", "construct_process_result", ")", ",", "# Types.", "ti", "(", "type_address", ")", ",", "ti", "(", "type_path_globs", ")", ",", "ti", "(", "type_directory_digest", ")", ",", "ti", "(", "type_snapshot", ")", ",", "ti", "(", "type_merge_snapshots_request", ")", ",", "ti", "(", "type_files_content", ")", ",", "ti", "(", "type_dir", ")", ",", "ti", "(", "type_file", ")", ",", "ti", "(", "type_link", ")", ",", "ti", "(", "type_process_request", ")", ",", "ti", "(", "type_process_result", ")", ",", "ti", "(", "type_generator", ")", ",", "ti", "(", "type_url_to_fetch", ")", ",", "ti", "(", "text_type", ")", ",", "ti", "(", "binary_type", ")", ",", "# Project tree.", "self", ".", "context", ".", "utf8_buf", "(", "build_root", ")", ",", "self", ".", "context", ".", "utf8_buf", "(", "work_dir", ")", ",", "self", ".", "context", ".", "utf8_buf", "(", "local_store_dir", ")", ",", "self", ".", "context", ".", "utf8_buf_buf", "(", "ignore_patterns", ")", ",", "self", ".", "to_ids_buf", "(", "root_subject_types", ")", ",", "# Remote execution config.", "self", ".", "context", ".", "utf8_buf_buf", "(", "execution_options", ".", "remote_store_server", ")", ",", "# We can't currently pass Options to the rust side, so we pass empty strings for None.", "self", ".", "context", ".", "utf8_buf", "(", "execution_options", ".", "remote_execution_server", "or", "\"\"", ")", ",", "self", ".", "context", ".", "utf8_buf", "(", "execution_options", ".", "remote_execution_process_cache_namespace", "or", "\"\"", ")", ",", "self", ".", "context", ".", "utf8_buf", "(", "execution_options", ".", "remote_instance_name", "or", "\"\"", ")", ",", "self", ".", "context", ".", "utf8_buf", "(", "execution_options", ".", "remote_ca_certs_path", "or", "\"\"", ")", ",", "self", ".", "context", ".", "utf8_buf", "(", "execution_options", ".", "remote_oauth_bearer_token_path", "or", "\"\"", ")", ",", "execution_options", ".", "remote_store_thread_count", ",", "execution_options", ".", "remote_store_chunk_bytes", ",", "execution_options", ".", "remote_store_chunk_upload_timeout_seconds", ",", "execution_options", ".", "remote_store_rpc_retries", ",", "execution_options", ".", "process_execution_parallelism", ",", "execution_options", ".", "process_execution_cleanup_local_dirs", ",", ")", "return", "self", ".", 
"gc", "(", "scheduler", ",", "self", ".", "lib", ".", "scheduler_destroy", ")" ]
39.607595
13.177215
def plot_network(symbol, title="plot", save_format='pdf', shape=None, dtype=None,
                 node_attrs={}, hide_weights=True):
    """Creates a visualization (Graphviz digraph object) of the given computation graph.
    Graphviz must be installed for this function to work.

    Parameters
    ----------
    title: str, optional
        Title of the generated visualization.
    symbol: Symbol
        A symbol from the computation graph. The generated digraph will visualize the part
        of the computation graph required to compute `symbol`.
    shape: dict, optional
        Specifies the shape of the input tensors. If specified, the visualization will include
        the shape of the tensors between the nodes. `shape` is a dictionary mapping
        input symbol names (str) to the corresponding tensor shape (tuple).
    dtype: dict, optional
        Specifies the type of the input tensors. If specified, the visualization will include
        the type of the tensors between the nodes. `dtype` is a dictionary mapping
        input symbol names (str) to the corresponding tensor type (e.g. `numpy.float32`).
    node_attrs: dict, optional
        Specifies the attributes for nodes in the generated visualization. `node_attrs` is
        a dictionary of Graphviz attribute names and values. For example::

            node_attrs={"shape":"oval","fixedsize":"false"}

        will use oval shape for nodes and allow variable sized nodes in the visualization.
    hide_weights: bool, optional
        If True (default), then inputs with names of form *_weight* (corresponding to weight
        tensors) or *_bias* (corresponding to bias vectors) will be hidden for a cleaner
        visualization.

    Returns
    -------
    dot: Digraph
        A Graphviz digraph object visualizing the computation graph to compute `symbol`.

    Example
    -------
    >>> net = mx.sym.Variable('data')
    >>> net = mx.sym.FullyConnected(data=net, name='fc1', num_hidden=128)
    >>> net = mx.sym.Activation(data=net, name='relu1', act_type="relu")
    >>> net = mx.sym.FullyConnected(data=net, name='fc2', num_hidden=10)
    >>> net = mx.sym.SoftmaxOutput(data=net, name='out')
    >>> digraph = mx.viz.plot_network(net, shape={'data':(100,200)},
    ... node_attrs={"fixedsize":"false"})
    >>> digraph.view()

    Notes
    -----
    If ``mxnet`` is imported, the visualization module can be used in its short-form.
    For example, if we ``import mxnet`` as follows::

        import mxnet

    this method in visualization module can be used in its short-form as::

        mxnet.viz.plot_network(...)

    """
    # todo add shape support
    try:
        from graphviz import Digraph
    except:
        raise ImportError("Draw network requires graphviz library")
    if not isinstance(symbol, Symbol):
        raise TypeError("symbol must be a Symbol")
    internals = symbol.get_internals()
    draw_shape = shape is not None
    if draw_shape:
        _, out_shapes, _ = internals.infer_shape(**shape)
        if out_shapes is None:
            raise ValueError("Input shape is incomplete")
        shape_dict = dict(zip(internals.list_outputs(), out_shapes))
    draw_type = dtype is not None
    if draw_type:
        _, out_types, _ = internals.infer_type(**dtype)
        if out_types is None:
            raise ValueError("Input type is incomplete")
        type_dict = dict(zip(internals.list_outputs(), out_types))
    conf = json.loads(symbol.tojson())
    nodes = conf["nodes"]
    # check if multiple nodes have the same name
    if len(nodes) != len(set([node["name"] for node in nodes])):
        seen_nodes = set()
        # find all repeated names
        repeated = set(node['name'] for node in nodes if node['name'] in seen_nodes
                       or seen_nodes.add(node['name']))
        warning_message = "There are multiple variables with the same name in your graph, " \
                          "this may result in cyclic graph. Repeated names: " + ','.join(repeated)
        warnings.warn(warning_message, RuntimeWarning)
    # default attributes of node
    node_attr = {"shape": "box", "fixedsize": "true",
                 "width": "1.3", "height": "0.8034", "style": "filled"}
    # merge the dict provided by user and the default one
    node_attr.update(node_attrs)
    dot = Digraph(name=title, format=save_format)
    # color map
    cm = ("#8dd3c7", "#fb8072", "#ffffb3", "#bebada", "#80b1d3",
          "#fdb462", "#b3de69", "#fccde5")

    def looks_like_weight(name):
        """Internal helper to figure out if node should be hidden with `hide_weights`.
        """
        weight_like = ('_weight', '_bias', '_beta', '_gamma',
                       '_moving_var', '_moving_mean', '_running_var', '_running_mean')
        return name.endswith(weight_like)

    # make nodes
    hidden_nodes = set()
    for node in nodes:
        op = node["op"]
        name = node["name"]
        # input data
        attr = copy.deepcopy(node_attr)
        label = name

        if op == "null":
            if looks_like_weight(node["name"]):
                if hide_weights:
                    hidden_nodes.add(node["name"])
                # else we don't render a node, but
                # don't add it to the hidden_nodes set
                # so it gets rendered as an empty oval
                continue
            attr["shape"] = "oval"  # inputs get their own shape
            label = node["name"]
            attr["fillcolor"] = cm[0]
        elif op == "Convolution":
            label = "Convolution\n{kernel}/{stride}, {filter}".format(
                kernel="x".join(_str2tuple(node["attrs"]["kernel"])),
                stride="x".join(_str2tuple(node["attrs"]["stride"]))
                if "stride" in node["attrs"] else "1",
                filter=node["attrs"]["num_filter"]
            )
            attr["fillcolor"] = cm[1]
        elif op == "FullyConnected":
            label = "FullyConnected\n{hidden}".format(hidden=node["attrs"]["num_hidden"])
            attr["fillcolor"] = cm[1]
        elif op == "BatchNorm":
            attr["fillcolor"] = cm[3]
        elif op == 'Activation':
            act_type = node["attrs"]["act_type"]
            label = 'Activation\n{activation}'.format(activation=act_type)
            attr["fillcolor"] = cm[2]
        elif op == 'LeakyReLU':
            attrs = node.get("attrs")
            act_type = attrs.get("act_type", "Leaky") if attrs else "Leaky"
            label = 'LeakyReLU\n{activation}'.format(activation=act_type)
            attr["fillcolor"] = cm[2]
        elif op == "Pooling":
            label = "Pooling\n{pooltype}, {kernel}/{stride}".format(
                pooltype=node["attrs"]["pool_type"],
                kernel="x".join(_str2tuple(node["attrs"]["kernel"]))
                if "kernel" in node["attrs"] else "[]",
                stride="x".join(_str2tuple(node["attrs"]["stride"]))
                if "stride" in node["attrs"] else "1")
            attr["fillcolor"] = cm[4]
        elif op in ("Concat", "Flatten", "Reshape"):
            attr["fillcolor"] = cm[5]
        elif op == "Softmax":
            attr["fillcolor"] = cm[6]
        else:
            attr["fillcolor"] = cm[7]
            if op == "Custom":
                label = node["attrs"]["op_type"]

        dot.node(name=name, label=label, **attr)

    # add edges
    for node in nodes:          # pylint: disable=too-many-nested-blocks
        op = node["op"]
        name = node["name"]
        if op == "null":
            continue
        else:
            inputs = node["inputs"]
            for item in inputs:
                input_node = nodes[item[0]]
                input_name = input_node["name"]
                if input_name not in hidden_nodes:
                    attr = {"dir": "back", 'arrowtail': 'open', 'label': ''}
                    # add shapes
                    if draw_shape:
                        if input_node["op"] != "null":
                            key = input_name + "_output"
                            if "attrs" in input_node:
                                params = input_node["attrs"]
                                if "num_outputs" in params:
                                    key += str(int(params["num_outputs"]) - 1)
                            shape = shape_dict[key][1:]
                            label = "x".join([str(x) for x in shape])
                            attr["label"] = label
                        else:
                            key = input_name
                            shape = shape_dict[key][1:]
                            label = "x".join([str(x) for x in shape])
                            attr["label"] = label
                    if draw_type:
                        if input_node["op"] != "null":
                            key = input_name + "_output"
                            if "attrs" in input_node:
                                params = input_node["attrs"]
                                if "num_outputs" in params:
                                    key += str(int(params["num_outputs"]) - 1)
                            dtype = type_dict[key]
                            attr["label"] += '(' + dtype.__name__ + ')'
                        else:
                            key = input_name
                            dtype = type_dict[key]
                            attr["label"] += '(' + dtype.__name__ + ')'
                    dot.edge(tail_name=name, head_name=input_name, **attr)

    return dot
[ "def", "plot_network", "(", "symbol", ",", "title", "=", "\"plot\"", ",", "save_format", "=", "'pdf'", ",", "shape", "=", "None", ",", "dtype", "=", "None", ",", "node_attrs", "=", "{", "}", ",", "hide_weights", "=", "True", ")", ":", "# todo add shape support", "try", ":", "from", "graphviz", "import", "Digraph", "except", ":", "raise", "ImportError", "(", "\"Draw network requires graphviz library\"", ")", "if", "not", "isinstance", "(", "symbol", ",", "Symbol", ")", ":", "raise", "TypeError", "(", "\"symbol must be a Symbol\"", ")", "internals", "=", "symbol", ".", "get_internals", "(", ")", "draw_shape", "=", "shape", "is", "not", "None", "if", "draw_shape", ":", "_", ",", "out_shapes", ",", "_", "=", "internals", ".", "infer_shape", "(", "*", "*", "shape", ")", "if", "out_shapes", "is", "None", ":", "raise", "ValueError", "(", "\"Input shape is incomplete\"", ")", "shape_dict", "=", "dict", "(", "zip", "(", "internals", ".", "list_outputs", "(", ")", ",", "out_shapes", ")", ")", "draw_type", "=", "dtype", "is", "not", "None", "if", "draw_type", ":", "_", ",", "out_types", ",", "_", "=", "internals", ".", "infer_type", "(", "*", "*", "dtype", ")", "if", "out_types", "is", "None", ":", "raise", "ValueError", "(", "\"Input type is incomplete\"", ")", "type_dict", "=", "dict", "(", "zip", "(", "internals", ".", "list_outputs", "(", ")", ",", "out_types", ")", ")", "conf", "=", "json", ".", "loads", "(", "symbol", ".", "tojson", "(", ")", ")", "nodes", "=", "conf", "[", "\"nodes\"", "]", "# check if multiple nodes have the same name", "if", "len", "(", "nodes", ")", "!=", "len", "(", "set", "(", "[", "node", "[", "\"name\"", "]", "for", "node", "in", "nodes", "]", ")", ")", ":", "seen_nodes", "=", "set", "(", ")", "# find all repeated names", "repeated", "=", "set", "(", "node", "[", "'name'", "]", "for", "node", "in", "nodes", "if", "node", "[", "'name'", "]", "in", "seen_nodes", "or", "seen_nodes", ".", "add", "(", "node", "[", "'name'", "]", ")", ")", "warning_message", "=", "\"There are multiple variables with the same name in your graph, \"", "\"this may result in cyclic graph. 
Repeated names: \"", "+", "','", ".", "join", "(", "repeated", ")", "warnings", ".", "warn", "(", "warning_message", ",", "RuntimeWarning", ")", "# default attributes of node", "node_attr", "=", "{", "\"shape\"", ":", "\"box\"", ",", "\"fixedsize\"", ":", "\"true\"", ",", "\"width\"", ":", "\"1.3\"", ",", "\"height\"", ":", "\"0.8034\"", ",", "\"style\"", ":", "\"filled\"", "}", "# merge the dict provided by user and the default one", "node_attr", ".", "update", "(", "node_attrs", ")", "dot", "=", "Digraph", "(", "name", "=", "title", ",", "format", "=", "save_format", ")", "# color map", "cm", "=", "(", "\"#8dd3c7\"", ",", "\"#fb8072\"", ",", "\"#ffffb3\"", ",", "\"#bebada\"", ",", "\"#80b1d3\"", ",", "\"#fdb462\"", ",", "\"#b3de69\"", ",", "\"#fccde5\"", ")", "def", "looks_like_weight", "(", "name", ")", ":", "\"\"\"Internal helper to figure out if node should be hidden with `hide_weights`.\n \"\"\"", "weight_like", "=", "(", "'_weight'", ",", "'_bias'", ",", "'_beta'", ",", "'_gamma'", ",", "'_moving_var'", ",", "'_moving_mean'", ",", "'_running_var'", ",", "'_running_mean'", ")", "return", "name", ".", "endswith", "(", "weight_like", ")", "# make nodes", "hidden_nodes", "=", "set", "(", ")", "for", "node", "in", "nodes", ":", "op", "=", "node", "[", "\"op\"", "]", "name", "=", "node", "[", "\"name\"", "]", "# input data", "attr", "=", "copy", ".", "deepcopy", "(", "node_attr", ")", "label", "=", "name", "if", "op", "==", "\"null\"", ":", "if", "looks_like_weight", "(", "node", "[", "\"name\"", "]", ")", ":", "if", "hide_weights", ":", "hidden_nodes", ".", "add", "(", "node", "[", "\"name\"", "]", ")", "# else we don't render a node, but", "# don't add it to the hidden_nodes set", "# so it gets rendered as an empty oval", "continue", "attr", "[", "\"shape\"", "]", "=", "\"oval\"", "# inputs get their own shape", "label", "=", "node", "[", "\"name\"", "]", "attr", "[", "\"fillcolor\"", "]", "=", "cm", "[", "0", "]", "elif", "op", "==", "\"Convolution\"", ":", "label", "=", "\"Convolution\\n{kernel}/{stride}, {filter}\"", ".", "format", "(", "kernel", "=", "\"x\"", ".", "join", "(", "_str2tuple", "(", "node", "[", "\"attrs\"", "]", "[", "\"kernel\"", "]", ")", ")", ",", "stride", "=", "\"x\"", ".", "join", "(", "_str2tuple", "(", "node", "[", "\"attrs\"", "]", "[", "\"stride\"", "]", ")", ")", "if", "\"stride\"", "in", "node", "[", "\"attrs\"", "]", "else", "\"1\"", ",", "filter", "=", "node", "[", "\"attrs\"", "]", "[", "\"num_filter\"", "]", ")", "attr", "[", "\"fillcolor\"", "]", "=", "cm", "[", "1", "]", "elif", "op", "==", "\"FullyConnected\"", ":", "label", "=", "\"FullyConnected\\n{hidden}\"", ".", "format", "(", "hidden", "=", "node", "[", "\"attrs\"", "]", "[", "\"num_hidden\"", "]", ")", "attr", "[", "\"fillcolor\"", "]", "=", "cm", "[", "1", "]", "elif", "op", "==", "\"BatchNorm\"", ":", "attr", "[", "\"fillcolor\"", "]", "=", "cm", "[", "3", "]", "elif", "op", "==", "'Activation'", ":", "act_type", "=", "node", "[", "\"attrs\"", "]", "[", "\"act_type\"", "]", "label", "=", "'Activation\\n{activation}'", ".", "format", "(", "activation", "=", "act_type", ")", "attr", "[", "\"fillcolor\"", "]", "=", "cm", "[", "2", "]", "elif", "op", "==", "'LeakyReLU'", ":", "attrs", "=", "node", ".", "get", "(", "\"attrs\"", ")", "act_type", "=", "attrs", ".", "get", "(", "\"act_type\"", ",", "\"Leaky\"", ")", "if", "attrs", "else", "\"Leaky\"", "label", "=", "'LeakyReLU\\n{activation}'", ".", "format", "(", "activation", "=", "act_type", ")", "attr", "[", "\"fillcolor\"", "]", "=", "cm", "[", 
"2", "]", "elif", "op", "==", "\"Pooling\"", ":", "label", "=", "\"Pooling\\n{pooltype}, {kernel}/{stride}\"", ".", "format", "(", "pooltype", "=", "node", "[", "\"attrs\"", "]", "[", "\"pool_type\"", "]", ",", "kernel", "=", "\"x\"", ".", "join", "(", "_str2tuple", "(", "node", "[", "\"attrs\"", "]", "[", "\"kernel\"", "]", ")", ")", "if", "\"kernel\"", "in", "node", "[", "\"attrs\"", "]", "else", "\"[]\"", ",", "stride", "=", "\"x\"", ".", "join", "(", "_str2tuple", "(", "node", "[", "\"attrs\"", "]", "[", "\"stride\"", "]", ")", ")", "if", "\"stride\"", "in", "node", "[", "\"attrs\"", "]", "else", "\"1\"", ")", "attr", "[", "\"fillcolor\"", "]", "=", "cm", "[", "4", "]", "elif", "op", "in", "(", "\"Concat\"", ",", "\"Flatten\"", ",", "\"Reshape\"", ")", ":", "attr", "[", "\"fillcolor\"", "]", "=", "cm", "[", "5", "]", "elif", "op", "==", "\"Softmax\"", ":", "attr", "[", "\"fillcolor\"", "]", "=", "cm", "[", "6", "]", "else", ":", "attr", "[", "\"fillcolor\"", "]", "=", "cm", "[", "7", "]", "if", "op", "==", "\"Custom\"", ":", "label", "=", "node", "[", "\"attrs\"", "]", "[", "\"op_type\"", "]", "dot", ".", "node", "(", "name", "=", "name", ",", "label", "=", "label", ",", "*", "*", "attr", ")", "# add edges", "for", "node", "in", "nodes", ":", "# pylint: disable=too-many-nested-blocks", "op", "=", "node", "[", "\"op\"", "]", "name", "=", "node", "[", "\"name\"", "]", "if", "op", "==", "\"null\"", ":", "continue", "else", ":", "inputs", "=", "node", "[", "\"inputs\"", "]", "for", "item", "in", "inputs", ":", "input_node", "=", "nodes", "[", "item", "[", "0", "]", "]", "input_name", "=", "input_node", "[", "\"name\"", "]", "if", "input_name", "not", "in", "hidden_nodes", ":", "attr", "=", "{", "\"dir\"", ":", "\"back\"", ",", "'arrowtail'", ":", "'open'", ",", "'label'", ":", "''", "}", "# add shapes", "if", "draw_shape", ":", "if", "input_node", "[", "\"op\"", "]", "!=", "\"null\"", ":", "key", "=", "input_name", "+", "\"_output\"", "if", "\"attrs\"", "in", "input_node", ":", "params", "=", "input_node", "[", "\"attrs\"", "]", "if", "\"num_outputs\"", "in", "params", ":", "key", "+=", "str", "(", "int", "(", "params", "[", "\"num_outputs\"", "]", ")", "-", "1", ")", "shape", "=", "shape_dict", "[", "key", "]", "[", "1", ":", "]", "label", "=", "\"x\"", ".", "join", "(", "[", "str", "(", "x", ")", "for", "x", "in", "shape", "]", ")", "attr", "[", "\"label\"", "]", "=", "label", "else", ":", "key", "=", "input_name", "shape", "=", "shape_dict", "[", "key", "]", "[", "1", ":", "]", "label", "=", "\"x\"", ".", "join", "(", "[", "str", "(", "x", ")", "for", "x", "in", "shape", "]", ")", "attr", "[", "\"label\"", "]", "=", "label", "if", "draw_type", ":", "if", "input_node", "[", "\"op\"", "]", "!=", "\"null\"", ":", "key", "=", "input_name", "+", "\"_output\"", "if", "\"attrs\"", "in", "input_node", ":", "params", "=", "input_node", "[", "\"attrs\"", "]", "if", "\"num_outputs\"", "in", "params", ":", "key", "+=", "str", "(", "int", "(", "params", "[", "\"num_outputs\"", "]", ")", "-", "1", ")", "dtype", "=", "type_dict", "[", "key", "]", "attr", "[", "\"label\"", "]", "+=", "'('", "+", "dtype", ".", "__name__", "+", "')'", "else", ":", "key", "=", "input_name", "dtype", "=", "type_dict", "[", "key", "]", "attr", "[", "\"label\"", "]", "+=", "'('", "+", "dtype", ".", "__name__", "+", "')'", "dot", ".", "edge", "(", "tail_name", "=", "name", ",", "head_name", "=", "input_name", ",", "*", "*", "attr", ")", "return", "dot" ]
44.638498
21.661972
def content(): """Helper method that returns just the content. This method was added so that the text could be reused in the dock_help module. .. versionadded:: 3.2.3 :returns: A message object without brand element. :rtype: safe.messaging.message.Message """ message = m.Message() paragraph = m.Paragraph( m.Image( 'file:///%s/img/screenshots/' 'petabencana-screenshot.png' % resources_path()), style_class='text-center' ) message.add(paragraph) link = m.Link('https://petabencana.id', 'PetaBencana.id') body = m.Paragraph(tr( 'This tool will fetch current flood data for Jakarta from '), link) tips = m.BulletedList() tips.add(tr( 'Check the output directory is correct. Note that the saved ' 'dataset will be called jakarta_flood.shp (and associated files).' )) tips.add(tr( 'If you wish you can specify a prefix to ' 'add in front of this default name. For example using a prefix ' 'of \'foo-\' will cause the downloaded files to be saved as e.g. ' '\'foo-rw-jakarta-flood.shp\'. Note that the only allowed prefix ' 'characters are A-Z, a-z, 0-9 and the characters \'-\' and \'_\'. ' 'You can leave this blank if you prefer.' )) tips.add(tr( 'If a dataset already exists in the output directory it will be ' 'overwritten if the "overwrite existing files" checkbox is ticked.' )) tips.add(tr( 'If the "include date/time in output filename" option is ticked, ' 'the filename will be prefixed with a time stamp e.g. ' '\'foo-22-Mar-2015-08-01-2015-rw-jakarta-flood.shp\' where the date ' 'timestamp is in the form DD-MMM-YYYY.' )) tips.add(tr( 'This tool requires a working internet connection and fetching ' 'data will consume your bandwidth.')) tips.add(m.Link( production_api['help_url'], text=tr( 'Downloaded data is copyright the PetaBencana contributors' ' (click for more info).') )) message.add(body) message.add(tips) return message
[ "def", "content", "(", ")", ":", "message", "=", "m", ".", "Message", "(", ")", "paragraph", "=", "m", ".", "Paragraph", "(", "m", ".", "Image", "(", "'file:///%s/img/screenshots/'", "'petabencana-screenshot.png'", "%", "resources_path", "(", ")", ")", ",", "style_class", "=", "'text-center'", ")", "message", ".", "add", "(", "paragraph", ")", "link", "=", "m", ".", "Link", "(", "'https://petabencana.id'", ",", "'PetaBencana.id'", ")", "body", "=", "m", ".", "Paragraph", "(", "tr", "(", "'This tool will fetch current flood data for Jakarta from '", ")", ",", "link", ")", "tips", "=", "m", ".", "BulletedList", "(", ")", "tips", ".", "add", "(", "tr", "(", "'Check the output directory is correct. Note that the saved '", "'dataset will be called jakarta_flood.shp (and associated files).'", ")", ")", "tips", ".", "add", "(", "tr", "(", "'If you wish you can specify a prefix to '", "'add in front of this default name. For example using a prefix '", "'of \\'foo-\\' will cause the downloaded files to be saved as e.g. '", "'\\'foo-rw-jakarta-flood.shp\\'. Note that the only allowed prefix '", "'characters are A-Z, a-z, 0-9 and the characters \\'-\\' and \\'_\\'. '", "'You can leave this blank if you prefer.'", ")", ")", "tips", ".", "add", "(", "tr", "(", "'If a dataset already exists in the output directory it will be '", "'overwritten if the \"overwrite existing files\" checkbox is ticked.'", ")", ")", "tips", ".", "add", "(", "tr", "(", "'If the \"include date/time in output filename\" option is ticked, '", "'the filename will be prefixed with a time stamp e.g. '", "'\\'foo-22-Mar-2015-08-01-2015-rw-jakarta-flood.shp\\' where the date '", "'timestamp is in the form DD-MMM-YYYY.'", ")", ")", "tips", ".", "add", "(", "tr", "(", "'This tool requires a working internet connection and fetching '", "'data will consume your bandwidth.'", ")", ")", "tips", ".", "add", "(", "m", ".", "Link", "(", "production_api", "[", "'help_url'", "]", ",", "text", "=", "tr", "(", "'Downloaded data is copyright the PetaBencana contributors'", "' (click for more info).'", ")", ")", ")", "message", ".", "add", "(", "body", ")", "message", ".", "add", "(", "tips", ")", "return", "message" ]
37.140351
22.54386
def question_encoder(question, question_self_attention_bias, hparams, name="question_encoder", save_weights_to=None, make_image_summary=True): """A stack of self attention layers.""" x = question with tf.variable_scope(name): for layer in range(hparams.num_encoder_layers or hparams.num_hidden_layers): with tf.variable_scope("layer_%d" % layer): with tf.variable_scope("self_attention"): y = vqa_layers.multihead_attention( common_layers.layer_preprocess(x, hparams), None, question_self_attention_bias, hparams.attention_key_channels or hparams.hidden_size, hparams.attention_value_channels or hparams.hidden_size, hparams.hidden_size, hparams.num_heads, hparams.attention_dropout, attention_type=hparams.question_self_attention_type, block_length=hparams.block_length, save_weights_to=save_weights_to, make_image_summary=make_image_summary, scale_dotproduct=hparams.scale_dotproduct, ) utils.collect_named_outputs( "norms", "query_self_attention_%d"%(layer), tf.norm(y, axis=-1)) x = common_layers.layer_postprocess(x, y, hparams) utils.collect_named_outputs( "norms", "query_self_attention_postprocess_%d"%(layer), tf.norm(x, axis=-1)) with tf.variable_scope("ffn"): y = common_layers.dense_relu_dense( common_layers.layer_preprocess(x, hparams), hparams.filter_size, hparams.hidden_size, dropout=hparams.relu_dropout, ) utils.collect_named_outputs( "norms", "query_ffn_%d"%(layer), tf.norm(y, axis=-1)) x = common_layers.layer_postprocess(x, y, hparams) utils.collect_named_outputs( "norms", "query_ffn_postprocess_%d"%(layer), tf.norm(x, axis=-1)) # if normalization is done in layer_preprocess, then it should also be done # on the output, since the output can grow very large, being the sum of # a whole stack of unnormalized layer outputs. return common_layers.layer_preprocess(x, hparams)
[ "def", "question_encoder", "(", "question", ",", "question_self_attention_bias", ",", "hparams", ",", "name", "=", "\"question_encoder\"", ",", "save_weights_to", "=", "None", ",", "make_image_summary", "=", "True", ")", ":", "x", "=", "question", "with", "tf", ".", "variable_scope", "(", "name", ")", ":", "for", "layer", "in", "range", "(", "hparams", ".", "num_encoder_layers", "or", "hparams", ".", "num_hidden_layers", ")", ":", "with", "tf", ".", "variable_scope", "(", "\"layer_%d\"", "%", "layer", ")", ":", "with", "tf", ".", "variable_scope", "(", "\"self_attention\"", ")", ":", "y", "=", "vqa_layers", ".", "multihead_attention", "(", "common_layers", ".", "layer_preprocess", "(", "x", ",", "hparams", ")", ",", "None", ",", "question_self_attention_bias", ",", "hparams", ".", "attention_key_channels", "or", "hparams", ".", "hidden_size", ",", "hparams", ".", "attention_value_channels", "or", "hparams", ".", "hidden_size", ",", "hparams", ".", "hidden_size", ",", "hparams", ".", "num_heads", ",", "hparams", ".", "attention_dropout", ",", "attention_type", "=", "hparams", ".", "question_self_attention_type", ",", "block_length", "=", "hparams", ".", "block_length", ",", "save_weights_to", "=", "save_weights_to", ",", "make_image_summary", "=", "make_image_summary", ",", "scale_dotproduct", "=", "hparams", ".", "scale_dotproduct", ",", ")", "utils", ".", "collect_named_outputs", "(", "\"norms\"", ",", "\"query_self_attention_%d\"", "%", "(", "layer", ")", ",", "tf", ".", "norm", "(", "y", ",", "axis", "=", "-", "1", ")", ")", "x", "=", "common_layers", ".", "layer_postprocess", "(", "x", ",", "y", ",", "hparams", ")", "utils", ".", "collect_named_outputs", "(", "\"norms\"", ",", "\"query_self_attention_postprocess_%d\"", "%", "(", "layer", ")", ",", "tf", ".", "norm", "(", "x", ",", "axis", "=", "-", "1", ")", ")", "with", "tf", ".", "variable_scope", "(", "\"ffn\"", ")", ":", "y", "=", "common_layers", ".", "dense_relu_dense", "(", "common_layers", ".", "layer_preprocess", "(", "x", ",", "hparams", ")", ",", "hparams", ".", "filter_size", ",", "hparams", ".", "hidden_size", ",", "dropout", "=", "hparams", ".", "relu_dropout", ",", ")", "utils", ".", "collect_named_outputs", "(", "\"norms\"", ",", "\"query_ffn_%d\"", "%", "(", "layer", ")", ",", "tf", ".", "norm", "(", "y", ",", "axis", "=", "-", "1", ")", ")", "x", "=", "common_layers", ".", "layer_postprocess", "(", "x", ",", "y", ",", "hparams", ")", "utils", ".", "collect_named_outputs", "(", "\"norms\"", ",", "\"query_ffn_postprocess_%d\"", "%", "(", "layer", ")", ",", "tf", ".", "norm", "(", "x", ",", "axis", "=", "-", "1", ")", ")", "# if normalization is done in layer_preprocess, then it should also be done", "# on the output, since the output can grow very large, being the sum of", "# a whole stack of unnormalized layer outputs.", "return", "common_layers", ".", "layer_preprocess", "(", "x", ",", "hparams", ")" ]
45.901961
13.137255
def GetFirstWrittenEventSource(self): """Retrieves the first event source that was written after open. Using GetFirstWrittenEventSource and GetNextWrittenEventSource newly added event sources can be retrieved in order of addition. Returns: EventSource: event source or None if there are no newly written ones. Raises: IOError: when the storage writer is closed. OSError: when the storage writer is closed. """ if not self._storage_file: raise IOError('Unable to read from closed storage writer.') event_source = self._storage_file.GetEventSourceByIndex( self._first_written_event_source_index) if event_source: self._written_event_source_index = ( self._first_written_event_source_index + 1) return event_source
[ "def", "GetFirstWrittenEventSource", "(", "self", ")", ":", "if", "not", "self", ".", "_storage_file", ":", "raise", "IOError", "(", "'Unable to read from closed storage writer.'", ")", "event_source", "=", "self", ".", "_storage_file", ".", "GetEventSourceByIndex", "(", "self", ".", "_first_written_event_source_index", ")", "if", "event_source", ":", "self", ".", "_written_event_source_index", "=", "(", "self", ".", "_first_written_event_source_index", "+", "1", ")", "return", "event_source" ]
34
20.913043
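A minimal retrieval sketch, assuming an open storage writer exposing the method above together with its companion GetNextWrittenEventSource (referenced in the docstring); `writer` and `process` are illustrative names, not part of the source:

# Hypothetical drain loop over newly written event sources, in order of addition.
event_source = writer.GetFirstWrittenEventSource()
while event_source:
    process(event_source)  # placeholder for application-specific handling
    event_source = writer.GetNextWrittenEventSource()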
def get_default_margins(self): """get default margins""" trans = self.fig.transFigure.inverted().transform # Static margins l, t, r, b = self.axesmargins (l, b), (r, t) = trans(((l, b), (r, t))) # Extent dl, dt, dr, db = 0, 0, 0, 0 for i, ax in enumerate(self.fig.get_axes()): (x0, y0),(x1, y1) = ax.get_position().get_points() try: (ox0, oy0), (ox1, oy1) = ax.get_tightbbox(self.canvas.get_renderer()).get_points() (ox0, oy0), (ox1, oy1) = trans(((ox0 ,oy0),(ox1 ,oy1))) dl = min(0.2, max(dl, (x0 - ox0))) dt = min(0.2, max(dt, (oy1 - y1))) dr = min(0.2, max(dr, (ox1 - x1))) db = min(0.2, max(db, (y0 - oy0))) except: pass return (l + dl, t + dt, r + dr, b + db)
[ "def", "get_default_margins", "(", "self", ")", ":", "trans", "=", "self", ".", "fig", ".", "transFigure", ".", "inverted", "(", ")", ".", "transform", "# Static margins", "l", ",", "t", ",", "r", ",", "b", "=", "self", ".", "axesmargins", "(", "l", ",", "b", ")", ",", "(", "r", ",", "t", ")", "=", "trans", "(", "(", "(", "l", ",", "b", ")", ",", "(", "r", ",", "t", ")", ")", ")", "# Extent", "dl", ",", "dt", ",", "dr", ",", "db", "=", "0", ",", "0", ",", "0", ",", "0", "for", "i", ",", "ax", "in", "enumerate", "(", "self", ".", "fig", ".", "get_axes", "(", ")", ")", ":", "(", "x0", ",", "y0", ")", ",", "(", "x1", ",", "y1", ")", "=", "ax", ".", "get_position", "(", ")", ".", "get_points", "(", ")", "try", ":", "(", "ox0", ",", "oy0", ")", ",", "(", "ox1", ",", "oy1", ")", "=", "ax", ".", "get_tightbbox", "(", "self", ".", "canvas", ".", "get_renderer", "(", ")", ")", ".", "get_points", "(", ")", "(", "ox0", ",", "oy0", ")", ",", "(", "ox1", ",", "oy1", ")", "=", "trans", "(", "(", "(", "ox0", ",", "oy0", ")", ",", "(", "ox1", ",", "oy1", ")", ")", ")", "dl", "=", "min", "(", "0.2", ",", "max", "(", "dl", ",", "(", "x0", "-", "ox0", ")", ")", ")", "dt", "=", "min", "(", "0.2", ",", "max", "(", "dt", ",", "(", "oy1", "-", "y1", ")", ")", ")", "dr", "=", "min", "(", "0.2", ",", "max", "(", "dr", ",", "(", "ox1", "-", "x1", ")", ")", ")", "db", "=", "min", "(", "0.2", ",", "max", "(", "db", ",", "(", "y0", "-", "oy0", ")", ")", ")", "except", ":", "pass", "return", "(", "l", "+", "dl", ",", "t", "+", "dt", ",", "r", "+", "dr", ",", "b", "+", "db", ")" ]
37.608696
19.043478
def _brute_force_install_pip(self): """A brute force install of pip itself.""" if os.path.exists(self.pip_installer_fname): logger.debug("Using pip installer from %r", self.pip_installer_fname) else: logger.debug( "Installer for pip not found in %r, downloading it", self.pip_installer_fname) self._download_pip_installer() logger.debug("Installing PIP manually in the virtualenv") python_exe = os.path.join(self.env_bin_path, "python") helpers.logged_exec([python_exe, self.pip_installer_fname, '-I']) self.pip_installed = True
[ "def", "_brute_force_install_pip", "(", "self", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "self", ".", "pip_installer_fname", ")", ":", "logger", ".", "debug", "(", "\"Using pip installer from %r\"", ",", "self", ".", "pip_installer_fname", ")", "else", ":", "logger", ".", "debug", "(", "\"Installer for pip not found in %r, downloading it\"", ",", "self", ".", "pip_installer_fname", ")", "self", ".", "_download_pip_installer", "(", ")", "logger", ".", "debug", "(", "\"Installing PIP manually in the virtualenv\"", ")", "python_exe", "=", "os", ".", "path", ".", "join", "(", "self", ".", "env_bin_path", ",", "\"python\"", ")", "helpers", ".", "logged_exec", "(", "[", "python_exe", ",", "self", ".", "pip_installer_fname", ",", "'-I'", "]", ")", "self", ".", "pip_installed", "=", "True" ]
48.076923
21.769231
def power(self, n): """The matrix power of the channel. Args: n (int): compute the matrix power of the superoperator matrix. Returns: Kraus: the matrix power of the SuperOp converted to a Kraus channel. Raises: QiskitError: if the input and output dimensions of the QuantumChannel are not equal, or the power is not an integer. """ if n > 0: return super().power(n) return Kraus(SuperOp(self).power(n))
[ "def", "power", "(", "self", ",", "n", ")", ":", "if", "n", ">", "0", ":", "return", "super", "(", ")", ".", "power", "(", "n", ")", "return", "Kraus", "(", "SuperOp", "(", "self", ")", ".", "power", "(", "n", ")", ")" ]
31.625
23.875
def computeActivity(self, activeInputsBySource, permanenceThreshold=None): """ Calculate the number of active synapses per segment. @param activeInputsBySource (dict) The active cells in each source. Example: {"customInputName1": np.array([42, 69])} """ overlaps = None for source, connections in self.connectionsBySource.iteritems(): o = connections.computeActivity(activeInputsBySource[source], permanenceThreshold) if overlaps is None: overlaps = o else: overlaps += o return overlaps
[ "def", "computeActivity", "(", "self", ",", "activeInputsBySource", ",", "permanenceThreshold", "=", "None", ")", ":", "overlaps", "=", "None", "for", "source", ",", "connections", "in", "self", ".", "connectionsBySource", ".", "iteritems", "(", ")", ":", "o", "=", "connections", ".", "computeActivity", "(", "activeInputsBySource", "[", "source", "]", ",", "permanenceThreshold", ")", "if", "overlaps", "is", "None", ":", "overlaps", "=", "o", "else", ":", "overlaps", "+=", "o", "return", "overlaps" ]
30.631579
20
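A short usage sketch built on the docstring's example input; `layer` is a hypothetical object exposing computeActivity and configured with the two named sources:

import numpy as np

# Hypothetical: `layer` was constructed with connections for these sources.
active_inputs = {"customInputName1": np.array([42, 69]),
                 "customInputName2": np.array([3, 7])}
overlaps = layer.computeActivity(active_inputs)  # active synapse count per segment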
def set_text(self, text=None, **kwargs):
        """
        Add textual information passed as a dictionary.
        All pairs in the dictionary will be written, but keys should be latin-1;
        registered keywords can be used as keyword arguments.
        When called more than once, existing data is overwritten.
        """
        if text is None:
            text = {}
        text.update(popdict(kwargs, _registered_kw))
        if 'Creation Time' in text and\
                not isinstance(text['Creation Time'], (basestring, bytes)):
            text['Creation Time'] = datetime.datetime(
                *(check_time(text['Creation Time'])[:6])).isoformat()
        self.text = text
[ "def", "set_text", "(", "self", ",", "text", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "text", "is", "None", ":", "text", "=", "{", "}", "text", ".", "update", "(", "popdict", "(", "kwargs", ",", "_registered_kw", ")", ")", "if", "'Creation Time'", "in", "text", "and", "not", "isinstance", "(", "text", "[", "'Creation Time'", "]", ",", "(", "basestring", ",", "bytes", ")", ")", ":", "text", "[", "'Creation Time'", "]", "=", "datetime", ".", "datetime", "(", "*", "(", "check_time", "(", "text", "[", "'Creation Time'", "]", ")", "[", ":", "6", "]", ")", ")", ".", "isoformat", "(", ")", "self", ".", "text", "=", "text" ]
38.823529
17.764706
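A hedged usage sketch; `writer` is a hypothetical object exposing set_text as defined above, and Title and Author are assumed to be among the registered keywords. Keys containing spaces such as 'Creation Time' must go in the dict; a non-string 'Creation Time' is normalized to an ISO 8601 string by the code above:

import datetime

# Hypothetical call; the datetime is converted to its isoformat() string.
writer.set_text({'Creation Time': datetime.datetime(2024, 1, 1, 12, 0)},
                Title='M31', Author='example')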
def printActiveIndices(self, state, andValues=False): """Print the list of [column, cellIdx] indices for each of the active cells in state. """ (cols, cellIdxs) = state.nonzero() if len(cols) == 0: print "NONE" return prevCol = -1 for (col,cellIdx) in zip(cols, cellIdxs): if col != prevCol: if prevCol != -1: print "] ", print "Col %d: [" % (col), prevCol = col if andValues: print "%d: %s," % (cellIdx, state[col,cellIdx]), else: print "%d," % (cellIdx), print "]"
[ "def", "printActiveIndices", "(", "self", ",", "state", ",", "andValues", "=", "False", ")", ":", "(", "cols", ",", "cellIdxs", ")", "=", "state", ".", "nonzero", "(", ")", "if", "len", "(", "cols", ")", "==", "0", ":", "print", "\"NONE\"", "return", "prevCol", "=", "-", "1", "for", "(", "col", ",", "cellIdx", ")", "in", "zip", "(", "cols", ",", "cellIdxs", ")", ":", "if", "col", "!=", "prevCol", ":", "if", "prevCol", "!=", "-", "1", ":", "print", "\"] \"", ",", "print", "\"Col %d: [\"", "%", "(", "col", ")", ",", "prevCol", "=", "col", "if", "andValues", ":", "print", "\"%d: %s,\"", "%", "(", "cellIdx", ",", "state", "[", "col", ",", "cellIdx", "]", ")", ",", "else", ":", "print", "\"%d,\"", "%", "(", "cellIdx", ")", ",", "print", "\"]\"" ]
25.272727
18.545455
def _remove_rule(self, group, role, value=None): """Implementation detail""" if role not in group: return if value is None: group.pop(role, None) else: group[role].remove(value) self.invalidate()
[ "def", "_remove_rule", "(", "self", ",", "group", ",", "role", ",", "value", "=", "None", ")", ":", "if", "role", "not", "in", "group", ":", "return", "if", "value", "is", "None", ":", "group", ".", "pop", "(", "role", ",", "None", ")", "else", ":", "group", "[", "role", "]", ".", "remove", "(", "value", ")", "self", ".", "invalidate", "(", ")" ]
23.909091
17.090909
def cs_encode(s): """Encode URI component like CloudStack would do before signing. java.net.URLEncoder.encode(s).replace('+', '%20') """ if PY2 and isinstance(s, text_type): s = s.encode("utf-8") return quote(s, safe="*")
[ "def", "cs_encode", "(", "s", ")", ":", "if", "PY2", "and", "isinstance", "(", "s", ",", "text_type", ")", ":", "s", "=", "s", ".", "encode", "(", "\"utf-8\"", ")", "return", "quote", "(", "s", ",", "safe", "=", "\"*\"", ")" ]
30.375
12.25
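A worked example of the encoding above: quote() percent-encodes the space as %20 directly, and keeping '*' in `safe` mirrors java.net.URLEncoder, which leaves '*' unencoded:

>>> cs_encode('a b*c')
'a%20b*c'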
def json(value, schema = None, allow_empty = False, json_serializer = None, **kwargs): """Validate that ``value`` conforms to the supplied JSON Schema. .. note:: ``schema`` supports JSON Schema Drafts 3 - 7. Unless the JSON Schema indicates the meta-schema using a ``$schema`` property, the schema will be assumed to conform to Draft 7. .. hint:: If either ``value`` or ``schema`` is a string, this validator will assume it is a JSON object and try to convert it into a :class:`dict <python:dict>`. You can override the JSON serializer used by passing it to the ``json_serializer`` property. By default, will utilize the Python :class:`json <json>` encoder/decoder. :param value: The value to validate. :param schema: An optional JSON Schema against which ``value`` will be validated. :param allow_empty: If ``True``, returns :obj:`None <python:None>` if ``value`` is empty. If ``False``, raises a :class:`EmptyValueError <validator_collection.errors.EmptyValueError>` if ``value`` is empty. Defaults to ``False``. :type allow_empty: :class:`bool <python:bool>` :param json_serializer: The JSON encoder/decoder to use to deserialize a string passed in ``value``. If not supplied, will default to the Python :class:`json <python:json>` encoder/decoder. :type json_serializer: callable :returns: ``value`` / :obj:`None <python:None>` :rtype: :class:`dict <python:dict>` / :class:`list <python:list>` of :class:`dict <python:dict>` / :obj:`None <python:None>` :raises EmptyValueError: if ``value`` is empty and ``allow_empty`` is ``False`` :raises CannotCoerceError: if ``value`` cannot be coerced to a :class:`dict <python:dict>` :raises NotJSONError: if ``value`` cannot be deserialized from JSON :raises NotJSONSchemaError: if ``schema`` is not a valid JSON Schema object :raises JSONValidationError: if ``value`` does not validate against the JSON Schema """ original_value = value original_schema = schema if not value and not allow_empty: raise errors.EmptyValueError('value (%s) was empty' % value) elif not value: return None if not json_serializer: json_serializer = json_ if isinstance(value, str): try: value = json_serializer.loads(value) except Exception: raise errors.CannotCoerceError( 'value (%s) cannot be deserialized from JSON' % original_value ) if isinstance(schema, str): try: schema = dict(schema, allow_empty = allow_empty, json_serializer = json_serializer, **kwargs) except Exception: raise errors.CannotCoerceError( 'schema (%s) cannot be coerced to a dict' % original_schema ) if not isinstance(value, (list, dict_)): raise errors.NotJSONError('value (%s) is not a JSON object' % original_value) if original_schema and not isinstance(schema, dict_): raise errors.NotJSONError('schema (%s) is not a JSON object' % original_schema) if not schema: return value try: jsonschema.validate(value, schema) except jsonschema.exceptions.ValidationError as error: raise errors.JSONValidationError(error.message) except jsonschema.exceptions.SchemaError as error: raise errors.NotJSONSchemaError(error.message) return value
[ "def", "json", "(", "value", ",", "schema", "=", "None", ",", "allow_empty", "=", "False", ",", "json_serializer", "=", "None", ",", "*", "*", "kwargs", ")", ":", "original_value", "=", "value", "original_schema", "=", "schema", "if", "not", "value", "and", "not", "allow_empty", ":", "raise", "errors", ".", "EmptyValueError", "(", "'value (%s) was empty'", "%", "value", ")", "elif", "not", "value", ":", "return", "None", "if", "not", "json_serializer", ":", "json_serializer", "=", "json_", "if", "isinstance", "(", "value", ",", "str", ")", ":", "try", ":", "value", "=", "json_serializer", ".", "loads", "(", "value", ")", "except", "Exception", ":", "raise", "errors", ".", "CannotCoerceError", "(", "'value (%s) cannot be deserialized from JSON'", "%", "original_value", ")", "if", "isinstance", "(", "schema", ",", "str", ")", ":", "try", ":", "schema", "=", "dict", "(", "schema", ",", "allow_empty", "=", "allow_empty", ",", "json_serializer", "=", "json_serializer", ",", "*", "*", "kwargs", ")", "except", "Exception", ":", "raise", "errors", ".", "CannotCoerceError", "(", "'schema (%s) cannot be coerced to a dict'", "%", "original_schema", ")", "if", "not", "isinstance", "(", "value", ",", "(", "list", ",", "dict_", ")", ")", ":", "raise", "errors", ".", "NotJSONError", "(", "'value (%s) is not a JSON object'", "%", "original_value", ")", "if", "original_schema", "and", "not", "isinstance", "(", "schema", ",", "dict_", ")", ":", "raise", "errors", ".", "NotJSONError", "(", "'schema (%s) is not a JSON object'", "%", "original_schema", ")", "if", "not", "schema", ":", "return", "value", "try", ":", "jsonschema", ".", "validate", "(", "value", ",", "schema", ")", "except", "jsonschema", ".", "exceptions", ".", "ValidationError", "as", "error", ":", "raise", "errors", ".", "JSONValidationError", "(", "error", ".", "message", ")", "except", "jsonschema", ".", "exceptions", ".", "SchemaError", "as", "error", ":", "raise", "errors", ".", "NotJSONSchemaError", "(", "error", ".", "message", ")", "return", "value" ]
36.821053
25
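A hedged usage sketch, assuming the module above is importable as `validators` and jsonschema is installed; a JSON string is deserialized and then checked against a Draft 7 schema:

schema = {"type": "object",
          "properties": {"name": {"type": "string"}},
          "required": ["name"]}
value = validators.json('{"name": "Ada"}', schema)  # returns {'name': 'Ada'}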
def get_results(self, metadata=False): """ Return results of the analysis. """ results_data = [] self.process_har() self.process_from_splash() for rt in sorted(self._results.get_results()): rdict = {'name': rt.name} if rt.version: rdict['version'] = rt.version if metadata: rdict['homepage'] = rt.homepage rdict['type'] = rt.type rdict['from_url'] = rt.from_url rdict['plugin'] = rt.plugin results_data.append(rdict) return results_data
[ "def", "get_results", "(", "self", ",", "metadata", "=", "False", ")", ":", "results_data", "=", "[", "]", "self", ".", "process_har", "(", ")", "self", ".", "process_from_splash", "(", ")", "for", "rt", "in", "sorted", "(", "self", ".", "_results", ".", "get_results", "(", ")", ")", ":", "rdict", "=", "{", "'name'", ":", "rt", ".", "name", "}", "if", "rt", ".", "version", ":", "rdict", "[", "'version'", "]", "=", "rt", ".", "version", "if", "metadata", ":", "rdict", "[", "'homepage'", "]", "=", "rt", ".", "homepage", "rdict", "[", "'type'", "]", "=", "rt", ".", "type", "rdict", "[", "'from_url'", "]", "=", "rt", ".", "from_url", "rdict", "[", "'plugin'", "]", "=", "rt", ".", "plugin", "results_data", ".", "append", "(", "rdict", ")", "return", "results_data" ]
28.428571
15.333333
def start(self, max_transitions=None, keep_active=False): """ Start the driver. This method creates a thread which runs the event loop. The method returns immediately. To wait until the driver finishes, use `stmpy.Driver.wait_until_finished`. `max_transitions`: execute only this number of transitions, then stop `keep_active`: When true, keep the driver running even when all state machines terminated """ self._active = True self._max_transitions = max_transitions self._keep_active = keep_active self.thread = Thread(target=self._start_loop) self.thread.start()
[ "def", "start", "(", "self", ",", "max_transitions", "=", "None", ",", "keep_active", "=", "False", ")", ":", "self", ".", "_active", "=", "True", "self", ".", "_max_transitions", "=", "max_transitions", "self", ".", "_keep_active", "=", "keep_active", "self", ".", "thread", "=", "Thread", "(", "target", "=", "self", ".", "_start_loop", ")", "self", ".", "thread", ".", "start", "(", ")" ]
38.941176
18.235294
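A minimal sketch of driving a machine, assuming the surrounding stmpy-style API where machines are added before starting; `machine` and add_machine are assumptions taken from that API, not shown in this record:

driver = Driver()
driver.add_machine(machine)        # `machine` assumed configured elsewhere
driver.start(max_transitions=100)  # the event loop runs in its own thread
driver.wait_until_finished()       # block until all machines terminate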
def error(self, text):
        """ Add a log message of type ERROR """
        self.logger.error("{}{}".format(self.message_prefix, text))
[ "def", "error", "(", "self", ",", "text", ")", ":", "self", ".", "logger", ".", "error", "(", "\"{}{}\"", ".", "format", "(", "self", ".", "message_prefix", ",", "text", ")", ")" ]
44
13
def rotate(self, shift): ''' Rotate 90 degrees clockwise `shift` times. If `shift` is negative, rotate counter-clockwise. ''' self.child_corners.values[:] = np.roll(self.child_corners .values, shift, axis=0) self.update_transform()
[ "def", "rotate", "(", "self", ",", "shift", ")", ":", "self", ".", "child_corners", ".", "values", "[", ":", "]", "=", "np", ".", "roll", "(", "self", ".", "child_corners", ".", "values", ",", "shift", ",", "axis", "=", "0", ")", "self", ".", "update_transform", "(", ")" ]
40.25
22.75
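The rotation amounts to cyclically shifting the corner rows; a worked numpy example showing one clockwise step:

>>> import numpy as np
>>> corners = np.array([[0, 0], [1, 0], [1, 1], [0, 1]])
>>> np.roll(corners, 1, axis=0)
array([[0, 1],
       [0, 0],
       [1, 0],
       [1, 1]])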
def batch_step(self, batch_idx=None): """Updates the learning rate for the batch index: ``batch_idx``. If ``batch_idx`` is None, ``CyclicLR`` will use an internal batch index to keep track of the index. """ if batch_idx is None: batch_idx = self.last_batch_idx + 1 self.last_batch_idx = batch_idx for param_group, lr in zip(self.optimizer.param_groups, self.get_lr()): param_group['lr'] = lr
[ "def", "batch_step", "(", "self", ",", "batch_idx", "=", "None", ")", ":", "if", "batch_idx", "is", "None", ":", "batch_idx", "=", "self", ".", "last_batch_idx", "+", "1", "self", ".", "last_batch_idx", "=", "batch_idx", "for", "param_group", ",", "lr", "in", "zip", "(", "self", ".", "optimizer", ".", "param_groups", ",", "self", ".", "get_lr", "(", ")", ")", ":", "param_group", "[", "'lr'", "]", "=", "lr" ]
46.2
10.1
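A hedged training-loop sketch, assuming `scheduler` is the CyclicLR-style object above wrapping `optimizer`; called with no argument, the internal batch counter advances by one per call. loader, model, criterion and optimizer are illustrative placeholders:

for epoch in range(10):
    for inputs, targets in loader:
        scheduler.batch_step()   # set each param group's lr for this batch
        optimizer.zero_grad()
        loss = criterion(model(inputs), targets)
        loss.backward()
        optimizer.step()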
def interpolate_curve(points, degree, **kwargs): """ Curve interpolation through the data points. Please refer to Algorithm A9.1 on The NURBS Book (2nd Edition), pp.369-370 for details. Keyword Arguments: * ``centripetal``: activates centripetal parametrization method. *Default: False* :param points: data points :type points: list, tuple :param degree: degree of the output parametric curve :type degree: int :return: interpolated B-Spline curve :rtype: BSpline.Curve """ # Keyword arguments use_centripetal = kwargs.get('centripetal', False) # Number of control points num_points = len(points) # Get uk uk = compute_params_curve(points, use_centripetal) # Compute knot vector kv = compute_knot_vector(degree, num_points, uk) # Do global interpolation matrix_a = _build_coeff_matrix(degree, kv, uk, points) ctrlpts = ginterp(matrix_a, points) # Generate B-spline curve curve = BSpline.Curve() curve.degree = degree curve.ctrlpts = ctrlpts curve.knotvector = kv return curve
[ "def", "interpolate_curve", "(", "points", ",", "degree", ",", "*", "*", "kwargs", ")", ":", "# Keyword arguments", "use_centripetal", "=", "kwargs", ".", "get", "(", "'centripetal'", ",", "False", ")", "# Number of control points", "num_points", "=", "len", "(", "points", ")", "# Get uk", "uk", "=", "compute_params_curve", "(", "points", ",", "use_centripetal", ")", "# Compute knot vector", "kv", "=", "compute_knot_vector", "(", "degree", ",", "num_points", ",", "uk", ")", "# Do global interpolation", "matrix_a", "=", "_build_coeff_matrix", "(", "degree", ",", "kv", ",", "uk", ",", "points", ")", "ctrlpts", "=", "ginterp", "(", "matrix_a", ",", "points", ")", "# Generate B-spline curve", "curve", "=", "BSpline", ".", "Curve", "(", ")", "curve", ".", "degree", "=", "degree", "curve", ".", "ctrlpts", "=", "ctrlpts", "curve", ".", "knotvector", "=", "kv", "return", "curve" ]
27.973684
21.052632
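A short usage sketch, under the assumption that this sits in a geomdl-style fitting module; five data points and a cubic interpolating curve with centripetal parametrization:

points = [(0, 0), (1, 2), (3, 3), (4, 1), (6, 0)]
curve = interpolate_curve(points, 3, centripetal=True)
# curve is a BSpline.Curve through all five points; curve.ctrlpts and
# curve.knotvector hold the values computed above.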
def skopeo_pull(self): """ Pull image from Docker to local Docker daemon using skopeo :return: pulled image """ return self.copy(self.name, self.tag, SkopeoTransport.DOCKER, SkopeoTransport.DOCKER_DAEMON)\ .using_transport(SkopeoTransport.DOCKER_DAEMON)
[ "def", "skopeo_pull", "(", "self", ")", ":", "return", "self", ".", "copy", "(", "self", ".", "name", ",", "self", ".", "tag", ",", "SkopeoTransport", ".", "DOCKER", ",", "SkopeoTransport", ".", "DOCKER_DAEMON", ")", ".", "using_transport", "(", "SkopeoTransport", ".", "DOCKER_DAEMON", ")" ]
39.5
16.625
def _reset(self, framer): """ Reset the state for the framer. It is safe to call this method multiple times with the same framer; the ID of the framer object will be saved and the state only reset if the IDs are different. After resetting the state, the framer's ``init_state()`` method will be called. """ # Do nothing if we're already properly initialized if id(framer) == self._framer_id: return # Reset the state self._other = {} # Initialize the state and save the framer ID framer.init_state(self) self._framer_id = id(framer)
[ "def", "_reset", "(", "self", ",", "framer", ")", ":", "# Do nothing if we're already properly initialized", "if", "id", "(", "framer", ")", "==", "self", ".", "_framer_id", ":", "return", "# Reset the state", "self", ".", "_other", "=", "{", "}", "# Initialize the state and save the framer ID", "framer", ".", "init_state", "(", "self", ")", "self", ".", "_framer_id", "=", "id", "(", "framer", ")" ]
33.842105
18.052632
def classify(self, token, previous=None, next=None, **kwargs): """ Returns the predicted tag for the given token, in context of the given previous and next (token, tag)-tuples. """ return self._classifier.classify(self._v(token, previous, next), **kwargs)
[ "def", "classify", "(", "self", ",", "token", ",", "previous", "=", "None", ",", "next", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_classifier", ".", "classify", "(", "self", ".", "_v", "(", "token", ",", "previous", ",", "next", ")", ",", "*", "*", "kwargs", ")" ]
57.4
19.6
def edit_files(patterns, expressions=None, functions=None, executables=None, start_dirs=None, max_depth=1, dry_run=True, output=sys.stdout, encoding=None, newline=None): """Process patterns with MassEdit. Arguments: patterns: file pattern to identify the files to be processed. expressions: single python expression to be applied line by line. functions: functions to process files contents. executables: os executables to execute on the argument files. Keyword arguments: max_depth: maximum recursion level when looking for file matches. start_dirs: workspace(ies) where to start the file search. dry_run: only display differences if True. Save modified file otherwise. output: handle where the output should be redirected. Return: list of files processed. """ if not is_list(patterns): raise TypeError("patterns should be a list") if expressions and not is_list(expressions): raise TypeError("expressions should be a list of exec expressions") if functions and not is_list(functions): raise TypeError("functions should be a list of functions") if executables and not is_list(executables): raise TypeError("executables should be a list of program names") editor = MassEdit(dry_run=dry_run, encoding=encoding, newline=newline) if expressions: editor.set_code_exprs(expressions) if functions: editor.set_functions(functions) if executables: editor.set_executables(executables) processed_paths = [] for path in get_paths(patterns, start_dirs=start_dirs, max_depth=max_depth): try: diffs = list(editor.edit_file(path)) if dry_run: # At this point, encoding is the input encoding. diff = "".join(diffs) if not diff: continue # The encoding of the target output may not match the input # encoding. If it's defined, we round trip the diff text # to bytes and back to silence any conversion errors. encoding = output.encoding if encoding: bytes_diff = diff.encode(encoding=encoding, errors='ignore') diff = bytes_diff.decode(encoding=output.encoding) output.write(diff) except UnicodeDecodeError as err: log.error("failed to process %s: %s", path, err) continue processed_paths.append(os.path.abspath(path)) return processed_paths
[ "def", "edit_files", "(", "patterns", ",", "expressions", "=", "None", ",", "functions", "=", "None", ",", "executables", "=", "None", ",", "start_dirs", "=", "None", ",", "max_depth", "=", "1", ",", "dry_run", "=", "True", ",", "output", "=", "sys", ".", "stdout", ",", "encoding", "=", "None", ",", "newline", "=", "None", ")", ":", "if", "not", "is_list", "(", "patterns", ")", ":", "raise", "TypeError", "(", "\"patterns should be a list\"", ")", "if", "expressions", "and", "not", "is_list", "(", "expressions", ")", ":", "raise", "TypeError", "(", "\"expressions should be a list of exec expressions\"", ")", "if", "functions", "and", "not", "is_list", "(", "functions", ")", ":", "raise", "TypeError", "(", "\"functions should be a list of functions\"", ")", "if", "executables", "and", "not", "is_list", "(", "executables", ")", ":", "raise", "TypeError", "(", "\"executables should be a list of program names\"", ")", "editor", "=", "MassEdit", "(", "dry_run", "=", "dry_run", ",", "encoding", "=", "encoding", ",", "newline", "=", "newline", ")", "if", "expressions", ":", "editor", ".", "set_code_exprs", "(", "expressions", ")", "if", "functions", ":", "editor", ".", "set_functions", "(", "functions", ")", "if", "executables", ":", "editor", ".", "set_executables", "(", "executables", ")", "processed_paths", "=", "[", "]", "for", "path", "in", "get_paths", "(", "patterns", ",", "start_dirs", "=", "start_dirs", ",", "max_depth", "=", "max_depth", ")", ":", "try", ":", "diffs", "=", "list", "(", "editor", ".", "edit_file", "(", "path", ")", ")", "if", "dry_run", ":", "# At this point, encoding is the input encoding.", "diff", "=", "\"\"", ".", "join", "(", "diffs", ")", "if", "not", "diff", ":", "continue", "# The encoding of the target output may not match the input", "# encoding. If it's defined, we round trip the diff text", "# to bytes and back to silence any conversion errors.", "encoding", "=", "output", ".", "encoding", "if", "encoding", ":", "bytes_diff", "=", "diff", ".", "encode", "(", "encoding", "=", "encoding", ",", "errors", "=", "'ignore'", ")", "diff", "=", "bytes_diff", ".", "decode", "(", "encoding", "=", "output", ".", "encoding", ")", "output", ".", "write", "(", "diff", ")", "except", "UnicodeDecodeError", "as", "err", ":", "log", ".", "error", "(", "\"failed to process %s: %s\"", ",", "path", ",", "err", ")", "continue", "processed_paths", ".", "append", "(", "os", ".", "path", ".", "abspath", "(", "path", ")", ")", "return", "processed_paths" ]
41.758065
19.741935
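A hedged dry-run sketch, assuming the massedit-style API above; each expression is evaluated once per line with the current line bound to the name `line`, and its result replaces that line:

import sys

processed = edit_files(['*.txt'],
                       expressions=["line.replace('colour', 'color')"],
                       start_dirs=['.'],
                       dry_run=True,       # print unified diffs, save nothing
                       output=sys.stdout)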
def dump(obj, fp, imports=None, binary=True, sequence_as_stream=False, skipkeys=False, ensure_ascii=True,
         check_circular=True, allow_nan=True, cls=None, indent=None, separators=None, encoding='utf-8',
         default=None, use_decimal=True, namedtuple_as_object=True, tuple_as_array=True, bigint_as_string=False,
         sort_keys=False, item_sort_key=None, for_json=None, ignore_nan=False, int_as_string_bitcount=None,
         iterable_as_array=False, **kw):
    """Serialize ``obj`` as an Ion-formatted stream to ``fp`` (a file-like object), using the following
    conversion table::
        +-------------------+-------------------+
        |  Python           |       Ion         |
        |-------------------+-------------------|
        | None              |    null.null      |
        |-------------------+-------------------|
        | IonPyNull(<type>) |    null.<type>    |
        |-------------------+-------------------|
        | True, False,      |                   |
        | IonPyInt(BOOL),   |       bool        |
        | IonPyBool,        |                   |
        |-------------------+-------------------|
        | int (Python 2, 3) |                   |
        | long (Python 2),  |        int        |
        | IonPyInt(INT)     |                   |
        |-------------------+-------------------|
        | float, IonPyFloat |       float       |
        |-------------------+-------------------|
        | Decimal,          |                   |
        | IonPyDecimal      |      decimal      |
        |-------------------+-------------------|
        | datetime,         |                   |
        | Timestamp,        |     timestamp     |
        | IonPyTimestamp    |                   |
        |-------------------+-------------------|
        | SymbolToken,      |                   |
        | IonPySymbol,      |      symbol       |
        | IonPyText(SYMBOL) |                   |
        |-------------------+-------------------|
        | str (Python 3),   |                   |
        | unicode (Python2),|      string       |
        | IonPyText(STRING) |                   |
        |-------------------+-------------------|
        | IonPyBytes(CLOB)  |       clob        |
        |-------------------+-------------------|
        | str (Python 2),   |                   |
        | bytes (Python 3)  |       blob        |
        | IonPyBytes(BLOB)  |                   |
        |-------------------+-------------------|
        | list, tuple,      |                   |
        | IonPyList(LIST)   |       list        |
        |-------------------+-------------------|
        | IonPyList(SEXP)   |       sexp        |
        |-------------------+-------------------|
        | dict, namedtuple, |                   |
        | IonPyDict         |      struct       |
        +-------------------+-------------------+

    Args:
        obj (Any): A python object to serialize according to the above table. Any Python object which is neither an
            instance of nor inherits from one of the types in the above table will raise TypeError.
        fp (BaseIO): A file-like object.
        imports (Optional[Sequence[SymbolTable]]): A sequence of shared symbol tables to be used by the writer.
        binary (Optional[True|False]): When True, outputs binary Ion. When False, outputs text Ion.
        sequence_as_stream (Optional[True|False]): When True, if ``obj`` is a sequence, it will be treated as a
            stream of top-level Ion values (i.e. the resulting Ion data will begin with ``obj``'s first element).
            Default: False.
        skipkeys: NOT IMPLEMENTED
        ensure_ascii: NOT IMPLEMENTED
        check_circular: NOT IMPLEMENTED
        allow_nan: NOT IMPLEMENTED
        cls: NOT IMPLEMENTED
        indent (Str): If binary is False and indent is a string, then members of containers will be pretty-printed
            with a newline followed by that string repeated for each level of nesting. None (the default) selects
            the most compact representation without any newlines. Example: to indent with four spaces per level of
            nesting, use ``'    '``.
separators: NOT IMPLEMENTED encoding: NOT IMPLEMENTED default: NOT IMPLEMENTED use_decimal: NOT IMPLEMENTED namedtuple_as_object: NOT IMPLEMENTED tuple_as_array: NOT IMPLEMENTED bigint_as_string: NOT IMPLEMENTED sort_keys: NOT IMPLEMENTED item_sort_key: NOT IMPLEMENTED for_json: NOT IMPLEMENTED ignore_nan: NOT IMPLEMENTED int_as_string_bitcount: NOT IMPLEMENTED iterable_as_array: NOT IMPLEMENTED **kw: NOT IMPLEMENTED """ raw_writer = binary_writer(imports) if binary else text_writer(indent=indent) writer = blocking_writer(raw_writer, fp) writer.send(ION_VERSION_MARKER_EVENT) # The IVM is emitted automatically in binary; it's optional in text. if sequence_as_stream and isinstance(obj, (list, tuple)): # Treat this top-level sequence as a stream; serialize its elements as top-level values, but don't serialize the # sequence itself. for top_level in obj: _dump(top_level, writer) else: _dump(obj, writer) writer.send(ION_STREAM_END_EVENT)
[ "def", "dump", "(", "obj", ",", "fp", ",", "imports", "=", "None", ",", "binary", "=", "True", ",", "sequence_as_stream", "=", "False", ",", "skipkeys", "=", "False", ",", "ensure_ascii", "=", "True", ",", "check_circular", "=", "True", ",", "allow_nan", "=", "True", ",", "cls", "=", "None", ",", "indent", "=", "None", ",", "separators", "=", "None", ",", "encoding", "=", "'utf-8'", ",", "default", "=", "None", ",", "use_decimal", "=", "True", ",", "namedtuple_as_object", "=", "True", ",", "tuple_as_array", "=", "True", ",", "bigint_as_string", "=", "False", ",", "sort_keys", "=", "False", ",", "item_sort_key", "=", "None", ",", "for_json", "=", "None", ",", "ignore_nan", "=", "False", ",", "int_as_string_bitcount", "=", "None", ",", "iterable_as_array", "=", "False", ",", "*", "*", "kw", ")", ":", "raw_writer", "=", "binary_writer", "(", "imports", ")", "if", "binary", "else", "text_writer", "(", "indent", "=", "indent", ")", "writer", "=", "blocking_writer", "(", "raw_writer", ",", "fp", ")", "writer", ".", "send", "(", "ION_VERSION_MARKER_EVENT", ")", "# The IVM is emitted automatically in binary; it's optional in text.", "if", "sequence_as_stream", "and", "isinstance", "(", "obj", ",", "(", "list", ",", "tuple", ")", ")", ":", "# Treat this top-level sequence as a stream; serialize its elements as top-level values, but don't serialize the", "# sequence itself.", "for", "top_level", "in", "obj", ":", "_dump", "(", "top_level", ",", "writer", ")", "else", ":", "_dump", "(", "obj", ",", "writer", ")", "writer", ".", "send", "(", "ION_STREAM_END_EVENT", ")" ]
52.27
19.72
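A hedged usage sketch, assuming this is the amazon.ion simpleion-style dump; a dict is serialized as a single top-level binary Ion value into an in-memory buffer:

from io import BytesIO

buf = BytesIO()
dump({'name': u'ion', 'version': 1}, buf, binary=True)
data = buf.getvalue()   # binary Ion; begins with the Ion version marker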
def cache_scan(self): """ Scan the lineage for the index of the most recent cache. :return: Index of most recent cache """ try: return len(self.transformations) - self.transformations[::-1].index(CACHE_T) except ValueError: return 0
[ "def", "cache_scan", "(", "self", ")", ":", "try", ":", "return", "len", "(", "self", ".", "transformations", ")", "-", "self", ".", "transformations", "[", ":", ":", "-", "1", "]", ".", "index", "(", "CACHE_T", ")", "except", "ValueError", ":", "return", "0" ]
32.888889
17.333333
def cget(self, key): """ Query widget option. :param key: option name :type key: str :return: value of the option To get the list of options for this widget, call the method :meth:`~Balloon.keys`. """ if key == "headertext": return self.__headertext elif key == "text": return self.__text elif key == "width": return self.__width elif key == "timeout": return self._timeout elif key == "background": return self.__background else: return ttk.Frame.cget(self, key)
[ "def", "cget", "(", "self", ",", "key", ")", ":", "if", "key", "==", "\"headertext\"", ":", "return", "self", ".", "__headertext", "elif", "key", "==", "\"text\"", ":", "return", "self", ".", "__text", "elif", "key", "==", "\"width\"", ":", "return", "self", ".", "__width", "elif", "key", "==", "\"timeout\"", ":", "return", "self", ".", "_timeout", "elif", "key", "==", "\"background\"", ":", "return", "self", ".", "__background", "else", ":", "return", "ttk", ".", "Frame", ".", "cget", "(", "self", ",", "key", ")" ]
28.181818
14.090909
def wait_for_winexe(host, port, username, password, timeout=900): ''' Wait until winexe connection can be established. ''' start = time.time() log.debug( 'Attempting winexe connection to host %s on port %s', host, port ) try_count = 0 while True: try_count += 1 try: # Shell out to winexe to check %TEMP% ret_code = run_winexe_command( "sc", "query winexesvc", host, username, password, port ) if ret_code == 0: log.debug('winexe connected...') return True log.debug('Return code was %s', ret_code) except socket.error as exc: log.debug('Caught exception in wait_for_winexesvc: %s', exc) if time.time() - start > timeout: return False time.sleep(1)
[ "def", "wait_for_winexe", "(", "host", ",", "port", ",", "username", ",", "password", ",", "timeout", "=", "900", ")", ":", "start", "=", "time", ".", "time", "(", ")", "log", ".", "debug", "(", "'Attempting winexe connection to host %s on port %s'", ",", "host", ",", "port", ")", "try_count", "=", "0", "while", "True", ":", "try_count", "+=", "1", "try", ":", "# Shell out to winexe to check %TEMP%", "ret_code", "=", "run_winexe_command", "(", "\"sc\"", ",", "\"query winexesvc\"", ",", "host", ",", "username", ",", "password", ",", "port", ")", "if", "ret_code", "==", "0", ":", "log", ".", "debug", "(", "'winexe connected...'", ")", "return", "True", "log", ".", "debug", "(", "'Return code was %s'", ",", "ret_code", ")", "except", "socket", ".", "error", "as", "exc", ":", "log", ".", "debug", "(", "'Caught exception in wait_for_winexesvc: %s'", ",", "exc", ")", "if", "time", ".", "time", "(", ")", "-", "start", ">", "timeout", ":", "return", "False", "time", ".", "sleep", "(", "1", ")" ]
31.222222
20.185185
def colorize_output(output, colors, indent=0):
    r"""Print output to console using provided color mappings.

    The color mapping is a dict with compiled regular expressions as keys and
    tuples of two as values. The key determines whether a line should be
    colorized, and the tuple contains the color to be used and a boolean
    value that indicates if a dark foreground is used.

    For example:

    >>> CLS = {
    >>>    re.compile(r'^(--- .*)$'): (Color.red, False)
    >>> }

    will colorize lines that start with '---' to red.

    If different parts of a line need to be in different colors, then a dict
    must be supplied in colors with keys that are named groups from the
    regular expression and values that are tuples of color and a boolean
    that indicates if a dark foreground is used.

    For example:

    >>> CLS = {
    >>>    re.compile(r'^(?P<key>user:\s+)(?P<user>.*)$'): {
    >>>        'key': (Color.yellow, True),
    >>>        'user': (Color.cyan, False)
    >>>    }
    >>> }

    will colorize line 'user: Some user' so that 'user:' part is yellow with
    dark foreground and 'Some user' part is cyan without dark foreground.
    """
    for line in output.split("\n"):
        cprint(" " * indent)

        if line == "":
            cprint("\n")
            continue

        for regexp, color_def in colors.items():
            if regexp.match(line) is not None:
                _colorize_single_line(line, regexp, color_def)
                break
        else:
            cprint("%s\n" % line)
[ "def", "colorize_output", "(", "output", ",", "colors", ",", "indent", "=", "0", ")", ":", "for", "line", "in", "output", ".", "split", "(", "\"\\n\"", ")", ":", "cprint", "(", "\" \"", "*", "indent", ")", "if", "line", "==", "\"\"", ":", "cprint", "(", "\"\\n\"", ")", "continue", "for", "regexp", ",", "color_def", "in", "colors", ".", "items", "(", ")", ":", "if", "regexp", ".", "match", "(", "line", ")", "is", "not", "None", ":", "_colorize_single_line", "(", "line", ",", "regexp", ",", "color_def", ")", "break", "else", ":", "cprint", "(", "\"%s\\n\"", "%", "line", ")" ]
36.642857
23.238095
def osm_filter(network_type): """ Create a filter to query Overpass API for the specified OSM network type. Parameters ---------- network_type : string, {'walk', 'drive'} denoting the type of street network to extract Returns ------- osm_filter : string """ filters = {} # drive: select only roads that are drivable by normal 2 wheel drive # passenger vehicles both private and public # roads. Filter out un-drivable roads and service roads tagged as parking, # driveway, or emergency-access filters['drive'] = ('["highway"!~"cycleway|footway|path|pedestrian|steps' '|track|proposed|construction|bridleway|abandoned' '|platform|raceway|service"]' '["motor_vehicle"!~"no"]["motorcar"!~"no"]' '["service"!~"parking|parking_aisle|driveway' '|emergency_access"]') # walk: select only roads and pathways that allow pedestrian access both # private and public pathways and roads. # Filter out limited access roadways and allow service roads filters['walk'] = ('["highway"!~"motor|proposed|construction|abandoned' '|platform|raceway"]["foot"!~"no"]' '["pedestrians"!~"no"]') if network_type in filters: osm_filter = filters[network_type] else: raise ValueError('unknown network_type "{}"'.format(network_type)) return osm_filter
[ "def", "osm_filter", "(", "network_type", ")", ":", "filters", "=", "{", "}", "# drive: select only roads that are drivable by normal 2 wheel drive", "# passenger vehicles both private and public", "# roads. Filter out un-drivable roads and service roads tagged as parking,", "# driveway, or emergency-access", "filters", "[", "'drive'", "]", "=", "(", "'[\"highway\"!~\"cycleway|footway|path|pedestrian|steps'", "'|track|proposed|construction|bridleway|abandoned'", "'|platform|raceway|service\"]'", "'[\"motor_vehicle\"!~\"no\"][\"motorcar\"!~\"no\"]'", "'[\"service\"!~\"parking|parking_aisle|driveway'", "'|emergency_access\"]'", ")", "# walk: select only roads and pathways that allow pedestrian access both", "# private and public pathways and roads.", "# Filter out limited access roadways and allow service roads", "filters", "[", "'walk'", "]", "=", "(", "'[\"highway\"!~\"motor|proposed|construction|abandoned'", "'|platform|raceway\"][\"foot\"!~\"no\"]'", "'[\"pedestrians\"!~\"no\"]'", ")", "if", "network_type", "in", "filters", ":", "osm_filter", "=", "filters", "[", "network_type", "]", "else", ":", "raise", "ValueError", "(", "'unknown network_type \"{}\"'", ".", "format", "(", "network_type", ")", ")", "return", "osm_filter" ]
37.512821
24.025641
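A quick usage sketch for osm_filter above, exercising both the lookup and the error branch:

walk_filter = osm_filter('walk')
assert '"foot"!~"no"' in walk_filter   # pedestrian access is kept

try:
    osm_filter('bike')                 # not a defined network type
except ValueError as exc:
    print(exc)                         # unknown network_type "bike"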
def get_sleep_timer(self): """Retrieves remaining sleep time, if any Returns: int or NoneType: Number of seconds left in timer. If there is no sleep timer currently set it will return None. """ resp = self.avTransport.GetRemainingSleepTimerDuration([ ('InstanceID', 0), ]) if resp['RemainingSleepTimerDuration']: times = resp['RemainingSleepTimerDuration'].split(':') return (int(times[0]) * 3600 + int(times[1]) * 60 + int(times[2])) else: return None
[ "def", "get_sleep_timer", "(", "self", ")", ":", "resp", "=", "self", ".", "avTransport", ".", "GetRemainingSleepTimerDuration", "(", "[", "(", "'InstanceID'", ",", "0", ")", ",", "]", ")", "if", "resp", "[", "'RemainingSleepTimerDuration'", "]", ":", "times", "=", "resp", "[", "'RemainingSleepTimerDuration'", "]", ".", "split", "(", "':'", ")", "return", "(", "int", "(", "times", "[", "0", "]", ")", "*", "3600", "+", "int", "(", "times", "[", "1", "]", ")", "*", "60", "+", "int", "(", "times", "[", "2", "]", ")", ")", "else", ":", "return", "None" ]
35.823529
16.764706
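The interesting part of get_sleep_timer above is the 'H:MM:SS'-to-seconds conversion; a standalone sketch of the same arithmetic (the helper name is illustrative, not part of the original API):

def hms_to_seconds(duration):
    # Mirrors the conversion inside get_sleep_timer.
    hours, minutes, seconds = (int(part) for part in duration.split(':'))
    return hours * 3600 + minutes * 60 + seconds

assert hms_to_seconds('0:30:15') == 1815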
def _get_calling_module(self): """Get the last module in the call stack that is not this module or ``None`` if the call originated from this module. """ for frame in inspect.stack(): mod = inspect.getmodule(frame[0]) logger.debug(f'calling module: {mod}') if mod is not None: mod_name = mod.__name__ if mod_name != __name__: return mod
[ "def", "_get_calling_module", "(", "self", ")", ":", "for", "frame", "in", "inspect", ".", "stack", "(", ")", ":", "mod", "=", "inspect", ".", "getmodule", "(", "frame", "[", "0", "]", ")", "logger", ".", "debug", "(", "f'calling module: {mod}'", ")", "if", "mod", "is", "not", "None", ":", "mod_name", "=", "mod", ".", "__name__", "if", "mod_name", "!=", "__name__", ":", "return", "mod" ]
37.083333
7.75
def _find_wikipedia_names(self, name_en): """ Finds all Wikipedia pages referring to the specified name in English and returns a dictionary where the keys are the language code and the values are the titles of the corresponding pages. """ url = 'https://en.wikipedia.org/w/api.php' params = {'action': 'query', 'titles': name_en, 'prop': 'langlinks', 'lllimit': 500, 'format': 'json'} r = requests.get(url, params=params) if not r: raise ValueError('Could not find wikipedia page: {0}'.format(name_en)) out = r.json() names = {} pages = out['query']['pages'] for page in pages: for langlink in pages[page].get('langlinks', []): names[langlink['lang']] = langlink['*'] return names
[ "def", "_find_wikipedia_names", "(", "self", ",", "name_en", ")", ":", "url", "=", "'https://en.wikipedia.org/w/api.php'", "params", "=", "{", "'action'", ":", "'query'", ",", "'titles'", ":", "name_en", ",", "'prop'", ":", "'langlinks'", ",", "'lllimit'", ":", "500", ",", "'format'", ":", "'json'", "}", "r", "=", "requests", ".", "get", "(", "url", ",", "params", "=", "params", ")", "if", "not", "r", ":", "raise", "ValueError", "(", "'Could not find wikipedia page: {0}'", ".", "format", "(", "name_en", ")", ")", "out", "=", "r", ".", "json", "(", ")", "names", "=", "{", "}", "pages", "=", "out", "[", "'query'", "]", "[", "'pages'", "]", "for", "page", "in", "pages", ":", "for", "langlink", "in", "pages", "[", "page", "]", ".", "get", "(", "'langlinks'", ",", "[", "]", ")", ":", "names", "[", "langlink", "[", "'lang'", "]", "]", "=", "langlink", "[", "'*'", "]", "return", "names" ]
36.791667
16.041667
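The method above wraps a documented MediaWiki API query; the same request can be reproduced directly with requests (network access assumed, 'Berlin' is an arbitrary example title):

import requests

params = {'action': 'query', 'titles': 'Berlin',
          'prop': 'langlinks', 'lllimit': 500, 'format': 'json'}
pages = requests.get('https://en.wikipedia.org/w/api.php',
                     params=params).json()['query']['pages']
names = {link['lang']: link['*']
         for page in pages.values()
         for link in page.get('langlinks', [])}
print(names.get('de'))  # the German page title, if one exists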
def folderitems(self, full_objects=False, classic=True): """Sort by Categories """ bsc = getToolByName(self.context, "bika_setup_catalog") self.an_cats = bsc( portal_type="AnalysisCategory", sort_on="sortable_title") self.an_cats_order = dict([ (b.Title, "{:04}".format(a)) for a, b in enumerate(self.an_cats)]) items = super(AnalysisServicesView, self).folderitems() if self.do_cats: self.categories = map(lambda x: x[0], sorted(self.categories, key=lambda x: x[1])) else: self.categories.sort() return items
[ "def", "folderitems", "(", "self", ",", "full_objects", "=", "False", ",", "classic", "=", "True", ")", ":", "bsc", "=", "getToolByName", "(", "self", ".", "context", ",", "\"bika_setup_catalog\"", ")", "self", ".", "an_cats", "=", "bsc", "(", "portal_type", "=", "\"AnalysisCategory\"", ",", "sort_on", "=", "\"sortable_title\"", ")", "self", ".", "an_cats_order", "=", "dict", "(", "[", "(", "b", ".", "Title", ",", "\"{:04}\"", ".", "format", "(", "a", ")", ")", "for", "a", ",", "b", "in", "enumerate", "(", "self", ".", "an_cats", ")", "]", ")", "items", "=", "super", "(", "AnalysisServicesView", ",", "self", ")", ".", "folderitems", "(", ")", "if", "self", ".", "do_cats", ":", "self", ".", "categories", "=", "map", "(", "lambda", "x", ":", "x", "[", "0", "]", ",", "sorted", "(", "self", ".", "categories", ",", "key", "=", "lambda", "x", ":", "x", "[", "1", "]", ")", ")", "else", ":", "self", ".", "categories", ".", "sort", "(", ")", "return", "items" ]
39.470588
12.411765
def polls_get(self, **kwargs): """ Polls A Poll on Pollster is a collection of questions and responses published by a reputable survey house. This endpoint provides raw data from the survey house, plus Pollster-provided metadata about each question. Pollster editors don't include every question when they enter Polls, and they don't necessarily enter every subpopulation for the responses they _do_ enter. They make editorial decisions about which questions belong in the database. The response will contain a maximum of 25 Poll objects, even if the database contains more than 25 polls. Use the `next_cursor` parameter to fetch the rest, 25 Polls at a time. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.polls_get(callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str cursor: Special string to index into the Array :param str tags: Comma-separated list of Question tag names; only Polls containing Questions with any of the given tags will be returned. :param str question: Question slug; only Polls that ask that Question will be returned. :param str sort: If `updated_at`, sort the most recently updated Poll first. (This can cause race conditions when used with `cursor`.) Otherwise, sort by most recently _entered_ Poll first. :return: InlineResponse2003 If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.polls_get_with_http_info(**kwargs) else: (data) = self.polls_get_with_http_info(**kwargs) return data
[ "def", "polls_get", "(", "self", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'callback'", ")", ":", "return", "self", ".", "polls_get_with_http_info", "(", "*", "*", "kwargs", ")", "else", ":", "(", "data", ")", "=", "self", ".", "polls_get_with_http_info", "(", "*", "*", "kwargs", ")", "return", "data" ]
69.413793
45.827586
def skip_format_url(format_, url):
    """
    Checks whether a given format/url should be skipped and not downloaded.

    @param format_: Filename format (extension).
    @type format_: str (e.g. html, txt, zip, pdf)

    @param url: URL.
    @type url: str

    @return: True if format/url should be skipped, False otherwise.
    @rtype: bool
    """
    # Do not download empty formats
    if format_ == '':
        return True

    # Do not download email addresses
    if ('mailto:' in url) and ('@' in url):
        return True

    # Is this localhost?
    parsed = urlparse(url)
    if parsed.hostname == 'localhost':
        return True

    # These are trusted manually added formats, do not skip them
    if RE_VALID_FORMATS.match(format_):
        return False

    # Simple formats only contain letters, numbers, "_" and "-"
    # Is this a non-simple format?
    if RE_NON_SIMPLE_FORMAT.match(format_):
        return True

    # Is this a link to the site root?
    if parsed.path in ('', '/'):
        return True

    # Do not skip
    return False
[ "def", "skip_format_url", "(", "format_", ",", "url", ")", ":", "# Do not download empty formats", "if", "format_", "==", "''", ":", "return", "True", "# Do not download email addresses", "if", "(", "'mailto:'", "in", "url", ")", "and", "(", "'@'", "in", "url", ")", ":", "return", "True", "# Is this localhost?", "parsed", "=", "urlparse", "(", "url", ")", "if", "parsed", ".", "hostname", "==", "'localhost'", ":", "return", "True", "# These are trusted manually added formats, do not skip them", "if", "RE_VALID_FORMATS", ".", "match", "(", "format_", ")", ":", "return", "False", "# Simple formats only contain letters, numbers, \"_\" and \"-\"", "# If this a non simple format?", "if", "RE_NON_SIMPLE_FORMAT", ".", "match", "(", "format_", ")", ":", "return", "True", "# Is this a link to the site root?", "if", "parsed", ".", "path", "in", "(", "''", ",", "'/'", ")", ":", "return", "True", "# Do not skip", "return", "False" ]
24.97561
19.804878
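A few spot checks for skip_format_url above; these hit the branches that do not depend on the module-level regexes, so they hold regardless of how RE_VALID_FORMATS and RE_NON_SIMPLE_FORMAT are defined:

assert skip_format_url('', 'http://example.com/a.pdf')        # empty format
assert skip_format_url('pdf', 'mailto:user@example.com')      # email address
assert skip_format_url('pdf', 'http://localhost:8080/a.pdf')  # localhost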
def chord_counts(im):
    r"""
    Finds the length of each chord in the supplied image and returns a list
    of their individual sizes

    Parameters
    ----------
    im : ND-array
        An image containing chords drawn in the void space.

    Returns
    -------
    result : 1D-array
        A 1D array with one element for each chord, containing its length.

    Notes
    -----
    The returned array can be passed to ``plt.hist`` to plot the histogram,
    or to ``sp.histogram`` to get the histogram data directly. Another useful
    function is ``sp.bincount`` which gives the number of chords of each
    length in a format suitable for ``plt.plot``.
    """
    labels, N = spim.label(im > 0)
    props = regionprops(labels, coordinates='xy')
    chord_lens = sp.array([i.filled_area for i in props])
    return chord_lens
[ "def", "chord_counts", "(", "im", ")", ":", "labels", ",", "N", "=", "spim", ".", "label", "(", "im", ">", "0", ")", "props", "=", "regionprops", "(", "labels", ",", "coordinates", "=", "'xy'", ")", "chord_lens", "=", "sp", ".", "array", "(", "[", "i", ".", "filled_area", "for", "i", "in", "props", "]", ")", "return", "chord_lens" ]
31.230769
23.730769
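Following the Notes above, a small sketch that builds a toy chord image and plots the chord-length counts; the array values are made up for illustration, and the scipy/skimage imports the function itself relies on are assumed to be in scope:

import numpy as np
import matplotlib.pyplot as plt

im = np.zeros((10, 10), dtype=bool)
im[2, 1:5] = True   # a chord of length 4
im[7, 3:6] = True   # a chord of length 3

lens = chord_counts(im)
plt.plot(np.bincount(lens))   # number of chords of each length
plt.show()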
def generate_key(url, page_number): """ >>> url_a = 'http://localhost:5009/search?keywords=a' >>> generate_key(url_a, 10) 'http://localhost:5009/search?keywords=a&page=10' >>> url_b = 'http://localhost:5009/search?keywords=b&page=1' >>> generate_key(url_b, 10) 'http://localhost:5009/search?keywords=b&page=10' """ index = url.rfind('page') if index != -1: result = url[0:index] result += 'page=%s' % page_number else: result = url result += '&page=%s' % page_number return result
[ "def", "generate_key", "(", "url", ",", "page_number", ")", ":", "index", "=", "url", ".", "rfind", "(", "'page'", ")", "if", "index", "!=", "-", "1", ":", "result", "=", "url", "[", "0", ":", "index", "]", "result", "+=", "'page=%s'", "%", "page_number", "else", ":", "result", "=", "url", "result", "+=", "'&page=%s'", "%", "page_number", "return", "result" ]
32
12.352941
def RegisterCustomFieldCodec(encoder, decoder): """Register a custom encoder/decoder for this field.""" def Register(field): _CUSTOM_FIELD_CODECS[field] = _Codec(encoder=encoder, decoder=decoder) return field return Register
[ "def", "RegisterCustomFieldCodec", "(", "encoder", ",", "decoder", ")", ":", "def", "Register", "(", "field", ")", ":", "_CUSTOM_FIELD_CODECS", "[", "field", "]", "=", "_Codec", "(", "encoder", "=", "encoder", ",", "decoder", "=", "decoder", ")", "return", "field", "return", "Register" ]
41.166667
17
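RegisterCustomFieldCodec above returns a decorator-style registrar that is applied to a field object; a hedged sketch with a stand-in field (the real field type depends on the surrounding library, and str.upper/str.lower are placeholder codecs):

class _FakeField(object):
    """Hypothetical stand-in for a real message field."""

field = _FakeField()
RegisterCustomFieldCodec(encoder=str.upper, decoder=str.lower)(field)
# _CUSTOM_FIELD_CODECS now maps `field` to _Codec(str.upper, str.lower).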
def order_phase_diagram(lines, stable_entries, unstable_entries, ordering): """ Orders the entries (their coordinates) in a phase diagram plot according to the user specified ordering. Ordering should be given as ['Up', 'Left', 'Right'], where Up, Left and Right are the names of the entries in the upper, left and right corners of the triangle respectively. Args: lines: list of list of coordinates for lines in the PD. stable_entries: {coordinate : entry} for each stable node in the phase diagram. (Each coordinate can only have one stable phase) unstable_entries: {entry: coordinates} for all unstable nodes in the phase diagram. ordering: Ordering of the phase diagram, given as a list ['Up', 'Left','Right'] Returns: (newlines, newstable_entries, newunstable_entries): - newlines is a list of list of coordinates for lines in the PD. - newstable_entries is a {coordinate : entry} for each stable node in the phase diagram. (Each coordinate can only have one stable phase) - newunstable_entries is a {entry: coordinates} for all unstable nodes in the phase diagram. """ yup = -1000.0 xleft = 1000.0 xright = -1000.0 for coord in stable_entries: if coord[0] > xright: xright = coord[0] nameright = stable_entries[coord].name if coord[0] < xleft: xleft = coord[0] nameleft = stable_entries[coord].name if coord[1] > yup: yup = coord[1] nameup = stable_entries[coord].name if (not nameup in ordering) or (not nameright in ordering) or \ (not nameleft in ordering): raise ValueError( 'Error in ordering_phase_diagram : \n"{up}", "{left}" and "{' 'right}"' ' should be in ordering : {ord}'.format(up=nameup, left=nameleft, right=nameright, ord=ordering)) cc = np.array([0.5, np.sqrt(3.0) / 6.0], np.float) if nameup == ordering[0]: if nameleft == ordering[1]: # The coordinates were already in the user ordering return lines, stable_entries, unstable_entries else: newlines = [[np.array(1.0 - x), y] for x, y in lines] newstable_entries = {(1.0 - c[0], c[1]): entry for c, entry in stable_entries.items()} newunstable_entries = {entry: (1.0 - c[0], c[1]) for entry, c in unstable_entries.items()} return newlines, newstable_entries, newunstable_entries elif nameup == ordering[1]: if nameleft == ordering[2]: c120 = np.cos(2.0 * np.pi / 3.0) s120 = np.sin(2.0 * np.pi / 3.0) newlines = [] for x, y in lines: newx = np.zeros_like(x) newy = np.zeros_like(y) for ii, xx in enumerate(x): newx[ii] = c120 * (xx - cc[0]) - s120 * (y[ii] - cc[1]) + \ cc[0] newy[ii] = s120 * (xx - cc[0]) + c120 * (y[ii] - cc[1]) + \ cc[1] newlines.append([newx, newy]) newstable_entries = { (c120 * (c[0] - cc[0]) - s120 * (c[1] - cc[1]) + cc[0], s120 * (c[0] - cc[0]) + c120 * (c[1] - cc[1]) + cc[1]): entry for c, entry in stable_entries.items()} newunstable_entries = { entry: (c120 * (c[0] - cc[0]) - s120 * (c[1] - cc[1]) + cc[0], s120 * (c[0] - cc[0]) + c120 * (c[1] - cc[1]) + cc[1]) for entry, c in unstable_entries.items()} return newlines, newstable_entries, newunstable_entries else: c120 = np.cos(2.0 * np.pi / 3.0) s120 = np.sin(2.0 * np.pi / 3.0) newlines = [] for x, y in lines: newx = np.zeros_like(x) newy = np.zeros_like(y) for ii, xx in enumerate(x): newx[ii] = -c120 * (xx - 1.0) - s120 * y[ii] + 1.0 newy[ii] = -s120 * (xx - 1.0) + c120 * y[ii] newlines.append([newx, newy]) newstable_entries = {(-c120 * (c[0] - 1.0) - s120 * c[1] + 1.0, -s120 * (c[0] - 1.0) + c120 * c[1]): entry for c, entry in stable_entries.items()} newunstable_entries = { entry: (-c120 * (c[0] - 1.0) - s120 * c[1] + 1.0, -s120 * (c[0] - 1.0) + c120 * c[1]) for entry, c in 
unstable_entries.items()} return newlines, newstable_entries, newunstable_entries elif nameup == ordering[2]: if nameleft == ordering[0]: c240 = np.cos(4.0 * np.pi / 3.0) s240 = np.sin(4.0 * np.pi / 3.0) newlines = [] for x, y in lines: newx = np.zeros_like(x) newy = np.zeros_like(y) for ii, xx in enumerate(x): newx[ii] = c240 * (xx - cc[0]) - s240 * (y[ii] - cc[1]) + \ cc[0] newy[ii] = s240 * (xx - cc[0]) + c240 * (y[ii] - cc[1]) + \ cc[1] newlines.append([newx, newy]) newstable_entries = { (c240 * (c[0] - cc[0]) - s240 * (c[1] - cc[1]) + cc[0], s240 * (c[0] - cc[0]) + c240 * (c[1] - cc[1]) + cc[1]): entry for c, entry in stable_entries.items()} newunstable_entries = { entry: (c240 * (c[0] - cc[0]) - s240 * (c[1] - cc[1]) + cc[0], s240 * (c[0] - cc[0]) + c240 * (c[1] - cc[1]) + cc[1]) for entry, c in unstable_entries.items()} return newlines, newstable_entries, newunstable_entries else: c240 = np.cos(4.0 * np.pi / 3.0) s240 = np.sin(4.0 * np.pi / 3.0) newlines = [] for x, y in lines: newx = np.zeros_like(x) newy = np.zeros_like(y) for ii, xx in enumerate(x): newx[ii] = -c240 * xx - s240 * y[ii] newy[ii] = -s240 * xx + c240 * y[ii] newlines.append([newx, newy]) newstable_entries = {(-c240 * c[0] - s240 * c[1], -s240 * c[0] + c240 * c[1]): entry for c, entry in stable_entries.items()} newunstable_entries = {entry: (-c240 * c[0] - s240 * c[1], -s240 * c[0] + c240 * c[1]) for entry, c in unstable_entries.items()} return newlines, newstable_entries, newunstable_entries
[ "def", "order_phase_diagram", "(", "lines", ",", "stable_entries", ",", "unstable_entries", ",", "ordering", ")", ":", "yup", "=", "-", "1000.0", "xleft", "=", "1000.0", "xright", "=", "-", "1000.0", "for", "coord", "in", "stable_entries", ":", "if", "coord", "[", "0", "]", ">", "xright", ":", "xright", "=", "coord", "[", "0", "]", "nameright", "=", "stable_entries", "[", "coord", "]", ".", "name", "if", "coord", "[", "0", "]", "<", "xleft", ":", "xleft", "=", "coord", "[", "0", "]", "nameleft", "=", "stable_entries", "[", "coord", "]", ".", "name", "if", "coord", "[", "1", "]", ">", "yup", ":", "yup", "=", "coord", "[", "1", "]", "nameup", "=", "stable_entries", "[", "coord", "]", ".", "name", "if", "(", "not", "nameup", "in", "ordering", ")", "or", "(", "not", "nameright", "in", "ordering", ")", "or", "(", "not", "nameleft", "in", "ordering", ")", ":", "raise", "ValueError", "(", "'Error in ordering_phase_diagram : \\n\"{up}\", \"{left}\" and \"{'", "'right}\"'", "' should be in ordering : {ord}'", ".", "format", "(", "up", "=", "nameup", ",", "left", "=", "nameleft", ",", "right", "=", "nameright", ",", "ord", "=", "ordering", ")", ")", "cc", "=", "np", ".", "array", "(", "[", "0.5", ",", "np", ".", "sqrt", "(", "3.0", ")", "/", "6.0", "]", ",", "np", ".", "float", ")", "if", "nameup", "==", "ordering", "[", "0", "]", ":", "if", "nameleft", "==", "ordering", "[", "1", "]", ":", "# The coordinates were already in the user ordering", "return", "lines", ",", "stable_entries", ",", "unstable_entries", "else", ":", "newlines", "=", "[", "[", "np", ".", "array", "(", "1.0", "-", "x", ")", ",", "y", "]", "for", "x", ",", "y", "in", "lines", "]", "newstable_entries", "=", "{", "(", "1.0", "-", "c", "[", "0", "]", ",", "c", "[", "1", "]", ")", ":", "entry", "for", "c", ",", "entry", "in", "stable_entries", ".", "items", "(", ")", "}", "newunstable_entries", "=", "{", "entry", ":", "(", "1.0", "-", "c", "[", "0", "]", ",", "c", "[", "1", "]", ")", "for", "entry", ",", "c", "in", "unstable_entries", ".", "items", "(", ")", "}", "return", "newlines", ",", "newstable_entries", ",", "newunstable_entries", "elif", "nameup", "==", "ordering", "[", "1", "]", ":", "if", "nameleft", "==", "ordering", "[", "2", "]", ":", "c120", "=", "np", ".", "cos", "(", "2.0", "*", "np", ".", "pi", "/", "3.0", ")", "s120", "=", "np", ".", "sin", "(", "2.0", "*", "np", ".", "pi", "/", "3.0", ")", "newlines", "=", "[", "]", "for", "x", ",", "y", "in", "lines", ":", "newx", "=", "np", ".", "zeros_like", "(", "x", ")", "newy", "=", "np", ".", "zeros_like", "(", "y", ")", "for", "ii", ",", "xx", "in", "enumerate", "(", "x", ")", ":", "newx", "[", "ii", "]", "=", "c120", "*", "(", "xx", "-", "cc", "[", "0", "]", ")", "-", "s120", "*", "(", "y", "[", "ii", "]", "-", "cc", "[", "1", "]", ")", "+", "cc", "[", "0", "]", "newy", "[", "ii", "]", "=", "s120", "*", "(", "xx", "-", "cc", "[", "0", "]", ")", "+", "c120", "*", "(", "y", "[", "ii", "]", "-", "cc", "[", "1", "]", ")", "+", "cc", "[", "1", "]", "newlines", ".", "append", "(", "[", "newx", ",", "newy", "]", ")", "newstable_entries", "=", "{", "(", "c120", "*", "(", "c", "[", "0", "]", "-", "cc", "[", "0", "]", ")", "-", "s120", "*", "(", "c", "[", "1", "]", "-", "cc", "[", "1", "]", ")", "+", "cc", "[", "0", "]", ",", "s120", "*", "(", "c", "[", "0", "]", "-", "cc", "[", "0", "]", ")", "+", "c120", "*", "(", "c", "[", "1", "]", "-", "cc", "[", "1", "]", ")", "+", "cc", "[", "1", "]", ")", ":", "entry", "for", "c", ",", "entry", "in", "stable_entries", ".", "items", "(", 
")", "}", "newunstable_entries", "=", "{", "entry", ":", "(", "c120", "*", "(", "c", "[", "0", "]", "-", "cc", "[", "0", "]", ")", "-", "s120", "*", "(", "c", "[", "1", "]", "-", "cc", "[", "1", "]", ")", "+", "cc", "[", "0", "]", ",", "s120", "*", "(", "c", "[", "0", "]", "-", "cc", "[", "0", "]", ")", "+", "c120", "*", "(", "c", "[", "1", "]", "-", "cc", "[", "1", "]", ")", "+", "cc", "[", "1", "]", ")", "for", "entry", ",", "c", "in", "unstable_entries", ".", "items", "(", ")", "}", "return", "newlines", ",", "newstable_entries", ",", "newunstable_entries", "else", ":", "c120", "=", "np", ".", "cos", "(", "2.0", "*", "np", ".", "pi", "/", "3.0", ")", "s120", "=", "np", ".", "sin", "(", "2.0", "*", "np", ".", "pi", "/", "3.0", ")", "newlines", "=", "[", "]", "for", "x", ",", "y", "in", "lines", ":", "newx", "=", "np", ".", "zeros_like", "(", "x", ")", "newy", "=", "np", ".", "zeros_like", "(", "y", ")", "for", "ii", ",", "xx", "in", "enumerate", "(", "x", ")", ":", "newx", "[", "ii", "]", "=", "-", "c120", "*", "(", "xx", "-", "1.0", ")", "-", "s120", "*", "y", "[", "ii", "]", "+", "1.0", "newy", "[", "ii", "]", "=", "-", "s120", "*", "(", "xx", "-", "1.0", ")", "+", "c120", "*", "y", "[", "ii", "]", "newlines", ".", "append", "(", "[", "newx", ",", "newy", "]", ")", "newstable_entries", "=", "{", "(", "-", "c120", "*", "(", "c", "[", "0", "]", "-", "1.0", ")", "-", "s120", "*", "c", "[", "1", "]", "+", "1.0", ",", "-", "s120", "*", "(", "c", "[", "0", "]", "-", "1.0", ")", "+", "c120", "*", "c", "[", "1", "]", ")", ":", "entry", "for", "c", ",", "entry", "in", "stable_entries", ".", "items", "(", ")", "}", "newunstable_entries", "=", "{", "entry", ":", "(", "-", "c120", "*", "(", "c", "[", "0", "]", "-", "1.0", ")", "-", "s120", "*", "c", "[", "1", "]", "+", "1.0", ",", "-", "s120", "*", "(", "c", "[", "0", "]", "-", "1.0", ")", "+", "c120", "*", "c", "[", "1", "]", ")", "for", "entry", ",", "c", "in", "unstable_entries", ".", "items", "(", ")", "}", "return", "newlines", ",", "newstable_entries", ",", "newunstable_entries", "elif", "nameup", "==", "ordering", "[", "2", "]", ":", "if", "nameleft", "==", "ordering", "[", "0", "]", ":", "c240", "=", "np", ".", "cos", "(", "4.0", "*", "np", ".", "pi", "/", "3.0", ")", "s240", "=", "np", ".", "sin", "(", "4.0", "*", "np", ".", "pi", "/", "3.0", ")", "newlines", "=", "[", "]", "for", "x", ",", "y", "in", "lines", ":", "newx", "=", "np", ".", "zeros_like", "(", "x", ")", "newy", "=", "np", ".", "zeros_like", "(", "y", ")", "for", "ii", ",", "xx", "in", "enumerate", "(", "x", ")", ":", "newx", "[", "ii", "]", "=", "c240", "*", "(", "xx", "-", "cc", "[", "0", "]", ")", "-", "s240", "*", "(", "y", "[", "ii", "]", "-", "cc", "[", "1", "]", ")", "+", "cc", "[", "0", "]", "newy", "[", "ii", "]", "=", "s240", "*", "(", "xx", "-", "cc", "[", "0", "]", ")", "+", "c240", "*", "(", "y", "[", "ii", "]", "-", "cc", "[", "1", "]", ")", "+", "cc", "[", "1", "]", "newlines", ".", "append", "(", "[", "newx", ",", "newy", "]", ")", "newstable_entries", "=", "{", "(", "c240", "*", "(", "c", "[", "0", "]", "-", "cc", "[", "0", "]", ")", "-", "s240", "*", "(", "c", "[", "1", "]", "-", "cc", "[", "1", "]", ")", "+", "cc", "[", "0", "]", ",", "s240", "*", "(", "c", "[", "0", "]", "-", "cc", "[", "0", "]", ")", "+", "c240", "*", "(", "c", "[", "1", "]", "-", "cc", "[", "1", "]", ")", "+", "cc", "[", "1", "]", ")", ":", "entry", "for", "c", ",", "entry", "in", "stable_entries", ".", "items", "(", ")", "}", "newunstable_entries", "=", "{", "entry", ":", "(", "c240", 
"*", "(", "c", "[", "0", "]", "-", "cc", "[", "0", "]", ")", "-", "s240", "*", "(", "c", "[", "1", "]", "-", "cc", "[", "1", "]", ")", "+", "cc", "[", "0", "]", ",", "s240", "*", "(", "c", "[", "0", "]", "-", "cc", "[", "0", "]", ")", "+", "c240", "*", "(", "c", "[", "1", "]", "-", "cc", "[", "1", "]", ")", "+", "cc", "[", "1", "]", ")", "for", "entry", ",", "c", "in", "unstable_entries", ".", "items", "(", ")", "}", "return", "newlines", ",", "newstable_entries", ",", "newunstable_entries", "else", ":", "c240", "=", "np", ".", "cos", "(", "4.0", "*", "np", ".", "pi", "/", "3.0", ")", "s240", "=", "np", ".", "sin", "(", "4.0", "*", "np", ".", "pi", "/", "3.0", ")", "newlines", "=", "[", "]", "for", "x", ",", "y", "in", "lines", ":", "newx", "=", "np", ".", "zeros_like", "(", "x", ")", "newy", "=", "np", ".", "zeros_like", "(", "y", ")", "for", "ii", ",", "xx", "in", "enumerate", "(", "x", ")", ":", "newx", "[", "ii", "]", "=", "-", "c240", "*", "xx", "-", "s240", "*", "y", "[", "ii", "]", "newy", "[", "ii", "]", "=", "-", "s240", "*", "xx", "+", "c240", "*", "y", "[", "ii", "]", "newlines", ".", "append", "(", "[", "newx", ",", "newy", "]", ")", "newstable_entries", "=", "{", "(", "-", "c240", "*", "c", "[", "0", "]", "-", "s240", "*", "c", "[", "1", "]", ",", "-", "s240", "*", "c", "[", "0", "]", "+", "c240", "*", "c", "[", "1", "]", ")", ":", "entry", "for", "c", ",", "entry", "in", "stable_entries", ".", "items", "(", ")", "}", "newunstable_entries", "=", "{", "entry", ":", "(", "-", "c240", "*", "c", "[", "0", "]", "-", "s240", "*", "c", "[", "1", "]", ",", "-", "s240", "*", "c", "[", "0", "]", "+", "c240", "*", "c", "[", "1", "]", ")", "for", "entry", ",", "c", "in", "unstable_entries", ".", "items", "(", ")", "}", "return", "newlines", ",", "newstable_entries", ",", "newunstable_entries" ]
47.102041
18.639456
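Every branch above applies the same planar rotation about the ternary centroid cc = (0.5, sqrt(3)/6), just with different angles and fixed points; a compact sketch of that single operation, which the duplicated loops could in principle share:

import numpy as np

def rotate_about_centroid(x, y, angle):
    # Rotate points (x, y) by `angle` radians about the triangle centroid.
    cc = np.array([0.5, np.sqrt(3.0) / 6.0])
    c, s = np.cos(angle), np.sin(angle)
    x, y = np.asarray(x), np.asarray(y)
    return (c * (x - cc[0]) - s * (y - cc[1]) + cc[0],
            s * (x - cc[0]) + c * (y - cc[1]) + cc[1])

# The centroid is a fixed point of the rotation:
assert np.allclose(rotate_about_centroid(0.5, np.sqrt(3.0) / 6.0,
                                         2.0 * np.pi / 3.0),
                   (0.5, np.sqrt(3.0) / 6.0))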
def kill(self): """Kill the browser. This is useful when the browser is stuck. """ if self.process: self.process.kill() self.process.wait()
[ "def", "kill", "(", "self", ")", ":", "if", "self", ".", "process", ":", "self", ".", "process", ".", "kill", "(", ")", "self", ".", "process", ".", "wait", "(", ")" ]
23.625
13.5
def try_open (*args, **kwargs):
    """Simply a wrapper for io.open(), unless an IOError with errno=2
    (ENOENT) is raised, in which case None is returned.

    """
    try:
        return io.open (*args, **kwargs)
    except IOError as e:
        if e.errno == 2:
            return None
        raise
[ "def", "try_open", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "try", ":", "return", "io", ".", "open", "(", "*", "args", ",", "*", "*", "kwargs", ")", "except", "IOError", "as", "e", ":", "if", "e", ".", "errno", "==", "2", ":", "return", "None", "raise" ]
26.636364
14.454545
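Usage sketch for try_open above: a missing path yields None instead of an exception, while other IOErrors still propagate (io is already required by the function itself):

assert try_open('/no/such/path/file.txt', 'rt') is None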
def _elect_source_broker(self, victim_partition, broker_subset=None): """Select first over loaded broker having victim_partition. Note: The broker with maximum siblings of victim-partitions (same topic) is selected to reduce topic-partition imbalance. """ broker_subset = broker_subset or self._brokers over_loaded_brokers = sorted( [ broker for broker in broker_subset if victim_partition in broker.partitions and not broker.inactive ], key=lambda b: len(b.partitions), reverse=True, ) if not over_loaded_brokers: return None broker_topic_partition_cnt = [ (broker, broker.count_partitions(victim_partition.topic)) for broker in over_loaded_brokers ] max_count_pair = max( broker_topic_partition_cnt, key=lambda ele: ele[1], ) return max_count_pair[0]
[ "def", "_elect_source_broker", "(", "self", ",", "victim_partition", ",", "broker_subset", "=", "None", ")", ":", "broker_subset", "=", "broker_subset", "or", "self", ".", "_brokers", "over_loaded_brokers", "=", "sorted", "(", "[", "broker", "for", "broker", "in", "broker_subset", "if", "victim_partition", "in", "broker", ".", "partitions", "and", "not", "broker", ".", "inactive", "]", ",", "key", "=", "lambda", "b", ":", "len", "(", "b", ".", "partitions", ")", ",", "reverse", "=", "True", ",", ")", "if", "not", "over_loaded_brokers", ":", "return", "None", "broker_topic_partition_cnt", "=", "[", "(", "broker", ",", "broker", ".", "count_partitions", "(", "victim_partition", ".", "topic", ")", ")", "for", "broker", "in", "over_loaded_brokers", "]", "max_count_pair", "=", "max", "(", "broker_topic_partition_cnt", ",", "key", "=", "lambda", "ele", ":", "ele", "[", "1", "]", ",", ")", "return", "max_count_pair", "[", "0", "]" ]
35.25
17.535714
def import_classes(names, src, dst): """Import classes in package from their implementation modules.""" for name in names: module = importlib.import_module('pygsp.' + src + '.' + name.lower()) setattr(sys.modules['pygsp.' + dst], name, getattr(module, name))
[ "def", "import_classes", "(", "names", ",", "src", ",", "dst", ")", ":", "for", "name", "in", "names", ":", "module", "=", "importlib", ".", "import_module", "(", "'pygsp.'", "+", "src", "+", "'.'", "+", "name", ".", "lower", "(", ")", ")", "setattr", "(", "sys", ".", "modules", "[", "'pygsp.'", "+", "dst", "]", ",", "name", ",", "getattr", "(", "module", ",", "name", ")", ")" ]
55.6
18.4
def remember_order(self): """Verify that subsequent :func:`fudge.Fake.expects` are called in the right order. For example:: >>> import fudge >>> db = fudge.Fake('db').remember_order().expects('insert').expects('update') >>> db.update() Traceback (most recent call last): ... AssertionError: Call #1 was fake:db.update(); Expected: #1 fake:db.insert(), #2 fake:db.update(), end >>> fudge.clear_expectations() When declaring multiple calls using :func:`fudge.Fake.next_call`, each subsequent call will be added to the expected order of calls :: >>> import fudge >>> sess = fudge.Fake("session").remember_order().expects("get_id").returns(1) >>> sess = sess.expects("set_id").with_args(5) >>> sess = sess.next_call(for_method="get_id").returns(5) Multiple calls to ``get_id()`` are now expected :: >>> sess.get_id() 1 >>> sess.set_id(5) >>> sess.get_id() 5 >>> fudge.verify() >>> fudge.clear_expectations() """ if self._callable: raise FakeDeclarationError( "remember_order() cannot be used for Fake(callable=True) or Fake(expect_call=True)") self._expected_call_order = ExpectedCallOrder(self) registry.remember_expected_call_order(self._expected_call_order) return self
[ "def", "remember_order", "(", "self", ")", ":", "if", "self", ".", "_callable", ":", "raise", "FakeDeclarationError", "(", "\"remember_order() cannot be used for Fake(callable=True) or Fake(expect_call=True)\"", ")", "self", ".", "_expected_call_order", "=", "ExpectedCallOrder", "(", "self", ")", "registry", ".", "remember_expected_call_order", "(", "self", ".", "_expected_call_order", ")", "return", "self" ]
38.578947
24.736842
def indir(path): """ Context manager for switching the current path of the process. Can be used: with indir('/tmp'): <do something in tmp> """ cwd = os.getcwd() try: os.chdir(path) yield except Exception as e: raise finally: os.chdir(cwd)
[ "def", "indir", "(", "path", ")", ":", "cwd", "=", "os", ".", "getcwd", "(", ")", "try", ":", "os", ".", "chdir", "(", "path", ")", "yield", "except", "Exception", "as", "e", ":", "raise", "finally", ":", "os", ".", "chdir", "(", "cwd", ")" ]
19.0625
21.6875
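Usage sketch for indir above, assuming the @contextlib.contextmanager decorator the docstring implies; the working directory is restored even if the body raises:

import os

before = os.getcwd()
with indir('/tmp'):
    # realpath() accounts for platforms where /tmp is a symlink
    assert os.getcwd() == os.path.realpath('/tmp')
assert os.getcwd() == before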
def find(cls, name): """Returns the NDS2 channel type corresponding to the given name """ try: return cls._member_map_[name] except KeyError: for ctype in cls._member_map_.values(): if ctype.name == name: return ctype raise ValueError('%s is not a valid %s' % (name, cls.__name__))
[ "def", "find", "(", "cls", ",", "name", ")", ":", "try", ":", "return", "cls", ".", "_member_map_", "[", "name", "]", "except", "KeyError", ":", "for", "ctype", "in", "cls", ".", "_member_map_", ".", "values", "(", ")", ":", "if", "ctype", ".", "name", "==", "name", ":", "return", "ctype", "raise", "ValueError", "(", "'%s is not a valid %s'", "%", "(", "name", ",", "cls", ".", "__name__", ")", ")" ]
37.6
12.1
def SetPixelColorsHorizontally(self, x: int, y: int, colors: Iterable) -> bool: """ Set pixel colors form x,y horizontally. x: int. y: int. colors: Iterable, an iterable list of int color values in argb. Return bool, True if succeed otherwise False. """ count = len(colors) arrayType = ctypes.c_uint32 * count values = arrayType(*colors) return _DllClient.instance().dll.BitmapSetPixelsHorizontally(ctypes.c_size_t(self._bitmap), x, y, values, count)
[ "def", "SetPixelColorsHorizontally", "(", "self", ",", "x", ":", "int", ",", "y", ":", "int", ",", "colors", ":", "Iterable", ")", "->", "bool", ":", "count", "=", "len", "(", "colors", ")", "arrayType", "=", "ctypes", ".", "c_uint32", "*", "count", "values", "=", "arrayType", "(", "*", "colors", ")", "return", "_DllClient", ".", "instance", "(", ")", ".", "dll", ".", "BitmapSetPixelsHorizontally", "(", "ctypes", ".", "c_size_t", "(", "self", ".", "_bitmap", ")", ",", "x", ",", "y", ",", "values", ",", "count", ")" ]
43.916667
20.083333
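Usage sketch, with `bitmap` standing in for an instance of the class defining the method above; 0xFFFF0000 is opaque red in ARGB:

# Paint three pixels starting at (10, 5): red, green, blue.
colors = [0xFFFF0000, 0xFF00FF00, 0xFF0000FF]
assert bitmap.SetPixelColorsHorizontally(10, 5, colors)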
def sessions_in_range(self, start_session_label, end_session_label): """ Given start and end session labels, return all the sessions in that range, inclusive. Parameters ---------- start_session_label: pd.Timestamp (midnight UTC) The label representing the first session of the desired range. end_session_label: pd.Timestamp (midnight UTC) The label representing the last session of the desired range. Returns ------- pd.DatetimeIndex The desired sessions. """ return self.all_sessions[ self.all_sessions.slice_indexer( start_session_label, end_session_label ) ]
[ "def", "sessions_in_range", "(", "self", ",", "start_session_label", ",", "end_session_label", ")", ":", "return", "self", ".", "all_sessions", "[", "self", ".", "all_sessions", ".", "slice_indexer", "(", "start_session_label", ",", "end_session_label", ")", "]" ]
30.75
20.5
def validate(self, object_dict, partial=False):
    """Validate given internal object returned by ``to_representation()``.

    Internal object is validated against missing/forbidden/invalid
    field values using the field definitions defined in the serializer.

    Args:
        object_dict (dict): internal object dictionary to validate
        partial (bool): if set to True then an incomplete object_dict
            is accepted and will not raise any exceptions when one of
            the fields is missing

    Raises:
        DeserializationError:

    """
    # we are working on object_dict not an representation so there
    # is a need to annotate sources differently
    sources = {
        _source(name, field): field
        for name, field in self.fields.items()
    }

    # note: we are checking for all mising and invalid fields so we can
    #       return exception with all fields that are missing and should
    #       exist instead of single one
    missing = [
        name for name, field in sources.items()
        if all((not partial,
                name not in object_dict,
                not field.read_only))
    ]

    forbidden = [
        name for name in object_dict
        if any((name not in sources,
                sources[name].read_only))
    ]

    invalid = {}
    for name, value in object_dict.items():
        try:
            field = sources[name]

            if field.many:
                for single_value in value:
                    field.validate(single_value)
            else:
                field.validate(value)
        except ValueError as err:
            invalid[name] = str(err)

    if any([missing, forbidden, invalid]):
        # note: We have validated internal object instance but need to
        #       inform the user about problems with his representation.
        #       This is why we have to do this dirty transformation.
        # note: This will be removed in 1.0.0 where we change how
        #       validation works and where we remove star-like fields.
        #       refs: #42 (https://github.com/swistakm/graceful/issues/42)
        sources_to_field_names = {
            _source(name, field): name
            for name, field in self.fields.items()
        }

        def _(names):
            if isinstance(names, list):
                return [
                    sources_to_field_names.get(name, name)
                    for name in names
                ]
            elif isinstance(names, dict):
                return {
                    sources_to_field_names.get(name, name): value
                    for name, value in names.items()
                }
            else:
                return names  # pragma: nocover

        raise DeserializationError(_(missing), _(forbidden), _(invalid))
[ "def", "validate", "(", "self", ",", "object_dict", ",", "partial", "=", "False", ")", ":", "# we are working on object_dict not an representation so there", "# is a need to annotate sources differently", "sources", "=", "{", "_source", "(", "name", ",", "field", ")", ":", "field", "for", "name", ",", "field", "in", "self", ".", "fields", ".", "items", "(", ")", "}", "# note: we are checking for all mising and invalid fields so we can", "# return exception with all fields that are missing and should", "# exist instead of single one", "missing", "=", "[", "name", "for", "name", ",", "field", "in", "sources", ".", "items", "(", ")", "if", "all", "(", "(", "not", "partial", ",", "name", "not", "in", "object_dict", ",", "not", "field", ".", "read_only", ")", ")", "]", "forbidden", "=", "[", "name", "for", "name", "in", "object_dict", "if", "any", "(", "(", "name", "not", "in", "sources", ",", "sources", "[", "name", "]", ".", "read_only", ")", ")", "]", "invalid", "=", "{", "}", "for", "name", ",", "value", "in", "object_dict", ".", "items", "(", ")", ":", "try", ":", "field", "=", "sources", "[", "name", "]", "if", "field", ".", "many", ":", "for", "single_value", "in", "value", ":", "field", ".", "validate", "(", "single_value", ")", "else", ":", "field", ".", "validate", "(", "value", ")", "except", "ValueError", "as", "err", ":", "invalid", "[", "name", "]", "=", "str", "(", "err", ")", "if", "any", "(", "[", "missing", ",", "forbidden", ",", "invalid", "]", ")", ":", "# note: We have validated internal object instance but need to", "# inform the user about problems with his representation.", "# This is why we have to do this dirty transformation.", "# note: This will be removed in 1.0.0 where we change how", "# validation works and where we remove star-like fields.", "# refs: #42 (https://github.com/swistakm/graceful/issues/42)", "sources_to_field_names", "=", "{", "_source", "(", "name", ",", "field", ")", ":", "name", "for", "name", ",", "field", "in", "self", ".", "fields", ".", "items", "(", ")", "}", "def", "_", "(", "names", ")", ":", "if", "isinstance", "(", "names", ",", "list", ")", ":", "return", "[", "sources_to_field_names", ".", "get", "(", "name", ",", "name", ")", "for", "name", "in", "names", "]", "elif", "isinstance", "(", "names", ",", "dict", ")", ":", "return", "{", "sources_to_field_names", ".", "get", "(", "name", ",", "name", ")", ":", "value", "for", "name", ",", "value", "in", "names", ".", "items", "(", ")", "}", "else", ":", "return", "names", "# pragma: nocover", "raise", "DeserializationError", "(", "_", "(", "missing", ")", ",", "_", "(", "forbidden", ")", ",", "_", "(", "invalid", ")", ")" ]
37.423077
20.948718
def gradient_plots(self, analytes=None, win=15, samples=None, ranges=False,
                   focus=None, outdir=None,
                   figsize=[10, 4], subset='All_Analyses'):
    """
    Plot analyte gradients as a function of time.

    Parameters
    ----------
    analytes : optional, array_like or str
        The analyte(s) to plot. Defaults to all analytes.
    samples: optional, array_like or str
        The sample(s) to plot. Defaults to all samples.
    ranges : bool
        Whether or not to show the signal/background regions
        identified by 'autorange'.
    focus : str
        The focus 'stage' of the analysis to plot. Can be
        'rawdata', 'despiked', 'signal', 'background',
        'bkgsub', 'ratios' or 'calibrated'.
    outdir : str
        Path to a directory where you'd like the plots to be saved.
        Defaults to 'reports/[focus]' in your data directory.
    filt : str, dict or bool
        Either logical filter expression contained in a str,
        a dict of expressions specifying the filter string to
        use for each analyte or a boolean. Passed to `grab_filt`.
    scale : str
        If 'log', plots the data on a log scale.
    figsize : array_like
        Array of length 2 specifying figure [width, height] in
        inches.
    stats : bool
        Whether or not to overlay the mean and standard deviations
        for each trace.
    stat, err: str
        The names of the statistic and error components to plot.
        Defaults to 'nanmean' and 'nanstd'.

    Returns
    -------
    None
    """
    if focus is None:
        focus = self.focus_stage
    if outdir is None:
        outdir = self.report_dir + '/' + focus + '_gradient'
    if not os.path.isdir(outdir):
        os.mkdir(outdir)

    # if samples is not None:
    #     subset = self.make_subset(samples)
    if subset is not None:
        samples = self._get_samples(subset)
    elif samples is None:
        samples = self.subsets['All_Analyses']
    elif isinstance(samples, str):
        samples = [samples]

    with self.pbar.set(total=len(samples), desc='Drawing Plots') as prog:
        for s in samples:
            f, a = self.data[s].gplot(analytes=analytes, win=win,
                                      figsize=figsize, ranges=ranges,
                                      focus_stage=focus)
            # ax = fig.axes[0]
            # for l, u in s.sigrng:
            #     ax.axvspan(l, u, color='r', alpha=0.1)
            # for l, u in s.bkgrng:
            #     ax.axvspan(l, u, color='k', alpha=0.1)
            f.savefig(outdir + '/' + s + '_gradients.pdf')
            # TODO: on older(?) computers raises
            # 'OSError: [Errno 24] Too many open files'
            plt.close(f)
            prog.update()
    return
[ "def", "gradient_plots", "(", "self", ",", "analytes", "=", "None", ",", "win", "=", "15", ",", "samples", "=", "None", ",", "ranges", "=", "False", ",", "focus", "=", "None", ",", "outdir", "=", "None", ",", "figsize", "=", "[", "10", ",", "4", "]", ",", "subset", "=", "'All_Analyses'", ")", ":", "if", "focus", "is", "None", ":", "focus", "=", "self", ".", "focus_stage", "if", "outdir", "is", "None", ":", "outdir", "=", "self", ".", "report_dir", "+", "'/'", "+", "focus", "+", "'_gradient'", "if", "not", "os", ".", "path", ".", "isdir", "(", "outdir", ")", ":", "os", ".", "mkdir", "(", "outdir", ")", "# if samples is not None:", "# subset = self.make_subset(samples)", "if", "subset", "is", "not", "None", ":", "samples", "=", "self", ".", "_get_samples", "(", "subset", ")", "elif", "samples", "is", "None", ":", "samples", "=", "self", ".", "subsets", "[", "'All_Analyses'", "]", "elif", "isinstance", "(", "samples", ",", "str", ")", ":", "samples", "=", "[", "samples", "]", "with", "self", ".", "pbar", ".", "set", "(", "total", "=", "len", "(", "samples", ")", ",", "desc", "=", "'Drawing Plots'", ")", "as", "prog", ":", "for", "s", "in", "samples", ":", "f", ",", "a", "=", "self", ".", "data", "[", "s", "]", ".", "gplot", "(", "analytes", "=", "analytes", ",", "win", "=", "win", ",", "figsize", "=", "figsize", ",", "ranges", "=", "ranges", ",", "focus_stage", "=", "focus", ")", "# ax = fig.axes[0]", "# for l, u in s.sigrng:", "# ax.axvspan(l, u, color='r', alpha=0.1)", "# for l, u in s.bkgrng:", "# ax.axvspan(l, u, color='k', alpha=0.1)", "f", ".", "savefig", "(", "outdir", "+", "'/'", "+", "s", "+", "'_gradients.pdf'", ")", "# TODO: on older(?) computers raises", "# 'OSError: [Errno 24] Too many open files'", "plt", ".", "close", "(", "f", ")", "prog", ".", "update", "(", ")", "return" ]
39.266667
18.493333
def create_label(self, label, doc=None, callback=dummy_progress_cb): """ Create a new label Arguments: doc --- first document on which the label must be added (required for now) """ if doc: clone = doc.clone() # make sure it's serializable r = self.index.create_label(label, doc=clone) return r
[ "def", "create_label", "(", "self", ",", "label", ",", "doc", "=", "None", ",", "callback", "=", "dummy_progress_cb", ")", ":", "if", "doc", ":", "clone", "=", "doc", ".", "clone", "(", ")", "# make sure it's serializable", "r", "=", "self", ".", "index", ".", "create_label", "(", "label", ",", "doc", "=", "clone", ")", "return", "r" ]
32.083333
19.75
def split(self, grouper): '''Split the current DenseRunVariable into multiple columns. Parameters ---------- grouper : :obj:`pandas.DataFrame` Binary DF specifying the design matrix to use for splitting. Number of rows must match current ``DenseRunVariable``; a new ``DenseRunVariable`` will be generated for each column in the grouper. Returns ------- A list of DenseRunVariables, one per unique value in the grouper. ''' values = grouper.values * self.values.values df = pd.DataFrame(values, columns=grouper.columns) return [DenseRunVariable(name='%s.%s' % (self.name, name), values=df[name].values, run_info=self.run_info, source=self.source, sampling_rate=self.sampling_rate) for i, name in enumerate(df.columns)]
[ "def", "split", "(", "self", ",", "grouper", ")", ":", "values", "=", "grouper", ".", "values", "*", "self", ".", "values", ".", "values", "df", "=", "pd", ".", "DataFrame", "(", "values", ",", "columns", "=", "grouper", ".", "columns", ")", "return", "[", "DenseRunVariable", "(", "name", "=", "'%s.%s'", "%", "(", "self", ".", "name", ",", "name", ")", ",", "values", "=", "df", "[", "name", "]", ".", "values", ",", "run_info", "=", "self", ".", "run_info", ",", "source", "=", "self", ".", "source", ",", "sampling_rate", "=", "self", ".", "sampling_rate", ")", "for", "i", ",", "name", "in", "enumerate", "(", "df", ".", "columns", ")", "]" ]
44.090909
23.909091
def remove_core_element(self, model): """Remove respective core element of handed global variable name :param str model: String that is the key/gv_name of core element which should be removed :return: """ gv_name = model if self.global_variable_is_editable(gv_name, "Deletion"): try: self.model.global_variable_manager.delete_variable(gv_name) except AttributeError as e: logger.warning("The respective global variable '{1}' couldn't be removed. -> {0}" "".format(e, model))
[ "def", "remove_core_element", "(", "self", ",", "model", ")", ":", "gv_name", "=", "model", "if", "self", ".", "global_variable_is_editable", "(", "gv_name", ",", "\"Deletion\"", ")", ":", "try", ":", "self", ".", "model", ".", "global_variable_manager", ".", "delete_variable", "(", "gv_name", ")", "except", "AttributeError", "as", "e", ":", "logger", ".", "warning", "(", "\"The respective global variable '{1}' couldn't be removed. -> {0}\"", "\"\"", ".", "format", "(", "e", ",", "model", ")", ")" ]
45.769231
22.307692
def dropna(self, axis=0, how='any', thresh=None, subset=None, inplace=False): """ Remove missing values. See the :ref:`User Guide <missing_data>` for more on which values are considered missing, and how to work with missing data. Parameters ---------- axis : {0 or 'index', 1 or 'columns'}, default 0 Determine if rows or columns which contain missing values are removed. * 0, or 'index' : Drop rows which contain missing values. * 1, or 'columns' : Drop columns which contain missing value. .. deprecated:: 0.23.0 Pass tuple or list to drop on multiple axes. Only a single axis is allowed. how : {'any', 'all'}, default 'any' Determine if row or column is removed from DataFrame, when we have at least one NA or all NA. * 'any' : If any NA values are present, drop that row or column. * 'all' : If all values are NA, drop that row or column. thresh : int, optional Require that many non-NA values. subset : array-like, optional Labels along other axis to consider, e.g. if you are dropping rows these would be a list of columns to include. inplace : bool, default False If True, do operation inplace and return None. Returns ------- DataFrame DataFrame with NA entries dropped from it. See Also -------- DataFrame.isna: Indicate missing values. DataFrame.notna : Indicate existing (non-missing) values. DataFrame.fillna : Replace missing values. Series.dropna : Drop missing values. Index.dropna : Drop missing indices. Examples -------- >>> df = pd.DataFrame({"name": ['Alfred', 'Batman', 'Catwoman'], ... "toy": [np.nan, 'Batmobile', 'Bullwhip'], ... "born": [pd.NaT, pd.Timestamp("1940-04-25"), ... pd.NaT]}) >>> df name toy born 0 Alfred NaN NaT 1 Batman Batmobile 1940-04-25 2 Catwoman Bullwhip NaT Drop the rows where at least one element is missing. >>> df.dropna() name toy born 1 Batman Batmobile 1940-04-25 Drop the columns where at least one element is missing. >>> df.dropna(axis='columns') name 0 Alfred 1 Batman 2 Catwoman Drop the rows where all elements are missing. >>> df.dropna(how='all') name toy born 0 Alfred NaN NaT 1 Batman Batmobile 1940-04-25 2 Catwoman Bullwhip NaT Keep only the rows with at least 2 non-NA values. >>> df.dropna(thresh=2) name toy born 1 Batman Batmobile 1940-04-25 2 Catwoman Bullwhip NaT Define in which columns to look for missing values. >>> df.dropna(subset=['name', 'born']) name toy born 1 Batman Batmobile 1940-04-25 Keep the DataFrame with valid entries in the same variable. 
>>> df.dropna(inplace=True) >>> df name toy born 1 Batman Batmobile 1940-04-25 """ inplace = validate_bool_kwarg(inplace, 'inplace') if isinstance(axis, (tuple, list)): # GH20987 msg = ("supplying multiple axes to axis is deprecated and " "will be removed in a future version.") warnings.warn(msg, FutureWarning, stacklevel=2) result = self for ax in axis: result = result.dropna(how=how, thresh=thresh, subset=subset, axis=ax) else: axis = self._get_axis_number(axis) agg_axis = 1 - axis agg_obj = self if subset is not None: ax = self._get_axis(agg_axis) indices = ax.get_indexer_for(subset) check = indices == -1 if check.any(): raise KeyError(list(np.compress(check, subset))) agg_obj = self.take(indices, axis=agg_axis) count = agg_obj.count(axis=agg_axis) if thresh is not None: mask = count >= thresh elif how == 'any': mask = count == len(agg_obj._get_axis(agg_axis)) elif how == 'all': mask = count > 0 else: if how is not None: raise ValueError('invalid how option: {h}'.format(h=how)) else: raise TypeError('must specify how or thresh') result = self.loc(axis=axis)[mask] if inplace: self._update_inplace(result) else: return result
[ "def", "dropna", "(", "self", ",", "axis", "=", "0", ",", "how", "=", "'any'", ",", "thresh", "=", "None", ",", "subset", "=", "None", ",", "inplace", "=", "False", ")", ":", "inplace", "=", "validate_bool_kwarg", "(", "inplace", ",", "'inplace'", ")", "if", "isinstance", "(", "axis", ",", "(", "tuple", ",", "list", ")", ")", ":", "# GH20987", "msg", "=", "(", "\"supplying multiple axes to axis is deprecated and \"", "\"will be removed in a future version.\"", ")", "warnings", ".", "warn", "(", "msg", ",", "FutureWarning", ",", "stacklevel", "=", "2", ")", "result", "=", "self", "for", "ax", "in", "axis", ":", "result", "=", "result", ".", "dropna", "(", "how", "=", "how", ",", "thresh", "=", "thresh", ",", "subset", "=", "subset", ",", "axis", "=", "ax", ")", "else", ":", "axis", "=", "self", ".", "_get_axis_number", "(", "axis", ")", "agg_axis", "=", "1", "-", "axis", "agg_obj", "=", "self", "if", "subset", "is", "not", "None", ":", "ax", "=", "self", ".", "_get_axis", "(", "agg_axis", ")", "indices", "=", "ax", ".", "get_indexer_for", "(", "subset", ")", "check", "=", "indices", "==", "-", "1", "if", "check", ".", "any", "(", ")", ":", "raise", "KeyError", "(", "list", "(", "np", ".", "compress", "(", "check", ",", "subset", ")", ")", ")", "agg_obj", "=", "self", ".", "take", "(", "indices", ",", "axis", "=", "agg_axis", ")", "count", "=", "agg_obj", ".", "count", "(", "axis", "=", "agg_axis", ")", "if", "thresh", "is", "not", "None", ":", "mask", "=", "count", ">=", "thresh", "elif", "how", "==", "'any'", ":", "mask", "=", "count", "==", "len", "(", "agg_obj", ".", "_get_axis", "(", "agg_axis", ")", ")", "elif", "how", "==", "'all'", ":", "mask", "=", "count", ">", "0", "else", ":", "if", "how", "is", "not", "None", ":", "raise", "ValueError", "(", "'invalid how option: {h}'", ".", "format", "(", "h", "=", "how", ")", ")", "else", ":", "raise", "TypeError", "(", "'must specify how or thresh'", ")", "result", "=", "self", ".", "loc", "(", "axis", "=", "axis", ")", "[", "mask", "]", "if", "inplace", ":", "self", ".", "_update_inplace", "(", "result", ")", "else", ":", "return", "result" ]
33.722973
19.52027
def execute(tokens): """ Perform the actions described by the input tokens. """ if not validate_rc(): print('Your .vacationrc file has errors!') echo_vacation_rc() return for action, value in tokens: if action == 'show': show() elif action == 'log': log_vacation_days() elif action == 'echo': echo_vacation_rc() elif action == 'take': take(value) elif action == 'cancel': cancel(value) elif action == 'setrate': setrate(value) elif action == 'setdays': setdays(value)
[ "def", "execute", "(", "tokens", ")", ":", "if", "not", "validate_rc", "(", ")", ":", "print", "(", "'Your .vacationrc file has errors!'", ")", "echo_vacation_rc", "(", ")", "return", "for", "action", ",", "value", "in", "tokens", ":", "if", "action", "==", "'show'", ":", "show", "(", ")", "elif", "action", "==", "'log'", ":", "log_vacation_days", "(", ")", "elif", "action", "==", "'echo'", ":", "echo_vacation_rc", "(", ")", "elif", "action", "==", "'take'", ":", "take", "(", "value", ")", "elif", "action", "==", "'cancel'", ":", "cancel", "(", "value", ")", "elif", "action", "==", "'setrate'", ":", "setrate", "(", "value", ")", "elif", "action", "==", "'setdays'", ":", "setdays", "(", "value", ")" ]
28.318182
13.590909
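The if/elif ladder in execute above is a natural fit for table dispatch; a hedged refactoring sketch that keeps the same handler names and wraps the zero-argument handlers so every entry takes value:

HANDLERS = {
    'show':    lambda value: show(),
    'log':     lambda value: log_vacation_days(),
    'echo':    lambda value: echo_vacation_rc(),
    'take':    take,
    'cancel':  cancel,
    'setrate': setrate,
    'setdays': setdays,
}

for action, value in tokens:
    # Note: the original ladder silently ignores unknown actions; to match
    # that, use HANDLERS.get(action, lambda value: None)(value) instead.
    HANDLERS[action](value)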
def send_offer_email(self, offer_id, email_dict):
    """
    Sends an offer by email
    If you want to send your email to more than one person, do:
    'recipients': {'to': ['[email protected]', '[email protected]']}

    :param offer_id: the offer id
    :param email_dict: the email dict
    :return dict
    """
    return self._create_post_request(
        resource=OFFERS,
        billomat_id=offer_id,
        send_data=email_dict,
        command=EMAIL,
    )
[ "def", "send_offer_email", "(", "self", ",", "offer_id", ",", "email_dict", ")", ":", "return", "self", ".", "_create_post_request", "(", "resource", "=", "OFFERS", ",", "billomat_id", "=", "offer_id", ",", "send_data", "=", "email_dict", ",", "command", "=", "EMAIL", ",", ")" ]
32.3125
13.5625
def create_oracle(self, **kwargs): """ :rtype: Engine """ return self._ce( self._ccs(self.DialectAndDriver.oracle), **kwargs )
[ "def", "create_oracle", "(", "self", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_ce", "(", "self", ".", "_ccs", "(", "self", ".", "DialectAndDriver", ".", "oracle", ")", ",", "*", "*", "kwargs", ")" ]
24.571429
13.142857
def createEditor( self, parent, option, index ): """ Overloads the create editor method to assign the parent's completer to any line edit created. :param parent | <QWidget> option | <QStyleOption> index | <QModelIndex> :return <QWidget> || None """ multi_tag = projexui.ancestor(self, XMultiTagEdit) edit = QLineEdit(parent) edit.setFrame(False) edit.setCompleter(multi_tag.completer()) edit.installEventFilter(multi_tag) return edit
[ "def", "createEditor", "(", "self", ",", "parent", ",", "option", ",", "index", ")", ":", "multi_tag", "=", "projexui", ".", "ancestor", "(", "self", ",", "XMultiTagEdit", ")", "edit", "=", "QLineEdit", "(", "parent", ")", "edit", ".", "setFrame", "(", "False", ")", "edit", ".", "setCompleter", "(", "multi_tag", ".", "completer", "(", ")", ")", "edit", ".", "installEventFilter", "(", "multi_tag", ")", "return", "edit" ]
32.368421
13.736842