Columns:
  text           string   (lengths 89 to 104k)
  code_tokens    list
  avg_line_len   float64  (7.91 to 980)
  score          float64  (0 to 630)
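Each record below follows that schema. As a minimal sketch of reading one record programmatically — the file name "samples.jsonl" is hypothetical, standing in for any JSON-lines dump with these four columns:

import json

# Hypothetical file name; any JSON-lines dump with the columns
# text / code_tokens / avg_line_len / score works the same way.
with open("samples.jsonl") as fh:
    record = json.loads(fh.readline())

print(record["avg_line_len"], record["score"])  # the two float64 columns
print(record["text"].splitlines()[0])           # first line of the function source
print(record["code_tokens"][:8])                # tokenized form of the same code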
def create_manifest(self, cart_name, manifests):
    """
    `cart_name` - Name of this release cart
    `manifests` - a list of manifest files
    """
    cart = juicer.common.Cart.Cart(cart_name)
    for manifest in manifests:
        cart.add_from_manifest(manifest, self.connectors)
    cart.save()
    return cart
[ "def", "create_manifest", "(", "self", ",", "cart_name", ",", "manifests", ")", ":", "cart", "=", "juicer", ".", "common", ".", "Cart", ".", "Cart", "(", "cart_name", ")", "for", "manifest", "in", "manifests", ":", "cart", ".", "add_from_manifest", "(", "manifest", ",", "self", ".", "connectors", ")", "cart", ".", "save", "(", ")", "return", "cart" ]
avg_line_len: 28.75
score: 14.916667
def is_condition_met(self, hand, win_tile, melds, is_tsumo):
    """
    Three closed pon sets; the other sets need not be closed
    :param hand: list of hand's sets
    :param win_tile: 136 tiles format
    :param melds: list of Meld objects
    :param is_tsumo:
    :return: true|false
    """
    win_tile //= 4

    open_sets = [x.tiles_34 for x in melds if x.opened]

    chi_sets = [x for x in hand if (is_chi(x) and win_tile in x and x not in open_sets)]
    pon_sets = [x for x in hand if is_pon(x)]

    closed_pon_sets = []
    for item in pon_sets:
        if item in open_sets:
            continue

        # if we do the ron on a shanpon wait, our pon will be considered open
        # and it is not a 789999 set
        if win_tile in item and not is_tsumo and not len(chi_sets):
            continue

        closed_pon_sets.append(item)

    return len(closed_pon_sets) == 3
[ "def", "is_condition_met", "(", "self", ",", "hand", ",", "win_tile", ",", "melds", ",", "is_tsumo", ")", ":", "win_tile", "//=", "4", "open_sets", "=", "[", "x", ".", "tiles_34", "for", "x", "in", "melds", "if", "x", ".", "opened", "]", "chi_sets", "=", "[", "x", "for", "x", "in", "hand", "if", "(", "is_chi", "(", "x", ")", "and", "win_tile", "in", "x", "and", "x", "not", "in", "open_sets", ")", "]", "pon_sets", "=", "[", "x", "for", "x", "in", "hand", "if", "is_pon", "(", "x", ")", "]", "closed_pon_sets", "=", "[", "]", "for", "item", "in", "pon_sets", ":", "if", "item", "in", "open_sets", ":", "continue", "# if we do the ron on syanpon wait our pon will be consider as open", "# and it is not 789999 set", "if", "win_tile", "in", "item", "and", "not", "is_tsumo", "and", "not", "len", "(", "chi_sets", ")", ":", "continue", "closed_pon_sets", ".", "append", "(", "item", ")", "return", "len", "(", "closed_pon_sets", ")", "==", "3" ]
avg_line_len: 32.689655
score: 18.965517
def _support(self, caller):
    """Helper callback."""
    markdown_content = caller()
    html_content = markdown.markdown(
        markdown_content,
        extensions=[
            "markdown.extensions.fenced_code",
            CodeHiliteExtension(css_class="highlight"),
            "markdown.extensions.tables",
        ],
    )
    return html_content
[ "def", "_support", "(", "self", ",", "caller", ")", ":", "markdown_content", "=", "caller", "(", ")", "html_content", "=", "markdown", ".", "markdown", "(", "markdown_content", ",", "extensions", "=", "[", "\"markdown.extensions.fenced_code\"", ",", "CodeHiliteExtension", "(", "css_class", "=", "\"highlight\"", ")", ",", "\"markdown.extensions.tables\"", ",", "]", ",", ")", "return", "html_content" ]
avg_line_len: 32.5
score: 12.5
def inner(tensor0: BKTensor, tensor1: BKTensor) -> BKTensor:
    """Return the inner product between two tensors"""
    # Note: Relying on fact that vdot flattens arrays
    return np.vdot(tensor0, tensor1)
[ "def", "inner", "(", "tensor0", ":", "BKTensor", ",", "tensor1", ":", "BKTensor", ")", "->", "BKTensor", ":", "# Note: Relying on fact that vdot flattens arrays", "return", "np", ".", "vdot", "(", "tensor0", ",", "tensor1", ")" ]
avg_line_len: 50.75
score: 9.25
def DOM_querySelector(self, nodeId, selector):
    """
    Function path: DOM.querySelector
        Domain: DOM
        Method name: querySelector

    Parameters:
        Required arguments:
            'nodeId' (type: NodeId) -> Id of the node to query upon.
            'selector' (type: string) -> Selector string.
    Returns:
        'nodeId' (type: NodeId) -> Query selector result.

    Description: Executes <code>querySelector</code> on a given node.
    """
    assert isinstance(selector, (str,)), (
        "Argument 'selector' must be of type '['str']'. Received type: '%s'"
        % type(selector)
    )
    subdom_funcs = self.synchronous_command(
        'DOM.querySelector', nodeId=nodeId, selector=selector
    )
    return subdom_funcs
[ "def", "DOM_querySelector", "(", "self", ",", "nodeId", ",", "selector", ")", ":", "assert", "isinstance", "(", "selector", ",", "(", "str", ",", ")", ")", ",", "\"Argument 'selector' must be of type '['str']'. Received type: '%s'\"", "%", "type", "(", "selector", ")", "subdom_funcs", "=", "self", ".", "synchronous_command", "(", "'DOM.querySelector'", ",", "nodeId", "=", "nodeId", ",", "selector", "=", "selector", ")", "return", "subdom_funcs" ]
avg_line_len: 32.190476
score: 19.047619
def fastp_filtered_reads_chart(self):
    """ Function to generate the fastp filtered reads bar plot """
    # Specify the order of the different possible categories
    keys = OrderedDict()
    keys['filtering_result_passed_filter_reads'] = {'name': 'Passed Filter'}
    keys['filtering_result_low_quality_reads'] = {'name': 'Low Quality'}
    keys['filtering_result_too_many_N_reads'] = {'name': 'Too Many N'}
    keys['filtering_result_too_short_reads'] = {'name': 'Too short'}

    # Config for the plot
    pconfig = {
        'id': 'fastp_filtered_reads_plot',
        'title': 'Fastp: Filtered Reads',
        'ylab': '# Reads',
        'cpswitch_counts_label': 'Number of Reads',
        'hide_zero_cats': False,
    }
    return bargraph.plot(self.fastp_data, keys, pconfig)
[ "def", "fastp_filtered_reads_chart", "(", "self", ")", ":", "# Specify the order of the different possible categories", "keys", "=", "OrderedDict", "(", ")", "keys", "[", "'filtering_result_passed_filter_reads'", "]", "=", "{", "'name'", ":", "'Passed Filter'", "}", "keys", "[", "'filtering_result_low_quality_reads'", "]", "=", "{", "'name'", ":", "'Low Quality'", "}", "keys", "[", "'filtering_result_too_many_N_reads'", "]", "=", "{", "'name'", ":", "'Too Many N'", "}", "keys", "[", "'filtering_result_too_short_reads'", "]", "=", "{", "'name'", ":", "'Too short'", "}", "# Config for the plot", "pconfig", "=", "{", "'id'", ":", "'fastp_filtered_reads_plot'", ",", "'title'", ":", "'Fastp: Filtered Reads'", ",", "'ylab'", ":", "'# Reads'", ",", "'cpswitch_counts_label'", ":", "'Number of Reads'", ",", "'hide_zero_cats'", ":", "False", ",", "}", "return", "bargraph", ".", "plot", "(", "self", ".", "fastp_data", ",", "keys", ",", "pconfig", ")" ]
avg_line_len: 47.277778
score: 20.277778
def _set_learning_mode(self, v, load=False):
    """
    Setter method for learning_mode, mapped from YANG variable /mac_address_table/learning_mode (enumeration)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_learning_mode is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_learning_mode() directly.

    YANG Description: Conversational Learning Mode
    """
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v, base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'conversational': {'value': 1}},), is_leaf=True, yang_name="learning-mode", rest_name="learning-mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Conversational Learning Mode', u'cli-full-command': None, u'callpoint': u'learning-mode-callpoint', u'display-when': u'/vcsmode/vcs-mode = "true"'}}, namespace='urn:brocade.com:mgmt:brocade-mac-address-table', defining_module='brocade-mac-address-table', yang_type='enumeration', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """learning_mode must be of a type compatible with enumeration""",
            'defined-type': "brocade-mac-address-table:enumeration",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'conversational': {'value': 1}},), is_leaf=True, yang_name="learning-mode", rest_name="learning-mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Conversational Learning Mode', u'cli-full-command': None, u'callpoint': u'learning-mode-callpoint', u'display-when': u'/vcsmode/vcs-mode = "true"'}}, namespace='urn:brocade.com:mgmt:brocade-mac-address-table', defining_module='brocade-mac-address-table', yang_type='enumeration', is_config=True)""",
        })

    self.__learning_mode = t
    if hasattr(self, '_set'):
        self._set()
[ "def", "_set_learning_mode", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "RestrictedClassType", "(", "base_type", "=", "unicode", ",", "restriction_type", "=", "\"dict_key\"", ",", "restriction_arg", "=", "{", "u'conversational'", ":", "{", "'value'", ":", "1", "}", "}", ",", ")", ",", "is_leaf", "=", "True", ",", "yang_name", "=", "\"learning-mode\"", ",", "rest_name", "=", "\"learning-mode\"", ",", "parent", "=", "self", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "True", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'info'", ":", "u'Conversational Learning Mode'", ",", "u'cli-full-command'", ":", "None", ",", "u'callpoint'", ":", "u'learning-mode-callpoint'", ",", "u'display-when'", ":", "u'/vcsmode/vcs-mode = \"true\"'", "}", "}", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-mac-address-table'", ",", "defining_module", "=", "'brocade-mac-address-table'", ",", "yang_type", "=", "'enumeration'", ",", "is_config", "=", "True", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"learning_mode must be of a type compatible with enumeration\"\"\"", ",", "'defined-type'", ":", "\"brocade-mac-address-table:enumeration\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type=\"dict_key\", restriction_arg={u'conversational': {'value': 1}},), is_leaf=True, yang_name=\"learning-mode\", rest_name=\"learning-mode\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Conversational Learning Mode', u'cli-full-command': None, u'callpoint': u'learning-mode-callpoint', u'display-when': u'/vcsmode/vcs-mode = \"true\"'}}, namespace='urn:brocade.com:mgmt:brocade-mac-address-table', defining_module='brocade-mac-address-table', yang_type='enumeration', is_config=True)\"\"\"", ",", "}", ")", "self", ".", "__learning_mode", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
avg_line_len: 95.375
score: 46.625
def _detach_received(self, error):
    """Callback called when a link DETACH frame is received.
    This callback will process the received DETACH error to determine if
    the link is recoverable or whether it should be shutdown.

    :param error: The error information from the detach frame.
    :type error: ~uamqp.errors.ErrorResponse
    """
    # pylint: disable=protected-access
    if error:
        condition = error.condition
        description = error.description
        info = error.info
    else:
        condition = b"amqp:unknown-error"
        description = None
        info = None
    self._error = errors._process_link_error(self.error_policy, condition, description, info)
    _logger.info(
        "Received Link detach event: %r\nLink: %r\nDescription: %r"
        "\nDetails: %r\nRetryable: %r\nConnection: %r",
        condition, self.name, description, info,
        self._error.action.retry,
        self._session._connection.container_id)
[ "def", "_detach_received", "(", "self", ",", "error", ")", ":", "# pylint: disable=protected-access", "if", "error", ":", "condition", "=", "error", ".", "condition", "description", "=", "error", ".", "description", "info", "=", "error", ".", "info", "else", ":", "condition", "=", "b\"amqp:unknown-error\"", "description", "=", "None", "info", "=", "None", "self", ".", "_error", "=", "errors", ".", "_process_link_error", "(", "self", ".", "error_policy", ",", "condition", ",", "description", ",", "info", ")", "_logger", ".", "info", "(", "\"Received Link detach event: %r\\nLink: %r\\nDescription: %r\"", "\"\\nDetails: %r\\nRetryable: %r\\nConnection: %r\"", ",", "condition", ",", "self", ".", "name", ",", "description", ",", "info", ",", "self", ".", "_error", ".", "action", ".", "retry", ",", "self", ".", "_session", ".", "_connection", ".", "container_id", ")" ]
avg_line_len: 45.434783
score: 19.565217
def called_with(self, *args, **kwargs):
    """
    Before evaluating subsequent predicates, calls :attr:`subject` with
    given arguments (but unlike a direct call, catches and transforms
    any exceptions that arise during the call).
    """
    self._args = args
    self._kwargs = kwargs
    self._call_subject = True
    return CallableInspector(self)
[ "def", "called_with", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "_args", "=", "args", "self", ".", "_kwargs", "=", "kwargs", "self", ".", "_call_subject", "=", "True", "return", "CallableInspector", "(", "self", ")" ]
avg_line_len: 42
score: 16.444444
def inquire_property(name, doc=None):
    """Creates a property based on an inquire result

    This method creates a property that calls the
    :python:`_inquire` method, and returns the value of the
    requested information.

    Args:
        name (str): the name of the 'inquire' result information

    Returns:
        property: the created property
    """
    def inquire_property(self):
        if not self._started:
            msg = ("Cannot read {0} from a security context whose "
                   "establishment has not yet been started.")
            # format in the attribute name; the original left the
            # placeholder unfilled
            raise AttributeError(msg.format(name))
        return getattr(self._inquire(**{name: True}), name)

    return property(inquire_property, doc=doc)
[ "def", "inquire_property", "(", "name", ",", "doc", "=", "None", ")", ":", "def", "inquire_property", "(", "self", ")", ":", "if", "not", "self", ".", "_started", ":", "msg", "=", "(", "\"Cannot read {0} from a security context whose \"", "\"establishment has not yet been started.\"", ")", "raise", "AttributeError", "(", "msg", ")", "return", "getattr", "(", "self", ".", "_inquire", "(", "*", "*", "{", "name", ":", "True", "}", ")", ",", "name", ")", "return", "property", "(", "inquire_property", ",", "doc", "=", "doc", ")" ]
avg_line_len: 29.652174
score: 20.217391
def _perform_unbinds(self, binds):
    """
    Unbinds queues from exchanges.

    Parameters
    ----------
    binds: list of dicts
        A list of dicts with the following keys:
            queue: string - name of the queue to unbind
            exchange: string - name of the exchange to unbind from
            routing_key: string - routing key used for this bind
    """
    for bind in binds:
        self.logger.debug("Unbinding queue {0} from exchange {1} with key {2}".format(
            bind['queue'], bind['exchange'], bind['routing_key']))
        self.channel.queue_unbind(**bind)
[ "def", "_perform_unbinds", "(", "self", ",", "binds", ")", ":", "for", "bind", "in", "binds", ":", "self", ".", "logger", ".", "debug", "(", "\"Unbinding queue {0} from exchange {1} with key {2}\"", ".", "format", "(", "bind", "[", "'queue'", "]", ",", "bind", "[", "'exchange'", "]", ",", "bind", "[", "'routing_key'", "]", ")", ")", "self", ".", "channel", ".", "queue_unbind", "(", "*", "*", "bind", ")" ]
avg_line_len: 34
score: 18.933333
def to_spans(self):
    "Convert the tree to a set of nonterms and spans."
    s = set()
    self._convert_to_spans(self.tree, 1, s)
    return s
[ "def", "to_spans", "(", "self", ")", ":", "s", "=", "set", "(", ")", "self", ".", "_convert_to_spans", "(", "self", ".", "tree", ",", "1", ",", "s", ")", "return", "s" ]
avg_line_len: 31.4
score: 18.6
def QA_SU_save_stock_min(client=DATABASE, ui_log=None, ui_progress=None):
    """save stock_min

    Keyword Arguments:
        client {[type]} -- [description] (default: {DATABASE})
    """
    stock_list = QA_fetch_get_stock_list().code.unique().tolist()
    coll = client.stock_min
    coll.create_index(
        [
            ('code', pymongo.ASCENDING),
            ('time_stamp', pymongo.ASCENDING),
            ('date_stamp', pymongo.ASCENDING)
        ]
    )
    err = []

    def __saving_work(code, coll):
        QA_util_log_info(
            '##JOB03 Now Saving STOCK_MIN ==== {}'.format(str(code)),
            ui_log=ui_log
        )
        try:
            for type in ['1min', '5min', '15min', '30min', '60min']:
                ref_ = coll.find({'code': str(code)[0:6], 'type': type})
                end_time = str(now_time())[0:19]
                if ref_.count() > 0:
                    start_time = ref_[ref_.count() - 1]['datetime']
                    QA_util_log_info(
                        '##JOB03.{} Now Saving {} from {} to {} =={} '.format(
                            ['1min', '5min', '15min', '30min', '60min'].index(type),
                            str(code),
                            start_time,
                            end_time,
                            type
                        ),
                        ui_log=ui_log
                    )
                    if start_time != end_time:
                        __data = QA_fetch_get_stock_min(
                            str(code), start_time, end_time, type
                        )
                        if len(__data) > 1:
                            coll.insert_many(
                                QA_util_to_json_from_pandas(__data)[1::]
                            )
                else:
                    start_time = '2015-01-01'
                    QA_util_log_info(
                        '##JOB03.{} Now Saving {} from {} to {} =={} '.format(
                            ['1min', '5min', '15min', '30min', '60min'].index(type),
                            str(code),
                            start_time,
                            end_time,
                            type
                        ),
                        ui_log=ui_log
                    )
                    if start_time != end_time:
                        __data = QA_fetch_get_stock_min(
                            str(code), start_time, end_time, type
                        )
                        if len(__data) > 1:
                            coll.insert_many(
                                QA_util_to_json_from_pandas(__data)
                            )
        except Exception as e:
            QA_util_log_info(e, ui_log=ui_log)
            err.append(code)
            QA_util_log_info(err, ui_log=ui_log)

    executor = ThreadPoolExecutor(max_workers=4)
    # executor.map((__saving_work, stock_list[i_], coll), URLS)
    res = {
        executor.submit(__saving_work, stock_list[i_], coll)
        for i_ in range(len(stock_list))
    }
    count = 0
    for i_ in concurrent.futures.as_completed(res):
        QA_util_log_info(
            'The {} of Total {}'.format(count, len(stock_list)),
            ui_log=ui_log
        )
        strProgress = 'DOWNLOAD PROGRESS {} '.format(
            str(float(count / len(stock_list) * 100))[0:4] + '%'
        )
        intProgress = int(count / len(stock_list) * 10000.0)
        QA_util_log_info(
            strProgress,
            ui_log,
            ui_progress=ui_progress,
            ui_progress_int_value=intProgress
        )
        count = count + 1
    if len(err) < 1:
        QA_util_log_info('SUCCESS', ui_log=ui_log)
    else:
        QA_util_log_info(' ERROR CODE \n ', ui_log=ui_log)
        QA_util_log_info(err, ui_log=ui_log)
[ "def", "QA_SU_save_stock_min", "(", "client", "=", "DATABASE", ",", "ui_log", "=", "None", ",", "ui_progress", "=", "None", ")", ":", "stock_list", "=", "QA_fetch_get_stock_list", "(", ")", ".", "code", ".", "unique", "(", ")", ".", "tolist", "(", ")", "coll", "=", "client", ".", "stock_min", "coll", ".", "create_index", "(", "[", "(", "'code'", ",", "pymongo", ".", "ASCENDING", ")", ",", "(", "'time_stamp'", ",", "pymongo", ".", "ASCENDING", ")", ",", "(", "'date_stamp'", ",", "pymongo", ".", "ASCENDING", ")", "]", ")", "err", "=", "[", "]", "def", "__saving_work", "(", "code", ",", "coll", ")", ":", "QA_util_log_info", "(", "'##JOB03 Now Saving STOCK_MIN ==== {}'", ".", "format", "(", "str", "(", "code", ")", ")", ",", "ui_log", "=", "ui_log", ")", "try", ":", "for", "type", "in", "[", "'1min'", ",", "'5min'", ",", "'15min'", ",", "'30min'", ",", "'60min'", "]", ":", "ref_", "=", "coll", ".", "find", "(", "{", "'code'", ":", "str", "(", "code", ")", "[", "0", ":", "6", "]", ",", "'type'", ":", "type", "}", ")", "end_time", "=", "str", "(", "now_time", "(", ")", ")", "[", "0", ":", "19", "]", "if", "ref_", ".", "count", "(", ")", ">", "0", ":", "start_time", "=", "ref_", "[", "ref_", ".", "count", "(", ")", "-", "1", "]", "[", "'datetime'", "]", "QA_util_log_info", "(", "'##JOB03.{} Now Saving {} from {} to {} =={} '", ".", "format", "(", "[", "'1min'", ",", "'5min'", ",", "'15min'", ",", "'30min'", ",", "'60min'", "]", ".", "index", "(", "type", ")", ",", "str", "(", "code", ")", ",", "start_time", ",", "end_time", ",", "type", ")", ",", "ui_log", "=", "ui_log", ")", "if", "start_time", "!=", "end_time", ":", "__data", "=", "QA_fetch_get_stock_min", "(", "str", "(", "code", ")", ",", "start_time", ",", "end_time", ",", "type", ")", "if", "len", "(", "__data", ")", ">", "1", ":", "coll", ".", "insert_many", "(", "QA_util_to_json_from_pandas", "(", "__data", ")", "[", "1", ":", ":", "]", ")", "else", ":", "start_time", "=", "'2015-01-01'", "QA_util_log_info", "(", "'##JOB03.{} Now Saving {} from {} to {} =={} '", ".", "format", "(", "[", "'1min'", ",", "'5min'", ",", "'15min'", ",", "'30min'", ",", "'60min'", "]", ".", "index", "(", "type", ")", ",", "str", "(", "code", ")", ",", "start_time", ",", "end_time", ",", "type", ")", ",", "ui_log", "=", "ui_log", ")", "if", "start_time", "!=", "end_time", ":", "__data", "=", "QA_fetch_get_stock_min", "(", "str", "(", "code", ")", ",", "start_time", ",", "end_time", ",", "type", ")", "if", "len", "(", "__data", ")", ">", "1", ":", "coll", ".", "insert_many", "(", "QA_util_to_json_from_pandas", "(", "__data", ")", ")", "except", "Exception", "as", "e", ":", "QA_util_log_info", "(", "e", ",", "ui_log", "=", "ui_log", ")", "err", ".", "append", "(", "code", ")", "QA_util_log_info", "(", "err", ",", "ui_log", "=", "ui_log", ")", "executor", "=", "ThreadPoolExecutor", "(", "max_workers", "=", "4", ")", "# executor.map((__saving_work, stock_list[i_], coll),URLS)", "res", "=", "{", "executor", ".", "submit", "(", "__saving_work", ",", "stock_list", "[", "i_", "]", ",", "coll", ")", "for", "i_", "in", "range", "(", "len", "(", "stock_list", ")", ")", "}", "count", "=", "0", "for", "i_", "in", "concurrent", ".", "futures", ".", "as_completed", "(", "res", ")", ":", "QA_util_log_info", "(", "'The {} of Total {}'", ".", "format", "(", "count", ",", "len", "(", "stock_list", ")", ")", ",", "ui_log", "=", "ui_log", ")", "strProgress", "=", "'DOWNLOAD PROGRESS {} '", ".", "format", "(", "str", "(", "float", "(", "count", "/", "len", "(", "stock_list", ")", 
"*", "100", ")", ")", "[", "0", ":", "4", "]", "+", "'%'", ")", "intProgress", "=", "int", "(", "count", "/", "len", "(", "stock_list", ")", "*", "10000.0", ")", "QA_util_log_info", "(", "strProgress", ",", "ui_log", ",", "ui_progress", "=", "ui_progress", ",", "ui_progress_int_value", "=", "intProgress", ")", "count", "=", "count", "+", "1", "if", "len", "(", "err", ")", "<", "1", ":", "QA_util_log_info", "(", "'SUCCESS'", ",", "ui_log", "=", "ui_log", ")", "else", ":", "QA_util_log_info", "(", "' ERROR CODE \\n '", ",", "ui_log", "=", "ui_log", ")", "QA_util_log_info", "(", "err", ",", "ui_log", "=", "ui_log", ")" ]
avg_line_len: 34.704918
score: 14.655738
def _posify_mask_subindexer(index):
    """Convert masked indices in a flat array to the nearest unmasked index.

    Parameters
    ----------
    index : np.ndarray
        One dimensional ndarray with dtype=int.

    Returns
    -------
    np.ndarray
        One dimensional ndarray with all values equal to -1 replaced by an
        adjacent non-masked element.
    """
    masked = index == -1
    unmasked_locs = np.flatnonzero(~masked)
    if not unmasked_locs.size:
        # indexing unmasked_locs is invalid
        return np.zeros_like(index)
    masked_locs = np.flatnonzero(masked)
    prev_value = np.maximum(0, np.searchsorted(unmasked_locs, masked_locs) - 1)
    new_index = index.copy()
    new_index[masked_locs] = index[unmasked_locs[prev_value]]
    return new_index
[ "def", "_posify_mask_subindexer", "(", "index", ")", ":", "masked", "=", "index", "==", "-", "1", "unmasked_locs", "=", "np", ".", "flatnonzero", "(", "~", "masked", ")", "if", "not", "unmasked_locs", ".", "size", ":", "# indexing unmasked_locs is invalid", "return", "np", ".", "zeros_like", "(", "index", ")", "masked_locs", "=", "np", ".", "flatnonzero", "(", "masked", ")", "prev_value", "=", "np", ".", "maximum", "(", "0", ",", "np", ".", "searchsorted", "(", "unmasked_locs", ",", "masked_locs", ")", "-", "1", ")", "new_index", "=", "index", ".", "copy", "(", ")", "new_index", "[", "masked_locs", "]", "=", "index", "[", "unmasked_locs", "[", "prev_value", "]", "]", "return", "new_index" ]
avg_line_len: 31.833333
score: 17.208333
def do_alarm_definition_delete(mc, args):
    '''Delete the alarm definition.'''
    fields = {}
    fields['alarm_id'] = args.id
    try:
        mc.alarm_definitions.delete(**fields)
    except (osc_exc.ClientException, k_exc.HttpError) as he:
        raise osc_exc.CommandError('%s\n%s' % (he.message, he.details))
    else:
        print('Successfully deleted alarm definition')
[ "def", "do_alarm_definition_delete", "(", "mc", ",", "args", ")", ":", "fields", "=", "{", "}", "fields", "[", "'alarm_id'", "]", "=", "args", ".", "id", "try", ":", "mc", ".", "alarm_definitions", ".", "delete", "(", "*", "*", "fields", ")", "except", "(", "osc_exc", ".", "ClientException", ",", "k_exc", ".", "HttpError", ")", "as", "he", ":", "raise", "osc_exc", ".", "CommandError", "(", "'%s\\n%s'", "%", "(", "he", ".", "message", ",", "he", ".", "details", ")", ")", "else", ":", "print", "(", "'Successfully deleted alarm definition'", ")" ]
avg_line_len: 37.3
score: 16.9
def date(f, *args, **kwargs):
    """Automatically log progress on function entry and exit with date- and
    time-stamp. Default logging value: info.

    *Logging with values contained in the parameters of the decorated function*

    Message (args[0]) may be a string to be formatted with parameters passed
    to the decorated function. Each '{varname}' will be replaced by the value
    of the parameter of the same name.

    *Keyword parameters*

    - log :: integer
        - Specifies a custom level of logging to pass to the active logger.
        - Default: INFO

    *Exceptions:*

    - IndexError and ValueError
        - will be returned if *args contains a string that does not correspond
          to a parameter name of the decorated function, or if there are more
          '{}'s than there are *args.
    """
    kwargs.update({'print_time': True})
    return _stump(f, *args, **kwargs)
[ "def", "date", "(", "f", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "kwargs", ".", "update", "(", "{", "'print_time'", ":", "True", "}", ")", "return", "_stump", "(", "f", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
avg_line_len: 38
score: 22.086957
def read_seg(self, parc_type='aparc'):
    """Read the MRI segmentation.

    Parameters
    ----------
    parc_type : str
        'aparc' or 'aparc.a2009s'

    Returns
    -------
    numpy.ndarray
        3d matrix with values
    numpy.ndarray
        4x4 affine matrix
    """
    seg_file = self.dir / 'mri' / (parc_type + '+aseg.mgz')
    seg_mri = load(seg_file)
    seg_aff = seg_mri.affine
    seg_dat = seg_mri.get_data()
    return seg_dat, seg_aff
[ "def", "read_seg", "(", "self", ",", "parc_type", "=", "'aparc'", ")", ":", "seg_file", "=", "self", ".", "dir", "/", "'mri'", "/", "(", "parc_type", "+", "'+aseg.mgz'", ")", "seg_mri", "=", "load", "(", "seg_file", ")", "seg_aff", "=", "seg_mri", ".", "affine", "seg_dat", "=", "seg_mri", ".", "get_data", "(", ")", "return", "seg_dat", ",", "seg_aff" ]
avg_line_len: 25.5
score: 15.2
def check_recommended_files(data, vcs):
    """Do check for recommended files.

    Returns True when all is fine.
    """
    main_files = os.listdir(data['workingdir'])
    if not 'setup.py' in main_files and not 'setup.cfg' in main_files:
        # Not a python package. We have no recommendations.
        return True
    if not 'MANIFEST.in' in main_files and not 'MANIFEST' in main_files:
        q = ("This package is missing a MANIFEST.in file. This file is "
             "recommended. "
             "See http://docs.python.org/distutils/sourcedist.html"
             " for more info. Sample contents:"
             "\n"
             "recursive-include main_directory *"
             "recursive-include docs *"
             "include *"
             "global-exclude *.pyc"
             "\n"
             "You may want to quit and fix this.")
        if not vcs.is_setuptools_helper_package_installed():
            q += "Installing %s may help too.\n" % \
                vcs.setuptools_helper_package
        # We could ask, but simply printing it is nicer. Well, okay,
        # let's avoid some broken eggs on PyPI, per
        # https://github.com/zestsoftware/zest.releaser/issues/10
        q += "Do you want to continue with the release?"
        if not ask(q, default=False):
            return False
        print(q)
    return True
[ "def", "check_recommended_files", "(", "data", ",", "vcs", ")", ":", "main_files", "=", "os", ".", "listdir", "(", "data", "[", "'workingdir'", "]", ")", "if", "not", "'setup.py'", "in", "main_files", "and", "not", "'setup.cfg'", "in", "main_files", ":", "# Not a python package. We have no recommendations.", "return", "True", "if", "not", "'MANIFEST.in'", "in", "main_files", "and", "not", "'MANIFEST'", "in", "main_files", ":", "q", "=", "(", "\"This package is missing a MANIFEST.in file. This file is \"", "\"recommended. \"", "\"See http://docs.python.org/distutils/sourcedist.html\"", "\" for more info. Sample contents:\"", "\"\\n\"", "\"recursive-include main_directory *\"", "\"recursive-include docs *\"", "\"include *\"", "\"global-exclude *.pyc\"", "\"\\n\"", "\"You may want to quit and fix this.\"", ")", "if", "not", "vcs", ".", "is_setuptools_helper_package_installed", "(", ")", ":", "q", "+=", "\"Installing %s may help too.\\n\"", "%", "vcs", ".", "setuptools_helper_package", "# We could ask, but simply printing it is nicer. Well, okay,", "# let's avoid some broken eggs on PyPI, per", "# https://github.com/zestsoftware/zest.releaser/issues/10", "q", "+=", "\"Do you want to continue with the release?\"", "if", "not", "ask", "(", "q", ",", "default", "=", "False", ")", ":", "return", "False", "print", "(", "q", ")", "return", "True" ]
avg_line_len: 40.030303
score: 16.606061
async def pendings(self, tasks=None):
    """Used for await in coroutines.

    `await loop.pendings()`
    `await loop.pendings(tasks)`
    """
    tasks = tasks or self.todo_tasks
    await asyncio.gather(*tasks, loop=self.loop)
[ "async", "def", "pendings", "(", "self", ",", "tasks", "=", "None", ")", ":", "tasks", "=", "tasks", "or", "self", ".", "todo_tasks", "await", "asyncio", ".", "gather", "(", "*", "tasks", ",", "loop", "=", "self", ".", "loop", ")" ]
avg_line_len: 35.285714
score: 4
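pendings above forwards to asyncio.gather; note that gather's loop= argument (used above) was deprecated in Python 3.8 and removed in 3.10, so this standalone sketch omits it:

import asyncio

async def work(n):
    await asyncio.sleep(0)  # stand-in for real I/O
    return n * n

async def main():
    tasks = [asyncio.ensure_future(work(i)) for i in range(3)]
    print(await asyncio.gather(*tasks))  # [0, 1, 4]

asyncio.run(main())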
def get_repositories_by_genus_type(self, repository_genus_type=None):
    """Gets a ``RepositoryList`` corresponding to the given repository genus
    ``Type`` which does not include repositories of types derived from the
    specified ``Type``.

    In plenary mode, the returned list contains all known repositories or
    an error results. Otherwise, the returned list may contain only those
    repositories that are accessible through this session.

    :param repository_genus_type: a repository genus type
    :type repository_genus_type: ``osid.type.Type``
    :return: the returned ``Repository list``
    :rtype: ``osid.repository.RepositoryList``
    :raise: ``NullArgument`` -- ``repository_genus_type`` is ``null``
    :raise: ``OperationFailed`` -- unable to complete request
    :raise: ``PermissionDenied`` -- authorization failure

    *compliance: mandatory -- This method must be implemented.*
    """
    if repository_genus_type is None:
        raise NullArgument()
    url_path = construct_url('objective_banks')
    repositories_of_type = []
    all_repositories = self._get_request(url_path)
    for repository in all_repositories:
        # DO WE NEED TO CHECK ALL THREE ATTRIBUTES OF THE Id HERE?
        if repository['genusTypeId'] == repository_genus_type.get_identifier():
            repositories_of_type.append(repository)
    return objects.RepositoryList(repositories_of_type)
[ "def", "get_repositories_by_genus_type", "(", "self", ",", "repository_genus_type", "=", "None", ")", ":", "if", "repository_genus_type", "is", "None", ":", "raise", "NullArgument", "(", ")", "url_path", "=", "construct_url", "(", "'objective_banks'", ")", "repositories_of_type", "=", "[", "]", "all_repositories", "=", "self", ".", "_get_request", "(", "url_path", ")", "for", "repository", "in", "all_repositories", ":", "# DO WE NEED TO CHECK ALL THREE ATRIBUTES OF THE Id HERE?", "if", "repository", "[", "'genusTypeId'", "]", "==", "repository_genus_type", ".", "get_identifier", "(", ")", ":", "repositories_of_type", ".", "append", "[", "repository", "]", "return", "objects", ".", "RepositoryList", "(", "repositories_of_type", ")" ]
avg_line_len: 49.533333
score: 21.6
def query(self):
    """A QueryDict object holding the query parameters (QUERY_STRING)."""
    if self._query is None:
        query_string = self.environ.get('QUERY_STRING')
        self._query = QueryDict([
            (k.decode('utf-8'), v.decode('utf-8'))
            for k, v in urlparse.parse_qsl(
                query_string, keep_blank_values=True)
        ])
    return self._query
[ "def", "query", "(", "self", ")", ":", "if", "self", ".", "_query", "is", "None", ":", "query_string", "=", "self", ".", "environ", ".", "get", "(", "'QUERY_STRING'", ")", "self", ".", "_query", "=", "QueryDict", "(", "[", "(", "k", ".", "decode", "(", "'utf-8'", ")", ",", "v", ".", "decode", "(", "'utf-8'", ")", ")", "for", "k", ",", "v", "in", "urlparse", ".", "parse_qsl", "(", "query_string", ",", "keep_blank_values", "=", "True", ")", "]", ")", "return", "self", ".", "_query" ]
avg_line_len: 41.8
score: 13.3
def default_middlewares(web3):
    """
    List the default middlewares for the request manager.
    Leaving ens unspecified will prevent the middleware from resolving names.
    """
    return [
        (request_parameter_normalizer, 'request_param_normalizer'),
        (gas_price_strategy_middleware, 'gas_price_strategy'),
        (name_to_address_middleware(web3), 'name_to_address'),
        (attrdict_middleware, 'attrdict'),
        (pythonic_middleware, 'pythonic'),
        (normalize_errors_middleware, 'normalize_errors'),
        (validation_middleware, 'validation'),
        (abi_middleware, 'abi'),
    ]
[ "def", "default_middlewares", "(", "web3", ")", ":", "return", "[", "(", "request_parameter_normalizer", ",", "'request_param_normalizer'", ")", ",", "(", "gas_price_strategy_middleware", ",", "'gas_price_strategy'", ")", ",", "(", "name_to_address_middleware", "(", "web3", ")", ",", "'name_to_address'", ")", ",", "(", "attrdict_middleware", ",", "'attrdict'", ")", ",", "(", "pythonic_middleware", ",", "'pythonic'", ")", ",", "(", "normalize_errors_middleware", ",", "'normalize_errors'", ")", ",", "(", "validation_middleware", ",", "'validation'", ")", ",", "(", "abi_middleware", ",", "'abi'", ")", ",", "]" ]
avg_line_len: 44.133333
score: 17.2
def modify_process_property(self, key, value, pid=None):
    '''
    modify_process_property(self, key, value, pid=None)

    Modify a process output property. Please note that the process property
    key provided must be declared as an output property in the relevant
    service specification.

    :Parameters:
    * *key* (`string`) -- key of the property to modify
    * *value* (`string`) -- value of the property to modify
    * *pid* (`string`) -- Identifier of an existing process

    :Example:
    .. code-block:: python

       pid = opereto_client.create_process(service='simple_shell_command', title='Test simple shell command service')
       opereto_client.modify_process_property("my_output_param", "1", pid)

    '''
    pid = self._get_pid(pid)
    request_data = {"key": key, "value": value}
    return self._call_rest_api('post', '/processes/' + pid + '/output', data=request_data,
                               error='Failed to modify output property [%s]' % key)
[ "def", "modify_process_property", "(", "self", ",", "key", ",", "value", ",", "pid", "=", "None", ")", ":", "pid", "=", "self", ".", "_get_pid", "(", "pid", ")", "request_data", "=", "{", "\"key\"", ":", "key", ",", "\"value\"", ":", "value", "}", "return", "self", ".", "_call_rest_api", "(", "'post'", ",", "'/processes/'", "+", "pid", "+", "'/output'", ",", "data", "=", "request_data", ",", "error", "=", "'Failed to modify output property [%s]'", "%", "key", ")" ]
avg_line_len: 44.363636
score: 33.454545
def register_request(self, valid_responses):
    """Register an RPC request.

    :param list valid_responses: List of possible Responses that
                                 we should be waiting for.
    :return:
    """
    uuid = str(uuid4())
    self._response[uuid] = []
    for action in valid_responses:
        self._request[action] = uuid
    return uuid
[ "def", "register_request", "(", "self", ",", "valid_responses", ")", ":", "uuid", "=", "str", "(", "uuid4", "(", ")", ")", "self", ".", "_response", "[", "uuid", "]", "=", "[", "]", "for", "action", "in", "valid_responses", ":", "self", ".", "_request", "[", "action", "]", "=", "uuid", "return", "uuid" ]
avg_line_len: 32.666667
score: 13.416667
def calendar(type='holiday', direction='next', last=1, startDate=None, token='', version=''):
    '''This call allows you to fetch a number of trade dates or holidays from a given date.
    For example, if you want the next trading day, you would call /ref-data/us/dates/trade/next/1.

    https://iexcloud.io/docs/api/#u-s-exchanges
    8am, 9am, 12pm, 1pm UTC daily

    Args:
        type (string); "holiday" or "trade"
        direction (string); "next" or "last"
        last (int); number to move in direction
        startDate (date); start date for next or last, YYYYMMDD
        token (string); Access token
        version (string); API version

    Returns:
        dict: result
    '''
    if startDate:
        startDate = _strOrDate(startDate)
        return _getJson('ref-data/us/dates/{type}/{direction}/{last}/{date}'.format(
            type=type, direction=direction, last=last, date=startDate), token, version)
    return _getJson('ref-data/us/dates/' + type + '/' + direction + '/' + str(last), token, version)
[ "def", "calendar", "(", "type", "=", "'holiday'", ",", "direction", "=", "'next'", ",", "last", "=", "1", ",", "startDate", "=", "None", ",", "token", "=", "''", ",", "version", "=", "''", ")", ":", "if", "startDate", ":", "startDate", "=", "_strOrDate", "(", "startDate", ")", "return", "_getJson", "(", "'ref-data/us/dates/{type}/{direction}/{last}/{date}'", ".", "format", "(", "type", "=", "type", ",", "direction", "=", "direction", ",", "last", "=", "last", ",", "date", "=", "startDate", ")", ",", "token", ",", "version", ")", "return", "_getJson", "(", "'ref-data/us/dates/'", "+", "type", "+", "'/'", "+", "direction", "+", "'/'", "+", "str", "(", "last", ")", ",", "token", ",", "version", ")" ]
avg_line_len: 47.333333
score: 32.952381
def efficient_frontier(self, points):
    """Get the efficient frontier"""
    mu, sigma, weights = [], [], []

    # remove the 1, to avoid duplications
    # (np.linspace needs an integer sample count, hence the int())
    a = np.linspace(0, 1, int(points / len(self.w)))[:-1]
    b = list(range(len(self.w) - 1))
    for i in b:
        w0, w1 = self.w[i], self.w[i + 1]
        if i == b[-1]:
            # include the 1 in the last iteration
            a = np.linspace(0, 1, int(points / len(self.w)))
        for j in a:
            w = w1 * j + (1 - j) * w0
            weights.append(np.copy(w))
            mu.append(np.dot(w.T, self.mean)[0, 0])
            sigma.append(np.dot(np.dot(w.T, self.cov_matrix), w)[0, 0] ** 0.5)

    return mu, sigma, weights
[ "def", "efficient_frontier", "(", "self", ",", "points", ")", ":", "mu", ",", "sigma", ",", "weights", "=", "[", "]", ",", "[", "]", ",", "[", "]", "# remove the 1, to avoid duplications", "a", "=", "np", ".", "linspace", "(", "0", ",", "1", ",", "points", "/", "len", "(", "self", ".", "w", ")", ")", "[", ":", "-", "1", "]", "b", "=", "list", "(", "range", "(", "len", "(", "self", ".", "w", ")", "-", "1", ")", ")", "for", "i", "in", "b", ":", "w0", ",", "w1", "=", "self", ".", "w", "[", "i", "]", ",", "self", ".", "w", "[", "i", "+", "1", "]", "if", "i", "==", "b", "[", "-", "1", "]", ":", "# include the 1 in the last iteration", "a", "=", "np", ".", "linspace", "(", "0", ",", "1", ",", "points", "/", "len", "(", "self", ".", "w", ")", ")", "for", "j", "in", "a", ":", "w", "=", "w1", "*", "j", "+", "(", "1", "-", "j", ")", "*", "w0", "weights", ".", "append", "(", "np", ".", "copy", "(", "w", ")", ")", "mu", ".", "append", "(", "np", ".", "dot", "(", "w", ".", "T", ",", "self", ".", "mean", ")", "[", "0", ",", "0", "]", ")", "sigma", ".", "append", "(", "np", ".", "dot", "(", "np", ".", "dot", "(", "w", ".", "T", ",", "self", ".", "cov_matrix", ")", ",", "w", ")", "[", "0", ",", "0", "]", "**", "0.5", ")", "return", "mu", ",", "sigma", ",", "weights" ]
avg_line_len: 43.235294
score: 10.647059
def is_field_method(node):
    """Checks if a call to a field instance method is valid.

    A call is valid if the call is a method of the underlying type.
    So, in a StringField the methods from str are valid, in a ListField
    the methods from list are valid and so on..."""
    name = node.attrname
    parent = node.last_child()
    inferred = safe_infer(parent)
    if not inferred:
        return False

    for cls_name, inst in FIELD_TYPES.items():
        if node_is_instance(inferred, cls_name) and hasattr(inst, name):
            return True

    return False
[ "def", "is_field_method", "(", "node", ")", ":", "name", "=", "node", ".", "attrname", "parent", "=", "node", ".", "last_child", "(", ")", "inferred", "=", "safe_infer", "(", "parent", ")", "if", "not", "inferred", ":", "return", "False", "for", "cls_name", ",", "inst", "in", "FIELD_TYPES", ".", "items", "(", ")", ":", "if", "node_is_instance", "(", "inferred", ",", "cls_name", ")", "and", "hasattr", "(", "inst", ",", "name", ")", ":", "return", "True", "return", "False" ]
avg_line_len: 32.882353
score: 21.176471
def _save_state(self):
    """
    Helper context manager for :meth:`buffer` which saves the whole state.

    This is broken out in a separate method for readability and tested
    indirectly by testing :meth:`buffer`.
    """
    ns_prefixes_floating_in = copy.copy(self._ns_prefixes_floating_in)
    ns_prefixes_floating_out = copy.copy(self._ns_prefixes_floating_out)
    ns_decls_floating_in = copy.copy(self._ns_decls_floating_in)
    curr_ns_map = copy.copy(self._curr_ns_map)
    ns_map_stack = copy.copy(self._ns_map_stack)
    pending_start_element = self._pending_start_element
    ns_counter = self._ns_counter
    # XXX: I have been unable to find a test justifying copying this :/
    # for completeness, I’m still doing it
    ns_auto_prefixes_floating_in = \
        copy.copy(self._ns_auto_prefixes_floating_in)
    try:
        yield
    except:  # NOQA: E722
        self._ns_prefixes_floating_in = ns_prefixes_floating_in
        self._ns_prefixes_floating_out = ns_prefixes_floating_out
        self._ns_decls_floating_in = ns_decls_floating_in
        self._pending_start_element = pending_start_element
        self._curr_ns_map = curr_ns_map
        self._ns_map_stack = ns_map_stack
        self._ns_counter = ns_counter
        self._ns_auto_prefixes_floating_in = ns_auto_prefixes_floating_in
        raise
[ "def", "_save_state", "(", "self", ")", ":", "ns_prefixes_floating_in", "=", "copy", ".", "copy", "(", "self", ".", "_ns_prefixes_floating_in", ")", "ns_prefixes_floating_out", "=", "copy", ".", "copy", "(", "self", ".", "_ns_prefixes_floating_out", ")", "ns_decls_floating_in", "=", "copy", ".", "copy", "(", "self", ".", "_ns_decls_floating_in", ")", "curr_ns_map", "=", "copy", ".", "copy", "(", "self", ".", "_curr_ns_map", ")", "ns_map_stack", "=", "copy", ".", "copy", "(", "self", ".", "_ns_map_stack", ")", "pending_start_element", "=", "self", ".", "_pending_start_element", "ns_counter", "=", "self", ".", "_ns_counter", "# XXX: I have been unable to find a test justifying copying this :/", "# for completeness, I’m still doing it", "ns_auto_prefixes_floating_in", "=", "copy", ".", "copy", "(", "self", ".", "_ns_auto_prefixes_floating_in", ")", "try", ":", "yield", "except", ":", "# NOQA: E722", "self", ".", "_ns_prefixes_floating_in", "=", "ns_prefixes_floating_in", "self", ".", "_ns_prefixes_floating_out", "=", "ns_prefixes_floating_out", "self", ".", "_ns_decls_floating_in", "=", "ns_decls_floating_in", "self", ".", "_pending_start_element", "=", "pending_start_element", "self", ".", "_curr_ns_map", "=", "curr_ns_map", "self", ".", "_ns_map_stack", "=", "ns_map_stack", "self", ".", "_ns_counter", "=", "ns_counter", "self", ".", "_ns_auto_prefixes_floating_in", "=", "ns_auto_prefixes_floating_in", "raise" ]
avg_line_len: 47.2
score: 18.866667
def _update_usage_plan_apis(plan_id, apis, op, region=None, key=None, keyid=None, profile=None):
    '''
    Helper function that updates the usage plan identified by plan_id by
    adding it to or removing it from each of the stages specified by the
    apis parameter.

    apis
        a list of dictionaries, where each dictionary contains the following:

        apiId
            a string, which is the id of the created API in AWS ApiGateway

        stage
            a string, which is the stage that the created API is deployed to.

    op
        'add' or 'remove'
    '''
    try:
        patchOperations = []
        for api in apis:
            patchOperations.append({
                'op': op,
                'path': '/apiStages',
                'value': '{0}:{1}'.format(api['apiId'], api['stage'])
            })
        res = None
        if patchOperations:
            conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
            res = conn.update_usage_plan(usagePlanId=plan_id,
                                         patchOperations=patchOperations)
        return {'success': True, 'result': res}
    except ClientError as e:
        return {'error': __utils__['boto3.get_error'](e)}
    except Exception as e:
        return {'error': e}
[ "def", "_update_usage_plan_apis", "(", "plan_id", ",", "apis", ",", "op", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ")", ":", "try", ":", "patchOperations", "=", "[", "]", "for", "api", "in", "apis", ":", "patchOperations", ".", "append", "(", "{", "'op'", ":", "op", ",", "'path'", ":", "'/apiStages'", ",", "'value'", ":", "'{0}:{1}'", ".", "format", "(", "api", "[", "'apiId'", "]", ",", "api", "[", "'stage'", "]", ")", "}", ")", "res", "=", "None", "if", "patchOperations", ":", "conn", "=", "_get_conn", "(", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", "res", "=", "conn", ".", "update_usage_plan", "(", "usagePlanId", "=", "plan_id", ",", "patchOperations", "=", "patchOperations", ")", "return", "{", "'success'", ":", "True", ",", "'result'", ":", "res", "}", "except", "ClientError", "as", "e", ":", "return", "{", "'error'", ":", "__utils__", "[", "'boto3.get_error'", "]", "(", "e", ")", "}", "except", "Exception", "as", "e", ":", "return", "{", "'error'", ":", "e", "}" ]
avg_line_len: 38.794118
score: 28.323529
def json(self, start=None):
    """
    Returns the dict in json format.

    :param start: start key in dot notation
    :return: json string version
    :rtype: string
    """
    data = self.data
    if start is not None:
        data = self.data[start]
    return json.dumps(data, indent=4)
[ "def", "json", "(", "self", ",", "start", "=", "None", ")", ":", "if", "start", "is", "not", "None", ":", "data", "=", "self", ".", "data", "[", "start", "]", "return", "json", ".", "dumps", "(", "self", ".", "data", ",", "indent", "=", "4", ")" ]
avg_line_len: 30.3
score: 6.5
def save_credentials(self, profile):
    """
    Saves credentials to a dotfile so you can grab them later.

    Parameters
    ----------
    profile: str
        name for your profile (i.e. "dev", "prod")
    """
    filename = profile_path(S3_PROFILE_ID, profile)
    creds = {
        "access_key": self.access_key,
        "secret_key": self.secret_key
    }
    dump_to_json(filename, creds)
[ "def", "save_credentials", "(", "self", ",", "profile", ")", ":", "filename", "=", "profile_path", "(", "S3_PROFILE_ID", ",", "profile", ")", "creds", "=", "{", "\"access_key\"", ":", "self", ".", "access_key", ",", "\"secret_key\"", ":", "self", ".", "secret_key", "}", "dump_to_json", "(", "filename", ",", "creds", ")" ]
avg_line_len: 29.666667
score: 15.533333
def notify(self, frame_type, headers=None, body=None):
    """
    Utility function for notifying listeners of incoming and outgoing messages

    :param str frame_type: the type of message
    :param dict headers: the map of headers associated with the message
    :param body: the content of the message
    """
    if frame_type == 'receipt':
        # logic for wait-on-receipt notification
        receipt = headers['receipt-id']
        receipt_value = self.__receipts.get(receipt)
        with self.__send_wait_condition:
            self.set_receipt(receipt, None)
            self.__send_wait_condition.notify()

        if receipt_value == CMD_DISCONNECT:
            self.set_connected(False)

        # received a stomp 1.1+ disconnect receipt
        if receipt == self.__disconnect_receipt:
            self.disconnect_socket()
            self.__disconnect_receipt = None
    elif frame_type == 'connected':
        self.set_connected(True)
    elif frame_type == 'disconnected':
        self.set_connected(False)

    with self.__listeners_change_condition:
        listeners = sorted(self.listeners.items())

    for (_, listener) in listeners:
        if not listener:
            continue

        notify_func = getattr(listener, 'on_%s' % frame_type, None)
        if not notify_func:
            log.debug("listener %s has no method on_%s", listener, frame_type)
            continue
        if frame_type in ('heartbeat', 'disconnected'):
            notify_func()
            continue
        if frame_type == 'connecting':
            notify_func(self.current_host_and_port)
            continue

        if frame_type == 'error' and not self.connected:
            with self.__connect_wait_condition:
                self.connection_error = True
                self.__connect_wait_condition.notify()

        rtn = notify_func(headers, body)
        if rtn:
            (headers, body) = rtn
    return (headers, body)
[ "def", "notify", "(", "self", ",", "frame_type", ",", "headers", "=", "None", ",", "body", "=", "None", ")", ":", "if", "frame_type", "==", "'receipt'", ":", "# logic for wait-on-receipt notification", "receipt", "=", "headers", "[", "'receipt-id'", "]", "receipt_value", "=", "self", ".", "__receipts", ".", "get", "(", "receipt", ")", "with", "self", ".", "__send_wait_condition", ":", "self", ".", "set_receipt", "(", "receipt", ",", "None", ")", "self", ".", "__send_wait_condition", ".", "notify", "(", ")", "if", "receipt_value", "==", "CMD_DISCONNECT", ":", "self", ".", "set_connected", "(", "False", ")", "# received a stomp 1.1+ disconnect receipt", "if", "receipt", "==", "self", ".", "__disconnect_receipt", ":", "self", ".", "disconnect_socket", "(", ")", "self", ".", "__disconnect_receipt", "=", "None", "elif", "frame_type", "==", "'connected'", ":", "self", ".", "set_connected", "(", "True", ")", "elif", "frame_type", "==", "'disconnected'", ":", "self", ".", "set_connected", "(", "False", ")", "with", "self", ".", "__listeners_change_condition", ":", "listeners", "=", "sorted", "(", "self", ".", "listeners", ".", "items", "(", ")", ")", "for", "(", "_", ",", "listener", ")", "in", "listeners", ":", "if", "not", "listener", ":", "continue", "notify_func", "=", "getattr", "(", "listener", ",", "'on_%s'", "%", "frame_type", ",", "None", ")", "if", "not", "notify_func", ":", "log", ".", "debug", "(", "\"listener %s has no method on_%s\"", ",", "listener", ",", "frame_type", ")", "continue", "if", "frame_type", "in", "(", "'heartbeat'", ",", "'disconnected'", ")", ":", "notify_func", "(", ")", "continue", "if", "frame_type", "==", "'connecting'", ":", "notify_func", "(", "self", ".", "current_host_and_port", ")", "continue", "if", "frame_type", "==", "'error'", "and", "not", "self", ".", "connected", ":", "with", "self", ".", "__connect_wait_condition", ":", "self", ".", "connection_error", "=", "True", "self", ".", "__connect_wait_condition", ".", "notify", "(", ")", "rtn", "=", "notify_func", "(", "headers", ",", "body", ")", "if", "rtn", ":", "(", "headers", ",", "body", ")", "=", "rtn", "return", "(", "headers", ",", "body", ")" ]
avg_line_len: 37.25
score: 16.285714
def _checkpointLabelFromCheckpointDir(checkpointDir):
    """Returns a checkpoint label string for the given model checkpoint directory

    checkpointDir: relative or absolute model checkpoint directory path
    """
    assert checkpointDir.endswith(g_defaultCheckpointExtension)

    lastSegment = os.path.split(checkpointDir)[1]

    checkpointLabel = lastSegment[0:-len(g_defaultCheckpointExtension)]

    return checkpointLabel
[ "def", "_checkpointLabelFromCheckpointDir", "(", "checkpointDir", ")", ":", "assert", "checkpointDir", ".", "endswith", "(", "g_defaultCheckpointExtension", ")", "lastSegment", "=", "os", ".", "path", ".", "split", "(", "checkpointDir", ")", "[", "1", "]", "checkpointLabel", "=", "lastSegment", "[", "0", ":", "-", "len", "(", "g_defaultCheckpointExtension", ")", "]", "return", "checkpointLabel" ]
avg_line_len: 33.916667
score: 22.916667
def _pdf(self, phi):
    """
    Evaluate the _unnormalized_ flow PDF.
    """
    pdf = np.inner(self._vn, np.cos(np.outer(phi, self._n)))
    pdf *= 2.
    pdf += 1.
    return pdf
[ "def", "_pdf", "(", "self", ",", "phi", ")", ":", "pdf", "=", "np", ".", "inner", "(", "self", ".", "_vn", ",", "np", ".", "cos", "(", "np", ".", "outer", "(", "phi", ",", "self", ".", "_n", ")", ")", ")", "pdf", "*=", "2.", "pdf", "+=", "1.", "return", "pdf" ]
avg_line_len: 20.3
score: 19.7
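_pdf above evaluates the usual Fourier series for an azimuthal flow distribution, pdf(phi) = 1 + 2 * sum_n v_n * cos(n * phi), unnormalized. A standalone check with made-up coefficients:

import numpy as np

vn = np.array([0.1, 0.05])  # hypothetical v_2, v_3 flow coefficients
n = np.array([2, 3])        # the matching harmonics
phi = np.linspace(-np.pi, np.pi, 5)

pdf = 1. + 2. * np.inner(vn, np.cos(np.outer(phi, n)))
print(pdf)  # positive for small v_n; integrates to 2*pi over a full period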
def _merge_and_bgzip(orig_files, out_file, base_file, ext=""):
    """Merge a group of gzipped input files into a final bgzipped output.

    Also handles providing unique names for each input file to avoid
    collisions on multi-region output. Handles renaming with awk magic from:
    https://www.biostars.org/p/68477/
    """
    assert out_file.endswith(".gz")
    full_file = out_file.replace(".gz", "")
    run_file = "%s-merge.bash" % utils.splitext_plus(base_file)[0]
    cmds = ["set -e\n"]
    for i, fname in enumerate(orig_files):
        cmd = ("""zcat %s | awk '{print (NR%%4 == 1) ? "@%s_" ++i "%s" : $0}' >> %s\n"""
               % (fname, i, ext, full_file))
        cmds.append(cmd)
    cmds.append("bgzip -f %s\n" % full_file)
    with open(run_file, "w") as out_handle:
        out_handle.write("".join("".join(cmds)))
    do.run([do.find_bash(), run_file], "Rename, merge and bgzip CRAM fastq output")
    assert os.path.exists(out_file) and not _is_gzip_empty(out_file)
[ "def", "_merge_and_bgzip", "(", "orig_files", ",", "out_file", ",", "base_file", ",", "ext", "=", "\"\"", ")", ":", "assert", "out_file", ".", "endswith", "(", "\".gz\"", ")", "full_file", "=", "out_file", ".", "replace", "(", "\".gz\"", ",", "\"\"", ")", "run_file", "=", "\"%s-merge.bash\"", "%", "utils", ".", "splitext_plus", "(", "base_file", ")", "[", "0", "]", "cmds", "=", "[", "\"set -e\\n\"", "]", "for", "i", ",", "fname", "in", "enumerate", "(", "orig_files", ")", ":", "cmd", "=", "(", "\"\"\"zcat %s | awk '{print (NR%%4 == 1) ? \"@%s_\" ++i \"%s\" : $0}' >> %s\\n\"\"\"", "%", "(", "fname", ",", "i", ",", "ext", ",", "full_file", ")", ")", "cmds", ".", "append", "(", "cmd", ")", "cmds", ".", "append", "(", "\"bgzip -f %s\\n\"", "%", "full_file", ")", "with", "open", "(", "run_file", ",", "\"w\"", ")", "as", "out_handle", ":", "out_handle", ".", "write", "(", "\"\"", ".", "join", "(", "\"\"", ".", "join", "(", "cmds", ")", ")", ")", "do", ".", "run", "(", "[", "do", ".", "find_bash", "(", ")", ",", "run_file", "]", ",", "\"Rename, merge and bgzip CRAM fastq output\"", ")", "assert", "os", ".", "path", ".", "exists", "(", "out_file", ")", "and", "not", "_is_gzip_empty", "(", "out_file", ")" ]
avg_line_len: 44.272727
score: 16.727273
def fit(self, P):
    """Fit the diagonal matrices in Sinkhorn Knopp's algorithm

    Parameters
    ----------
    P : 2d array-like
        Must be a square non-negative 2d array-like object, that
        is convertible to a numpy array. The matrix must not be
        equal to 0 and it must have total support for the algorithm
        to converge.

    Returns
    -------
    A double stochastic matrix.
    """
    P = np.asarray(P)
    assert np.all(P >= 0)
    assert P.ndim == 2
    assert P.shape[0] == P.shape[1]

    N = P.shape[0]
    max_thresh = 1 + self._epsilon
    min_thresh = 1 - self._epsilon

    # Initialize r and c, the diagonals of D1 and D2
    # and warn if the matrix does not have support.
    r = np.ones((N, 1))
    pdotr = P.T.dot(r)
    total_support_warning_str = (
        "Matrix P must have total support. "
        "See documentation"
    )
    if not np.all(pdotr != 0):
        warnings.warn(total_support_warning_str, UserWarning)

    c = 1 / pdotr
    pdotc = P.dot(c)
    if not np.all(pdotc != 0):
        warnings.warn(total_support_warning_str, UserWarning)

    r = 1 / pdotc
    del pdotr, pdotc

    P_eps = np.copy(P)
    while np.any(np.sum(P_eps, axis=1) < min_thresh) \
            or np.any(np.sum(P_eps, axis=1) > max_thresh) \
            or np.any(np.sum(P_eps, axis=0) < min_thresh) \
            or np.any(np.sum(P_eps, axis=0) > max_thresh):

        c = 1 / P.T.dot(r)
        r = 1 / P.dot(c)

        self._D1 = np.diag(np.squeeze(r))
        self._D2 = np.diag(np.squeeze(c))
        P_eps = self._D1.dot(P).dot(self._D2)

        self._iterations += 1

        if self._iterations >= self._max_iter:
            self._stopping_condition = "max_iter"
            break

    if not self._stopping_condition:
        self._stopping_condition = "epsilon"

    self._D1 = np.diag(np.squeeze(r))
    self._D2 = np.diag(np.squeeze(c))
    P_eps = self._D1.dot(P).dot(self._D2)

    return P_eps
[ "def", "fit", "(", "self", ",", "P", ")", ":", "P", "=", "np", ".", "asarray", "(", "P", ")", "assert", "np", ".", "all", "(", "P", ">=", "0", ")", "assert", "P", ".", "ndim", "==", "2", "assert", "P", ".", "shape", "[", "0", "]", "==", "P", ".", "shape", "[", "1", "]", "N", "=", "P", ".", "shape", "[", "0", "]", "max_thresh", "=", "1", "+", "self", ".", "_epsilon", "min_thresh", "=", "1", "-", "self", ".", "_epsilon", "# Initialize r and c, the diagonals of D1 and D2", "# and warn if the matrix does not have support.", "r", "=", "np", ".", "ones", "(", "(", "N", ",", "1", ")", ")", "pdotr", "=", "P", ".", "T", ".", "dot", "(", "r", ")", "total_support_warning_str", "=", "(", "\"Matrix P must have total support. \"", "\"See documentation\"", ")", "if", "not", "np", ".", "all", "(", "pdotr", "!=", "0", ")", ":", "warnings", ".", "warn", "(", "total_support_warning_str", ",", "UserWarning", ")", "c", "=", "1", "/", "pdotr", "pdotc", "=", "P", ".", "dot", "(", "c", ")", "if", "not", "np", ".", "all", "(", "pdotc", "!=", "0", ")", ":", "warnings", ".", "warn", "(", "total_support_warning_str", ",", "UserWarning", ")", "r", "=", "1", "/", "pdotc", "del", "pdotr", ",", "pdotc", "P_eps", "=", "np", ".", "copy", "(", "P", ")", "while", "np", ".", "any", "(", "np", ".", "sum", "(", "P_eps", ",", "axis", "=", "1", ")", "<", "min_thresh", ")", "or", "np", ".", "any", "(", "np", ".", "sum", "(", "P_eps", ",", "axis", "=", "1", ")", ">", "max_thresh", ")", "or", "np", ".", "any", "(", "np", ".", "sum", "(", "P_eps", ",", "axis", "=", "0", ")", "<", "min_thresh", ")", "or", "np", ".", "any", "(", "np", ".", "sum", "(", "P_eps", ",", "axis", "=", "0", ")", ">", "max_thresh", ")", ":", "c", "=", "1", "/", "P", ".", "T", ".", "dot", "(", "r", ")", "r", "=", "1", "/", "P", ".", "dot", "(", "c", ")", "self", ".", "_D1", "=", "np", ".", "diag", "(", "np", ".", "squeeze", "(", "r", ")", ")", "self", ".", "_D2", "=", "np", ".", "diag", "(", "np", ".", "squeeze", "(", "c", ")", ")", "P_eps", "=", "self", ".", "_D1", ".", "dot", "(", "P", ")", ".", "dot", "(", "self", ".", "_D2", ")", "self", ".", "_iterations", "+=", "1", "if", "self", ".", "_iterations", ">=", "self", ".", "_max_iter", ":", "self", ".", "_stopping_condition", "=", "\"max_iter\"", "break", "if", "not", "self", ".", "_stopping_condition", ":", "self", ".", "_stopping_condition", "=", "\"epsilon\"", "self", ".", "_D1", "=", "np", ".", "diag", "(", "np", ".", "squeeze", "(", "r", ")", ")", "self", ".", "_D2", "=", "np", ".", "diag", "(", "np", ".", "squeeze", "(", "c", ")", ")", "P_eps", "=", "self", ".", "_D1", ".", "dot", "(", "P", ")", ".", "dot", "(", "self", ".", "_D2", ")", "return", "P_eps" ]
29.422535
19.15493
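A minimal usage sketch for the method above; the `sinkhorn_knopp` import path and the zero-argument constructor are assumptions, not confirmed by this entry:

import numpy as np
from sinkhorn_knopp import sinkhorn_knopp  # assumed import path

# A strictly positive square matrix has total support, so fit() converges.
P = np.random.rand(4, 4) + 0.1

sk = sinkhorn_knopp.SinkhornKnopp()
P_ds = sk.fit(P)

# Row and column sums end up within epsilon of 1.
print(P_ds.sum(axis=0))
print(P_ds.sum(axis=1))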
def respond(text=None, ssml=None, attributes=None, reprompt_text=None,
            reprompt_ssml=None, end_session=True):
    """ Build a dict containing a valid response to an Alexa request.

    If speech output is desired, either of `text` or `ssml` should
    be specified.

    :param text: Plain text speech output to be said by Alexa device.
    :param ssml: Speech output in SSML form.
    :param attributes: Dictionary of attributes to store in the session.
    :param reprompt_text: Plain text reprompt speech output.
    :param reprompt_ssml: Reprompt speech output in SSML form.
    :param end_session: Should the session be terminated after this response?
    """
    obj = {
        'version': '1.0',
        'response': {
            'outputSpeech': {'type': 'PlainText', 'text': ''},
            'shouldEndSession': end_session
        },
        'sessionAttributes': attributes or {}
    }

    if text:
        obj['response']['outputSpeech'] = {'type': 'PlainText', 'text': text}
    elif ssml:
        obj['response']['outputSpeech'] = {'type': 'SSML', 'ssml': ssml}

    reprompt_output = None
    if reprompt_text:
        reprompt_output = {'type': 'PlainText', 'text': reprompt_text}
    elif reprompt_ssml:
        reprompt_output = {'type': 'SSML', 'ssml': reprompt_ssml}

    if reprompt_output:
        obj['response']['reprompt'] = {'outputSpeech': reprompt_output}

    return obj
[ "def", "respond", "(", "text", "=", "None", ",", "ssml", "=", "None", ",", "attributes", "=", "None", ",", "reprompt_text", "=", "None", ",", "reprompt_ssml", "=", "None", ",", "end_session", "=", "True", ")", ":", "obj", "=", "{", "'version'", ":", "'1.0'", ",", "'response'", ":", "{", "'outputSpeech'", ":", "{", "'type'", ":", "'PlainText'", ",", "'text'", ":", "''", "}", ",", "'shouldEndSession'", ":", "end_session", "}", ",", "'sessionAttributes'", ":", "attributes", "or", "{", "}", "}", "if", "text", ":", "obj", "[", "'response'", "]", "[", "'outputSpeech'", "]", "=", "{", "'type'", ":", "'PlainText'", ",", "'text'", ":", "text", "}", "elif", "ssml", ":", "obj", "[", "'response'", "]", "[", "'outputSpeech'", "]", "=", "{", "'type'", ":", "'SSML'", ",", "'ssml'", ":", "ssml", "}", "reprompt_output", "=", "None", "if", "reprompt_text", ":", "reprompt_output", "=", "{", "'type'", ":", "'PlainText'", ",", "'text'", ":", "reprompt_text", "}", "elif", "reprompt_ssml", ":", "reprompt_output", "=", "{", "'type'", ":", "'SSML'", ",", "'ssml'", ":", "reprompt_ssml", "}", "if", "reprompt_output", ":", "obj", "[", "'response'", "]", "[", "'reprompt'", "]", "=", "{", "'outputSpeech'", ":", "reprompt_output", "}", "return", "obj" ]
35.358974
25.102564
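A sketch of how respond() might be called from an intent handler; the handler signature and intent name are illustrative, not part of the source:

def handle_hello_intent(event, context):
    # Keep the session open and set a reprompt in case the user stays silent.
    return respond(
        text="Hello! Want to hear more?",
        attributes={"last_intent": "HelloIntent"},
        reprompt_text="Should I tell you more?",
        end_session=False,
    )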
def encode( self, word, language_arg=0, name_mode='gen', match_mode='approx', concat=False, filter_langs=False, ): """Return the Beider-Morse Phonetic Matching encoding(s) of a term. Parameters ---------- word : str The word to transform language_arg : int The language of the term; supported values include: - ``any`` - ``arabic`` - ``cyrillic`` - ``czech`` - ``dutch`` - ``english`` - ``french`` - ``german`` - ``greek`` - ``greeklatin`` - ``hebrew`` - ``hungarian`` - ``italian`` - ``latvian`` - ``polish`` - ``portuguese`` - ``romanian`` - ``russian`` - ``spanish`` - ``turkish`` name_mode : str The name mode of the algorithm: - ``gen`` -- general (default) - ``ash`` -- Ashkenazi - ``sep`` -- Sephardic match_mode : str Matching mode: ``approx`` or ``exact`` concat : bool Concatenation mode filter_langs : bool Filter out incompatible languages Returns ------- tuple The Beider-Morse phonetic value(s) Raises ------ ValueError Unknown language Examples -------- >>> pe = BeiderMorse() >>> pe.encode('Christopher') 'xrQstopir xrQstYpir xristopir xristYpir xrQstofir xrQstYfir xristofir xristYfir xristopi xritopir xritopi xristofi xritofir xritofi tzristopir tzristofir zristopir zristopi zritopir zritopi zristofir zristofi zritofir zritofi' >>> pe.encode('Niall') 'nial niol' >>> pe.encode('Smith') 'zmit' >>> pe.encode('Schmidt') 'zmit stzmit' >>> pe.encode('Christopher', language_arg='German') 'xrQstopir xrQstYpir xristopir xristYpir xrQstofir xrQstYfir xristofir xristYfir' >>> pe.encode('Christopher', language_arg='English') 'tzristofir tzrQstofir tzristafir tzrQstafir xristofir xrQstofir xristafir xrQstafir' >>> pe.encode('Christopher', language_arg='German', name_mode='ash') 'xrQstopir xrQstYpir xristopir xristYpir xrQstofir xrQstYfir xristofir xristYfir' >>> pe.encode('Christopher', language_arg='German', match_mode='exact') 'xriStopher xriStofer xristopher xristofer' """ word = normalize('NFC', text_type(word.strip().lower())) name_mode = name_mode.strip().lower()[:3] if name_mode not in {'ash', 'sep', 'gen'}: name_mode = 'gen' if match_mode != 'exact': match_mode = 'approx' # Translate the supplied language_arg value into an integer # representing a set of languages all_langs = ( sum(_LANG_DICT[_] for _ in BMDATA[name_mode]['languages']) - 1 ) lang_choices = 0 if isinstance(language_arg, (int, float, long)): lang_choices = int(language_arg) elif language_arg != '' and isinstance(language_arg, (text_type, str)): for lang in text_type(language_arg).lower().split(','): if lang in _LANG_DICT and (_LANG_DICT[lang] & all_langs): lang_choices += _LANG_DICT[lang] elif not filter_langs: raise ValueError( 'Unknown \'' + name_mode + '\' language: \'' + lang + '\'' ) # Language choices are either all incompatible with the name mode or # no choices were given, so try to autodetect if lang_choices == 0: language_arg = self._language(word, name_mode) else: language_arg = lang_choices language_arg2 = self._language_index_from_code(language_arg, name_mode) rules = BMDATA[name_mode]['rules'][language_arg2] final_rules1 = BMDATA[name_mode][match_mode]['common'] final_rules2 = BMDATA[name_mode][match_mode][language_arg2] result = self._phonetic( word, name_mode, rules, final_rules1, final_rules2, language_arg, concat, ) result = self._phonetic_numbers(result) return result
[ "def", "encode", "(", "self", ",", "word", ",", "language_arg", "=", "0", ",", "name_mode", "=", "'gen'", ",", "match_mode", "=", "'approx'", ",", "concat", "=", "False", ",", "filter_langs", "=", "False", ",", ")", ":", "word", "=", "normalize", "(", "'NFC'", ",", "text_type", "(", "word", ".", "strip", "(", ")", ".", "lower", "(", ")", ")", ")", "name_mode", "=", "name_mode", ".", "strip", "(", ")", ".", "lower", "(", ")", "[", ":", "3", "]", "if", "name_mode", "not", "in", "{", "'ash'", ",", "'sep'", ",", "'gen'", "}", ":", "name_mode", "=", "'gen'", "if", "match_mode", "!=", "'exact'", ":", "match_mode", "=", "'approx'", "# Translate the supplied language_arg value into an integer", "# representing a set of languages", "all_langs", "=", "(", "sum", "(", "_LANG_DICT", "[", "_", "]", "for", "_", "in", "BMDATA", "[", "name_mode", "]", "[", "'languages'", "]", ")", "-", "1", ")", "lang_choices", "=", "0", "if", "isinstance", "(", "language_arg", ",", "(", "int", ",", "float", ",", "long", ")", ")", ":", "lang_choices", "=", "int", "(", "language_arg", ")", "elif", "language_arg", "!=", "''", "and", "isinstance", "(", "language_arg", ",", "(", "text_type", ",", "str", ")", ")", ":", "for", "lang", "in", "text_type", "(", "language_arg", ")", ".", "lower", "(", ")", ".", "split", "(", "','", ")", ":", "if", "lang", "in", "_LANG_DICT", "and", "(", "_LANG_DICT", "[", "lang", "]", "&", "all_langs", ")", ":", "lang_choices", "+=", "_LANG_DICT", "[", "lang", "]", "elif", "not", "filter_langs", ":", "raise", "ValueError", "(", "'Unknown \\''", "+", "name_mode", "+", "'\\' language: \\''", "+", "lang", "+", "'\\''", ")", "# Language choices are either all incompatible with the name mode or", "# no choices were given, so try to autodetect", "if", "lang_choices", "==", "0", ":", "language_arg", "=", "self", ".", "_language", "(", "word", ",", "name_mode", ")", "else", ":", "language_arg", "=", "lang_choices", "language_arg2", "=", "self", ".", "_language_index_from_code", "(", "language_arg", ",", "name_mode", ")", "rules", "=", "BMDATA", "[", "name_mode", "]", "[", "'rules'", "]", "[", "language_arg2", "]", "final_rules1", "=", "BMDATA", "[", "name_mode", "]", "[", "match_mode", "]", "[", "'common'", "]", "final_rules2", "=", "BMDATA", "[", "name_mode", "]", "[", "match_mode", "]", "[", "language_arg2", "]", "result", "=", "self", ".", "_phonetic", "(", "word", ",", "name_mode", ",", "rules", ",", "final_rules1", ",", "final_rules2", ",", "language_arg", ",", "concat", ",", ")", "result", "=", "self", ".", "_phonetic_numbers", "(", "result", ")", "return", "result" ]
31.479452
19.246575
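A short usage sketch based on the doctests above; the abydos.phonetic import path is an assumption beyond what the entry itself shows:

from abydos.phonetic import BeiderMorse  # assumed import path

pe = BeiderMorse()
print(pe.encode('Schmidt'))  # 'zmit stzmit' (from the doctest above)
# Restricting the language set narrows the candidate encodings:
print(pe.encode('Christopher', language_arg='german', match_mode='exact'))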
def mime_type(self, type_: Optional[MimeType] = None) -> str:
    """Get a random MIME type from the list.

    :param type_: Enum object MimeType.
    :return: Mime type.
    """
    key = self._validate_enum(item=type_, enum=MimeType)
    types = MIME_TYPES[key]
    return self.random.choice(types)
[ "def", "mime_type", "(", "self", ",", "type_", ":", "Optional", "[", "MimeType", "]", "=", "None", ")", "->", "str", ":", "key", "=", "self", ".", "_validate_enum", "(", "item", "=", "type_", ",", "enum", "=", "MimeType", ")", "types", "=", "MIME_TYPES", "[", "key", "]", "return", "self", ".", "random", ".", "choice", "(", "types", ")" ]
35.222222
11.777778
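Illustrative usage, assuming a mimesis-style File provider exposes this method and a MimeType enum; the exact import paths are assumptions:

from mimesis import File            # assumed provider
from mimesis.enums import MimeType  # assumed enum location

file = File()
print(file.mime_type())                      # random type from any group
print(file.mime_type(type_=MimeType.IMAGE))  # e.g. 'image/png'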
def rst_table(data, schema=None):
    """
    Creates a reStructuredText simple table (list of strings) from a list of
    lists.
    """
    # Process multi-rows (replaced by rows with empty columns when needed)
    pdata = []
    for row in data:
        prow = [el if isinstance(el, list) else [el] for el in row]
        pdata.extend(pr for pr in xzip_longest(*prow, fillvalue=""))

    # Default the schema to the first row (must happen before sizes are computed)
    if schema is None:
        schema = pdata[0]
        pdata = pdata[1:]

    # Find the columns sizes
    sizes = [max(len("{0}".format(el)) for el in column) for column in xzip(*pdata)]
    sizes = [max(size, len(sch)) for size, sch in xzip(sizes, schema)]

    # Creates the title and border rows
    border = " ".join("=" * size for size in sizes)
    titles = " ".join("{1:^{0}}".format(*pair) for pair in xzip(sizes, schema))

    # Creates the full table and returns
    rows = [border, titles, border]
    rows.extend(" ".join("{1:<{0}}".format(*pair) for pair in xzip(sizes, row)) for row in pdata)
    rows.append(border)
    return rows
[ "def", "rst_table", "(", "data", ",", "schema", "=", "None", ")", ":", "# Process multi-rows (replaced by rows with empty columns when needed)", "pdata", "=", "[", "]", "for", "row", "in", "data", ":", "prow", "=", "[", "el", "if", "isinstance", "(", "el", ",", "list", ")", "else", "[", "el", "]", "for", "el", "in", "row", "]", "pdata", ".", "extend", "(", "pr", "for", "pr", "in", "xzip_longest", "(", "*", "prow", ",", "fillvalue", "=", "\"\"", ")", ")", "# Find the columns sizes", "sizes", "=", "[", "max", "(", "len", "(", "\"{0}\"", ".", "format", "(", "el", ")", ")", "for", "el", "in", "column", ")", "for", "column", "in", "xzip", "(", "*", "pdata", ")", "]", "sizes", "=", "[", "max", "(", "size", ",", "len", "(", "sch", ")", ")", "for", "size", ",", "sch", "in", "xzip", "(", "sizes", ",", "schema", ")", "]", "# Creates the title and border rows", "if", "schema", "is", "None", ":", "schema", "=", "pdata", "[", "0", "]", "pdata", "=", "pdata", "[", "1", ":", "]", "border", "=", "\" \"", ".", "join", "(", "\"=\"", "*", "size", "for", "size", "in", "sizes", ")", "titles", "=", "\" \"", ".", "join", "(", "\"{1:^{0}}\"", ".", "format", "(", "*", "pair", ")", "for", "pair", "in", "xzip", "(", "sizes", ",", "schema", ")", ")", "# Creates the full table and returns", "rows", "=", "[", "border", ",", "titles", ",", "border", "]", "rows", ".", "extend", "(", "\" \"", ".", "join", "(", "\"{1:<{0}}\"", ".", "format", "(", "*", "pair", ")", "for", "pair", "in", "xzip", "(", "sizes", ",", "row", ")", ")", "for", "row", "in", "pdata", ")", "rows", ".", "append", "(", "border", ")", "return", "rows" ]
32.870968
17.709677
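A small usage sketch of the function above (output spacing is approximate; cells are padded to the column width):

rows = rst_table([["spam", 1], ["eggs", 12]], schema=["item", "qty"])
print("\n".join(rows))
# ==== ===
# item qty
# ==== ===
# spam 1
# eggs 12
# ==== ===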
def power(maf=0.5, beta=0.1, N=100, cutoff=5e-8):
    """
    estimate power for a given allele frequency, effect size beta and sample size N

    Assumption:
        z-score = beta_ML distributed as p(0) = N(0, 1.0/(2*maf*(1-maf)*N)) under the null hypothesis
        the actual beta_ML is distributed as p(alt) = N(beta, 1.0/(2*maf*(1-maf)*N))

    Arguments:
        maf:    minor allele frequency of the SNP
        beta:   effect size of the SNP
        N:      sample size (number of individuals)

    Returns:
        power:  probability to detect a SNP in that study with the given parameters
    """
    """
	std(snp)=sqrt(2.0*maf*(1-maf)) 
	power = \int 

	beta_ML = (snp^T*snp)^{-1}*snp^T*Y = cov(snp,Y)/var(snp) 
	E[beta_ML]	= (snp^T*snp)^{-1}*snp^T*E[Y] 
				= (snp^T*snp)^{-1}*snp^T*snp * beta
				= beta
	Var[beta_ML]= (snp^T*snp)^{-1}*(snp^T*snp)*(snp^T*snp)^{-1}
				= (snp^T*snp)^{-1}
				= 1/N * var(snp)
				= 1/N * maf*(1-maf)
	"""
    assert maf >= 0.0 and maf <= 0.5, "maf needs to be between 0.0 and 0.5, got %f" % maf
    if beta < 0.0:
        beta = -beta
    std_beta = 1.0/np.sqrt(N*(2.0 * maf*(1.0-maf)))
    non_centrality = beta
    beta_samples = np.random.normal(loc=non_centrality, scale=std_beta)
    n_grid = 100000
    beta_in = np.arange(0.5/(n_grid+1.0), (n_grid-0.5)/(n_grid+1.0), 1.0/(n_grid+1.0))
    beta_theoretical = ((st.norm.isf(beta_in) * std_beta) + non_centrality)
    pvals = st.chi2.sf((beta_theoretical/std_beta)*(beta_theoretical/std_beta), 1.0)
    power = (pvals < cutoff).mean()
    return power, pvals
[ "def", "power", "(", "maf", "=", "0.5", ",", "beta", "=", "0.1", ",", "N", "=", "100", ",", "cutoff", "=", "5e-8", ")", ":", "\"\"\"\n\tstd(snp)=sqrt(2.0*maf*(1-maf)) \n\tpower = \\int \n\n\tbeta_ML = (snp^T*snp)^{-1}*snp^T*Y = cov(snp,Y)/var(snp) \n\tE[beta_ML]\t= (snp^T*snp)^{-1}*snp^T*E[Y] \n\t\t\t\t= (snp^T*snp)^{-1}*snp^T*snp * beta\n\t\t\t\t= beta\n\tVar[beta_ML]= (snp^T*snp)^{-1}*(snp^T*snp)*(snp^T*snp)^{-1}\n\t\t\t\t= (snp^T*snp)^{-1}\n\t\t\t\t= 1/N * var(snp)\n\t\t\t\t= 1/N * maf*(1-maf)\n\t\"\"\"", "assert", "maf", ">=", "0.0", "and", "maf", "<=", "0.5", ",", "\"maf needs to be between 0.0 and 0.5, got %f\"", "%", "maf", "if", "beta", "<", "0.0", ":", "beta", "=", "-", "beta", "std_beta", "=", "1.0", "/", "np", ".", "sqrt", "(", "N", "*", "(", "2.0", "*", "maf", "*", "(", "1.0", "-", "maf", ")", ")", ")", "non_centrality", "=", "beta", "beta_samples", "=", "np", ".", "random", ".", "normal", "(", "loc", "=", "non_centrality", ",", "scale", "=", "std_beta", ")", "n_grid", "=", "100000", "beta_in", "=", "np", ".", "arange", "(", "0.5", "/", "(", "n_grid", "+", "1.0", ")", ",", "(", "n_grid", "-", "0.5", ")", "/", "(", "n_grid", "+", "1.0", ")", ",", "1.0", "/", "(", "n_grid", "+", "1.0", ")", ")", "beta_theoretical", "=", "(", "(", "st", ".", "norm", ".", "isf", "(", "beta_in", ")", "*", "std_beta", ")", "+", "non_centrality", ")", "pvals", "=", "st", ".", "chi2", ".", "sf", "(", "(", "beta_theoretical", "/", "std_beta", ")", "*", "(", "beta_theoretical", "/", "std_beta", ")", ",", "1.0", ")", "power", "=", "(", "pvals", "<", "cutoff", ")", ".", "mean", "(", ")", "return", "power", ",", "pvals" ]
31.244444
23.777778
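A rough driver showing the intended use: for a fixed effect size, the estimated power grows with sample size (the printed values are whatever the grid approximation yields, not reference numbers):

for n in (1000, 5000, 20000):
    pw, _ = power(maf=0.3, beta=0.05, N=n, cutoff=5e-8)
    print(n, pw)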
def cli(out_fmt, input, output):
    """Converts serialized data from the input format to `out_fmt`."""
    _input = StringIO()
    for l in input:
        try:
            _input.write(str(l))
        except TypeError:
            _input.write(bytes(l, 'utf-8'))
    _input = seria.load(_input)
    _out = (_input.dump(out_fmt))
    output.write(_out)
[ "def", "cli", "(", "out_fmt", ",", "input", ",", "output", ")", ":", "_input", "=", "StringIO", "(", ")", "for", "l", "in", "input", ":", "try", ":", "_input", ".", "write", "(", "str", "(", "l", ")", ")", "except", "TypeError", ":", "_input", ".", "write", "(", "bytes", "(", "l", ",", "'utf-8'", ")", ")", "_input", "=", "seria", ".", "load", "(", "_input", ")", "_out", "=", "(", "_input", ".", "dump", "(", "out_fmt", ")", ")", "output", ".", "write", "(", "_out", ")" ]
26.909091
12.181818
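A sketch of what the command does under the hood, using only the seria calls visible above; that 'yaml' is a supported target format is an assumption:

from io import StringIO
import seria

serialized = seria.load(StringIO('{"a": 1}'))
print(serialized.dump('yaml'))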
def fsn2text(path, strict=False):
    """
    Args:
        path (fsnative): The path to convert
        strict (bool): Fail in case the conversion is not reversible
    Returns:
        `text`
    Raises:
        TypeError: In case no `fsnative` has been passed
        ValueError: In case ``strict`` was True and the conversion failed

    Converts a `fsnative` path to `text`.

    Can be used to pass a path to some unicode API, like for example a GUI
    toolkit.

    If ``strict`` is True the conversion will fail in case it is not
    reversible. This can be useful for converting program arguments that are
    supposed to be text and erroring out in case they are not.

    Encoding the result with any Unicode encoding will always succeed.
    """
    path = _fsn2native(path)

    errors = "strict" if strict else "replace"
    if is_win:
        return path.encode("utf-16-le", _surrogatepass).decode("utf-16-le", errors)
    else:
        return path.decode(_encoding, errors)
[ "def", "fsn2text", "(", "path", ",", "strict", "=", "False", ")", ":", "path", "=", "_fsn2native", "(", "path", ")", "errors", "=", "\"strict\"", "if", "strict", "else", "\"replace\"", "if", "is_win", ":", "return", "path", ".", "encode", "(", "\"utf-16-le\"", ",", "_surrogatepass", ")", ".", "decode", "(", "\"utf-16-le\"", ",", "errors", ")", "else", ":", "return", "path", ".", "decode", "(", "_encoding", ",", "errors", ")" ]
32.09375
25.28125
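A round-trip sketch; that `text2fsn` is the inverse helper is an assumption based on the library's naming conventions:

path = text2fsn(u"caf\u00e9.txt")  # text -> fsnative (assumed helper)
print(fsn2text(path))              # u'café.txt'

# strict=True raises ValueError instead of replacing unconvertible bytes:
text = fsn2text(path, strict=True)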
def _partial_corr(self, x=None, y=None, covar=None, x_covar=None, y_covar=None, tail='two-sided', method='pearson'): """Partial and semi-partial correlation.""" stats = partial_corr(data=self, x=x, y=y, covar=covar, x_covar=x_covar, y_covar=y_covar, tail=tail, method=method) return stats
[ "def", "_partial_corr", "(", "self", ",", "x", "=", "None", ",", "y", "=", "None", ",", "covar", "=", "None", ",", "x_covar", "=", "None", ",", "y_covar", "=", "None", ",", "tail", "=", "'two-sided'", ",", "method", "=", "'pearson'", ")", ":", "stats", "=", "partial_corr", "(", "data", "=", "self", ",", "x", "=", "x", ",", "y", "=", "y", ",", "covar", "=", "covar", ",", "x_covar", "=", "x_covar", ",", "y_covar", "=", "y_covar", ",", "tail", "=", "tail", ",", "method", "=", "method", ")", "return", "stats" ]
56.333333
23.166667
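The `self` argument and keyword pass-through suggest this is registered as a pandas accessor; the sketch below assumes it is exposed as DataFrame.partial_corr, which is an assumption about the wiring, not shown in this entry:

# df is assumed to be a pandas DataFrame with numeric columns
# 'x', 'y' and a covariate column 'cv1'.
stats = df.partial_corr(x='x', y='y', covar='cv1', method='spearman')
print(stats)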
def check_complete(task, out_queue): """ Checks if task is complete, puts the result to out_queue. """ logger.debug("Checking if %s is complete", task) try: is_complete = task.complete() except Exception: is_complete = TracebackWrapper(traceback.format_exc()) out_queue.put((task, is_complete))
[ "def", "check_complete", "(", "task", ",", "out_queue", ")", ":", "logger", ".", "debug", "(", "\"Checking if %s is complete\"", ",", "task", ")", "try", ":", "is_complete", "=", "task", ".", "complete", "(", ")", "except", "Exception", ":", "is_complete", "=", "TracebackWrapper", "(", "traceback", ".", "format_exc", "(", ")", ")", "out_queue", ".", "put", "(", "(", "task", ",", "is_complete", ")", ")" ]
32.9
11.5
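A sketch of the worker pattern this helper serves; SomeTask stands in for any task object with a complete() method and is hypothetical:

import multiprocessing

out_queue = multiprocessing.Queue()
worker = multiprocessing.Process(target=check_complete,
                                 args=(SomeTask(), out_queue))
worker.start()
worker.join()

task, is_complete = out_queue.get()
if isinstance(is_complete, TracebackWrapper):
    print("complete() raised an exception for", task)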
def openconfig_interfaces(device_name=None): ''' .. versionadded:: 2019.2.0 Return a dictionary structured as standardised in the `openconfig-interfaces <http://ops.openconfig.net/branches/master/openconfig-interfaces.html>`_ YANG model, containing physical and configuration data available in Netbox, e.g., IP addresses, MTU, enabled / disabled, etc. device_name: ``None`` The name of the device to query the interface data for. If not provided, will use the Minion ID. CLI Example: .. code-block:: bash salt '*' netbox.openconfig_interfaces salt '*' netbox.openconfig_interfaces device_name=cr1.thn.lon ''' oc_if = {} interfaces = get_interfaces(device_name=device_name) ipaddresses = get_ipaddresses(device_name=device_name) for interface in interfaces: if_name, if_unit = _if_name_unit(interface['name']) if if_name not in oc_if: oc_if[if_name] = { 'config': { 'name': if_name }, 'subinterfaces': {'subinterface': {}} } if if_unit == '0': oc_if[if_name]['config']['enabled'] = interface['enabled'] if interface['description']: if if_name == interface['name']: # When that's a real unit 0 interface # Otherwise it will inherit the description from the subif oc_if[if_name]['config']['description'] = str(interface['description']) else: subif_descr = { 'subinterfaces': { 'subinterface': { if_unit: { 'config': { 'description': str(interface['description']) } } } } } oc_if[if_name] = __utils__['dictupdate.update'](oc_if[if_name], subif_descr) if interface['mtu']: oc_if[if_name]['config']['mtu'] = int(interface['mtu']) else: oc_if[if_name]['subinterfaces']['subinterface'][if_unit] = { 'config': { 'index': int(if_unit), 'enabled': interface['enabled'] } } if interface['description']: oc_if[if_name]['subinterfaces']['subinterface'][if_unit]['config']['description'] =\ str(interface['description']) for ipaddress in ipaddresses: ip, prefix_length = ipaddress['address'].split('/') if_name = ipaddress['interface']['name'] if_name, if_unit = _if_name_unit(if_name) ipvkey = 'ipv{}'.format(ipaddress['family']) if if_unit not in oc_if[if_name]['subinterfaces']['subinterface']: oc_if[if_name]['subinterfaces']['subinterface'][if_unit] = { 'config': { 'index': int(if_unit), 'enabled': True } } if ipvkey not in oc_if[if_name]['subinterfaces']['subinterface'][if_unit]: oc_if[if_name]['subinterfaces']['subinterface'][if_unit][ipvkey] = { 'addresses': { 'address': {} } } oc_if[if_name]['subinterfaces']['subinterface'][if_unit][ipvkey]['addresses']['address'][ip] = { 'config': { 'ip': ip, 'prefix_length': int(prefix_length) } } return { 'interfaces': { 'interface': oc_if } }
[ "def", "openconfig_interfaces", "(", "device_name", "=", "None", ")", ":", "oc_if", "=", "{", "}", "interfaces", "=", "get_interfaces", "(", "device_name", "=", "device_name", ")", "ipaddresses", "=", "get_ipaddresses", "(", "device_name", "=", "device_name", ")", "for", "interface", "in", "interfaces", ":", "if_name", ",", "if_unit", "=", "_if_name_unit", "(", "interface", "[", "'name'", "]", ")", "if", "if_name", "not", "in", "oc_if", ":", "oc_if", "[", "if_name", "]", "=", "{", "'config'", ":", "{", "'name'", ":", "if_name", "}", ",", "'subinterfaces'", ":", "{", "'subinterface'", ":", "{", "}", "}", "}", "if", "if_unit", "==", "'0'", ":", "oc_if", "[", "if_name", "]", "[", "'config'", "]", "[", "'enabled'", "]", "=", "interface", "[", "'enabled'", "]", "if", "interface", "[", "'description'", "]", ":", "if", "if_name", "==", "interface", "[", "'name'", "]", ":", "# When that's a real unit 0 interface", "# Otherwise it will inherit the description from the subif", "oc_if", "[", "if_name", "]", "[", "'config'", "]", "[", "'description'", "]", "=", "str", "(", "interface", "[", "'description'", "]", ")", "else", ":", "subif_descr", "=", "{", "'subinterfaces'", ":", "{", "'subinterface'", ":", "{", "if_unit", ":", "{", "'config'", ":", "{", "'description'", ":", "str", "(", "interface", "[", "'description'", "]", ")", "}", "}", "}", "}", "}", "oc_if", "[", "if_name", "]", "=", "__utils__", "[", "'dictupdate.update'", "]", "(", "oc_if", "[", "if_name", "]", ",", "subif_descr", ")", "if", "interface", "[", "'mtu'", "]", ":", "oc_if", "[", "if_name", "]", "[", "'config'", "]", "[", "'mtu'", "]", "=", "int", "(", "interface", "[", "'mtu'", "]", ")", "else", ":", "oc_if", "[", "if_name", "]", "[", "'subinterfaces'", "]", "[", "'subinterface'", "]", "[", "if_unit", "]", "=", "{", "'config'", ":", "{", "'index'", ":", "int", "(", "if_unit", ")", ",", "'enabled'", ":", "interface", "[", "'enabled'", "]", "}", "}", "if", "interface", "[", "'description'", "]", ":", "oc_if", "[", "if_name", "]", "[", "'subinterfaces'", "]", "[", "'subinterface'", "]", "[", "if_unit", "]", "[", "'config'", "]", "[", "'description'", "]", "=", "str", "(", "interface", "[", "'description'", "]", ")", "for", "ipaddress", "in", "ipaddresses", ":", "ip", ",", "prefix_length", "=", "ipaddress", "[", "'address'", "]", ".", "split", "(", "'/'", ")", "if_name", "=", "ipaddress", "[", "'interface'", "]", "[", "'name'", "]", "if_name", ",", "if_unit", "=", "_if_name_unit", "(", "if_name", ")", "ipvkey", "=", "'ipv{}'", ".", "format", "(", "ipaddress", "[", "'family'", "]", ")", "if", "if_unit", "not", "in", "oc_if", "[", "if_name", "]", "[", "'subinterfaces'", "]", "[", "'subinterface'", "]", ":", "oc_if", "[", "if_name", "]", "[", "'subinterfaces'", "]", "[", "'subinterface'", "]", "[", "if_unit", "]", "=", "{", "'config'", ":", "{", "'index'", ":", "int", "(", "if_unit", ")", ",", "'enabled'", ":", "True", "}", "}", "if", "ipvkey", "not", "in", "oc_if", "[", "if_name", "]", "[", "'subinterfaces'", "]", "[", "'subinterface'", "]", "[", "if_unit", "]", ":", "oc_if", "[", "if_name", "]", "[", "'subinterfaces'", "]", "[", "'subinterface'", "]", "[", "if_unit", "]", "[", "ipvkey", "]", "=", "{", "'addresses'", ":", "{", "'address'", ":", "{", "}", "}", "}", "oc_if", "[", "if_name", "]", "[", "'subinterfaces'", "]", "[", "'subinterface'", "]", "[", "if_unit", "]", "[", "ipvkey", "]", "[", "'addresses'", "]", "[", "'address'", "]", "[", "ip", "]", "=", "{", "'config'", ":", "{", "'ip'", ":", "ip", ",", "'prefix_length'", ":", 
"int", "(", "prefix_length", ")", "}", "}", "return", "{", "'interfaces'", ":", "{", "'interface'", ":", "oc_if", "}", "}" ]
39.44086
20.989247
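For reference, the returned structure has this shape (values are illustrative, not taken from a real device):

{
    'interfaces': {
        'interface': {
            'xe-0/0/0': {
                'config': {'name': 'xe-0/0/0', 'enabled': True, 'mtu': 9000},
                'subinterfaces': {
                    'subinterface': {
                        '0': {
                            'config': {'index': 0, 'enabled': True},
                            'ipv4': {
                                'addresses': {
                                    'address': {
                                        '10.0.0.1': {
                                            'config': {
                                                'ip': '10.0.0.1',
                                                'prefix_length': 31,
                                            }
                                        }
                                    }
                                }
                            },
                        }
                    }
                },
            }
        }
    }
}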
def recursive_division(self, cells, min_size, width, height, x=0, y=0, depth=0): """ Recursive division: 1. Split room randomly 1a. Dodge towards larger half if in doorway 2. Place doorway randomly 3. Repeat for each half """ assert isinstance(cells, list) assert isinstance(min_size, int) or isinstance(min_size, float) assert isinstance(width, int) or isinstance(width, float) assert isinstance(height, int) or isinstance(height, float) assert isinstance(x, int) or isinstance(x, float) assert isinstance(y, int) or isinstance(y, float) assert isinstance(depth, int) if width <= min_size or height <= min_size: return # Choose axis to divide if width < height: axis = VERTICAL elif height < width: axis = HORIZONTAL else: axis = randint(0,1) cut_size = height gap_size = width if axis == HORIZONTAL: cut_size = width gap_size = height if cut_size-min_size < min_size: #print('min cut') return if gap_size-min_size < min_size: #print('min gap') return # Random division and doorway cut = randint(min_size, cut_size-min_size) gap = randint(min_size, gap_size-min_size) if not (cut > 0 and gap > 0): #print('Reached zero sized cell') return # Check if next tile is a doorway def dodge_doors(cut): assert isinstance(cut, int) or isinstance(cut, float) empty = False if axis == HORIZONTAL: idx = x+gap_size #print(idx,y+cut) door = cells[idx][y+cut] empty = empty or not door or not door.tile #door.tile = cells[49][1].tile idx = x #print(idx,y+cut) door = cells[idx][y+cut] empty = empty or not door or not door.tile #door.tile = cells[49][0].tile else: idx = y+gap_size #print(x+cut, idx) door = cells[x+cut][idx] empty = empty or not door or not door.tile #door.tile = cells[49][0].tile idx = y #print(x+cut,idx) door = cells[x+cut][idx] empty = empty or not door or not door.tile #door.tile = cells[49][1].tile # Try again on longest side if empty: #print('Door', idx, cut) if gap + (min_size / 2) > (gap_size / 2) - (min_size / 2): cut -= 1 else: cut += 1 if cut < min_size or cut > cut_size-min_size: #print('Reached minimum size') return None else: return dodge_doors(cut) return cut # Skip doors check first time around if depth > 0: cut = dodge_doors(cut) if cut is None: #print('No viable cut found') return None depth += 1 # Create new wall tiles for i in xrange(0, gap_size): if abs(gap - i) > 0: # Copy wall tile from (0,0) if axis == HORIZONTAL: cells[x+i][y+cut].tile = cells[0][0].tile else: cells[x+cut][y+i].tile = cells[0][0].tile # Recurse into each half #print(x, y, [cut, gap], [cut_size, gap_size], 'H' if (axis == HORIZONTAL) else 'V') # N nx, ny = x, y w, h = [cut, height] if (axis == HORIZONTAL) else [width, cut] self.recursive_division(cells, min_size, w, h, nx, ny, depth) # S nx, ny = [x+cut, y] if (axis != HORIZONTAL) else [x, y+cut] w, h = [cut_size-cut, height] if (axis == HORIZONTAL) else [width, cut_size-cut] self.recursive_division(cells, min_size, w, h, nx, ny, depth)
[ "def", "recursive_division", "(", "self", ",", "cells", ",", "min_size", ",", "width", ",", "height", ",", "x", "=", "0", ",", "y", "=", "0", ",", "depth", "=", "0", ")", ":", "assert", "isinstance", "(", "cells", ",", "list", ")", "assert", "isinstance", "(", "min_size", ",", "int", ")", "or", "isinstance", "(", "min_size", ",", "float", ")", "assert", "isinstance", "(", "width", ",", "int", ")", "or", "isinstance", "(", "width", ",", "float", ")", "assert", "isinstance", "(", "height", ",", "int", ")", "or", "isinstance", "(", "height", ",", "float", ")", "assert", "isinstance", "(", "x", ",", "int", ")", "or", "isinstance", "(", "x", ",", "float", ")", "assert", "isinstance", "(", "y", ",", "int", ")", "or", "isinstance", "(", "y", ",", "float", ")", "assert", "isinstance", "(", "depth", ",", "int", ")", "if", "width", "<=", "min_size", "or", "height", "<=", "min_size", ":", "return", "# Choose axis to divide", "if", "width", "<", "height", ":", "axis", "=", "VERTICAL", "elif", "height", "<", "width", ":", "axis", "=", "HORIZONTAL", "else", ":", "axis", "=", "randint", "(", "0", ",", "1", ")", "cut_size", "=", "height", "gap_size", "=", "width", "if", "axis", "==", "HORIZONTAL", ":", "cut_size", "=", "width", "gap_size", "=", "height", "if", "cut_size", "-", "min_size", "<", "min_size", ":", "#print('min cut')", "return", "if", "gap_size", "-", "min_size", "<", "min_size", ":", "#print('min gap')", "return", "# Random division and doorway", "cut", "=", "randint", "(", "min_size", ",", "cut_size", "-", "min_size", ")", "gap", "=", "randint", "(", "min_size", ",", "gap_size", "-", "min_size", ")", "if", "not", "(", "cut", ">", "0", "and", "gap", ">", "0", ")", ":", "#print('Reached zero sized cell')", "return", "# Check if next tile is a doorway", "def", "dodge_doors", "(", "cut", ")", ":", "assert", "isinstance", "(", "cut", ",", "int", ")", "or", "isinstance", "(", "cut", ",", "float", ")", "empty", "=", "False", "if", "axis", "==", "HORIZONTAL", ":", "idx", "=", "x", "+", "gap_size", "#print(idx,y+cut)", "door", "=", "cells", "[", "idx", "]", "[", "y", "+", "cut", "]", "empty", "=", "empty", "or", "not", "door", "or", "not", "door", ".", "tile", "#door.tile = cells[49][1].tile", "idx", "=", "x", "#print(idx,y+cut)", "door", "=", "cells", "[", "idx", "]", "[", "y", "+", "cut", "]", "empty", "=", "empty", "or", "not", "door", "or", "not", "door", ".", "tile", "#door.tile = cells[49][0].tile", "else", ":", "idx", "=", "y", "+", "gap_size", "#print(x+cut, idx)", "door", "=", "cells", "[", "x", "+", "cut", "]", "[", "idx", "]", "empty", "=", "empty", "or", "not", "door", "or", "not", "door", ".", "tile", "#door.tile = cells[49][0].tile", "idx", "=", "y", "#print(x+cut,idx)", "door", "=", "cells", "[", "x", "+", "cut", "]", "[", "idx", "]", "empty", "=", "empty", "or", "not", "door", "or", "not", "door", ".", "tile", "#door.tile = cells[49][1].tile", "# Try again on longest side", "if", "empty", ":", "#print('Door', idx, cut)", "if", "gap", "+", "(", "min_size", "/", "2", ")", ">", "(", "gap_size", "/", "2", ")", "-", "(", "min_size", "/", "2", ")", ":", "cut", "-=", "1", "else", ":", "cut", "+=", "1", "if", "cut", "<", "min_size", "or", "cut", ">", "cut_size", "-", "min_size", ":", "#print('Reached minimum size')", "return", "None", "else", ":", "return", "dodge_doors", "(", "cut", ")", "return", "cut", "# Skip doors check first time around", "if", "depth", ">", "0", ":", "cut", "=", "dodge_doors", "(", "cut", ")", "if", "cut", "is", "None", ":", "#print('No viable cut found')", "return", 
"None", "depth", "+=", "1", "# Create new wall tiles", "for", "i", "in", "xrange", "(", "0", ",", "gap_size", ")", ":", "if", "abs", "(", "gap", "-", "i", ")", ">", "0", ":", "# Copy wall tile from (0,0)", "if", "axis", "==", "HORIZONTAL", ":", "cells", "[", "x", "+", "i", "]", "[", "y", "+", "cut", "]", ".", "tile", "=", "cells", "[", "0", "]", "[", "0", "]", ".", "tile", "else", ":", "cells", "[", "x", "+", "cut", "]", "[", "y", "+", "i", "]", ".", "tile", "=", "cells", "[", "0", "]", "[", "0", "]", ".", "tile", "# Recurse into each half", "#print(x, y, [cut, gap], [cut_size, gap_size], 'H' if (axis == HORIZONTAL) else 'V')", "# N", "nx", ",", "ny", "=", "x", ",", "y", "w", ",", "h", "=", "[", "cut", ",", "height", "]", "if", "(", "axis", "==", "HORIZONTAL", ")", "else", "[", "width", ",", "cut", "]", "self", ".", "recursive_division", "(", "cells", ",", "min_size", ",", "w", ",", "h", ",", "nx", ",", "ny", ",", "depth", ")", "# S", "nx", ",", "ny", "=", "[", "x", "+", "cut", ",", "y", "]", "if", "(", "axis", "!=", "HORIZONTAL", ")", "else", "[", "x", ",", "y", "+", "cut", "]", "w", ",", "h", "=", "[", "cut_size", "-", "cut", ",", "height", "]", "if", "(", "axis", "==", "HORIZONTAL", ")", "else", "[", "width", ",", "cut_size", "-", "cut", "]", "self", ".", "recursive_division", "(", "cells", ",", "min_size", ",", "w", ",", "h", ",", "nx", ",", "ny", ",", "depth", ")" ]
33.327869
17.196721
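A hypothetical driver; Cell and Dungeon are stand-ins for whatever grid and generator classes the project defines, with cells[0][0].tile assumed to hold the wall tile that new walls copy:

width, height = 50, 30
cells = [[Cell() for _ in range(height)] for _ in range(width)]  # cells[x][y]

gen = Dungeon()  # hypothetical owner of recursive_division
gen.recursive_division(cells, min_size=4, width=width, height=height)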
def teardown(): """Remove integration""" if not self._has_been_setup: return deregister_plugins() deregister_host() if self._has_menu: remove_from_filemenu() self._has_menu = False self._has_been_setup = False print("pyblish: Integration torn down successfully")
[ "def", "teardown", "(", ")", ":", "if", "not", "self", ".", "_has_been_setup", ":", "return", "deregister_plugins", "(", ")", "deregister_host", "(", ")", "if", "self", ".", "_has_menu", ":", "remove_from_filemenu", "(", ")", "self", ".", "_has_menu", "=", "False", "self", ".", "_has_been_setup", "=", "False", "print", "(", "\"pyblish: Integration torn down successfully\"", ")" ]
21.714286
19.714286
def download(download_info): """Module method for downloading from S3 This public module method takes a key and the full path to the destination directory, assumes that the args have been validated by the public caller methods, and attempts to download the specified key to the dest_dir. :param download_info: (dict) Contains the following params key: (str) S3 key for the file to be downloaded dest_dir: (str) Full path destination directory bucket_name: (str) Name of the bucket to download from credentials: (dict) containing AWS credential info (optional) aws_region: (str) AWS S3 region aws_access_key_id: (str) AWS access key ID aws_secret_access_key: (str) AWS secret access key :return: (str) Downloaded file destination if the file was downloaded successfully :raises S3UtilError """ log = logging.getLogger(mod_logger + '.download') # Ensure the passed arg is a dict if not isinstance(download_info, dict): msg = 'download_info arg should be a dict, found: {t}'.format(t=download_info.__class__.__name__) raise TypeError(msg) # Check for and obtain required args required_args = ['key', 'dest_dir', 'bucket_name'] for required_arg in required_args: if required_arg not in download_info: msg = 'Required arg not provided: {r}'.format(r=required_arg) log.error(msg) raise S3UtilError(msg) log.debug('Processing download request: {r}'.format(r=download_info)) key = download_info['key'] dest_dir = download_info['dest_dir'] bucket_name = download_info['bucket_name'] region_name = None aws_access_key_id = None aws_secret_access_key = None try: creds = download_info['credentials'] except KeyError: log.debug('No credentials found for this download request') else: try: region_name = creds['region_name'] aws_access_key_id = creds['aws_access_key_id'] aws_secret_access_key = creds['aws_secret_access_key'] except KeyError: log.warn('Insufficient credentials found for download request') region_name = None aws_access_key_id = None aws_secret_access_key = None log.debug('Configuring S3 client with AWS Access key ID {k} and region {r}'.format( k=aws_access_key_id, r=region_name)) # Establish an S3 client client = boto3.client('s3', region_name=region_name, aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key) # Attempt to determine the file name from key filename = key.split('/')[-1] if filename is None: msg = 'Could not determine the filename from key: {k}'.format(k=key) log.error(msg) raise S3UtilError(msg) # Set the destination destination = os.path.join(dest_dir, filename) # Return if the destination file was already downloaded if os.path.isfile(destination): log.info('File already downloaded: {d}'.format(d=destination)) return destination # Attempt the download log.info('Attempting to download %s from bucket %s to destination %s', key, bucket_name, destination) max_tries = 10 retry_timer = 5 count = 1 while count <= max_tries: log.info('Attempting to download file {k}: try {c} of {m}'.format(k=key, c=count, m=max_tries)) try: client.download_file(Bucket=bucket_name, Key=key, Filename=destination) except ClientError: if count >= max_tries: _, ex, trace = sys.exc_info() msg = 'Unable to download key {k} from S3 bucket {b}:\n{e}'.format(k=key, b=bucket_name, e=str(ex)) log.error(msg) raise S3UtilError, msg, trace else: log.warn('Download failed, re-trying in {t} sec...'.format(t=retry_timer)) count += 1 time.sleep(retry_timer) continue else: log.info('Successfully downloaded {k} from S3 bucket {b} to: {d}'.format( k=key, b=bucket_name, d=destination)) return destination
[ "def", "download", "(", "download_info", ")", ":", "log", "=", "logging", ".", "getLogger", "(", "mod_logger", "+", "'.download'", ")", "# Ensure the passed arg is a dict", "if", "not", "isinstance", "(", "download_info", ",", "dict", ")", ":", "msg", "=", "'download_info arg should be a dict, found: {t}'", ".", "format", "(", "t", "=", "download_info", ".", "__class__", ".", "__name__", ")", "raise", "TypeError", "(", "msg", ")", "# Check for and obtain required args", "required_args", "=", "[", "'key'", ",", "'dest_dir'", ",", "'bucket_name'", "]", "for", "required_arg", "in", "required_args", ":", "if", "required_arg", "not", "in", "download_info", ":", "msg", "=", "'Required arg not provided: {r}'", ".", "format", "(", "r", "=", "required_arg", ")", "log", ".", "error", "(", "msg", ")", "raise", "S3UtilError", "(", "msg", ")", "log", ".", "debug", "(", "'Processing download request: {r}'", ".", "format", "(", "r", "=", "download_info", ")", ")", "key", "=", "download_info", "[", "'key'", "]", "dest_dir", "=", "download_info", "[", "'dest_dir'", "]", "bucket_name", "=", "download_info", "[", "'bucket_name'", "]", "region_name", "=", "None", "aws_access_key_id", "=", "None", "aws_secret_access_key", "=", "None", "try", ":", "creds", "=", "download_info", "[", "'credentials'", "]", "except", "KeyError", ":", "log", ".", "debug", "(", "'No credentials found for this download request'", ")", "else", ":", "try", ":", "region_name", "=", "creds", "[", "'region_name'", "]", "aws_access_key_id", "=", "creds", "[", "'aws_access_key_id'", "]", "aws_secret_access_key", "=", "creds", "[", "'aws_secret_access_key'", "]", "except", "KeyError", ":", "log", ".", "warn", "(", "'Insufficient credentials found for download request'", ")", "region_name", "=", "None", "aws_access_key_id", "=", "None", "aws_secret_access_key", "=", "None", "log", ".", "debug", "(", "'Configuring S3 client with AWS Access key ID {k} and region {r}'", ".", "format", "(", "k", "=", "aws_access_key_id", ",", "r", "=", "region_name", ")", ")", "# Establish an S3 client", "client", "=", "boto3", ".", "client", "(", "'s3'", ",", "region_name", "=", "region_name", ",", "aws_access_key_id", "=", "aws_access_key_id", ",", "aws_secret_access_key", "=", "aws_secret_access_key", ")", "# Attempt to determine the file name from key", "filename", "=", "key", ".", "split", "(", "'/'", ")", "[", "-", "1", "]", "if", "filename", "is", "None", ":", "msg", "=", "'Could not determine the filename from key: {k}'", ".", "format", "(", "k", "=", "key", ")", "log", ".", "error", "(", "msg", ")", "raise", "S3UtilError", "(", "msg", ")", "# Set the destination", "destination", "=", "os", ".", "path", ".", "join", "(", "dest_dir", ",", "filename", ")", "# Return if the destination file was already downloaded", "if", "os", ".", "path", ".", "isfile", "(", "destination", ")", ":", "log", ".", "info", "(", "'File already downloaded: {d}'", ".", "format", "(", "d", "=", "destination", ")", ")", "return", "destination", "# Attempt the download", "log", ".", "info", "(", "'Attempting to download %s from bucket %s to destination %s'", ",", "key", ",", "bucket_name", ",", "destination", ")", "max_tries", "=", "10", "retry_timer", "=", "5", "count", "=", "1", "while", "count", "<=", "max_tries", ":", "log", ".", "info", "(", "'Attempting to download file {k}: try {c} of {m}'", ".", "format", "(", "k", "=", "key", ",", "c", "=", "count", ",", "m", "=", "max_tries", ")", ")", "try", ":", "client", ".", "download_file", "(", "Bucket", "=", "bucket_name", ",", 
"Key", "=", "key", ",", "Filename", "=", "destination", ")", "except", "ClientError", ":", "if", "count", ">=", "max_tries", ":", "_", ",", "ex", ",", "trace", "=", "sys", ".", "exc_info", "(", ")", "msg", "=", "'Unable to download key {k} from S3 bucket {b}:\\n{e}'", ".", "format", "(", "k", "=", "key", ",", "b", "=", "bucket_name", ",", "e", "=", "str", "(", "ex", ")", ")", "log", ".", "error", "(", "msg", ")", "raise", "S3UtilError", ",", "msg", ",", "trace", "else", ":", "log", ".", "warn", "(", "'Download failed, re-trying in {t} sec...'", ".", "format", "(", "t", "=", "retry_timer", ")", ")", "count", "+=", "1", "time", ".", "sleep", "(", "retry_timer", ")", "continue", "else", ":", "log", ".", "info", "(", "'Successfully downloaded {k} from S3 bucket {b} to: {d}'", ".", "format", "(", "k", "=", "key", ",", "b", "=", "bucket_name", ",", "d", "=", "destination", ")", ")", "return", "destination" ]
39.885714
20.8
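An illustrative call; per the docstring the credentials block is optional and the key's basename becomes the local filename:

dest = download({
    'key': 'releases/app-1.2.3.tar.gz',
    'dest_dir': '/tmp/downloads',
    'bucket_name': 'my-artifact-bucket',   # illustrative bucket name
    'credentials': {
        'region_name': 'us-east-1',
        'aws_access_key_id': 'AKIA...',        # placeholder
        'aws_secret_access_key': '********',   # placeholder
    },
})
print(dest)  # /tmp/downloads/app-1.2.3.tar.gz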
def get_serial_ports_list(): """ Lists serial port names :raises EnvironmentError: On unsupported or unknown platforms :returns: A list of the serial ports available on the system """ if sys.platform.startswith('win'): ports = ['COM%s' % (i + 1) for i in range(256)] elif (sys.platform.startswith('linux') or sys.platform.startswith('cygwin')): # this excludes your current terminal "/dev/tty" ports = glob.glob('/dev/tty*') elif sys.platform.startswith('darwin'): ports = glob.glob('/dev/tty.*') else: raise EnvironmentError('Unsupported platform') result = [] port_filter = {'usbmodem', 'usbserial', 'COM', 'ACM', 'USB'} for port in ports: try: if any([f in port for f in port_filter]): s = serial.Serial() c = connection.Connection( s, port=port, baudrate=115200, timeout=0.01) c.open() result.append(port) except Exception as e: log.debug( 'Exception in testing port {}'.format(port)) log.debug(e) return result
[ "def", "get_serial_ports_list", "(", ")", ":", "if", "sys", ".", "platform", ".", "startswith", "(", "'win'", ")", ":", "ports", "=", "[", "'COM%s'", "%", "(", "i", "+", "1", ")", "for", "i", "in", "range", "(", "256", ")", "]", "elif", "(", "sys", ".", "platform", ".", "startswith", "(", "'linux'", ")", "or", "sys", ".", "platform", ".", "startswith", "(", "'cygwin'", ")", ")", ":", "# this excludes your current terminal \"/dev/tty\"", "ports", "=", "glob", ".", "glob", "(", "'/dev/tty*'", ")", "elif", "sys", ".", "platform", ".", "startswith", "(", "'darwin'", ")", ":", "ports", "=", "glob", ".", "glob", "(", "'/dev/tty.*'", ")", "else", ":", "raise", "EnvironmentError", "(", "'Unsupported platform'", ")", "result", "=", "[", "]", "port_filter", "=", "{", "'usbmodem'", ",", "'usbserial'", ",", "'COM'", ",", "'ACM'", ",", "'USB'", "}", "for", "port", "in", "ports", ":", "try", ":", "if", "any", "(", "[", "f", "in", "port", "for", "f", "in", "port_filter", "]", ")", ":", "s", "=", "serial", ".", "Serial", "(", ")", "c", "=", "connection", ".", "Connection", "(", "s", ",", "port", "=", "port", ",", "baudrate", "=", "115200", ",", "timeout", "=", "0.01", ")", "c", ".", "open", "(", ")", "result", ".", "append", "(", "port", ")", "except", "Exception", "as", "e", ":", "log", ".", "debug", "(", "'Exception in testing port {}'", ".", "format", "(", "port", ")", ")", "log", ".", "debug", "(", "e", ")", "return", "result" ]
34.294118
14.470588
def _vpcs_path(self):
    """
    Returns the VPCS executable path.

    :returns: path to VPCS
    """
    search_path = self._manager.config.get_section_config("VPCS").get("vpcs_path", "vpcs")
    path = shutil.which(search_path)
    # shutil.which returns None if the path doesn't exist
    if not path:
        return search_path
    return path
[ "def", "_vpcs_path", "(", "self", ")", ":", "search_path", "=", "self", ".", "_manager", ".", "config", ".", "get_section_config", "(", "\"VPCS\"", ")", ".", "get", "(", "\"vpcs_path\"", ",", "\"vpcs\"", ")", "path", "=", "shutil", ".", "which", "(", "search_path", ")", "# shutil.which return None if the path doesn't exists", "if", "not", "path", ":", "return", "search_path", "return", "path" ]
29.076923
18.153846
def process_event(self, event_name: str, data: dict) -> None:
    """
    Process event after epoch
    Args:
        event_name: whether the event was sent after an epoch or a batch.
            Set of values: ``"after_epoch", "after_batch"``
        data: event data (dictionary)

    Returns:
        None
    """
    if event_name == "after_epoch":
        self.epochs_done = data["epochs_done"]
        self.batches_seen = data["batches_seen"]
        self.train_examples_seen = data["train_examples_seen"]
    return
[ "def", "process_event", "(", "self", ",", "event_name", ":", "str", ",", "data", ":", "dict", ")", "->", "None", ":", "if", "event_name", "==", "\"after_epoch\"", ":", "self", ".", "epochs_done", "=", "data", "[", "\"epochs_done\"", "]", "self", ".", "batches_seen", "=", "data", "[", "\"batches_seen\"", "]", "self", ".", "train_examples_seen", "=", "data", "[", "\"train_examples_seen\"", "]", "return" ]
34.8125
17.0625
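A sketch of the payload this expects after an epoch; the counter values are illustrative:

component.process_event('after_epoch', {
    'epochs_done': 3,
    'batches_seen': 1200,
    'train_examples_seen': 38400,
})
# 'after_batch' events fall through without touching the counters.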
def user_absent(name, profile=None, **connection_args): ''' Ensure that the keystone user is absent. name The name of the user that should not exist ''' ret = {'name': name, 'changes': {}, 'result': True, 'comment': 'User "{0}" is already absent'.format(name)} # Check if user is present user = __salt__['keystone.user_get'](name=name, profile=profile, **connection_args) if 'Error' not in user: if __opts__.get('test'): ret['result'] = None ret['comment'] = 'User "{0}" will be deleted'.format(name) return ret # Delete that user! __salt__['keystone.user_delete'](name=name, profile=profile, **connection_args) ret['comment'] = 'User "{0}" has been deleted'.format(name) ret['changes']['User'] = 'Deleted' return ret
[ "def", "user_absent", "(", "name", ",", "profile", "=", "None", ",", "*", "*", "connection_args", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'changes'", ":", "{", "}", ",", "'result'", ":", "True", ",", "'comment'", ":", "'User \"{0}\" is already absent'", ".", "format", "(", "name", ")", "}", "# Check if user is present", "user", "=", "__salt__", "[", "'keystone.user_get'", "]", "(", "name", "=", "name", ",", "profile", "=", "profile", ",", "*", "*", "connection_args", ")", "if", "'Error'", "not", "in", "user", ":", "if", "__opts__", ".", "get", "(", "'test'", ")", ":", "ret", "[", "'result'", "]", "=", "None", "ret", "[", "'comment'", "]", "=", "'User \"{0}\" will be deleted'", ".", "format", "(", "name", ")", "return", "ret", "# Delete that user!", "__salt__", "[", "'keystone.user_delete'", "]", "(", "name", "=", "name", ",", "profile", "=", "profile", ",", "*", "*", "connection_args", ")", "ret", "[", "'comment'", "]", "=", "'User \"{0}\" has been deleted'", ".", "format", "(", "name", ")", "ret", "[", "'changes'", "]", "[", "'User'", "]", "=", "'Deleted'", "return", "ret" ]
34.407407
21
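The return shape when the user exists and test mode is off (user name is illustrative):

{
    'name': 'jdoe',
    'changes': {'User': 'Deleted'},
    'result': True,
    'comment': 'User "jdoe" has been deleted',
}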
def compute_overlayable_zorders(obj, path=[]): """ Traverses an overlayable composite container to determine which objects are associated with specific (Nd)Overlay layers by z-order, making sure to take DynamicMap Callables into account. Returns a mapping between the zorders of each layer and a corresponding lists of objects. Used to determine which overlaid subplots should be linked with Stream callbacks. """ path = path+[obj] zorder_map = defaultdict(list) # Process non-dynamic layers if not isinstance(obj, DynamicMap): if isinstance(obj, CompositeOverlay): for z, o in enumerate(obj): zorder_map[z] = [o, obj] elif isinstance(obj, HoloMap): for el in obj.values(): if isinstance(el, CompositeOverlay): for k, v in compute_overlayable_zorders(el, path).items(): zorder_map[k] += v + [obj] else: zorder_map[0] += [obj, el] else: if obj not in zorder_map[0]: zorder_map[0].append(obj) return zorder_map isoverlay = isinstance(obj.last, CompositeOverlay) isdynoverlay = obj.callback._is_overlay if obj not in zorder_map[0] and not isoverlay: zorder_map[0].append(obj) depth = overlay_depth(obj) # Process the inputs of the DynamicMap callback dmap_inputs = obj.callback.inputs if obj.callback.link_inputs else [] for z, inp in enumerate(dmap_inputs): no_zorder_increment = False if any(not (isoverlay_fn(p) or p.last is None) for p in path) and isoverlay_fn(inp): # If overlay has been collapsed do not increment zorder no_zorder_increment = True input_depth = overlay_depth(inp) if depth is not None and input_depth is not None and depth < input_depth: # Skips branch of graph where the number of elements in an # overlay has been reduced but still contains more than one layer if depth > 1: continue else: no_zorder_increment = True # Recurse into DynamicMap.callback.inputs and update zorder_map z = z if isdynoverlay else 0 deep_zorders = compute_overlayable_zorders(inp, path=path) offset = max(zorder_map.keys()) for dz, objs in deep_zorders.items(): global_z = offset+z if no_zorder_increment else offset+dz+z zorder_map[global_z] = list(unique_iterator(zorder_map[global_z]+objs)) # If object branches but does not declare inputs (e.g. user defined # DynamicMaps returning (Nd)Overlay) add the items on the DynamicMap.last found = any(isinstance(p, DynamicMap) and p.callback._is_overlay for p in path) linked = any(isinstance(s, LinkedStream) and s.linked for s in obj.streams) if (found or linked) and isoverlay and not isdynoverlay: offset = max(zorder_map.keys()) for z, o in enumerate(obj.last): if isoverlay and linked: zorder_map[offset+z].append(obj) if o not in zorder_map[offset+z]: zorder_map[offset+z].append(o) return zorder_map
[ "def", "compute_overlayable_zorders", "(", "obj", ",", "path", "=", "[", "]", ")", ":", "path", "=", "path", "+", "[", "obj", "]", "zorder_map", "=", "defaultdict", "(", "list", ")", "# Process non-dynamic layers", "if", "not", "isinstance", "(", "obj", ",", "DynamicMap", ")", ":", "if", "isinstance", "(", "obj", ",", "CompositeOverlay", ")", ":", "for", "z", ",", "o", "in", "enumerate", "(", "obj", ")", ":", "zorder_map", "[", "z", "]", "=", "[", "o", ",", "obj", "]", "elif", "isinstance", "(", "obj", ",", "HoloMap", ")", ":", "for", "el", "in", "obj", ".", "values", "(", ")", ":", "if", "isinstance", "(", "el", ",", "CompositeOverlay", ")", ":", "for", "k", ",", "v", "in", "compute_overlayable_zorders", "(", "el", ",", "path", ")", ".", "items", "(", ")", ":", "zorder_map", "[", "k", "]", "+=", "v", "+", "[", "obj", "]", "else", ":", "zorder_map", "[", "0", "]", "+=", "[", "obj", ",", "el", "]", "else", ":", "if", "obj", "not", "in", "zorder_map", "[", "0", "]", ":", "zorder_map", "[", "0", "]", ".", "append", "(", "obj", ")", "return", "zorder_map", "isoverlay", "=", "isinstance", "(", "obj", ".", "last", ",", "CompositeOverlay", ")", "isdynoverlay", "=", "obj", ".", "callback", ".", "_is_overlay", "if", "obj", "not", "in", "zorder_map", "[", "0", "]", "and", "not", "isoverlay", ":", "zorder_map", "[", "0", "]", ".", "append", "(", "obj", ")", "depth", "=", "overlay_depth", "(", "obj", ")", "# Process the inputs of the DynamicMap callback", "dmap_inputs", "=", "obj", ".", "callback", ".", "inputs", "if", "obj", ".", "callback", ".", "link_inputs", "else", "[", "]", "for", "z", ",", "inp", "in", "enumerate", "(", "dmap_inputs", ")", ":", "no_zorder_increment", "=", "False", "if", "any", "(", "not", "(", "isoverlay_fn", "(", "p", ")", "or", "p", ".", "last", "is", "None", ")", "for", "p", "in", "path", ")", "and", "isoverlay_fn", "(", "inp", ")", ":", "# If overlay has been collapsed do not increment zorder", "no_zorder_increment", "=", "True", "input_depth", "=", "overlay_depth", "(", "inp", ")", "if", "depth", "is", "not", "None", "and", "input_depth", "is", "not", "None", "and", "depth", "<", "input_depth", ":", "# Skips branch of graph where the number of elements in an", "# overlay has been reduced but still contains more than one layer", "if", "depth", ">", "1", ":", "continue", "else", ":", "no_zorder_increment", "=", "True", "# Recurse into DynamicMap.callback.inputs and update zorder_map", "z", "=", "z", "if", "isdynoverlay", "else", "0", "deep_zorders", "=", "compute_overlayable_zorders", "(", "inp", ",", "path", "=", "path", ")", "offset", "=", "max", "(", "zorder_map", ".", "keys", "(", ")", ")", "for", "dz", ",", "objs", "in", "deep_zorders", ".", "items", "(", ")", ":", "global_z", "=", "offset", "+", "z", "if", "no_zorder_increment", "else", "offset", "+", "dz", "+", "z", "zorder_map", "[", "global_z", "]", "=", "list", "(", "unique_iterator", "(", "zorder_map", "[", "global_z", "]", "+", "objs", ")", ")", "# If object branches but does not declare inputs (e.g. 
user defined", "# DynamicMaps returning (Nd)Overlay) add the items on the DynamicMap.last", "found", "=", "any", "(", "isinstance", "(", "p", ",", "DynamicMap", ")", "and", "p", ".", "callback", ".", "_is_overlay", "for", "p", "in", "path", ")", "linked", "=", "any", "(", "isinstance", "(", "s", ",", "LinkedStream", ")", "and", "s", ".", "linked", "for", "s", "in", "obj", ".", "streams", ")", "if", "(", "found", "or", "linked", ")", "and", "isoverlay", "and", "not", "isdynoverlay", ":", "offset", "=", "max", "(", "zorder_map", ".", "keys", "(", ")", ")", "for", "z", ",", "o", "in", "enumerate", "(", "obj", ".", "last", ")", ":", "if", "isoverlay", "and", "linked", ":", "zorder_map", "[", "offset", "+", "z", "]", ".", "append", "(", "obj", ")", "if", "o", "not", "in", "zorder_map", "[", "offset", "+", "z", "]", ":", "zorder_map", "[", "offset", "+", "z", "]", ".", "append", "(", "o", ")", "return", "zorder_map" ]
42.824324
17.608108
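A sketch on a simple static overlay, assuming HoloViews elements; the exact contents of each list depend on the object graph:

import holoviews as hv

overlay = hv.Curve([1, 2, 3]) * hv.Scatter([3, 2, 1])
zorders = compute_overlayable_zorders(overlay)
# roughly {0: [Curve, Overlay], 1: [Scatter, Overlay]}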
def newline(self): """Effects a newline by moving the cursor down and clearing""" self.write(self.term.move_down) self.write(self.term.clear_bol)
[ "def", "newline", "(", "self", ")", ":", "self", ".", "write", "(", "self", ".", "term", ".", "move_down", ")", "self", ".", "write", "(", "self", ".", "term", ".", "clear_bol", ")" ]
41.5
6
def display_name(self): """Calculates the display name for a room.""" if self.name: return self.name elif self.canonical_alias: return self.canonical_alias # Member display names without me members = [u.get_display_name(self) for u in self.get_joined_members() if self.client.user_id != u.user_id] members.sort() if len(members) == 1: return members[0] elif len(members) == 2: return "{0} and {1}".format(members[0], members[1]) elif len(members) > 2: return "{0} and {1} others".format(members[0], len(members) - 1) else: # len(members) <= 0 or not an integer # TODO i18n return "Empty room"
[ "def", "display_name", "(", "self", ")", ":", "if", "self", ".", "name", ":", "return", "self", ".", "name", "elif", "self", ".", "canonical_alias", ":", "return", "self", ".", "canonical_alias", "# Member display names without me", "members", "=", "[", "u", ".", "get_display_name", "(", "self", ")", "for", "u", "in", "self", ".", "get_joined_members", "(", ")", "if", "self", ".", "client", ".", "user_id", "!=", "u", ".", "user_id", "]", "members", ".", "sort", "(", ")", "if", "len", "(", "members", ")", "==", "1", ":", "return", "members", "[", "0", "]", "elif", "len", "(", "members", ")", "==", "2", ":", "return", "\"{0} and {1}\"", ".", "format", "(", "members", "[", "0", "]", ",", "members", "[", "1", "]", ")", "elif", "len", "(", "members", ")", ">", "2", ":", "return", "\"{0} and {1} others\"", ".", "format", "(", "members", "[", "0", "]", ",", "len", "(", "members", ")", "-", "1", ")", "else", ":", "# len(members) <= 0 or not an integer", "# TODO i18n", "return", "\"Empty room\"" ]
36.095238
16.428571
def filter_matrix_columns(A, theta):
    """Filter each column of A with theta.

    i.e., drop all entries in column k where
        abs(A[i,k]) < theta * max( abs(A[:,k]) )

    Parameters
    ----------
    A : sparse_matrix

    theta : float
        In range [0,1) and defines drop-tolerance used to filter the columns
        of A

    Returns
    -------
    A_filter : sparse_matrix
        Each column has been filtered by dropping all entries where
        abs(A[i,k]) < theta * max( abs(A[:,k]) )

    Examples
    --------
    >>> from pyamg.gallery import poisson
    >>> from pyamg.util.utils import filter_matrix_columns
    >>> from scipy import array
    >>> from scipy.sparse import csr_matrix
    >>> A = csr_matrix( array([[ 0.24, 1.  , 0.  ],
    ...                        [-0.5 , 1.  , -0.5 ],
    ...                        [ 0.  , 0.49, 1.  ],
    ...                        [ 0.  , 0.  , -0.5 ]]) )
    >>> filter_matrix_columns(A, 0.5).todense()
    matrix([[ 0. ,  1. ,  0. ],
            [-0.5,  1. , -0.5],
            [ 0. ,  0. ,  1. ],
            [ 0. ,  0. , -0.5]])

    """
    if not isspmatrix(A):
        raise ValueError("Sparse matrix input needed")

    if isspmatrix_bsr(A):
        blocksize = A.blocksize
    Aformat = A.format

    if (theta < 0) or (theta >= 1.0):
        raise ValueError("theta must be in [0,1)")

    # Apply drop-tolerance to each column of A, which is most easily
    # accessed by converting to CSC.  We apply the drop-tolerance with
    # amg_core.classical_strength_of_connection(), which ignores
    # diagonal entries, thus necessitating the trick where we add
    # A.shape[1] to each of the column indices
    A = A.copy().tocsc()
    A_filter = A.copy()
    A.indices += A.shape[1]
    A_filter.indices += A.shape[1]

    # classical_strength_of_connection takes an absolute value internally
    pyamg.amg_core.classical_strength_of_connection_abs(
        A.shape[1], theta, A.indptr, A.indices, A.data,
        A_filter.indptr, A_filter.indices, A_filter.data)

    A_filter.indices[:A_filter.indptr[-1]] -= A_filter.shape[1]
    A_filter = csc_matrix((A_filter.data[:A_filter.indptr[-1]],
                           A_filter.indices[:A_filter.indptr[-1]],
                           A_filter.indptr), shape=A_filter.shape)
    del A

    if Aformat == 'bsr':
        A_filter = A_filter.tobsr(blocksize)
    else:
        A_filter = A_filter.asformat(Aformat)

    return A_filter
[ "def", "filter_matrix_columns", "(", "A", ",", "theta", ")", ":", "if", "not", "isspmatrix", "(", "A", ")", ":", "raise", "ValueError", "(", "\"Sparse matrix input needed\"", ")", "if", "isspmatrix_bsr", "(", "A", ")", ":", "blocksize", "=", "A", ".", "blocksize", "Aformat", "=", "A", ".", "format", "if", "(", "theta", "<", "0", ")", "or", "(", "theta", ">=", "1.0", ")", ":", "raise", "ValueError", "(", "\"theta must be in [0,1)\"", ")", "# Apply drop-tolerance to each column of A, which is most easily", "# accessed by converting to CSC. We apply the drop-tolerance with", "# amg_core.classical_strength_of_connection(), which ignores", "# diagonal entries, thus necessitating the trick where we add", "# A.shape[1] to each of the column indices", "A", "=", "A", ".", "copy", "(", ")", ".", "tocsc", "(", ")", "A_filter", "=", "A", ".", "copy", "(", ")", "A", ".", "indices", "+=", "A", ".", "shape", "[", "1", "]", "A_filter", ".", "indices", "+=", "A", ".", "shape", "[", "1", "]", "# classical_strength_of_connection takes an absolute value internally", "pyamg", ".", "amg_core", ".", "classical_strength_of_connection_abs", "(", "A", ".", "shape", "[", "1", "]", ",", "theta", ",", "A", ".", "indptr", ",", "A", ".", "indices", ",", "A", ".", "data", ",", "A_filter", ".", "indptr", ",", "A_filter", ".", "indices", ",", "A_filter", ".", "data", ")", "A_filter", ".", "indices", "[", ":", "A_filter", ".", "indptr", "[", "-", "1", "]", "]", "-=", "A_filter", ".", "shape", "[", "1", "]", "A_filter", "=", "csc_matrix", "(", "(", "A_filter", ".", "data", "[", ":", "A_filter", ".", "indptr", "[", "-", "1", "]", "]", ",", "A_filter", ".", "indices", "[", ":", "A_filter", ".", "indptr", "[", "-", "1", "]", "]", ",", "A_filter", ".", "indptr", ")", ",", "shape", "=", "A_filter", ".", "shape", ")", "del", "A", "if", "Aformat", "==", "'bsr'", ":", "A_filter", "=", "A_filter", ".", "tobsr", "(", "blocksize", ")", "else", ":", "A_filter", "=", "A_filter", ".", "asformat", "(", "Aformat", ")", "return", "A_filter" ]
31.506494
19.805195
def designPrimers(p3_args, input_log=None, output_log=None, err_log=None): ''' Return the raw primer3_core output for the provided primer3 args. Returns an ordered dict of the boulderIO-format primer3 output file ''' sp = subprocess.Popen([pjoin(PRIMER3_HOME, 'primer3_core')], stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.STDOUT) p3_args.setdefault('PRIMER_THERMODYNAMIC_PARAMETERS_PATH', pjoin(PRIMER3_HOME, 'primer3_config/')) in_str = _formatBoulderIO(p3_args) if input_log: input_log.write(in_str) input_log.flush() out_str, err_str = sp.communicate(input=in_str) if output_log: output_log.write(out_str) output_log.flush() if err_log and err_str is not None: err_log.write(err_str) err_log.flush() return _parseBoulderIO(out_str)
[ "def", "designPrimers", "(", "p3_args", ",", "input_log", "=", "None", ",", "output_log", "=", "None", ",", "err_log", "=", "None", ")", ":", "sp", "=", "subprocess", ".", "Popen", "(", "[", "pjoin", "(", "PRIMER3_HOME", ",", "'primer3_core'", ")", "]", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stdin", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "STDOUT", ")", "p3_args", ".", "setdefault", "(", "'PRIMER_THERMODYNAMIC_PARAMETERS_PATH'", ",", "pjoin", "(", "PRIMER3_HOME", ",", "'primer3_config/'", ")", ")", "in_str", "=", "_formatBoulderIO", "(", "p3_args", ")", "if", "input_log", ":", "input_log", ".", "write", "(", "in_str", ")", "input_log", ".", "flush", "(", ")", "out_str", ",", "err_str", "=", "sp", ".", "communicate", "(", "input", "=", "in_str", ")", "if", "output_log", ":", "output_log", ".", "write", "(", "out_str", ")", "output_log", ".", "flush", "(", ")", "if", "err_log", "and", "err_str", "is", "not", "None", ":", "err_log", ".", "write", "(", "err_str", ")", "err_log", ".", "flush", "(", ")", "return", "_parseBoulderIO", "(", "out_str", ")" ]
41
19
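A hedged usage sketch for designPrimers. The Boulder-IO tag names below follow the primer3 2.x convention and the template sequence is invented, neither comes from the source; PRIMER3_HOME is assumed to point at a working primer3 install:

# Sketch only: tag names and the sequence are assumptions, not from the source.
p3_args = {
    'SEQUENCE_ID': 'example_amplicon',
    'SEQUENCE_TEMPLATE': 'GTAGTCAGTAGACGATGACTACTGACGATGCAGACGACAGACTGACGATCCCGACCCAAATCGATAGCTACGATGACG',
}
with open('primer3_in.log', 'w') as in_log:
    results = designPrimers(p3_args, input_log=in_log)
# results is the parsed Boulder-IO output dict,
# e.g. results.get('PRIMER_LEFT_0_SEQUENCE')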
def filter_rep_set(inF, otuSet): """ Parse the rep set file and remove all sequences not associated with unique OTUs. :@type inF: file :@param inF: The representative sequence set :@rtype: list :@return: The set of sequences associated with unique OTUs """ seqs = [] for record in SeqIO.parse(inF, "fasta"): if record.id in otuSet: seqs.append(record) return seqs
[ "def", "filter_rep_set", "(", "inF", ",", "otuSet", ")", ":", "seqs", "=", "[", "]", "for", "record", "in", "SeqIO", ".", "parse", "(", "inF", ",", "\"fasta\"", ")", ":", "if", "record", ".", "id", "in", "otuSet", ":", "seqs", ".", "append", "(", "record", ")", "return", "seqs" ]
25.875
19
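A short usage sketch, assuming Biopython is installed; the file names and OTU ids are hypothetical:

from Bio import SeqIO

otu_set = {"OTU_1", "OTU_7"}  # ids of the unique OTUs to keep
with open("rep_set.fasta") as in_f:
    kept = filter_rep_set(in_f, otu_set)
SeqIO.write(kept, "rep_set.unique.fasta", "fasta")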
def run_sorter(sorter_name_or_class, recording, output_folder=None, delete_output_folder=False,
               grouping_property=None, parallel=False, debug=False, **params):
    """
    Generic function to run a sorter via the function approach.

    Two usage patterns, with a sorter name or a sorter class:

    by name:
       >>> sorting = run_sorter('tridesclous', recording)

    by class:
       >>> sorting = run_sorter(TridesclousSorter, recording)

    """
    if isinstance(sorter_name_or_class, str):
        SorterClass = sorter_dict[sorter_name_or_class]
    elif sorter_name_or_class in sorter_full_list:
        SorterClass = sorter_name_or_class
    else:
        raise(ValueError('Unknown sorter'))

    sorter = SorterClass(recording=recording, output_folder=output_folder, grouping_property=grouping_property,
                         parallel=parallel, debug=debug, delete_output_folder=delete_output_folder)
    sorter.set_params(**params)
    sorter.run()
    sortingextractor = sorter.get_result()

    return sortingextractor
[ "def", "run_sorter", "(", "sorter_name_or_class", ",", "recording", ",", "output_folder", "=", "None", ",", "delete_output_folder", "=", "False", ",", "grouping_property", "=", "None", ",", "parallel", "=", "False", ",", "debug", "=", "False", ",", "*", "*", "params", ")", ":", "if", "isinstance", "(", "sorter_name_or_class", ",", "str", ")", ":", "SorterClass", "=", "sorter_dict", "[", "sorter_name_or_class", "]", "elif", "sorter_name_or_class", "in", "sorter_full_list", ":", "SorterClass", "=", "sorter_name_or_class", "else", ":", "raise", "(", "ValueError", "(", "'Unknown sorter'", ")", ")", "sorter", "=", "SorterClass", "(", "recording", "=", "recording", ",", "output_folder", "=", "output_folder", ",", "grouping_property", "=", "grouping_property", ",", "parallel", "=", "parallel", ",", "debug", "=", "debug", ",", "delete_output_folder", "=", "delete_output_folder", ")", "sorter", ".", "set_params", "(", "*", "*", "params", ")", "sorter", ".", "run", "(", ")", "sortingextractor", "=", "sorter", ".", "get_result", "(", ")", "return", "sortingextractor" ]
34.137931
25.448276
def getWordList(ipFile, delim):
    """ extract a unique list of words, recording the line numbers on which each word appears """
    indexedWords = {}
    totWords = 0
    totLines = 0
    with codecs.open(ipFile, "r",encoding='utf-8', errors='replace') as f:
        for line in f:
            totLines = totLines + 1
            words = multi_split(line, delim)
            totWords = totWords + len(words)
            for word in words:
                cleanedWord = word.lower().strip()
                if cleanedWord not in indexedWords:
                    indexedWords[cleanedWord] = str(totLines)
                else:
                    indexedWords[cleanedWord] = indexedWords[cleanedWord] + ' ' + str(totLines)
    return totWords, totLines, indexedWords
[ "def", "getWordList", "(", "ipFile", ",", "delim", ")", ":", "indexedWords", "=", "{", "}", "totWords", "=", "0", "totLines", "=", "0", "with", "codecs", ".", "open", "(", "ipFile", ",", "\"r\"", ",", "encoding", "=", "'utf-8'", ",", "errors", "=", "'replace'", ")", "as", "f", ":", "for", "line", "in", "f", ":", "totLines", "=", "totLines", "+", "1", "words", "=", "multi_split", "(", "line", ",", "delim", ")", "totWords", "=", "totWords", "+", "len", "(", "words", ")", "for", "word", "in", "words", ":", "cleanedWord", "=", "word", ".", "lower", "(", ")", ".", "strip", "(", ")", "if", "cleanedWord", "not", "in", "indexedWords", ":", "indexedWords", "[", "cleanedWord", "]", "=", "str", "(", "totLines", ")", "else", ":", "indexedWords", "[", "cleanedWord", "]", "=", "indexedWords", "[", "cleanedWord", "]", "+", "' '", "+", "str", "(", "totLines", ")", "return", "totWords", ",", "totLines", ",", "indexedWords" ]
39.105263
16.052632
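A usage sketch under the assumption that the external helper multi_split splits a line on each delimiter in delim (here a single space); the sample file is created inline so the expected index is easy to verify:

import codecs

with codecs.open("sample.txt", "w", encoding="utf-8") as f:
    f.write("red green\ngreen blue\n")

tot_words, tot_lines, index = getWordList("sample.txt", " ")
print(tot_words, tot_lines)  # 4 2
print(index["green"])        # 1 2  (appears on lines 1 and 2)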
def parse_time_specification(units, dt=None, n_steps=None, nsteps=None, t1=None, t2=None, t=None):
    """
    Return an array of times given a few combinations of kwargs that are
    accepted -- see below.

    Parameters
    ----------
    dt, n_steps[, t1] : (numeric, int[, numeric])
        A fixed timestep dt and a number of steps to run for.
    dt, t1, t2 : (numeric, numeric, numeric)
        A fixed timestep dt, an initial time, and a final time.
    dt, t1 : (array_like, numeric)
        An array of timesteps dt and an initial time.
    n_steps, t1, t2 : (int, numeric, numeric)
        Number of steps between an initial time, and a final time.
    t : array_like
        An array of times (dts = t[1:] - t[:-1])

    """
    if nsteps is not None:
        warn("The argument 'nsteps' is deprecated and will be removed in a future version."
             "Use 'n_steps' instead.")
        n_steps = nsteps

    if n_steps is not None:  # parse and validate n_steps
        n_steps = int(n_steps)

    if hasattr(dt, 'unit'):
        dt = dt.decompose(units).value

    if hasattr(t1, 'unit'):
        t1 = t1.decompose(units).value

    if hasattr(t2, 'unit'):
        t2 = t2.decompose(units).value

    if hasattr(t, 'unit'):
        t = t.decompose(units).value

    # t : array_like
    if t is not None:
        times = t
        return times

    else:
        if dt is None and (t1 is None or t2 is None or n_steps is None):
            raise ValueError("Invalid spec. See docstring.")

        # dt, n_steps[, t1] : (numeric, int[, numeric])
        elif dt is not None and n_steps is not None:
            if t1 is None:
                t1 = 0.

            times = parse_time_specification(units, dt=np.ones(n_steps+1)*dt, t1=t1)

        # dt, t1, t2 : (numeric, numeric, numeric)
        elif dt is not None and t1 is not None and t2 is not None:
            if t2 < t1 and dt < 0:
                t_i = t1
                times = []
                ii = 0
                while (t_i > t2) and (ii < 1E6):
                    times.append(t_i)
                    t_i += dt

                if times[-1] != t2:
                    times.append(t2)

                return np.array(times)

            elif t2 > t1 and dt > 0:
                t_i = t1
                times = []
                ii = 0
                while (t_i < t2) and (ii < 1E6):
                    times.append(t_i)
                    t_i += dt

                return np.array(times)

            else:
                raise ValueError("If t2 < t1, dt must be negative. If t1 < t2, "
                                 "dt should be positive.")

        # dt, t1 : (array_like, numeric)
        elif isinstance(dt, np.ndarray) and t1 is not None:
            times = np.cumsum(np.append([0.], dt)) + t1
            times = times[:-1]

        # n_steps, t1, t2 : (int, numeric, numeric)
        elif dt is None and not (t1 is None or t2 is None or n_steps is None):
            times = np.linspace(t1, t2, n_steps, endpoint=True)

        else:
            raise ValueError("Invalid options. See docstring.")

        return times
[ "def", "parse_time_specification", "(", "units", ",", "dt", "=", "None", ",", "n_steps", "=", "None", ",", "nsteps", "=", "None", ",", "t1", "=", "None", ",", "t2", "=", "None", ",", "t", "=", "None", ")", ":", "if", "nsteps", "is", "not", "None", ":", "warn", "(", "\"The argument 'nsteps' is deprecated and will be removed in a future version.\"", "\"Use 'n_steps' instead.\"", ")", "n_steps", "=", "nsteps", "if", "n_steps", "is", "not", "None", ":", "# parse and validate n_steps", "n_steps", "=", "int", "(", "n_steps", ")", "if", "hasattr", "(", "dt", ",", "'unit'", ")", ":", "dt", "=", "dt", ".", "decompose", "(", "units", ")", ".", "value", "if", "hasattr", "(", "t1", ",", "'unit'", ")", ":", "t1", "=", "t1", ".", "decompose", "(", "units", ")", ".", "value", "if", "hasattr", "(", "t2", ",", "'unit'", ")", ":", "t2", "=", "t2", ".", "decompose", "(", "units", ")", ".", "value", "if", "hasattr", "(", "t", ",", "'unit'", ")", ":", "t", "=", "t", ".", "decompose", "(", "units", ")", ".", "value", "# t : array_like", "if", "t", "is", "not", "None", ":", "times", "=", "t", "return", "times", "else", ":", "if", "dt", "is", "None", "and", "(", "t1", "is", "None", "or", "t2", "is", "None", "or", "n_steps", "is", "None", ")", ":", "raise", "ValueError", "(", "\"Invalid spec. See docstring.\"", ")", "# dt, n_steps[, t1] : (numeric, int[, numeric])", "elif", "dt", "is", "not", "None", "and", "n_steps", "is", "not", "None", ":", "if", "t1", "is", "None", ":", "t1", "=", "0.", "times", "=", "parse_time_specification", "(", "units", ",", "dt", "=", "np", ".", "ones", "(", "n_steps", "+", "1", ")", "*", "dt", ",", "t1", "=", "t1", ")", "# dt, t1, t2 : (numeric, numeric, numeric)", "elif", "dt", "is", "not", "None", "and", "t1", "is", "not", "None", "and", "t2", "is", "not", "None", ":", "if", "t2", "<", "t1", "and", "dt", "<", "0", ":", "t_i", "=", "t1", "times", "=", "[", "]", "ii", "=", "0", "while", "(", "t_i", ">", "t2", ")", "and", "(", "ii", "<", "1E6", ")", ":", "times", ".", "append", "(", "t_i", ")", "t_i", "+=", "dt", "if", "times", "[", "-", "1", "]", "!=", "t2", ":", "times", ".", "append", "(", "t2", ")", "return", "np", ".", "array", "(", "times", ")", "elif", "t2", ">", "t1", "and", "dt", ">", "0", ":", "t_i", "=", "t1", "times", "=", "[", "]", "ii", "=", "0", "while", "(", "t_i", "<", "t2", ")", "and", "(", "ii", "<", "1E6", ")", ":", "times", ".", "append", "(", "t_i", ")", "t_i", "+=", "dt", "return", "np", ".", "array", "(", "times", ")", "else", ":", "raise", "ValueError", "(", "\"If t2 < t1, dt must be negative. If t1 < t2, \"", "\"dt should be positive.\"", ")", "# dt, t1 : (array_like, numeric)", "elif", "isinstance", "(", "dt", ",", "np", ".", "ndarray", ")", "and", "t1", "is", "not", "None", ":", "times", "=", "np", ".", "cumsum", "(", "np", ".", "append", "(", "[", "0.", "]", ",", "dt", ")", ")", "+", "t1", "times", "=", "times", "[", ":", "-", "1", "]", "# n_steps, t1, t2 : (int, numeric, numeric)", "elif", "dt", "is", "None", "and", "not", "(", "t1", "is", "None", "or", "t2", "is", "None", "or", "n_steps", "is", "None", ")", ":", "times", "=", "np", ".", "linspace", "(", "t1", ",", "t2", ",", "n_steps", ",", "endpoint", "=", "True", ")", "else", ":", "raise", "ValueError", "(", "\"Invalid options. See docstring.\"", ")", "return", "times" ]
30.94898
21.214286
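Two of the accepted call signatures, sketched with plain floats; units is only consulted for unitful (astropy-style) inputs, so None can be passed here:

print(parse_time_specification(None, dt=0.1, n_steps=5))
# [0.  0.1 0.2 0.3 0.4 0.5]   -> n_steps + 1 times with fixed spacing dt
print(parse_time_specification(None, t1=0., t2=1., n_steps=5))
# [0.   0.25 0.5  0.75 1.  ]  -> n_steps times between t1 and t2 inclusive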
def public_ip_address_get(name, resource_group, **kwargs): ''' .. versionadded:: 2019.2.0 Get details about a specific public IP address. :param name: The name of the public IP address to query. :param resource_group: The resource group name assigned to the public IP address. CLI Example: .. code-block:: bash salt-call azurearm_network.public_ip_address_get test-pub-ip testgroup ''' expand = kwargs.get('expand') netconn = __utils__['azurearm.get_client']('network', **kwargs) try: pub_ip = netconn.public_ip_addresses.get( public_ip_address_name=name, resource_group_name=resource_group, expand=expand ) result = pub_ip.as_dict() except CloudError as exc: __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs) result = {'error': str(exc)} return result
[ "def", "public_ip_address_get", "(", "name", ",", "resource_group", ",", "*", "*", "kwargs", ")", ":", "expand", "=", "kwargs", ".", "get", "(", "'expand'", ")", "netconn", "=", "__utils__", "[", "'azurearm.get_client'", "]", "(", "'network'", ",", "*", "*", "kwargs", ")", "try", ":", "pub_ip", "=", "netconn", ".", "public_ip_addresses", ".", "get", "(", "public_ip_address_name", "=", "name", ",", "resource_group_name", "=", "resource_group", ",", "expand", "=", "expand", ")", "result", "=", "pub_ip", ".", "as_dict", "(", ")", "except", "CloudError", "as", "exc", ":", "__utils__", "[", "'azurearm.log_cloud_error'", "]", "(", "'network'", ",", "str", "(", "exc", ")", ",", "*", "*", "kwargs", ")", "result", "=", "{", "'error'", ":", "str", "(", "exc", ")", "}", "return", "result" ]
26.235294
25.058824
def create_joints(self): '''Traverse the bone hierarchy and create physics joints.''' stack = ['root'] while stack: parent = stack.pop() for child in self.hierarchy.get(parent, ()): stack.append(child) if parent not in self.bones: continue bone = self.bones[parent] body = [b for b in self.bodies if b.name == parent][0] for child in self.hierarchy.get(parent, ()): child_bone = self.bones[child] child_body = [b for b in self.bodies if b.name == child][0] shape = ('', 'hinge', 'universal', 'ball')[len(child_bone.dof)] self.joints.append(self.world.join(shape, body, child_body))
[ "def", "create_joints", "(", "self", ")", ":", "stack", "=", "[", "'root'", "]", "while", "stack", ":", "parent", "=", "stack", ".", "pop", "(", ")", "for", "child", "in", "self", ".", "hierarchy", ".", "get", "(", "parent", ",", "(", ")", ")", ":", "stack", ".", "append", "(", "child", ")", "if", "parent", "not", "in", "self", ".", "bones", ":", "continue", "bone", "=", "self", ".", "bones", "[", "parent", "]", "body", "=", "[", "b", "for", "b", "in", "self", ".", "bodies", "if", "b", ".", "name", "==", "parent", "]", "[", "0", "]", "for", "child", "in", "self", ".", "hierarchy", ".", "get", "(", "parent", ",", "(", ")", ")", ":", "child_bone", "=", "self", ".", "bones", "[", "child", "]", "child_body", "=", "[", "b", "for", "b", "in", "self", ".", "bodies", "if", "b", ".", "name", "==", "child", "]", "[", "0", "]", "shape", "=", "(", "''", ",", "'hinge'", ",", "'universal'", ",", "'ball'", ")", "[", "len", "(", "child_bone", ".", "dof", ")", "]", "self", ".", "joints", ".", "append", "(", "self", ".", "world", ".", "join", "(", "shape", ",", "body", ",", "child_body", ")", ")" ]
47.375
17.875
def clean(self, value): """ Convert the value's type and run validation. Validation errors from to_python and validate are propagated. The correct value is returned if no error is raised. """ value = self.to_python(value) self.validate(value) return value
[ "def", "clean", "(", "self", ",", "value", ")", ":", "value", "=", "self", ".", "to_python", "(", "value", ")", "self", ".", "validate", "(", "value", ")", "return", "value" ]
34.555556
15.444444
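A self-contained sketch of the to_python/validate contract this method relies on; the Field base below is a stand-in reproducing the clean method above, and IntegerField is a hypothetical subclass, not from the library:

class Field:  # stand-in base class reproducing clean() above
    def clean(self, value):
        value = self.to_python(value)
        self.validate(value)
        return value

class IntegerField(Field):
    def to_python(self, value):
        # Convert first; a bad cast raises before validation runs.
        return int(value)

    def validate(self, value):
        if value < 0:
            raise ValueError("must be non-negative")

assert IntegerField().clean("42") == 42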
def fun_inverse(fun=None, y=0, x0=None, args=(), disp=False, method='Nelder-Mead', **kwargs):
    r"""Find the input value at which `fun` produces the desired output `y`

    Calls the indicated function repeatedly to find the answer to the inverse function evaluation

    Arguments:
        fun (function): function to calculate an inverse for
        y (float): desired output of fun
        x0 (float): initial guess at input to fun, the fun arg that will be adjusted
        args (list or tuple): constant arguments to fun which will not be adjusted
        constraints (dict): optimizer constraints, passed via kwargs (see scipy.optimize.minimize)
        disp (bool): whether to display incremental results during optimization
        method (str): one of the scipy.optimize.minimize methods

    additional kwargs are passed along to the minimize function

    fun_inverse can be used to calculate a trivial square root:

    >>> round(fun_inverse(fun=lambda x: x**2, y=9, x0=0), 6)
    3.0
    """
    fun_inverse.fun = cost_fun.fun = fun if fun is not None else getattr(fun_inverse, 'fun', lambda x: x)
    fun_inverse.target = cost_fun.target = y or 0
    fun_inverse.verbose = verbose = cost_fun.verbose = kwargs.pop(
        'verbose', getattr(cost_fun, 'verbose', getattr(fun_inverse, 'verbose', False)))
    fun_inverse.x0 = x0 = x0 if x0 is not None else getattr(fun_inverse, 'x0', 0) or 0
    if verbose:
        print('    x0: {}\ntarget: {}\n'.format(fun_inverse.x0, fun_inverse.target))
    res = minimize(cost_fun,
                   x0=x0,
                   args=args,
                   options=kwargs.pop('options', {}),
                   method=method,
                   **kwargs
                   )
    if isinstance(x0, NUMERIC_TYPES):
        return res.x[0]
    return res.x
[ "def", "fun_inverse", "(", "fun", "=", "None", ",", "y", "=", "0", ",", "x0", "=", "None", ",", "args", "=", "(", ")", ",", "disp", "=", "False", ",", "method", "=", "'Nelder-Mead'", ",", "*", "*", "kwargs", ")", ":", "fun_inverse", ".", "fun", "=", "cost_fun", ".", "fun", "=", "fun", "if", "fun", "is", "not", "None", "else", "getattr", "(", "fun_inverse", ",", "'fun'", ",", "lambda", "x", ":", "x", ")", "fun_inverse", ".", "target", "=", "cost_fun", ".", "target", "=", "y", "or", "0", "fun_inverse", ".", "verbose", "=", "verbose", "=", "cost_fun", ".", "verbose", "=", "kwargs", ".", "pop", "(", "'verbose'", ",", "getattr", "(", "cost_fun", ",", "'verbose'", ",", "getattr", "(", "fun_inverse", ",", "'verbose'", ",", "False", ")", ")", ")", "fun_inverse", ".", "x0", "=", "x0", "=", "x0", "if", "x0", "is", "not", "None", "else", "getattr", "(", "fun_inverse", ",", "'x0'", ",", "0", ")", "or", "0", "if", "verbose", ":", "print", "(", "' x0: {}\\ntarget: {}\\n'", ".", "format", "(", "fun_inverse", ".", "x0", ",", "fun_inverse", ".", "target", ")", ")", "res", "=", "minimize", "(", "cost_fun", ",", "x0", "=", "x0", ",", "args", "=", "args", ",", "options", "=", "kwargs", ".", "pop", "(", "'options'", ",", "{", "}", ")", ",", "method", "=", "method", ",", "*", "*", "kwargs", ")", "if", "isinstance", "(", "x0", ",", "NUMERIC_TYPES", ")", ":", "return", "res", ".", "x", "[", "0", "]", "return", "res", ".", "x" ]
49.942857
26.371429
def min_value(self): """ The minimum pixel value of the ``data`` within the source segment. """ if self._is_completely_masked: return np.nan * self._data_unit else: return np.min(self.values)
[ "def", "min_value", "(", "self", ")", ":", "if", "self", ".", "_is_completely_masked", ":", "return", "np", ".", "nan", "*", "self", ".", "_data_unit", "else", ":", "return", "np", ".", "min", "(", "self", ".", "values", ")" ]
25.5
14.3
def _DiscoverElementTypeFromLocalname(self, type_localname): """Searches all namespaces for a type by name. Args: type_localname: The name of the type. Returns: A fully qualified SOAP type with the specified name. Raises: A zeep.exceptions.LookupError if the type cannot be found in any namespace. """ elem_type = None last_exception = None for ns_prefix in self.zeep_client.wsdl.types.prefix_map.values(): try: elem_type = self.zeep_client.get_type( '{%s}%s' % (ns_prefix, type_localname)) except zeep.exceptions.LookupError as e: last_exception = e continue break if not elem_type: raise last_exception return elem_type
[ "def", "_DiscoverElementTypeFromLocalname", "(", "self", ",", "type_localname", ")", ":", "elem_type", "=", "None", "last_exception", "=", "None", "for", "ns_prefix", "in", "self", ".", "zeep_client", ".", "wsdl", ".", "types", ".", "prefix_map", ".", "values", "(", ")", ":", "try", ":", "elem_type", "=", "self", ".", "zeep_client", ".", "get_type", "(", "'{%s}%s'", "%", "(", "ns_prefix", ",", "type_localname", ")", ")", "except", "zeep", ".", "exceptions", ".", "LookupError", "as", "e", ":", "last_exception", "=", "e", "continue", "break", "if", "not", "elem_type", ":", "raise", "last_exception", "return", "elem_type" ]
27.884615
20.692308
def delete_group(group_name, region=None, key=None, keyid=None,
                 profile=None):
    '''
    Delete an IAM group.

    CLI Example:

    .. code-block:: bash

        salt myminion boto_iam.delete_group mygroup
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    if not conn:
        return False
    _group = get_group(
        group_name, region, key, keyid, profile
    )
    if not _group:
        return True
    try:
        conn.delete_group(group_name)
        log.info('Successfully deleted IAM group %s.', group_name)
        return True
    except boto.exception.BotoServerError as e:
        log.debug(e)
        log.error('Failed to delete IAM group %s.', group_name)
        return False
[ "def", "delete_group", "(", "group_name", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ")", ":", "conn", "=", "_get_conn", "(", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", "if", "not", "conn", ":", "return", "False", "_group", "=", "get_group", "(", "group_name", ",", "region", ",", "key", ",", "keyid", ",", "profile", ")", "if", "not", "_group", ":", "return", "True", "try", ":", "conn", ".", "delete_group", "(", "group_name", ")", "log", ".", "info", "(", "'Successfully deleted IAM group %s.'", ",", "group_name", ")", "return", "True", "except", "boto", ".", "exception", ".", "BotoServerError", "as", "e", ":", "log", ".", "debug", "(", "e", ")", "log", ".", "error", "(", "'Failed to delete IAM group %s.'", ",", "group_name", ")", "return", "False" ]
27.222222
22.333333
def wrapHeart(service): """Wrap a service in a MultiService with a heart""" master = taservice.MultiService() service.setServiceParent(master) maybeAddHeart(master) return master
[ "def", "wrapHeart", "(", "service", ")", ":", "master", "=", "taservice", ".", "MultiService", "(", ")", "service", ".", "setServiceParent", "(", "master", ")", "maybeAddHeart", "(", "master", ")", "return", "master" ]
32.166667
10.333333
def groupby_count(i, key=None, force_keys=None): """ Aggregate iterator values into buckets based on how frequently the values appear. Example:: >>> list(groupby_count([1, 1, 1, 2, 3])) [(1, 3), (2, 1), (3, 1)] """ counter = defaultdict(lambda: 0) if not key: key = lambda o: o for k in i: counter[key(k)] += 1 if force_keys: for k in force_keys: counter[k] += 0 return counter.items()
[ "def", "groupby_count", "(", "i", ",", "key", "=", "None", ",", "force_keys", "=", "None", ")", ":", "counter", "=", "defaultdict", "(", "lambda", ":", "0", ")", "if", "not", "key", ":", "key", "=", "lambda", "o", ":", "o", "for", "k", "in", "i", ":", "counter", "[", "key", "(", "k", ")", "]", "+=", "1", "if", "force_keys", ":", "for", "k", "in", "force_keys", ":", "counter", "[", "k", "]", "+=", "0", "return", "counter", ".", "items", "(", ")" ]
21.809524
19.761905
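A sketch showing the two optional parameters: key groups values before counting, and force_keys guarantees a bucket exists even when nothing maps to it (the result comes back as (key, count) pairs):

words = ["apple", "avocado", "beet"]
counts = groupby_count(words, key=lambda w: w[0], force_keys=["c"])
print(sorted(counts))  # [('a', 2), ('b', 1), ('c', 0)]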
def get_status(app, repo_config, repo_name, sha):
    """Gets the status of a commit.

    .. note::

        ``repo_name`` might not ever be anything other than
        ``repo_config['github_repo']``.

    :param app: Flask app for leeroy
    :param repo_config: configuration for the repo
    :param repo_name: The name of the owner/repo
    :param sha: SHA for the status we are looking for
    :return: the requests Response object for the status query
    """
    url = get_api_url(app, repo_config, github_status_url).format(
        repo_name=repo_name,
        sha=sha)

    logging.debug("Getting status for %s %s", repo_name, sha)

    s = get_session_for_repo(app, repo_config)
    response = s.get(url)

    if not response.ok:
        raise Exception("Unable to get status: {}".format(response.status_code))

    return response
[ "def", "get_status", "(", "app", ",", "repo_config", ",", "repo_name", ",", "sha", ")", ":", "url", "=", "get_api_url", "(", "app", ",", "repo_config", ",", "github_status_url", ")", ".", "format", "(", "repo_name", "=", "repo_name", ",", "sha", "=", "sha", ")", "logging", ".", "debug", "(", "\"Getting status for %s %s\"", ",", "repo_name", ",", "sha", ")", "s", "=", "get_session_for_repo", "(", "app", ",", "repo_config", ")", "response", "=", "s", ".", "get", "(", "url", ")", "if", "not", "response", ".", "ok", ":", "raise", "Exception", "(", "\"Unable to get status: {}\"", ".", "format", "(", "response", ".", "status_code", ")", ")", "return", "response" ]
37.619048
15.428571
def new_code_block(self, **kwargs): """Create a new code block.""" proto = {'content': '', 'type': self.code, 'IO': '', 'attributes': ''} proto.update(**kwargs) return proto
[ "def", "new_code_block", "(", "self", ",", "*", "*", "kwargs", ")", ":", "proto", "=", "{", "'content'", ":", "''", ",", "'type'", ":", "self", ".", "code", ",", "'IO'", ":", "''", ",", "'attributes'", ":", "''", "}", "proto", ".", "update", "(", "*", "*", "kwargs", ")", "return", "proto" ]
31.125
8.625
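Since new_code_block only touches self.code, a throwaway stub is enough to exercise it; the Stub class here is purely illustrative:

class Stub:
    code = "code"  # stands in for the converter's code marker
    new_code_block = new_code_block

block = Stub().new_code_block(content="print('hi')", IO="input")
print(block)
# {'content': "print('hi')", 'type': 'code', 'IO': 'input', 'attributes': ''}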
def _upload_to_gcs(self, files_to_upload): """ Upload all of the file splits (and optionally the schema .json file) to Google cloud storage. """ hook = GoogleCloudStorageHook( google_cloud_storage_conn_id=self.google_cloud_storage_conn_id, delegate_to=self.delegate_to) for tmp_file in files_to_upload: hook.upload(self.bucket, tmp_file.get('file_name'), tmp_file.get('file_handle').name, mime_type=tmp_file.get('file_mime_type'))
[ "def", "_upload_to_gcs", "(", "self", ",", "files_to_upload", ")", ":", "hook", "=", "GoogleCloudStorageHook", "(", "google_cloud_storage_conn_id", "=", "self", ".", "google_cloud_storage_conn_id", ",", "delegate_to", "=", "self", ".", "delegate_to", ")", "for", "tmp_file", "in", "files_to_upload", ":", "hook", ".", "upload", "(", "self", ".", "bucket", ",", "tmp_file", ".", "get", "(", "'file_name'", ")", ",", "tmp_file", ".", "get", "(", "'file_handle'", ")", ".", "name", ",", "mime_type", "=", "tmp_file", ".", "get", "(", "'file_mime_type'", ")", ")" ]
45.916667
12.916667
def create(input, template, field, outdir, prefix, otype, command, index,
           dpi, verbose, unicode_support):
    """Use docstamp to create documents from the content
    of a CSV file or a Google Spreadsheet.

    Examples: \n
    docstamp create -i badge.csv -t badge_template.svg -o badges

    docstamp create -i badge.csv -t badge_template.svg -o ./badges -d pdf
    """
    logging.basicConfig(level=LOGGING_LVL)
    log = logging.getLogger(__name__)

    # setup verbose mode
    verbose_switch(verbose)

    input_file = input
    fields = field

    # init set of template contents
    log.debug('Reading CSV elements from {}.'.format(input_file))
    items, fieldnames = get_items_from_csv(input_file)

    # check if got any item
    if len(items) == 0:
        click.echo('Quitting because found 0 items.')
        exit(-1)

    if not fields:
        # set the number of zeros that the files will have
        n_zeros = int(math.floor(math.log10(len(items))) + 1)
    else:
        # check that fields has all valid fields
        for field_name in fields:
            if field_name not in fieldnames:
                raise ValueError('Field name {} not found in input file '
                                 ' header.'.format(field_name))

    # filter the items if index
    if index:
        myitems = {int(idx): items[int(idx)] for idx in index}
        items = myitems
        log.debug('Using the elements with index {} of the input '
                  'file.'.format(index))

    # make output folder
    if not os.path.exists(outdir):
        os.mkdir(outdir)

    # create template document model
    log.debug('Creating the template object using the file {}.'.format(template))
    template_doc = TextDocument.from_template_file(template, command)
    log.debug('Created an object of type {}.'.format(type(template_doc)))

    # let's stamp them!
    for idx in items:
        item = items[idx]

        if not len(fields):
            file_name = str(idx).zfill(n_zeros)
        else:
            field_values = []
            try:
                for field_name in fields:
                    field_values.append(item[field_name].replace(' ', ''))
            except:
                log.exception('Could not get field {} value from'
                              ' {}'.format(field_name, item))
                exit(-1)
            else:
                file_name = '_'.join(field_values)

        log.debug('Filling template {} with values of item {}.'.format(file_name, idx))
        try:
            template_doc.fill(item)
        except:
            log.exception('Error filling document for {}th item'.format(idx))
            continue

        # set output file path
        file_extension = get_extension(template)
        if prefix is None:
            basename = os.path.basename(template).replace(file_extension, '')
            file_name = basename + '_' + file_name
        file_path = os.path.join(outdir, file_name + '.' + otype)

        kwargs = {'file_type': otype,
                  'dpi': dpi,
                  'support_unicode': unicode_support}

        log.debug('Rendering file {}.'.format(file_path))
        try:
            template_doc.render(file_path, **kwargs)
        except:
            log.exception('Error creating {} for {}.'.format(file_path, item))
            exit(-1)
        else:
            log.debug('Successfully rendered {}.'.format(file_path))
[ "def", "create", "(", "input", ",", "template", ",", "field", ",", "outdir", ",", "prefix", ",", "otype", ",", "command", ",", "index", ",", "dpi", ",", "verbose", ",", "unicode_support", ")", ":", "logging", ".", "basicConfig", "(", "level", "=", "LOGGING_LVL", ")", "log", "=", "logging", ".", "getLogger", "(", "__name__", ")", "# setup verbose mode", "verbose_switch", "(", "verbose", ")", "input_file", "=", "input", "fields", "=", "field", "# init set of template contents", "log", ".", "debug", "(", "'Reading CSV elements from {}.'", ".", "format", "(", "input_file", ")", ")", "items", ",", "fieldnames", "=", "get_items_from_csv", "(", "input_file", ")", "# check if got any item", "if", "len", "(", "items", ")", "==", "0", ":", "click", ".", "echo", "(", "'Quiting because found 0 items.'", ")", "exit", "(", "-", "1", ")", "if", "not", "fields", ":", "# set the number of zeros that the files will have", "n_zeros", "=", "int", "(", "math", ".", "floor", "(", "math", ".", "log10", "(", "len", "(", "items", ")", ")", ")", "+", "1", ")", "else", ":", "# check that fields has all valid fields", "for", "field_name", "in", "fields", ":", "if", "field_name", "not", "in", "fieldnames", ":", "raise", "ValueError", "(", "'Field name {} not found in input file '", "' header.'", ".", "format", "(", "field_name", ")", ")", "# filter the items if index", "if", "index", ":", "myitems", "=", "{", "int", "(", "idx", ")", ":", "items", "[", "int", "(", "idx", ")", "]", "for", "idx", "in", "index", "}", "items", "=", "myitems", "log", ".", "debug", "(", "'Using the elements with index {} of the input '", "'file.'", ".", "format", "(", "index", ")", ")", "# make output folder", "if", "not", "os", ".", "path", ".", "exists", "(", "outdir", ")", ":", "os", ".", "mkdir", "(", "outdir", ")", "# create template document model", "log", ".", "debug", "(", "'Creating the template object using the file {}.'", ".", "format", "(", "template", ")", ")", "template_doc", "=", "TextDocument", ".", "from_template_file", "(", "template", ",", "command", ")", "log", ".", "debug", "(", "'Created an object of type {}.'", ".", "format", "(", "type", "(", "template_doc", ")", ")", ")", "# let's stamp them!", "for", "idx", "in", "items", ":", "item", "=", "items", "[", "idx", "]", "if", "not", "len", "(", "fields", ")", ":", "file_name", "=", "str", "(", "idx", ")", ".", "zfill", "(", "n_zeros", ")", "else", ":", "field_values", "=", "[", "]", "try", ":", "for", "field_name", "in", "fields", ":", "field_values", ".", "append", "(", "item", "[", "field_name", "]", ".", "replace", "(", "' '", ",", "''", ")", ")", "except", ":", "log", ".", "exception", "(", "'Could not get field {} value from'", "' {}'", ".", "format", "(", "field_name", ",", "item", ")", ")", "exit", "(", "-", "1", ")", "else", ":", "file_name", "=", "'_'", ".", "join", "(", "field_values", ")", "log", ".", "debug", "(", "'Filling template {} with values of item {}.'", ".", "format", "(", "file_name", ",", "idx", ")", ")", "try", ":", "template_doc", ".", "fill", "(", "item", ")", "except", ":", "log", ".", "exception", "(", "'Error filling document for {}th item'", ".", "format", "(", "idx", ")", ")", "continue", "# set output file path", "file_extension", "=", "get_extension", "(", "template", ")", "if", "prefix", "is", "None", ":", "basename", "=", "os", ".", "path", ".", "basename", "(", "template", ")", ".", "replace", "(", "file_extension", ",", "''", ")", "file_name", "=", "basename", "+", "'_'", "+", "file_name", "file_path", "=", "os", ".", "path", ".", 
"join", "(", "outdir", ",", "file_name", "+", "'.'", "+", "otype", ")", "kwargs", "=", "{", "'file_type'", ":", "otype", ",", "'dpi'", ":", "dpi", ",", "'support_unicode'", ":", "unicode_support", "}", "log", ".", "debug", "(", "'Rendering file {}.'", ".", "format", "(", "file_path", ")", ")", "try", ":", "template_doc", ".", "render", "(", "file_path", ",", "*", "*", "kwargs", ")", "except", ":", "log", ".", "exception", "(", "'Error creating {} for {}.'", ".", "format", "(", "file_path", ",", "item", ")", ")", "exit", "(", "-", "1", ")", "else", ":", "log", ".", "debug", "(", "'Successfully rendered {}.'", ".", "format", "(", "file_path", ")", ")" ]
33.897959
21.795918
def as_a_dict(self):
        """
        Returns the index as a dictionary.  This includes the design document
        id, index name, index type, and index definition.

        :returns: Dictionary representation of the index
        """
        index_dict = {
            'ddoc': self._ddoc_id,
            'name': self._name,
            'type': self._type,
            'def': self._def
        }
        if self._partitioned:
            index_dict['partitioned'] = True
        return index_dict
[ "def", "as_a_dict", "(", "self", ")", ":", "index_dict", "=", "{", "'ddoc'", ":", "self", ".", "_ddoc_id", ",", "'name'", ":", "self", ".", "_name", ",", "'type'", ":", "self", ".", "_type", ",", "'def'", ":", "self", ".", "_def", "}", "if", "self", ".", "_partitioned", ":", "index_dict", "[", "'partitioned'", "]", "=", "True", "return", "index_dict" ]
27.888889
19
def negotiate_sasl(transport, xmlstream, sasl_providers, negotiation_timeout, jid, features): """ Perform SASL authentication on the given :class:`.protocol.XMLStream` `stream`. `transport` must be the :class:`asyncio.Transport` over which the `stream` runs. It is used to detect whether TLS is used and may be required by some SASL mechanisms. `sasl_providers` must be an iterable of :class:`SASLProvider` objects. They will be tried in iteration order to authenticate against the server. If one of the `sasl_providers` fails with a :class:`aiosasl.AuthenticationFailure` exception, the other providers are still tried; only if all providers fail, the last :class:`aiosasl.AuthenticationFailure` exception is re-raised. If no mechanism was able to authenticate but not due to authentication failures (other failures include no matching mechanism on the server side), :class:`aiosasl.SASLUnavailable` is raised. Return the :class:`.nonza.StreamFeatures` obtained after resetting the stream after successful SASL authentication. .. versionadded:: 0.6 .. deprecated:: 0.10 The `negotiation_timeout` argument is ignored. The timeout is controlled using the :attr:`~.XMLStream.deadtime_hard_limit` timeout of the stream. The argument will be removed in version 1.0. To prepare for this, please pass `jid` and `features` as keyword arguments. """ if not transport.get_extra_info("sslcontext"): transport = None last_auth_error = None for sasl_provider in sasl_providers: try: result = yield from sasl_provider.execute( jid, features, xmlstream, transport) except ValueError as err: raise errors.StreamNegotiationFailure( "invalid credentials: {}".format(err) ) from err except aiosasl.AuthenticationFailure as err: last_auth_error = err continue if result: features = yield from protocol.reset_stream_and_get_features( xmlstream ) break else: if last_auth_error: raise last_auth_error else: raise errors.SASLUnavailable("No common mechanisms") return features
[ "def", "negotiate_sasl", "(", "transport", ",", "xmlstream", ",", "sasl_providers", ",", "negotiation_timeout", ",", "jid", ",", "features", ")", ":", "if", "not", "transport", ".", "get_extra_info", "(", "\"sslcontext\"", ")", ":", "transport", "=", "None", "last_auth_error", "=", "None", "for", "sasl_provider", "in", "sasl_providers", ":", "try", ":", "result", "=", "yield", "from", "sasl_provider", ".", "execute", "(", "jid", ",", "features", ",", "xmlstream", ",", "transport", ")", "except", "ValueError", "as", "err", ":", "raise", "errors", ".", "StreamNegotiationFailure", "(", "\"invalid credentials: {}\"", ".", "format", "(", "err", ")", ")", "from", "err", "except", "aiosasl", ".", "AuthenticationFailure", "as", "err", ":", "last_auth_error", "=", "err", "continue", "if", "result", ":", "features", "=", "yield", "from", "protocol", ".", "reset_stream_and_get_features", "(", "xmlstream", ")", "break", "else", ":", "if", "last_auth_error", ":", "raise", "last_auth_error", "else", ":", "raise", "errors", ".", "SASLUnavailable", "(", "\"No common mechanisms\"", ")", "return", "features" ]
36.761905
23.52381
def append(self, transitions, rows=None): """Append a batch of transitions to rows of the memory. Args: transitions: Tuple of transition quantities with batch dimension. rows: Episodes to append to, defaults to all. Returns: Operation. """ rows = tf.range(self._capacity) if rows is None else rows assert rows.shape.ndims == 1 assert_capacity = tf.assert_less( rows, self._capacity, message='capacity exceeded') with tf.control_dependencies([assert_capacity]): assert_max_length = tf.assert_less( tf.gather(self._length, rows), self._max_length, message='max length exceeded') with tf.control_dependencies([assert_max_length]): timestep = tf.gather(self._length, rows) indices = tf.stack([rows, timestep], 1) append_ops = tools.nested.map( lambda var, val: tf.scatter_nd_update(var, indices, val), self._buffers, transitions, flatten=True) with tf.control_dependencies(append_ops): episode_mask = tf.reduce_sum(tf.one_hot( rows, self._capacity, dtype=tf.int32), 0) return self._length.assign_add(episode_mask)
[ "def", "append", "(", "self", ",", "transitions", ",", "rows", "=", "None", ")", ":", "rows", "=", "tf", ".", "range", "(", "self", ".", "_capacity", ")", "if", "rows", "is", "None", "else", "rows", "assert", "rows", ".", "shape", ".", "ndims", "==", "1", "assert_capacity", "=", "tf", ".", "assert_less", "(", "rows", ",", "self", ".", "_capacity", ",", "message", "=", "'capacity exceeded'", ")", "with", "tf", ".", "control_dependencies", "(", "[", "assert_capacity", "]", ")", ":", "assert_max_length", "=", "tf", ".", "assert_less", "(", "tf", ".", "gather", "(", "self", ".", "_length", ",", "rows", ")", ",", "self", ".", "_max_length", ",", "message", "=", "'max length exceeded'", ")", "with", "tf", ".", "control_dependencies", "(", "[", "assert_max_length", "]", ")", ":", "timestep", "=", "tf", ".", "gather", "(", "self", ".", "_length", ",", "rows", ")", "indices", "=", "tf", ".", "stack", "(", "[", "rows", ",", "timestep", "]", ",", "1", ")", "append_ops", "=", "tools", ".", "nested", ".", "map", "(", "lambda", "var", ",", "val", ":", "tf", ".", "scatter_nd_update", "(", "var", ",", "indices", ",", "val", ")", ",", "self", ".", "_buffers", ",", "transitions", ",", "flatten", "=", "True", ")", "with", "tf", ".", "control_dependencies", "(", "append_ops", ")", ":", "episode_mask", "=", "tf", ".", "reduce_sum", "(", "tf", ".", "one_hot", "(", "rows", ",", "self", ".", "_capacity", ",", "dtype", "=", "tf", ".", "int32", ")", ",", "0", ")", "return", "self", ".", "_length", ".", "assign_add", "(", "episode_mask", ")" ]
39.413793
13.206897
def color(self) -> str: """Get a random name of color. :return: Color name. :Example: Red. """ colors = self._data['color'] return self.random.choice(colors)
[ "def", "color", "(", "self", ")", "->", "str", ":", "colors", "=", "self", ".", "_data", "[", "'color'", "]", "return", "self", ".", "random", ".", "choice", "(", "colors", ")" ]
21
16.1
def extractall(self, directory, auto_create_dir=False, patool_path=None): ''' :param directory: directory to extract to :param auto_create_dir: auto create directory :param patool_path: the path to the patool backend ''' log.debug('extracting %s into %s (backend=%s)', self.filename, directory, self.backend) is_zipfile = zipfile.is_zipfile(self.filename) directory = _fullpath(directory) if not os.path.exists(self.filename): raise ValueError( 'archive file does not exist:' + str(self.filename)) if not os.path.exists(directory): if auto_create_dir: os.makedirs(directory) else: raise ValueError('directory does not exist:' + str(directory)) if self.backend == 'auto': if is_zipfile: self.extractall_zipfile(directory) else: self.extractall_patool(directory, patool_path) if self.backend == 'zipfile': if not is_zipfile: raise ValueError('file is not zip file:' + str(self.filename)) self.extractall_zipfile(directory) if self.backend == 'patool': self.extractall_patool(directory, patool_path)
[ "def", "extractall", "(", "self", ",", "directory", ",", "auto_create_dir", "=", "False", ",", "patool_path", "=", "None", ")", ":", "log", ".", "debug", "(", "'extracting %s into %s (backend=%s)'", ",", "self", ".", "filename", ",", "directory", ",", "self", ".", "backend", ")", "is_zipfile", "=", "zipfile", ".", "is_zipfile", "(", "self", ".", "filename", ")", "directory", "=", "_fullpath", "(", "directory", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "self", ".", "filename", ")", ":", "raise", "ValueError", "(", "'archive file does not exist:'", "+", "str", "(", "self", ".", "filename", ")", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "directory", ")", ":", "if", "auto_create_dir", ":", "os", ".", "makedirs", "(", "directory", ")", "else", ":", "raise", "ValueError", "(", "'directory does not exist:'", "+", "str", "(", "directory", ")", ")", "if", "self", ".", "backend", "==", "'auto'", ":", "if", "is_zipfile", ":", "self", ".", "extractall_zipfile", "(", "directory", ")", "else", ":", "self", ".", "extractall_patool", "(", "directory", ",", "patool_path", ")", "if", "self", ".", "backend", "==", "'zipfile'", ":", "if", "not", "is_zipfile", ":", "raise", "ValueError", "(", "'file is not zip file:'", "+", "str", "(", "self", ".", "filename", ")", ")", "self", ".", "extractall_zipfile", "(", "directory", ")", "if", "self", ".", "backend", "==", "'patool'", ":", "self", ".", "extractall_patool", "(", "directory", ",", "patool_path", ")" ]
40.806452
19.064516
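This reads like a pyunpack-style Archive method; assuming that class name, a hedged usage sketch (the archive path and output directory are invented):

# Assumes patool (or a plain zip file) is available on this machine.
Archive("data.tar.gz").extractall("out_dir", auto_create_dir=True)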
def _get_indexable_sentences(document): """ Parameters ---------- document : Text Article, book, paragraph, chapter, etc. Anything that is considered a document on its own. Yields ------ str json representation of elasticsearch type sentence """ def unroll_lists(list_of_lists): for i in itertools.product(*[set(j) for j in list_of_lists]): yield ' '.join(i) sents = document.split_by_sentences() for order, sent in enumerate(sents): postags = list(unroll_lists(sent.postag_lists)) lemmas = list(unroll_lists(sent.lemma_lists)) text = sent.text words = copy.deepcopy(sent.words) for i in words: del i['start'] del i['end'] sentence = { 'estnltk_text_object': json.dumps(sent), 'meta': { 'order_in_parent': order }, 'text': text, 'words': words, 'postags': postags, 'lemmas': lemmas } yield json.dumps(sentence)
[ "def", "_get_indexable_sentences", "(", "document", ")", ":", "def", "unroll_lists", "(", "list_of_lists", ")", ":", "for", "i", "in", "itertools", ".", "product", "(", "*", "[", "set", "(", "j", ")", "for", "j", "in", "list_of_lists", "]", ")", ":", "yield", "' '", ".", "join", "(", "i", ")", "sents", "=", "document", ".", "split_by_sentences", "(", ")", "for", "order", ",", "sent", "in", "enumerate", "(", "sents", ")", ":", "postags", "=", "list", "(", "unroll_lists", "(", "sent", ".", "postag_lists", ")", ")", "lemmas", "=", "list", "(", "unroll_lists", "(", "sent", ".", "lemma_lists", ")", ")", "text", "=", "sent", ".", "text", "words", "=", "copy", ".", "deepcopy", "(", "sent", ".", "words", ")", "for", "i", "in", "words", ":", "del", "i", "[", "'start'", "]", "del", "i", "[", "'end'", "]", "sentence", "=", "{", "'estnltk_text_object'", ":", "json", ".", "dumps", "(", "sent", ")", ",", "'meta'", ":", "{", "'order_in_parent'", ":", "order", "}", ",", "'text'", ":", "text", ",", "'words'", ":", "words", ",", "'postags'", ":", "postags", ",", "'lemmas'", ":", "lemmas", "}", "yield", "json", ".", "dumps", "(", "sentence", ")" ]
30.102564
18
def filter_scanline(type, line, fo, prev=None):
    """Apply a scanline filter to a scanline.  `type` specifies the
    filter type (0 to 4); `line` specifies the current (unfiltered)
    scanline as a sequence of bytes; `prev` specifies the previous
    (unfiltered) scanline as a sequence of bytes. `fo` specifies the
    filter offset; normally this is the size of a pixel in bytes (the
    number of bytes per sample times the number of channels), but
    when this is < 1 (for bit depths < 8) then the filter offset is 1.
    """

    assert 0 <= type < 5

    # The output array.  Which, pathetically, we extend one-byte at a
    # time (fortunately this is linear).
    out = array('B', [type])

    def sub():
        ai = -fo
        for x in line:
            if ai >= 0:
                x = (x - line[ai]) & 0xff
            out.append(x)
            ai += 1
    def up():
        for i,x in enumerate(line):
            x = (x - prev[i]) & 0xff
            out.append(x)
    def average():
        ai = -fo
        for i,x in enumerate(line):
            if ai >= 0:
                x = (x - ((line[ai] + prev[i]) >> 1)) & 0xff
            else:
                x = (x - (prev[i] >> 1)) & 0xff
            out.append(x)
            ai += 1
    def paeth():
        # http://www.w3.org/TR/PNG/#9Filter-type-4-Paeth
        ai = -fo # also used for ci
        for i,x in enumerate(line):
            a = 0
            b = prev[i]
            c = 0

            if ai >= 0:
                a = line[ai]
                c = prev[ai]
            p = a + b - c
            pa = abs(p - a)
            pb = abs(p - b)
            pc = abs(p - c)
            if pa <= pb and pa <= pc:
                Pr = a
            elif pb <= pc:
                Pr = b
            else:
                Pr = c

            x = (x - Pr) & 0xff
            out.append(x)
            ai += 1

    if not prev:
        # We're on the first line.  Some of the filters can be reduced
        # to simpler cases which makes handling the line "off the top"
        # of the image simpler.  "up" becomes "none"; "paeth" becomes
        # "left" (non-trivial, but true). "average" needs to be handled
        # specially.
        if type == 2: # "up"
            type = 0
        elif type == 3:
            prev = [0]*len(line)
        elif type == 4: # "paeth"
            type = 1
    if type == 0:
        out.extend(line)
    elif type == 1:
        sub()
    elif type == 2:
        up()
    elif type == 3:
        average()
    else: # type == 4
        paeth()
    return out
[ "def", "filter_scanline", "(", "type", ",", "line", ",", "fo", ",", "prev", "=", "None", ")", ":", "assert", "0", "<=", "type", "<", "5", "# The output array. Which, pathetically, we extend one-byte at a", "# time (fortunately this is linear).", "out", "=", "array", "(", "'B'", ",", "[", "type", "]", ")", "def", "sub", "(", ")", ":", "ai", "=", "-", "fo", "for", "x", "in", "line", ":", "if", "ai", ">=", "0", ":", "x", "=", "(", "x", "-", "line", "[", "ai", "]", ")", "&", "0xff", "out", ".", "append", "(", "x", ")", "ai", "+=", "1", "def", "up", "(", ")", ":", "for", "i", ",", "x", "in", "enumerate", "(", "line", ")", ":", "x", "=", "(", "x", "-", "prev", "[", "i", "]", ")", "&", "0xff", "out", ".", "append", "(", "x", ")", "def", "average", "(", ")", ":", "ai", "=", "-", "fo", "for", "i", ",", "x", "in", "enumerate", "(", "line", ")", ":", "if", "ai", ">=", "0", ":", "x", "=", "(", "x", "-", "(", "(", "line", "[", "ai", "]", "+", "prev", "[", "i", "]", ")", ">>", "1", ")", ")", "&", "0xff", "else", ":", "x", "=", "(", "x", "-", "(", "prev", "[", "i", "]", ">>", "1", ")", ")", "&", "0xff", "out", ".", "append", "(", "x", ")", "ai", "+=", "1", "def", "paeth", "(", ")", ":", "# http://www.w3.org/TR/PNG/#9Filter-type-4-Paeth", "ai", "=", "-", "fo", "# also used for ci", "for", "i", ",", "x", "in", "enumerate", "(", "line", ")", ":", "a", "=", "0", "b", "=", "prev", "[", "i", "]", "c", "=", "0", "if", "ai", ">=", "0", ":", "a", "=", "line", "[", "ai", "]", "c", "=", "prev", "[", "ai", "]", "p", "=", "a", "+", "b", "-", "c", "pa", "=", "abs", "(", "p", "-", "a", ")", "pb", "=", "abs", "(", "p", "-", "b", ")", "pc", "=", "abs", "(", "p", "-", "c", ")", "if", "pa", "<=", "pb", "and", "pa", "<=", "pc", ":", "Pr", "=", "a", "elif", "pb", "<=", "pc", ":", "Pr", "=", "b", "else", ":", "Pr", "=", "c", "x", "=", "(", "x", "-", "Pr", ")", "&", "0xff", "out", ".", "append", "(", "x", ")", "ai", "+=", "1", "if", "not", "prev", ":", "# We're on the first line. Some of the filters can be reduced", "# to simpler cases which makes handling the line \"off the top\"", "# of the image simpler. \"up\" becomes \"none\"; \"paeth\" becomes", "# \"left\" (non-trivial, but true). \"average\" needs to be handled", "# specially.", "if", "type", "==", "2", ":", "# \"up\"", "type", "=", "0", "elif", "type", "==", "3", ":", "prev", "=", "[", "0", "]", "*", "len", "(", "line", ")", "elif", "type", "==", "4", ":", "# \"paeth\"", "type", "=", "1", "if", "type", "==", "0", ":", "out", ".", "extend", "(", "line", ")", "elif", "type", "==", "1", ":", "sub", "(", ")", "elif", "type", "==", "2", ":", "up", "(", ")", "elif", "type", "==", "3", ":", "average", "(", ")", "else", ":", "# type == 4", "paeth", "(", ")", "return", "out" ]
29.164706
19.258824
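A small worked example of the type-1 ("sub") filter on a first scanline of 8-bit grayscale pixels, so fo = 1: each byte becomes the delta from its left neighbour, and the filter-type byte is prepended.

from array import array

line = array('B', [10, 20, 30])
print(filter_scanline(1, line, 1))  # array('B', [1, 10, 10, 10])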
def _compute_distance(cls, dists, coeffs): """ Compute third term of equation (1) on p. 1200: ``b3 * log(sqrt(Rjb ** 2 + b4 ** 2))`` """ return coeffs['b3']*np.log10(np.sqrt(dists.rjb**2. + coeffs['b4']**2.))
[ "def", "_compute_distance", "(", "cls", ",", "dists", ",", "coeffs", ")", ":", "return", "coeffs", "[", "'b3'", "]", "*", "np", ".", "log10", "(", "np", ".", "sqrt", "(", "dists", ".", "rjb", "**", "2.", "+", "coeffs", "[", "'b4'", "]", "**", "2.", ")", ")" ]
34.714286
14.428571
def ask(message, options): """Ask the message interactively, with the given possible responses""" while 1: if os.environ.get('PIP_NO_INPUT'): raise Exception('No input was expected ($PIP_NO_INPUT set); question: %s' % message) response = raw_input(message) response = response.strip().lower() if response not in options: print('Your response (%r) was not one of the expected responses: %s' % ( response, ', '.join(options))) else: return response
[ "def", "ask", "(", "message", ",", "options", ")", ":", "while", "1", ":", "if", "os", ".", "environ", ".", "get", "(", "'PIP_NO_INPUT'", ")", ":", "raise", "Exception", "(", "'No input was expected ($PIP_NO_INPUT set); question: %s'", "%", "message", ")", "response", "=", "raw_input", "(", "message", ")", "response", "=", "response", ".", "strip", "(", ")", ".", "lower", "(", ")", "if", "response", "not", "in", "options", ":", "print", "(", "'Your response (%r) was not one of the expected responses: %s'", "%", "(", "response", ",", "', '", ".", "join", "(", "options", ")", ")", ")", "else", ":", "return", "response" ]
44.583333
16.75
def com_google_fonts_check_name_license(ttFont, license): """Check copyright namerecords match license file.""" from fontbakery.constants import PLACEHOLDER_LICENSING_TEXT failed = False placeholder = PLACEHOLDER_LICENSING_TEXT[license] entry_found = False for i, nameRecord in enumerate(ttFont["name"].names): if nameRecord.nameID == NameID.LICENSE_DESCRIPTION: entry_found = True value = nameRecord.toUnicode() if value != placeholder: failed = True yield FAIL, Message("wrong", \ ("License file {} exists but" " NameID {} (LICENSE DESCRIPTION) value" " on platform {} ({})" " is not specified for that." " Value was: \"{}\"" " Must be changed to \"{}\"" "").format(license, NameID.LICENSE_DESCRIPTION, nameRecord.platformID, PlatformID(nameRecord.platformID).name, value, placeholder)) if not entry_found: yield FAIL, Message("missing", \ ("Font lacks NameID {} " "(LICENSE DESCRIPTION). A proper licensing entry" " must be set.").format(NameID.LICENSE_DESCRIPTION)) elif not failed: yield PASS, "Licensing entry on name table is correctly set."
[ "def", "com_google_fonts_check_name_license", "(", "ttFont", ",", "license", ")", ":", "from", "fontbakery", ".", "constants", "import", "PLACEHOLDER_LICENSING_TEXT", "failed", "=", "False", "placeholder", "=", "PLACEHOLDER_LICENSING_TEXT", "[", "license", "]", "entry_found", "=", "False", "for", "i", ",", "nameRecord", "in", "enumerate", "(", "ttFont", "[", "\"name\"", "]", ".", "names", ")", ":", "if", "nameRecord", ".", "nameID", "==", "NameID", ".", "LICENSE_DESCRIPTION", ":", "entry_found", "=", "True", "value", "=", "nameRecord", ".", "toUnicode", "(", ")", "if", "value", "!=", "placeholder", ":", "failed", "=", "True", "yield", "FAIL", ",", "Message", "(", "\"wrong\"", ",", "(", "\"License file {} exists but\"", "\" NameID {} (LICENSE DESCRIPTION) value\"", "\" on platform {} ({})\"", "\" is not specified for that.\"", "\" Value was: \\\"{}\\\"\"", "\" Must be changed to \\\"{}\\\"\"", "\"\"", ")", ".", "format", "(", "license", ",", "NameID", ".", "LICENSE_DESCRIPTION", ",", "nameRecord", ".", "platformID", ",", "PlatformID", "(", "nameRecord", ".", "platformID", ")", ".", "name", ",", "value", ",", "placeholder", ")", ")", "if", "not", "entry_found", ":", "yield", "FAIL", ",", "Message", "(", "\"missing\"", ",", "(", "\"Font lacks NameID {} \"", "\"(LICENSE DESCRIPTION). A proper licensing entry\"", "\" must be set.\"", ")", ".", "format", "(", "NameID", ".", "LICENSE_DESCRIPTION", ")", ")", "elif", "not", "failed", ":", "yield", "PASS", ",", "\"Licensing entry on name table is correctly set.\"" ]
48.59375
16.8125
def add_line_error(self, line_data, error_info, log_level=logging.ERROR):
        """Helper function to record and log an error message

        :param line_data: dict
        :param error_info: dict
        :param log_level: int
        :return:
        """
        if not error_info: return
        try:
            line_data['line_errors'].append(error_info)
        except KeyError:
            line_data['line_errors'] = [error_info]
        except TypeError: # no line_data
            pass
        try:
            self.logger.log(log_level, Gff3.error_format.format(current_line_num=line_data['line_index'] + 1, error_type=error_info['error_type'], message=error_info['message'], line=line_data['line_raw'].rstrip()))
        except AttributeError: # no logger
            pass
[ "def", "add_line_error", "(", "self", ",", "line_data", ",", "error_info", ",", "log_level", "=", "logging", ".", "ERROR", ")", ":", "if", "not", "error_info", ":", "return", "try", ":", "line_data", "[", "'line_errors'", "]", ".", "append", "(", "error_info", ")", "except", "KeyError", ":", "line_data", "[", "'line_errors'", "]", "=", "[", "error_info", "]", "except", "TypeError", ":", "# no line_data", "pass", "try", ":", "self", ".", "logger", ".", "log", "(", "log_level", ",", "Gff3", ".", "error_format", ".", "format", "(", "current_line_num", "=", "line_data", "[", "'line_index'", "]", "+", "1", ",", "error_type", "=", "error_info", "[", "'error_type'", "]", ",", "message", "=", "error_info", "[", "'message'", "]", ",", "line", "=", "line_data", "[", "'line_raw'", "]", ".", "rstrip", "(", ")", ")", ")", "except", "AttributeError", ":", "# no logger", "pass" ]
39.45
23.75
def load_data_old(self): """ Loads time series of 2D data grids from each opened file. The code handles loading a full time series from one file or individual time steps from multiple files. Missing files are supported. """ units = "" if len(self.file_objects) == 1 and self.file_objects[0] is not None: data = self.file_objects[0].variables[self.variable][self.forecast_hours] if hasattr(self.file_objects[0].variables[self.variable], "units"): units = self.file_objects[0].variables[self.variable].units elif len(self.file_objects) > 1: grid_shape = [len(self.file_objects), 1, 1] for file_object in self.file_objects: if file_object is not None: if self.variable in file_object.variables.keys(): grid_shape = file_object.variables[self.variable].shape elif self.variable.ljust(6, "_") in file_object.variables.keys(): grid_shape = file_object.variables[self.variable.ljust(6, "_")].shape else: print("{0} not found".format(self.variable)) raise KeyError break data = np.zeros((len(self.file_objects), grid_shape[1], grid_shape[2])) for f, file_object in enumerate(self.file_objects): if file_object is not None: if self.variable in file_object.variables.keys(): var_name = self.variable elif self.variable.ljust(6, "_") in file_object.variables.keys(): var_name = self.variable.ljust(6, "_") else: print("{0} not found".format(self.variable)) raise KeyError data[f] = file_object.variables[var_name][0] if units == "" and hasattr(file_object.variables[var_name], "units"): units = file_object.variables[var_name].units else: data = None return data, units
[ "def", "load_data_old", "(", "self", ")", ":", "units", "=", "\"\"", "if", "len", "(", "self", ".", "file_objects", ")", "==", "1", "and", "self", ".", "file_objects", "[", "0", "]", "is", "not", "None", ":", "data", "=", "self", ".", "file_objects", "[", "0", "]", ".", "variables", "[", "self", ".", "variable", "]", "[", "self", ".", "forecast_hours", "]", "if", "hasattr", "(", "self", ".", "file_objects", "[", "0", "]", ".", "variables", "[", "self", ".", "variable", "]", ",", "\"units\"", ")", ":", "units", "=", "self", ".", "file_objects", "[", "0", "]", ".", "variables", "[", "self", ".", "variable", "]", ".", "units", "elif", "len", "(", "self", ".", "file_objects", ")", ">", "1", ":", "grid_shape", "=", "[", "len", "(", "self", ".", "file_objects", ")", ",", "1", ",", "1", "]", "for", "file_object", "in", "self", ".", "file_objects", ":", "if", "file_object", "is", "not", "None", ":", "if", "self", ".", "variable", "in", "file_object", ".", "variables", ".", "keys", "(", ")", ":", "grid_shape", "=", "file_object", ".", "variables", "[", "self", ".", "variable", "]", ".", "shape", "elif", "self", ".", "variable", ".", "ljust", "(", "6", ",", "\"_\"", ")", "in", "file_object", ".", "variables", ".", "keys", "(", ")", ":", "grid_shape", "=", "file_object", ".", "variables", "[", "self", ".", "variable", ".", "ljust", "(", "6", ",", "\"_\"", ")", "]", ".", "shape", "else", ":", "print", "(", "\"{0} not found\"", ".", "format", "(", "self", ".", "variable", ")", ")", "raise", "KeyError", "break", "data", "=", "np", ".", "zeros", "(", "(", "len", "(", "self", ".", "file_objects", ")", ",", "grid_shape", "[", "1", "]", ",", "grid_shape", "[", "2", "]", ")", ")", "for", "f", ",", "file_object", "in", "enumerate", "(", "self", ".", "file_objects", ")", ":", "if", "file_object", "is", "not", "None", ":", "if", "self", ".", "variable", "in", "file_object", ".", "variables", ".", "keys", "(", ")", ":", "var_name", "=", "self", ".", "variable", "elif", "self", ".", "variable", ".", "ljust", "(", "6", ",", "\"_\"", ")", "in", "file_object", ".", "variables", ".", "keys", "(", ")", ":", "var_name", "=", "self", ".", "variable", ".", "ljust", "(", "6", ",", "\"_\"", ")", "else", ":", "print", "(", "\"{0} not found\"", ".", "format", "(", "self", ".", "variable", ")", ")", "raise", "KeyError", "data", "[", "f", "]", "=", "file_object", ".", "variables", "[", "var_name", "]", "[", "0", "]", "if", "units", "==", "\"\"", "and", "hasattr", "(", "file_object", ".", "variables", "[", "var_name", "]", ",", "\"units\"", ")", ":", "units", "=", "file_object", ".", "variables", "[", "var_name", "]", ".", "units", "else", ":", "data", "=", "None", "return", "data", ",", "units" ]
53.225
23.925
def map_sid2sub(self, sid, sub): """ Store the connection between a Session ID and a subject ID. :param sid: Session ID :param sub: subject ID """ self.set('sid2sub', sid, sub) self.set('sub2sid', sub, sid)
[ "def", "map_sid2sub", "(", "self", ",", "sid", ",", "sub", ")", ":", "self", ".", "set", "(", "'sid2sub'", ",", "sid", ",", "sub", ")", "self", ".", "set", "(", "'sub2sid'", ",", "sub", ",", "sid", ")" ]
28.333333
11.222222
def read(fname):
    """Quick way to read a file's content."""
    content = None
    with open(os.path.join(here, fname)) as f:
        content = f.read()
    return content
[ "def", "read", "(", "fname", ")", ":", "content", "=", "None", "with", "open", "(", "os", ".", "path", ".", "join", "(", "here", ",", "fname", ")", ")", "as", "f", ":", "content", "=", "f", ".", "read", "(", ")", "return", "content" ]
27.833333
14.666667
def read_xdg_config_home(name, extension): """ Read from file found in XDG-specified configuration home directory, expanding to ``${HOME}/.config/name.extension`` by default. Depends on ``XDG_CONFIG_HOME`` or ``HOME`` environment variables. :param name: application or configuration set name :param extension: file extension to look for :return: a `.Configuration` instance, possibly `.NotConfigured` """ # find optional value of ${XDG_CONFIG_HOME} config_home = environ.get('XDG_CONFIG_HOME') if not config_home: # XDG spec: "If $XDG_CONFIG_HOME is either not set or empty, a default equal to $HOME/.config should be used." # see https://specifications.freedesktop.org/basedir-spec/latest/ar01s03.html config_home = path.expanduser('~/.config') # expand to full path to configuration file in XDG config path return loadf(path.join(config_home, '{name}.{extension}'.format(name=name, extension=extension)), default=NotConfigured)
[ "def", "read_xdg_config_home", "(", "name", ",", "extension", ")", ":", "# find optional value of ${XDG_CONFIG_HOME}", "config_home", "=", "environ", ".", "get", "(", "'XDG_CONFIG_HOME'", ")", "if", "not", "config_home", ":", "# XDG spec: \"If $XDG_CONFIG_HOME is either not set or empty, a default equal to $HOME/.config should be used.\"", "# see https://specifications.freedesktop.org/basedir-spec/latest/ar01s03.html", "config_home", "=", "path", ".", "expanduser", "(", "'~/.config'", ")", "# expand to full path to configuration file in XDG config path", "return", "loadf", "(", "path", ".", "join", "(", "config_home", ",", "'{name}.{extension}'", ".", "format", "(", "name", "=", "name", ",", "extension", "=", "extension", ")", ")", ",", "default", "=", "NotConfigured", ")" ]
50.25
23.35
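A short sketch of how the XDG fallback in read_xdg_config_home plays out; the 'myapp' name and yaml extension are made up, and loadf returns NotConfigured whenever the file is absent:

import os

os.environ.pop('XDG_CONFIG_HOME', None)         # force the ~/.config fallback
config = read_xdg_config_home('myapp', 'yaml')  # looks for ~/.config/myapp.yaml

os.environ['XDG_CONFIG_HOME'] = '/etc/xdg'
config = read_xdg_config_home('myapp', 'yaml')  # now looks for /etc/xdg/myapp.yaml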
def _check_load_paths(load_path): ''' Checks the validity of the load_path, returns a sanitized version with invalid paths removed. ''' if load_path is None or not isinstance(load_path, six.string_types): return None _paths = [] for _path in load_path.split(':'): if os.path.isabs(_path) and os.path.isdir(_path): _paths.append(_path) else: log.info('Invalid augeas_cfg load_path entry: %s removed', _path) if not _paths: return None return ':'.join(_paths)
[ "def", "_check_load_paths", "(", "load_path", ")", ":", "if", "load_path", "is", "None", "or", "not", "isinstance", "(", "load_path", ",", "six", ".", "string_types", ")", ":", "return", "None", "_paths", "=", "[", "]", "for", "_path", "in", "load_path", ".", "split", "(", "':'", ")", ":", "if", "os", ".", "path", ".", "isabs", "(", "_path", ")", "and", "os", ".", "path", ".", "isdir", "(", "_path", ")", ":", "_paths", ".", "append", "(", "_path", ")", "else", ":", "log", ".", "info", "(", "'Invalid augeas_cfg load_path entry: %s removed'", ",", "_path", ")", "if", "not", "_paths", ":", "return", "None", "return", "':'", ".", "join", "(", "_paths", ")" ]
26.7
24.8
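The sanitizing behaviour of _check_load_paths is easiest to see on a mixed load_path string; the entries below are examples only. Relative and non-existent paths are dropped (with a log line), and None comes back if nothing survives:

paths = _check_load_paths('/usr/share/augeas/lenses:lenses/relative:/no/such/dir')
# on a machine where only the first entry is an existing absolute directory,
# this returns '/usr/share/augeas/lenses'; otherwise it returns None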
def NewType(name, tp): """NewType creates simple unique types with almost zero runtime overhead. NewType(name, tp) is considered a subtype of tp by static type checkers. At runtime, NewType(name, tp) returns a dummy function that simply returns its argument. Usage:: UserId = NewType('UserId', int) def name_by_id(user_id: UserId) -> str: ... UserId('user') # Fails type check name_by_id(42) # Fails type check name_by_id(UserId(42)) # OK num = UserId(5) + 1 # type: int """ def new_type(x): return x new_type.__name__ = name new_type.__supertype__ = tp return new_type
[ "def", "NewType", "(", "name", ",", "tp", ")", ":", "def", "new_type", "(", "x", ")", ":", "return", "x", "new_type", ".", "__name__", "=", "name", "new_type", ".", "__supertype__", "=", "tp", "return", "new_type" ]
27.16
20.84
def _tick(self): """Write progress info and move cursor to beginning of line.""" if (self.verbose >= 3 and not IS_REDIRECTED) or self.options.get("progress"): stats = self.get_stats() prefix = DRY_RUN_PREFIX if self.dry_run else "" sys.stdout.write( "{}Touched {}/{} entries in {} directories...\r".format( prefix, stats["entries_touched"], stats["entries_seen"], stats["local_dirs"], ) ) sys.stdout.flush() return
[ "def", "_tick", "(", "self", ")", ":", "if", "(", "self", ".", "verbose", ">=", "3", "and", "not", "IS_REDIRECTED", ")", "or", "self", ".", "options", ".", "get", "(", "\"progress\"", ")", ":", "stats", "=", "self", ".", "get_stats", "(", ")", "prefix", "=", "DRY_RUN_PREFIX", "if", "self", ".", "dry_run", "else", "\"\"", "sys", ".", "stdout", ".", "write", "(", "\"{}Touched {}/{} entries in {} directories...\\r\"", ".", "format", "(", "prefix", ",", "stats", "[", "\"entries_touched\"", "]", ",", "stats", "[", "\"entries_seen\"", "]", ",", "stats", "[", "\"local_dirs\"", "]", ",", ")", ")", "sys", ".", "stdout", ".", "flush", "(", ")", "return" ]
39.466667
16.333333
def equivalent_sites(self, scaled_positions, ondublicates='error', symprec=1e-3):
        """Returns the scaled positions and all their equivalent sites.

        Parameters:

        scaled_positions: list | array
            List of non-equivalent sites given in unit cell coordinates.
        ondublicates : 'keep' | 'replace' | 'warn' | 'error'
            Action if `scaled_positions` contains symmetry-equivalent
            positions:

            'keep'
                ignore additional symmetry-equivalent positions
            'replace'
                replace the kind of the already-stored site with that
                of the new position
            'warn'
                like 'keep', but issue a UserWarning
            'error'
                raises a SpacegroupValueError
        symprec: float
            Minimum "distance" between two sites in scaled coordinates
            before they are counted as the same site.

        Returns:

        sites: array
            A NumPy array of equivalent sites.
        kinds: list
            A list of integer indices specifying which input site is
            equivalent to the corresponding returned site.

        Example:

        >>> from ase.lattice.spacegroup import Spacegroup
        >>> sg = Spacegroup(225)  # fcc
        >>> sites, kinds = sg.equivalent_sites([[0, 0, 0], [0.5, 0.0, 0.0]])
        >>> sites
        array([[ 0. ,  0. ,  0. ],
               [ 0. ,  0.5,  0.5],
               [ 0.5,  0. ,  0.5],
               [ 0.5,  0.5,  0. ],
               [ 0.5,  0. ,  0. ],
               [ 0. ,  0.5,  0. ],
               [ 0. ,  0. ,  0.5],
               [ 0.5,  0.5,  0.5]])
        >>> kinds
        [0, 0, 0, 0, 1, 1, 1, 1]
        """
        kinds = []
        sites = []
        symprec2 = symprec**2
        scaled = np.array(scaled_positions, ndmin=2)
        for kind, pos in enumerate(scaled):
            for rot, trans in self.get_symop():
                site = np.mod(np.dot(rot, pos) + trans, 1.)
                if not sites:
                    sites.append(site)
                    kinds.append(kind)
                    continue
                t = site - sites
                mask = np.sum(t*t, 1) < symprec2
                if np.any(mask):
                    ind = np.argwhere(mask)[0][0]
                    if kinds[ind] == kind:
                        pass
                    elif ondublicates == 'keep':
                        pass
                    elif ondublicates == 'replace':
                        kinds[ind] = kind
                    elif ondublicates == 'warn':
                        warnings.warn('scaled_positions %d and %d '
                                      'are equivalent'%(kinds[ind], kind))
                    elif ondublicates == 'error':
                        raise SpacegroupValueError(
                            'scaled_positions %d and %d are equivalent'%(
                                kinds[ind], kind))
                    else:
                        raise SpacegroupValueError(
                            'Argument "ondublicates" must be one of: '
                            '"keep", "replace", "warn" or "error".')
                else:
                    sites.append(site)
                    kinds.append(kind)
        return np.array(sites), kinds
[ "def", "equivalent_sites", "(", "self", ",", "scaled_positions", ",", "ondublicates", "=", "'error'", ",", "symprec", "=", "1e-3", ")", ":", "kinds", "=", "[", "]", "sites", "=", "[", "]", "symprec2", "=", "symprec", "**", "2", "scaled", "=", "np", ".", "array", "(", "scaled_positions", ",", "ndmin", "=", "2", ")", "for", "kind", ",", "pos", "in", "enumerate", "(", "scaled", ")", ":", "for", "rot", ",", "trans", "in", "self", ".", "get_symop", "(", ")", ":", "site", "=", "np", ".", "mod", "(", "np", ".", "dot", "(", "rot", ",", "pos", ")", "+", "trans", ",", "1.", ")", "if", "not", "sites", ":", "sites", ".", "append", "(", "site", ")", "kinds", ".", "append", "(", "kind", ")", "continue", "t", "=", "site", "-", "sites", "mask", "=", "np", ".", "sum", "(", "t", "*", "t", ",", "1", ")", "<", "symprec2", "if", "np", ".", "any", "(", "mask", ")", ":", "ind", "=", "np", ".", "argwhere", "(", "mask", ")", "[", "0", "]", "[", "0", "]", "if", "kinds", "[", "ind", "]", "==", "kind", ":", "pass", "elif", "ondublicates", "==", "'keep'", ":", "pass", "elif", "ondublicates", "==", "'replace'", ":", "kinds", "[", "ind", "]", "=", "kind", "elif", "ondublicates", "==", "'warn'", ":", "warnings", ".", "warn", "(", "'scaled_positions %d and %d '", "'are equivalent'", "%", "(", "kinds", "[", "ind", "]", ",", "kind", ")", ")", "elif", "ondublicates", "==", "'error'", ":", "raise", "SpacegroupValueError", "(", "'scaled_positions %d and %d are equivalent'", "%", "(", "kinds", "[", "ind", "]", ",", "kind", ")", ")", "else", ":", "raise", "SpacegroupValueError", "(", "'Argument \"ondublicates\" must be one of: '", "'\"keep\", \"replace\", \"warn\" or \"error\".'", ")", "else", ":", "sites", ".", "append", "(", "site", ")", "kinds", ".", "append", "(", "kind", ")", "return", "np", ".", "array", "(", "sites", ")", ",", "kinds" ]
37.395349
16.046512
def _get_value(self, node, scope, ctxt, stream):
        """Return the value of the node. It is expected to be
        either an AST.ID instance or a constant

        :node: the AST node to resolve (an AST.ID or a constant)
        :returns: the node's value - ``_pfp__value`` when the node resolves
            to a field, otherwise the constant itself
        """
        res = self._handle_node(node, scope, ctxt, stream)
        if isinstance(res, fields.Field):
            return res._pfp__value
        # assume it's a constant
        else:
            return res
[ "def", "_get_value", "(", "self", ",", "node", ",", "scope", ",", "ctxt", ",", "stream", ")", ":", "res", "=", "self", ".", "_handle_node", "(", "node", ",", "scope", ",", "ctxt", ",", "stream", ")", "if", "isinstance", "(", "res", ",", "fields", ".", "Field", ")", ":", "return", "res", ".", "_pfp__value", "# assume it's a constant", "else", ":", "return", "res" ]
24
19.529412
def standings(self):
        '''Get standings from the community's account'''
        headers = {"Content-type": "application/x-www-form-urlencoded","Accept": "text/plain","User-Agent": user_agent}
        req = self.session.get('http://'+self.domain+'/standings.phtml',headers=headers).content
        soup = BeautifulSoup(req)
        table = soup.find('table',{'id':'tablestandings'}).find_all('tr')
        clasificacion = []
        for tablas in table[1:]: # skip the header row
            clasificacion.append(('%s\t%s\t%s\t%s\t%s')%(tablas.find('td').text,tablas.find('div')['id'],tablas.a.text,tablas.find_all('td')[3].text,tablas.find_all('td')[4].text))
        return clasificacion
[ "def", "standings", "(", "self", ")", ":", "headers", "=", "{", "\"Content-type\"", ":", "\"application/x-www-form-urlencoded\"", ",", "\"Accept\"", ":", "\"text/plain\"", ",", "\"User-Agent\"", ":", "user_agent", "}", "req", "=", "self", ".", "session", ".", "get", "(", "'http://'", "+", "self", ".", "domain", "+", "'/standings.phtml'", ",", "headers", "=", "headers", ")", ".", "content", "soup", "=", "BeautifulSoup", "(", "req", ")", "table", "=", "soup", ".", "find", "(", "'table'", ",", "{", "'id'", ":", "'tablestandings'", "}", ")", ".", "find_all", "(", "'tr'", ")", "clasificacion", "=", "[", "]", "[", "clasificacion", ".", "append", "(", "(", "'%s\\t%s\\t%s\\t%s\\t%s'", ")", "%", "(", "tablas", ".", "find", "(", "'td'", ")", ".", "text", ",", "tablas", ".", "find", "(", "'div'", ")", "[", "'id'", "]", ",", "tablas", ".", "a", ".", "text", ",", "tablas", ".", "find_all", "(", "'td'", ")", "[", "3", "]", ".", "text", ",", "tablas", ".", "find_all", "(", "'td'", ")", "[", "4", "]", ".", "text", ")", ")", "for", "tablas", "in", "table", "[", "1", ":", "]", "]", "return", "clasificacion" ]
72.555556
44.333333
def commit(cls, client=None): """Commit everything from datapoints via the client. :param client: InfluxDBClient instance for writing points to InfluxDB. :attention: any provided client will supersede the class client. :return: result of client.write_points. """ if not client: client = cls._client rtn = client.write_points(cls._json_body_()) cls._reset_() return rtn
[ "def", "commit", "(", "cls", ",", "client", "=", "None", ")", ":", "if", "not", "client", ":", "client", "=", "cls", ".", "_client", "rtn", "=", "client", ".", "write_points", "(", "cls", ".", "_json_body_", "(", ")", ")", "cls", ".", "_reset_", "(", ")", "return", "rtn" ]
36.833333
17.25
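A hedged usage sketch for commit, assuming this is the classmethod of an influxdb-python style SeriesHelper; the measurement name, tag, and connection details are invented:

from influxdb import InfluxDBClient, SeriesHelper

class CpuLoad(SeriesHelper):
    class Meta:
        client = InfluxDBClient(host='localhost', database='metrics')
        series_name = 'cpu.load'
        fields = ['value']
        tags = ['host']

CpuLoad(host='web-1', value=0.64)  # buffers one datapoint
CpuLoad.commit()                   # writes via the Meta client, then resets the buffer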
def info( self, path="/", # type: Text namespaces=None, # type: Optional[Collection[Text]] **kwargs # type: Any ): # type: (...) -> Iterator[Tuple[Text, Info]] """Walk a filesystem, yielding path and `Info` of resources. Arguments: path (str): A path to a directory. namespaces (list, optional): A list of namespaces to include in the resource information, e.g. ``['basic', 'access']`` (defaults to ``['basic']``). Keyword Arguments: ignore_errors (bool): If `True`, any errors reading a directory will be ignored, otherwise exceptions will be raised. on_error (callable): If ``ignore_errors`` is `False`, then this callable will be invoked with a path and the exception object. It should return `True` to ignore the error, or `False` to re-raise it. search (str): If ``'breadth'`` then the directory will be walked *top down*. Set to ``'depth'`` to walk *bottom up*. filter (list): If supplied, this parameter should be a list of file name patterns, e.g. ``['*.py']``. Files will only be returned if the final component matches one of the patterns. exclude (list, optional): If supplied, this parameter should be a list of filename patterns, e.g. ``['~*', '.*']``. Files matching any of these patterns will be removed from the walk. filter_dirs (list, optional): A list of patterns that will be used to match directories paths. The walk will only open directories that match at least one of these patterns. exclude_dirs (list): A list of patterns that will be used to filter out directories from the walk, e.g. ``['*.svn', '*.git']``. max_depth (int, optional): Maximum directory depth to walk. Returns: ~collections.Iterable: an iterable yielding tuples of ``(<absolute path>, <resource info>)``. This method invokes `Walker.info` with the bound `FS` object. """ walker = self._make_walker(**kwargs) return walker.info(self.fs, path=path, namespaces=namespaces)
[ "def", "info", "(", "self", ",", "path", "=", "\"/\"", ",", "# type: Text", "namespaces", "=", "None", ",", "# type: Optional[Collection[Text]]", "*", "*", "kwargs", "# type: Any", ")", ":", "# type: (...) -> Iterator[Tuple[Text, Info]]", "walker", "=", "self", ".", "_make_walker", "(", "*", "*", "kwargs", ")", "return", "walker", ".", "info", "(", "self", ".", "fs", ",", "path", "=", "path", ",", "namespaces", "=", "namespaces", ")" ]
47.897959
24.979592
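Since this reads like pyfilesystem2's BoundWalker, here is a small example of the bound form in use; the directory is arbitrary, and the 'details' namespace is requested so that info.size is populated:

import fs

my_fs = fs.open_fs('.')  # any FS URL or local path
for path, info in my_fs.walk.info(namespaces=['details']):
    if not info.is_dir:
        print(path, info.size)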
def serialize(cls, obj, buf, lineLength, validate): """ Apple's Address Book is *really* weird with images, it expects base64 data to have very specific whitespace. It seems Address Book can handle PHOTO if it's not wrapped, so don't wrap it. """ if wacky_apple_photo_serialize: lineLength = REALLY_LARGE VCardTextBehavior.serialize(obj, buf, lineLength, validate)
[ "def", "serialize", "(", "cls", ",", "obj", ",", "buf", ",", "lineLength", ",", "validate", ")", ":", "if", "wacky_apple_photo_serialize", ":", "lineLength", "=", "REALLY_LARGE", "VCardTextBehavior", ".", "serialize", "(", "obj", ",", "buf", ",", "lineLength", ",", "validate", ")" ]
47.222222
14.555556
def calculate_elem_per_kb(max_chunk_kb, matrix_dtype): """ Calculates the number of elem per kb depending on the max chunk size set. Input: - max_chunk_kb (int, default=1024): The maximum number of KB a given chunk will occupy - matrix_dtype (numpy dtype, default=numpy.float32): Storage data type for data matrix. Currently needs to be np.float32 or np.float64 (TODO: figure out a better way to get bits from a numpy dtype). Returns: elem_per_kb (int), the number of elements per kb for matrix dtype specified. """ if matrix_dtype == numpy.float32: return (max_chunk_kb * 8)/32 elif matrix_dtype == numpy.float64: return (max_chunk_kb * 8)/64 else: msg = "Invalid matrix_dtype: {}; only numpy.float32 and numpy.float64 are currently supported".format(matrix_dtype) logger.error(msg) raise Exception("write_gctx.calculate_elem_per_kb " + msg)
[ "def", "calculate_elem_per_kb", "(", "max_chunk_kb", ",", "matrix_dtype", ")", ":", "if", "matrix_dtype", "==", "numpy", ".", "float32", ":", "return", "(", "max_chunk_kb", "*", "8", ")", "/", "32", "elif", "matrix_dtype", "==", "numpy", ".", "float64", ":", "return", "(", "max_chunk_kb", "*", "8", ")", "/", "64", "else", ":", "msg", "=", "\"Invalid matrix_dtype: {}; only numpy.float32 and numpy.float64 are currently supported\"", ".", "format", "(", "matrix_dtype", ")", "logger", ".", "error", "(", "msg", ")", "raise", "Exception", "(", "\"write_gctx.calculate_elem_per_kb \"", "+", "msg", ")" ]
46.9
29.6
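The arithmetic in calculate_elem_per_kb is easy to check by hand; note the function was written for Python 2 style division, so under Python 3 it returns a float (256.0 rather than 256), which the comparisons below tolerate:

import numpy

# float32: (1024 * 8) / 32 -> 256 elements per kb
assert calculate_elem_per_kb(1024, numpy.float32) == 256
# float64 packs half as many: (1024 * 8) / 64 -> 128
assert calculate_elem_per_kb(1024, numpy.float64) == 128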