[Dataset preview header] Columns: "Unnamed: 0" (int64 row index, 0–389k), "code" (string, 26–79.6k characters), "docstring" (string, 1–46.9k characters).
14,000
def process_exception_message(exception):
    # NB: the string literals in this entry were lost in extraction; the
    # constants below are plausible reconstructions, not the originals.
    exception_message = str(exception)
    for replace_char in ['\t', '\n', '\\n']:
        exception_message = exception_message.replace(
            replace_char, ' ' if replace_char != '\\n' else '')
    return exception_message.replace('  ', ' ')
Process an exception message. Args: exception: The exception to process. Returns: A filtered string summarizing the exception.
14,001
def endStep(self, key):
    ptime = _ptime()
    if key is not None:
        # dict keys reconstructed from context; originals lost in extraction
        self.steps[key]['end'] = ptime
        self.steps[key]['elapsed'] = ptime[1] - self.steps[key]['start'][1]
    self.end = ptime
    print('==== Processing Step', key, 'finished at', ptime[0])
    print('')
Record the end time for the step. If key is None, simply record ptime as the end time for the class, representing the overall runtime since the class was initialized.
14,002
def change_email(*_, user_id=None, new_email=None):
    # echoed strings reconstructed as plausible values; originals lost in extraction
    click.echo(green('Change user email'))
    click.echo(green('-' * 40))
    with get_app().app_context():
        user = find_user(dict(id=user_id))
        if not user:
            click.echo(red('User not found'))
            return
        user.email = new_email
        result = user_service.save(user)
        if not isinstance(result, User):
            print_validation_errors(result)
            return
        user.confirm_email()
        user_service.save(user)
        msg = 'Email for user {} changed to {}'  # reconstructed template
        click.echo(green(msg.format(user.email, new_email)))
Change email for a user
14,003
def create_widget(self):
    d = self.declaration
    self.widget = TabLayout(self.get_context(), None, d.style)
Create the underlying widget.
14,004
def getStatus(self):
    if self.status == CONST.COLLECTION_LOADING_STATUS:
        return "loading"
    elif self.status == CONST.COLLECTION_LOADED_STATUS:
        return "loaded"
    elif self.status == CONST.COLLECTION_DELETED_STATUS:
        return "deleted"
    elif self.status == CONST.COLLECTION_UNLOADED_STATUS:
        return "unloaded"
    elif self.status == CONST.COLLECTION_NEWBORN_STATUS:
        return "newborn"
    else:
        raise ValueError("The collection has an Unknown status %s" % self.status)
Returns a word describing the status of the collection (loaded, loading, deleted, unloaded, newborn) instead of a number; if you prefer the number, it is in self.status.
14,005
def async_save_result(self):
    if hasattr(self, "_async_future") and self._async_future.done():
        self._async_future.result()
        return True
    else:
        return False
Retrieves the result of this subject's asynchronous save.

- Returns `True` if the subject was saved successfully.
- Raises `concurrent.futures.CancelledError` if the save was cancelled.
- If the save failed, raises the relevant exception.
- Returns `False` if the subject hasn't finished saving or if the subject
  has not been queued for asynchronous save.
14,006
def extra_create_kwargs(self):
    user = self.get_agnocomplete_context()
    if user:
        _, domain = user.email.split('@')
        return {'domain': domain}  # key reconstructed from the docstring
    return {}
Inject the domain of the current user in the new model instances.
14,007
def _request(self, service, **kw):
    # dict keys reconstructed as plausible values (their casing is a guess);
    # the original string literals were lost in extraction.
    fb_request = {
        'SERVICE': service,
    }
    for key in ['LIMIT', 'OFFSET', 'FILTER', 'DATA']:
        fb_request[key] = kw.pop(key, None)
    if kw:
        raise _exc.FastbillRequestError("Unknown arguments: %s" %
                                        ", ".join(kw.keys()))
    data = _jsonencoder.dumps(fb_request)
    _logger.debug("Sending data: %r", data)
    self._pre_request_callback(service, fb_request)
    http_resp = self.session.post(self.SERVICE_URL,
                                  auth=self.auth,
                                  headers=self.headers,
                                  timeout=self.timeout,
                                  data=data)
    self._post_request_callback(service, fb_request, http_resp)
    try:
        json_resp = http_resp.json()
    except ValueError:
        _logger.debug("Got data: %r", http_resp.content)
        _abort_http(service, http_resp)
        return
    else:
        _logger.debug("Got data: %r", json_resp)
    errors = json_resp['RESPONSE'].get('ERRORS')
    if errors:
        _abort_api(service, json_resp, errors)
    if json_resp['REQUEST']['SERVICE'] != service:
        raise _exc.FastbillError(
            "API Error: Got response from wrong service.")
    return _response.FastbillResponse(json_resp['RESPONSE'], self)
Do the actual request to Fastbill's API server. If successful, returns the RESPONSE section of the response; in case of an error, raises a subclass of FastbillError.
14,008
def create(gandi, resource, domain, duration, owner, admin, tech, bill,
           nameserver, extra_parameter, background):
    # echoed strings reconstructed as plausible values; originals lost in extraction
    if domain:
        gandi.echo('/!\\ --domain option is deprecated and will be removed.')
        gandi.echo("You should use 'gandi domain create %s' instead." % domain)
    if (domain and resource) and (domain != resource):
        gandi.echo('Error: domain %s and resource %s do not match.'
                   % (domain, resource))
        return
    _domain = domain or resource
    if not _domain:
        _domain = click.prompt('Domain')
    result = gandi.domain.create(_domain, duration, owner, admin, tech, bill,
                                 nameserver, extra_parameter, background)
    if background:
        gandi.pretty_echo(result)
    return result
Buy a domain.
14,009
def headerHTML(header, fname):
    # replacement literals reconstructed: newline -> <br>, file mode 'w'
    html = "<html><body><code>"
    html += "<h2>%s</h2>" % (fname)
    html += pprint.pformat(header, indent=1)
    html = html.replace("\n", "<br>").replace(" ", "&nbsp;")
    html = html.replace(r"\x00", "")
    html += "</code></body></html>"
    print("saving header file:", fname)
    f = open(fname, 'w')
    f.write(html)
    f.close()
    webbrowser.open(fname)
given the bytestring ABF header, make and launch HTML.
14,010
def parse_args(spectypes):
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "-c", "--constants",
        help="emit constants instead of spec dict",
        action="store_true"
    )
    arg_parser.add_argument(
        "spectype",
        help="specifies the spec type to be generated",
        choices=spectypes
    )
    return arg_parser.parse_args()
Return arguments object formed by parsing the command line used to launch the program.
14,011
def import_domaindump():
    # file-suffix, notification, and stats-key literals reconstructed as
    # plausible values; the originals were lost in extraction.
    parser = argparse.ArgumentParser(
        description="Imports users, groups and computers result files from "
                    "the ldapdomaindump tool, will resolve the names from "
                    "domain_computers output for IPs")
    parser.add_argument("files", nargs='+', help="The domaindump files to import")
    arguments = parser.parse_args()
    domain_users_file = ''
    domain_groups_file = ''
    computer_count = 0
    user_count = 0
    stats = {}
    for filename in arguments.files:
        if filename.endswith('domain_computers.json'):
            print_notification('Parsing domain computers')
            computer_count = parse_domain_computers(filename)
            if computer_count:
                stats['hosts'] = computer_count
                print_success("{} hosts imported".format(computer_count))
        elif filename.endswith('domain_users.json'):
            domain_users_file = filename
        elif filename.endswith('domain_groups.json'):
            domain_groups_file = filename
    if domain_users_file:
        print_notification("Parsing domain users")
        user_count = parse_domain_users(domain_users_file, domain_groups_file)
        if user_count:
            print_success("{} users imported".format(user_count))
            stats['users'] = user_count
    Logger().log('import_domaindump',
                 'Imported {} users and {} computers'.format(user_count, computer_count),
                 stats)
Parses ldapdomaindump files and stores hosts and users in elasticsearch.
14,012
def _tile(self, n):
    pos = self._trans(self.pos[n])
    return Tile(pos, pos).pad(self.support_pad)
Get the update tile surrounding particle `n`
14,013
def perform_command(self):
    if len(self.actual_arguments) < 1:
        return self.print_help(short=True)
    for cls, switches in self.TOOLS:
        if self.has_option(switches):
            arguments = [a for a in sys.argv if a not in switches]
            return cls(invoke=(self.invoke + u" %s" % switches[0])).run(arguments=arguments)
    if u"-h" in self.actual_arguments:
        return self.print_help(short=True)
    if u"--help" in self.actual_arguments:
        return self.print_help(short=False)
    if u"--version" in self.actual_arguments:
        return self.print_name_version()
    return ExecuteTaskCLI(invoke=self.invoke).run(arguments=sys.argv)
Perform command and return the appropriate exit code. :rtype: int
14,014
def _query_zendesk(self, endpoint, object_type, *endpoint_args, **endpoint_kwargs):
    # the 'id'/'ids' keys are reconstructed from the docstring; the original
    # string literals were lost in extraction.
    _id = endpoint_kwargs.get('id', None)
    if _id:
        item = self.cache.get(object_type, _id)
        if item:
            return item
        else:
            return self._get(url=self._build_url(endpoint(*endpoint_args, **endpoint_kwargs)))
    elif 'ids' in endpoint_kwargs:
        cached_objects = []
        for _id in endpoint_kwargs['ids']:
            obj = self.cache.get(object_type, _id)
            if not obj:
                return self._get(self._build_url(endpoint=endpoint(*endpoint_args, **endpoint_kwargs)))
            cached_objects.append(obj)
        return ZendeskResultGenerator(self, {}, response_objects=cached_objects,
                                      object_type=object_type)
    else:
        return self._get(self._build_url(endpoint=endpoint(*endpoint_args, **endpoint_kwargs)))
Query Zendesk for items. If an id or list of ids are passed, attempt to
locate these items in the relevant cache. If they cannot be found, or no
ids are passed, execute a call to Zendesk to retrieve the items.

:param endpoint: target endpoint.
:param object_type: object type we are expecting.
:param endpoint_args: args for endpoint
:param endpoint_kwargs: kwargs for endpoint
:return: either a ResultGenerator or a Zenpy object.
14,015
def propagate_defaults(config_doc):
    # the 'defaults' key is reconstructed from the function's purpose
    for group_name, group_doc in config_doc.items():
        if isinstance(group_doc, dict):
            defaults = group_doc.get('defaults', {})
            for item_name, item_doc in group_doc.items():
                if item_name == 'defaults':
                    continue
                if isinstance(item_doc, dict):
                    group_doc[item_name] = \
                        dict_merge_pair(copy.deepcopy(defaults), item_doc)
    return config_doc
Propagate default values to sections of the doc.
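A minimal sketch of the intended behavior (the config layout here is hypothetical):

    config = {"servers": {"defaults": {"port": 8080, "debug": False},
                          "web": {"debug": True}}}
    propagate_defaults(config)
    # config["servers"]["web"] == {"port": 8080, "debug": True}
    # defaults are deep-copied in first, then overridden by the item's own keys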
14,016
def server(self):
    try:
        tar = urllib2.urlopen(self.registry)
        meta = tar.info()
        return int(meta.getheaders("Content-Length")[0])
    except (urllib2.URLError, IndexError):
        return " "
Returns the size (Content-Length) of the remote file.
14,017
def menucheck(self, window_name, object_name):
    menu_handle = self._get_menu_handle(window_name, object_name)
    if not menu_handle.AXEnabled:
        raise LdtpServerException(u"Object %s state disabled" % object_name)
    try:
        if menu_handle.AXMenuItemMarkChar:
            return 1
    except atomac._a11y.Error:
        pass
    menu_handle.Press()
    return 1
Check (click) a menu item.

@param window_name: Window name to look for, either full name, LDTP's
    name convention, or a Unix glob.
@type window_name: string
@param object_name: Object name to look for, either full name, LDTP's
    name convention, or a Unix glob. Or menu hierarchy.
@type object_name: string

@return: 1 on success.
@rtype: integer
14,018
def makeAsn(segID, N, CA, C, O, geo):
    CA_CB_length = geo.CA_CB_length
    C_CA_CB_angle = geo.C_CA_CB_angle
    N_C_CA_CB_diangle = geo.N_C_CA_CB_diangle
    CB_CG_length = geo.CB_CG_length
    CA_CB_CG_angle = geo.CA_CB_CG_angle
    N_CA_CB_CG_diangle = geo.N_CA_CB_CG_diangle
    CG_OD1_length = geo.CG_OD1_length
    CB_CG_OD1_angle = geo.CB_CG_OD1_angle
    CA_CB_CG_OD1_diangle = geo.CA_CB_CG_OD1_diangle
    CG_ND2_length = geo.CG_ND2_length
    CB_CG_ND2_angle = geo.CB_CG_ND2_angle
    CA_CB_CG_ND2_diangle = geo.CA_CB_CG_ND2_diangle
    carbon_b = calculateCoordinates(N, C, CA, CA_CB_length, C_CA_CB_angle, N_C_CA_CB_diangle)
    CB = Atom("CB", carbon_b, 0.0, 1.0, " ", " CB", 0, "C")
    carbon_g = calculateCoordinates(N, CA, CB, CB_CG_length, CA_CB_CG_angle, N_CA_CB_CG_diangle)
    CG = Atom("CG", carbon_g, 0.0, 1.0, " ", " CG", 0, "C")
    oxygen_d1 = calculateCoordinates(CA, CB, CG, CG_OD1_length, CB_CG_OD1_angle, CA_CB_CG_OD1_diangle)
    OD1 = Atom("OD1", oxygen_d1, 0.0, 1.0, " ", " OD1", 0, "O")
    nitrogen_d2 = calculateCoordinates(CA, CB, CG, CG_ND2_length, CB_CG_ND2_angle, CA_CB_CG_ND2_diangle)
    ND2 = Atom("ND2", nitrogen_d2, 0.0, 1.0, " ", " ND2", 0, "N")
    # residue id tuple reconstructed; the blank-string fields were lost in extraction
    res = Residue((" ", segID, " "), "ASN", " ")
    res.add(N)
    res.add(CA)
    res.add(C)
    res.add(O)
    res.add(CB)
    res.add(CG)
    res.add(OD1)
    res.add(ND2)
    return res
Creates an Asparagine residue
14,019
def get_ad_via_hitid(hit_id):
    # config section/keys, URL, and response key reconstructed as plausible
    # values; the original string literals were lost in extraction.
    username = CONFIG.get('psiTurk Access', 'psiturk_access_key_id')
    password = CONFIG.get('psiTurk Access', 'psiturk_secret_access_id')
    try:
        req = requests.get('https://api.psiturk.org/api/ad/lookup/' + hit_id,
                           auth=(username, password))
    except:
        raise ExperimentError('api_server_not_reachable')
    else:
        if req.status_code == 200:
            return req.json()['ad_id']
        else:
            return "error"
Get ad via HIT id
14,020
def user_remove(name, database=None, user=None, password=None, host=None, port=None):
    # log messages reconstructed as plausible values; originals lost in extraction
    if not user_exists(name, database, user, password, host, port):
        if database:
            log.info('User \'%s\' does not exist for database \'%s\'', name, database)
        else:
            log.info('Cluster admin \'%s\' does not exist', name)
        return False
    client = _client(user=user, password=password, host=host, port=port)
    if not database:
        return client.delete_cluster_admin(name)
    client.switch_database(database)
    return client.delete_database_user(name)
Remove a cluster admin or a database user.

If a database is specified: it will remove the database user.
If a database is not specified: it will remove the cluster admin.

name
    User name to remove
database
    The database to remove the user from
user
    The user to connect as
password
    The password of the user
host
    The host to connect to
port
    The port to connect to

CLI Example:

.. code-block:: bash

    salt '*' influxdb08.user_remove <name>
    salt '*' influxdb08.user_remove <name> <database>
    salt '*' influxdb08.user_remove <name> <database> <user> <password> <host> <port>
14,021
def _parse_reserved_marker(self, fptr):
    # marker-id format string reconstructed as a plausible value
    the_id = '0x{0:x}'.format(self._marker_id)
    segment = Segment(marker_id=the_id, offset=self._offset, length=0)
    return segment
Marker range between 0xff30 and 0xff39.
14,022
def step_undefined_step_snippets_should_not_exist_for_table(context):
    assert context.table, "REQUIRES: table"
    for row in context.table.rows:
        step = row["Step"]
        step_undefined_step_snippet_should_not_exist_for(context, step)
Checks if undefined-step snippets are not provided.

EXAMPLE:

    Then undefined-step snippets should not exist for:
        | Step                            |
        | When an known step is used      |
        | Then another known step is used |
14,023
def _format_regular_value(self, str_in):
    try:
        value = int(str_in, base=10)
        return str(value)
    except ValueError as e:
        msg = "Invalid integer. Read '{}'.".format(str_in)  # message reconstructed
        e_new = InvalidEntryError(msg)
        e_new.field_spec = self
        raise_from(e_new, e)
We need to reformat integer strings, as there can be different strings for
the same integer. The strategy of unification here is to first parse the
integer string to an Integer type. Thus all of '+13', ' 13', '13' will be
parsed to 13. We then convert the integer value to an unambiguous string
(no whitespace, leading '-' for negative numbers, no leading '+').

:param str_in: integer string
:return: integer string without whitespace, leading '-' for negative
    numbers, no leading '+'
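For instance, the normalization reduces all spellings of the same integer to one canonical string (a plain-int sketch of the rule):

    for s in ('+13', ' 13', '13'):
        assert str(int(s, base=10)) == '13'
    assert str(int('-007', base=10)) == '-7'  # no leading zeros or '+'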
14,024
def matches(self, name):
    return ((self.match.search(name)
             or (self.include
                 and filter(None, [inc.search(name) for inc in self.include])))
            and ((not self.exclude)
                 or not filter(None, [exc.search(name) for exc in self.exclude])))
Does the name match my requirements? To match, a name must match config.testMatch OR config.include and it must not match config.exclude
14,025
def restart(name, timeout=90, with_deps=False, with_parents=False):
    # the 'salt-minion' check is reconstructed from the docstring
    if 'salt-minion' in name:
        create_win_salt_restart_task()
        return execute_salt_restart_task()
    ret = set()
    ret.add(stop(name=name, timeout=timeout, with_deps=with_deps,
                 with_parents=with_parents))
    ret.add(start(name=name, timeout=timeout, with_deps=with_deps,
                  with_parents=with_parents))
    return False not in ret
Restart the named service. This issues a stop command followed by a start.

Args:
    name: The name of the service to restart.

        .. note::
            If the name passed is ``salt-minion`` a scheduled task is
            created and executed to restart the salt-minion service.

    timeout (int):
        The time in seconds to wait for the service to stop and start
        before returning. Default is 90 seconds.

        .. note::
            The timeout is cumulative, meaning it is applied to the stop
            and then to the start command. A timeout of 90 could take up
            to 180 seconds if the service is slow to stop and start.

        .. versionadded:: 2017.7.9,2018.3.4

    with_deps (bool):
        If enabled, restart the given service and the services the current
        service depends on.

    with_parents (bool):
        If enabled and other running services depend on the service to be
        restarted, those other services will be restarted as well. If
        disabled, the restart will fail if other running services depend
        on the service to be restarted.

Returns:
    bool: ``True`` if successful, otherwise ``False``

CLI Example:

.. code-block:: bash

    salt '*' service.restart <service name>
14,026
def reset(self):
    self.activeCells = np.empty(0, dtype="uint32")
    self.activeDeltaSegments = np.empty(0, dtype="uint32")
    self.activeFeatureLocationSegments = np.empty(0, dtype="uint32")
Deactivate all cells.
14,027
def get_commands_from(self, args):
    commands = []
    for i in itertools.count(0):
        try:
            commands.append(getattr(args, self.arg_label_fmt % i))
        except AttributeError:
            break
    return commands
We hard-code the key names for each depth. This method scans each level and returns a list of the command arguments.
14,028
def create_df_file_with_query(self, query, output):
    # SQL pagination suffix and file mode reconstructed as plausible values;
    # the originals were lost in extraction. The formatted query is kept in
    # its own variable so the placeholders survive between iterations.
    chunk_size = 100000
    offset = 0
    data = defaultdict(lambda: defaultdict(list))
    with open(output, 'wb') as outfile:
        query = query.replace(';', '')
        query += ' LIMIT {chunk_size} OFFSET {offset}'
        while True:
            print(offset)
            chunk_query = query.format(
                chunk_size=chunk_size,
                offset=offset
            )
            df = pd.read_sql(chunk_query, self.engine)
            pickle.dump(df, outfile)
            offset += chunk_size
            if len(df) < chunk_size:
                break
Dumps the df in chunks to avoid crashes.
14,029
def _do_identity_role_list(args):
    # dict keys, format names, and messages reconstructed as plausible
    # values; the original string literals were lost in extraction.
    rest_client = RestClient(args.url)
    state = rest_client.list_state(subtree=IDENTITY_NAMESPACE + _ROLE_PREFIX)
    head = state['head']
    state_values = state['data']
    printable_roles = []
    for state_value in state_values:
        role_list = RoleList()
        decoded = b64decode(state_value['data'])
        role_list.ParseFromString(decoded)
        for role in role_list.roles:
            printable_roles.append(role)
    printable_roles.sort(key=lambda r: r.name)
    if args.format == 'default':
        tty_width = tty.width()
        for role in printable_roles:
            width = tty_width - len(role.name) - 3
            width = width if width > _MIN_PRINT_WIDTH else _MIN_PRINT_WIDTH
            value = (role.policy_name[:width] + '...'
                     if len(role.policy_name) > width
                     else role.policy_name)
            print('{}: {}'.format(role.name, value))
    elif args.format == 'csv':
        try:
            writer = csv.writer(sys.stdout, quoting=csv.QUOTE_ALL)
            writer.writerow(['KEY', 'VALUE'])
            for role in printable_roles:
                writer.writerow([role.name, role.policy_name])
        except csv.Error:
            raise CliException('Error writing CSV')
    elif args.format == 'json' or args.format == 'yaml':
        roles_snapshot = {
            'head': head,
            'roles': {role.name: role.policy_name
                      for role in printable_roles}
        }
        if args.format == 'json':
            print(json.dumps(roles_snapshot, indent=2, sort_keys=True))
        else:
            print(yaml.dump(roles_snapshot, default_flow_style=False)[0:-1])
    else:
        raise AssertionError('Unknown format {}'.format(args.format))
Lists the current on-chain identity roles.
14,030
def numdiff(fun, args):
    epsilon = 1e-8
    args = list(args)
    v0 = fun(*args)
    N = v0.shape[0]
    l_v = len(v0)
    dvs = []
    for i, a in enumerate(args):
        l_a = a.shape[1]
        dv = numpy.zeros((N, l_v, l_a))
        nargs = list(args)
        for j in range(l_a):
            xx = args[i].copy()
            xx[:, j] += epsilon
            nargs[i] = xx
            dv[:, :, j] = (fun(*nargs) - v0) / epsilon
        dvs.append(dv)
    return [v0] + dvs
Vectorized numerical differentiation
14,031
def locations(self, exists=True):
    result = []
    for config_files in self.config_paths:
        if not config_files:
            continue
        if os.path.isdir(config_files):
            config_files = [os.path.join(config_files, i)
                            for i in sorted(os.listdir(config_files))
                            if i.endswith('.conf')]  # suffix per the docstring
        else:
            config_files = [config_files]
        for config_file in config_files:
            if not exists or os.path.exists(config_file):
                config_file = os.path.abspath(config_file)
                if config_file in result:
                    result.remove(config_file)
                result.append(config_file)
    return result
Return the location of the config file(s). A given directory will be scanned for ``*.conf`` files, in alphabetical order. Any duplicates will be eliminated. If ``exists`` is True, only existing configuration locations are returned.
14,032
def get_videos_for_ids(
        edx_video_ids,
        sort_field=None,
        sort_dir=SortDirection.asc
):
    videos, __ = _get_videos_for_filter(
        {"edx_video_id__in": edx_video_ids},
        sort_field,
        sort_dir,
    )
    return videos
Returns an iterator of videos that match the given list of ids.

Args:
    edx_video_ids (list)
    sort_field (VideoSortField)
    sort_dir (SortDirection)

Returns:
    A generator expression that contains the videos found, sorted by the
    given field and direction, with ties broken by edx_video_id to ensure
    a total order.
14,033
def getmembers(obj, *predicates):
    if not predicates or predicates[0] is not None:
        # default predicate per the docstring: drop names starting with '_'
        predicates = (lambda key, value: not key.startswith('_'),) + predicates

    def predicate(key_value_tuple):
        key, value = key_value_tuple
        for p in predicates:
            if p is not None and not p(key, value):
                return False
        return True

    return filter(predicate, inspect.getmembers(obj))
Return all the members of an object as a list of `(key, value)` tuples,
sorted by name.

The optional list of predicates can be used to filter the members. The
default predicate drops members whose name starts with '_'. To disable it,
pass `None` as the first predicate.

:param obj: Object to list the members for
:param predicates: Functions to filter the members. If the first value is
    not None, a default predicate is added that filters private members
    out (name starts with '_')
:type predicates: tuple[Callable|None]
:returns: Sorted list of (name, value) tuples
:rtype: list[(str, *)]
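A small usage sketch (the Dummy class is made up for illustration):

    import inspect

    class Dummy:
        _private = 1
        value = 2
        def method(self):
            return self.value

    list(getmembers(Dummy, lambda k, v: not inspect.isroutine(v)))
    # -> [('value', 2)]: '_private' (and the dunders) are dropped by the
    # default predicate, 'method' by the extra predicate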
14,034
def quote_single_identifier(self, string):
    c = self.get_identifier_quote_character()
    return "%s%s%s" % (c, string.replace(c, c + c), c)
Quotes a single identifier (no dot chain separation). :param string: The identifier name to be quoted. :type string: str :return: The quoted identifier string. :rtype: str
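Assuming the quote character is a double quote, embedded quotes are escaped by doubling, as in SQL (a standalone sketch of the same rule):

    def quote(string, c='"'):
        return "%s%s%s" % (c, string.replace(c, c + c), c)

    assert quote('my "odd" column') == '"my ""odd"" column"'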
14,035
def run(self, command):
    # debug message templates reconstructed; originals lost in extraction
    boto.log.debug('running: %s on %s' % (command, self.server.instance_id))
    status = 0
    try:
        t = self._ssh_client.exec_command(command)
    except paramiko.SSHException:
        status = 1
    std_out = t[1].read()
    std_err = t[2].read()
    t[0].close()
    t[1].close()
    t[2].close()
    boto.log.debug('stdout: %s' % std_out)
    boto.log.debug('stderr: %s' % std_err)
    return (status, std_out, std_err)
Execute a command on the remote host.

Return a tuple containing an integer status and two strings, the first
containing stdout and the second containing stderr from the command.
14,036
def attach_network_interface(device_index, name=None, network_interface_id=None,
                             instance_name=None, instance_id=None,
                             region=None, key=None, keyid=None, profile=None):
    # result-dict keys and messages reconstructed as plausible values;
    # the original string literals were lost in extraction.
    if not salt.utils.data.exactly_one((name, network_interface_id)):
        raise SaltInvocationError(
            "Exactly one (but not both) of 'name' or 'network_interface_id' "
            "must be provided."
        )
    if not salt.utils.data.exactly_one((instance_name, instance_id)):
        raise SaltInvocationError(
            "Exactly one (but not both) of 'instance_name' or 'instance_id' "
            "must be provided."
        )
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    r = {}
    result = _get_network_interface(conn, name, network_interface_id)
    if 'error' in result:
        return result
    eni = result['result']
    try:
        info = _describe_network_interface(eni)
        network_interface_id = info['id']
    except KeyError:
        r['error'] = {'message': 'ID not found for this network interface.'}
        return r
    if instance_name:
        try:
            instance_id = get_id(name=instance_name, region=region, key=key,
                                 keyid=keyid, profile=profile)
        except boto.exception.BotoServerError as e:
            log.error(e)
            return False
    try:
        r['result'] = conn.attach_network_interface(
            network_interface_id, instance_id, device_index
        )
    except boto.exception.EC2ResponseError as e:
        r['error'] = __utils__['boto.get_error'](e)
    return r
Attach an Elastic Network Interface.

.. versionadded:: 2016.3.0

CLI Example:

.. code-block:: bash

    salt myminion boto_ec2.attach_network_interface my_eni instance_name=salt-master device_index=0
14,037
def get(self, *args, **kwargs):
    if args:
        kwargs = {'vlan_id': str(args[0])}  # key reconstructed from the docstring
    key, value = kwargs.popitem()
    for item in self:
        if 'vlan_id' in key and getattr(item, key, None) == value:
            return item
        for vlan in item.interfaces:
            if getattr(vlan, key, None) == value:
                return item
Get the sub interfaces for this VlanInterface.

>>> itf = engine.interface.get(3)
>>> list(itf.vlan_interface)
[Layer3PhysicalInterfaceVlan(name=VLAN 3.3), Layer3PhysicalInterfaceVlan(name=VLAN 3.5), Layer3PhysicalInterfaceVlan(name=VLAN 3.4)]

:param int args: args are translated to vlan_id=args[0]
:param kwargs: key value for sub interface
:rtype: VlanInterface or None
14,038
def flatten(repertoire, big_endian=False):
    if repertoire is None:
        return None
    # 'C'/'F' ravel orders reconstructed from the endianness semantics
    order = 'C' if big_endian else 'F'
    return repertoire.squeeze().ravel(order=order)
Flatten a repertoire, removing empty dimensions. By default, the flattened
repertoire is returned in little-endian order.

Args:
    repertoire (np.ndarray or None): A repertoire.

Keyword Args:
    big_endian (boolean): If ``True``, flatten the repertoire in
        big-endian order.

Returns:
    np.ndarray: The flattened repertoire.
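The endianness flag only changes the ravel order; a quick check with NumPy (the repertoire shape here is hypothetical):

    import numpy as np
    r = np.arange(4).reshape(2, 2, 1)  # trailing singleton dim is squeezed away
    flatten(r)                   # array([0, 2, 1, 3]) -- little-endian ('F') order
    flatten(r, big_endian=True)  # array([0, 1, 2, 3]) -- big-endian ('C') order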
14,039
def _init_rgb(self, r: int, g: int, b: int) -> None:
    if self.rgb_mode:
        self.rgb = (r, g, b)
        self.hexval = rgb2hex(r, g, b)
    else:
        self.rgb = hex2rgb(rgb2termhex(r, g, b))
        self.hexval = rgb2termhex(r, g, b)
    self.code = hex2term(self.hexval)
Initialize from red, green, blue args.
14,040
def save(self, filename=None):
    content = self.data.yaml()
    # honor the filename argument as the docstring describes, falling back
    # to the filename recorded at load time
    path = filename or ConfigDict.filename
    with open(Config.path_expand(path), 'w') as f:
        f.write(content)
Saves the configuration in the given filename; if it is None, the filename at load time is used. :param filename: the file name :type filename: string :return:
14,041
def delete_invalid_route(self):
    try:
        routing = self._engine.routing.get(self.interface_id)
        for route in routing:
            if route.invalid or route.to_delete:
                route.delete()
    except InterfaceNotFound:
        pass
Delete any invalid routes for this interface. An invalid route is a leftover that remains when an interface is changed to a different network. :return: None
14,042
def chgrp(path, group):
    # log/debug messages reconstructed as plausible values; the original
    # string literals were lost in extraction.
    func_name = '{0}.chgrp'.format(__virtualname__)
    if __opts__.get('fun', '') == func_name:
        log.info('The function %s should not be used on Windows systems; '
                 'see function docs for details.', func_name)
    log.debug('win_file.py %s Doing nothing for %s', func_name, path)
    return None
Change the group of a file.

Under Windows, this will do nothing. While a file in Windows does have a
'primary group', this rarely used attribute generally has no bearing on
permissions unless intentionally configured and is only used to support
Unix compatibility features (e.g. Services For Unix, NFS services).

Salt, therefore, remaps this function to do nothing while still being
compatible with Unix behavior. When managing Windows systems, this
function is superfluous and will generate an info level log entry if
used directly.

If you do actually want to set the 'primary group' of a file, use
``file.chpgrp``. To set group permissions, use ``file.set_perms``.

Args:
    path (str): The path to the file or directory
    group (str): The group (unused)

Returns:
    None

CLI Example:

.. code-block:: bash

    salt '*' file.chgrp c:\\temp\\test.txt administrators
14,043
def getrawpart(self, msgid, stream=sys.stdout):
    for hdr, part in self._get(msgid):
        pl = part.get_payload(decode=True)
        if pl is not None:
            print(pl, file=stream)
            break
Get the first part from the message and print it raw.
14,044
def stop(self):
    self._running.clear()
    with self._lock:
        if self._timer:
            self._timer.cancel()
            self._timer = None
Stop the Heartbeat Checker. :return:
14,045
def on(self):
    isOK = True
    try:
        if self.channelR is not None:
            sub.call(["gpio", "-g", "mode", "{}".format(self.channelR),
                      self.PIN_MODE_AUDIO])
    except:
        isOK = False
        print("Open audio right channel failed.")
    try:
        if self.channelL is not None:
            sub.call(["gpio", "-g", "mode", "{}".format(self.channelL),
                      self.PIN_MODE_AUDIO])
    except:
        isOK = False
        print("Open audio left channel failed.")
    return isOK
! \~english Open Audio output. Set pin mode to ALT0. @return a boolean value; True means the audio output was opened OK, otherwise it failed to open. \~chinese Open the audio output. Set the pin mode to ALT0. @return a boolean value; True means opening the audio output succeeded, otherwise it failed.
14,046
def records(self):
    if self._records is None:
        self._records = RecordList(self._version,
                                   account_sid=self._solution['account_sid'])
    return self._records
Access the records :returns: twilio.rest.api.v2010.account.usage.record.RecordList :rtype: twilio.rest.api.v2010.account.usage.record.RecordList
14,047
def _estimate_AICc(self, y, mu, weights=None):
    # statistics_ keys reconstructed from the docstring context
    edof = self.statistics_['edof']
    if self.statistics_['AIC'] is None:
        self.statistics_['AIC'] = self._estimate_AIC(y, mu, weights)
    return (self.statistics_['AIC'] +
            2 * (edof + 1) * (edof + 2) / (y.shape[0] - edof - 2))
Estimate the corrected Akaike Information Criterion (AICc).

Relies on the estimated degrees of freedom, which must be computed before.

Parameters
----------
y : array-like of shape (n_samples,)
    output data vector
mu : array-like of shape (n_samples,)
    expected value of the targets given the model and inputs
weights : array-like shape (n_samples,) or None, optional
    containing sample weights
    if None, defaults to array of ones

Returns
-------
AICc : float
    the estimated corrected Akaike Information Criterion
14,048
def serial(self, may_block=True):
    if not self.capabilities.have_serial_number():
        raise yubikey_base.YubiKeyVersionError(
            "Serial number unsupported in YubiKey %s" % self.version())
    return self._read_serial(may_block)
Get the YubiKey serial number (requires YubiKey 2.2).
14,049
def width(self, level):
    return self.x_at_y(level, reverse=True) - self.x_at_y(level)
Width at given level :param level: :return:
14,050
def split_signature(klass, signature):
    # single-character literals reconstructed from D-Bus signature syntax;
    # the originals were lost in extraction.
    if signature == '()':
        return []
    if not signature.startswith('('):
        return [signature]
    result = []
    head = ''
    tail = signature[1:-1]
    while tail:
        c = tail[0]
        head += c
        tail = tail[1:]
        if c in ('a', 'm'):  # container-prefix characters (reconstructed)
            continue
        if c in ('(', '{'):
            level = 1
            up = c
            if up == '(':
                down = ')'
            else:
                down = '}'
            while level > 0:
                c = tail[0]
                head += c
                tail = tail[1:]
                if c == up:
                    level += 1
                elif c == down:
                    level -= 1
        result.append(head)
        head = ''
    return result
Return a list of the element signatures of the topmost signature tuple. If the signature is not a tuple, it returns one element with the entire signature. If the signature is an empty tuple, the result is []. This is useful for e. g. iterating over method parameters which are passed as a single Variant.
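For example, with the container-prefix handling reconstructed above, a tuple signature splits into one element per top-level type (the first argument is unused here, so None is passed in this sketch):

    split_signature(None, '(sa{sv}as)')  # -> ['s', 'a{sv}', 'as']
    split_signature(None, 's')           # -> ['s']
    split_signature(None, '()')          # -> []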
14,051
def lm_deltat(freqs, damping_times, modes):
    # the '%d%d%d' key template is reconstructed; the original literal was
    # lost in extraction. min_dt is a module-level constant.
    dt = {}
    for lmn in modes:
        l, m, nmodes = int(lmn[0]), int(lmn[1]), int(lmn[2])
        for n in range(nmodes):
            dt['%d%d%d' % (l, m, n)] = 1. / qnm_freq_decay(
                freqs['%d%d%d' % (l, m, n)],
                damping_times['%d%d%d' % (l, m, n)], 1. / 1000)
    delta_t = min(dt.values())
    if delta_t < min_dt:
        delta_t = min_dt
    return delta_t
Return the minimum delta_t of all the modes given, with delta_t given by the inverse of the frequency at which the amplitude of the ringdown falls to 1/1000 of the peak amplitude.
14,052
def clone(self, **kwargs):
    parent = self.parent()
    return self._client._create_clone(parent, self, **kwargs)
Clone a part.

.. versionadded:: 2.3

:param kwargs: (optional) additional keyword=value arguments
:type kwargs: dict
:return: cloned :class:`models.Part`
:raises APIError: if the `Part` could not be cloned

Example
-------
>>> bike = client.model('Bike')
>>> bike2 = bike.clone()
14,053
def register_event(self, event_type, pattern, handler):
    if event_type not in self._supported_events:
        raise ValueError("Unsupported event type {}".format(event_type))
    if event_type != "CHANGE" and not isinstance(pattern, Pattern) and not isinstance(pattern, basestring):
        raise ValueError("Expected pattern to be a Pattern or string")
    if event_type == "CHANGE" and not (len(pattern) == 2 and
                                       isinstance(pattern[0], int) and
                                       isinstance(pattern[1], numpy.ndarray)):
        raise ValueError("For \"CHANGE\" events, ``pattern`` should be a tuple "
                         "of ``min_changed_pixels`` and the base screen state.")
    event = {
        "pattern": pattern,
        "event_type": event_type,
        "count": 0,
        "handler": handler,
        "name": uuid.uuid4(),
        "active": True
    }
    self._events[event["name"]] = event
    return event["name"]
When ``event_type`` is observed for ``pattern``, triggers ``handler``. For "CHANGE" events, ``pattern`` should be a tuple of ``min_changed_pixels`` and the base screen state.
14,054
def add_relationship(self, rel_uri, obj):
    if isinstance(rel_uri, URIRef):
        rel_uri = force_text(rel_uri)
    obj_is_literal = True
    if isinstance(obj, DigitalObject):
        obj = obj.uri
        obj_is_literal = False
    elif (isinstance(obj, str) or isinstance(obj, six.string_types)) \
            and obj.startswith('info:fedora/'):  # prefix per the docstring
        obj_is_literal = False
    self._ds_list = None
    return self.api.addRelationship(self.pid, self.uri, rel_uri, obj, obj_is_literal)
Add a new relationship to the RELS-EXT for this object. Calls
:meth:`API_M.addRelationship`.

Example usage::

    isMemberOfCollection = 'info:fedora/fedora-system:def/relations-external#isMemberOfCollection'
    collection_uri = 'info:fedora/foo:456'
    object.add_relationship(isMemberOfCollection, collection_uri)

:param rel_uri: URI for the new relationship
:param obj: related object; can be :class:`DigitalObject` or string; if
    string begins with info:fedora/ it will be treated as a resource,
    otherwise it will be treated as a literal
:rtype: boolean
14,055
def observed_data_to_xarray(self):
    posterior_model = self.posterior_model
    if self.dims is None:
        dims = {}
    else:
        dims = self.dims
    observed_names = self.observed_data
    if isinstance(observed_names, str):
        observed_names = [observed_names]
    observed_data = OrderedDict()
    for key in observed_names:
        vals = np.atleast_1d(posterior_model.data[key])
        val_dims = dims.get(key)
        val_dims, coords = generate_dims_coords(
            vals.shape, key, dims=val_dims, coords=self.coords
        )
        observed_data[key] = xr.DataArray(vals, dims=val_dims, coords=coords)
    return xr.Dataset(data_vars=observed_data, attrs=make_attrs(library=self.stan))
Convert observed data to xarray.
14,056
def init(self, context):
    self.cache = Cache()
    self.current_page_context = context
    self.current_request = context.get('request', None) if context else None
    self.current_lang = get_language()
    self._current_app_is_admin = None
    self._current_user_permissions = _UNSET
    self._items_urls = {}
    self._current_items = {}
Initializes sitetree to handle new request. :param Context|None context:
14,057
def _Close(self):
    self._vslvm_volume_group = None
    self._vslvm_handle.close()
    self._vslvm_handle = None
    self._file_object.close()
    self._file_object = None
Closes the file system object. Raises: IOError: if the close failed.
14,058
def init(options=None, ini_paths=None, argv=None, strict=False,
         **parser_kwargs):
    global SINGLETON
    SINGLETON = Config(
        options=options,
        ini_paths=ini_paths,
        argv=argv,
        **parser_kwargs)
    SINGLETON.parse(argv, strict=strict)
    return SINGLETON
Initialize singleton config and read/parse configuration. :keyword bool strict: when true, will error out on invalid arguments (default behavior is to ignore them) :returns: the loaded configuration.
14,059
def preview_email_marketing_campaign(self, email_marketing_campaign):
    # URL path component and response keys reconstructed as plausible
    # values; the original string literals were lost in extraction.
    url = self.api.join('/'.join([
        self.EMAIL_MARKETING_CAMPAIGN_URL,
        str(email_marketing_campaign.constant_contact_id),
        'preview',
    ]))
    response = url.get()
    self.handle_response_status(response)
    return (response.json()['preview_html_content'],
            response.json()['preview_text_content'])
Returns HTML and text previews of an EmailMarketingCampaign.
14,060
def get(self, key, def_val=None):
    assert isinstance(key, basestring)
    return dict.get(self, key.lower(), def_val)
Return the value for ``key``, looked up by its lowercase form; ``def_val`` if missing.
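Usage sketch, assuming the method lives on a dict subclass whose keys are stored lowercase (the class name here is hypothetical):

    h = CaseInsensitiveDict()
    h['content-type'] = 'text/html'   # assumes keys are lowercased on insert
    h.get('Content-Type')             # -> 'text/html'
    h.get('X-Missing', 'n/a')         # -> 'n/a'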
14,061
def dump(result):
    # the template string and dict keys are reconstructed as plausible
    # values; the originals were lost in extraction.
    if isinstance(result, dict):
        statuses = result['statuses']
    else:
        statuses = result
    status_str_list = []
    for status in statuses:
        status_str_list.append(textwrap.dedent(u'''
            {screen_name}: {text}
        ''').strip().format(
            screen_name=status['user']['screen_name'],
            text=status['text']))
    return u'\n'.join(status_str_list)
Dump result into a string, useful for debugging.
14,062
def setup(docker_mount=None, force=False):
    # shell commands and file names reconstructed as plausible values; the
    # original string literals were lost in extraction.
    if not is_ubuntu() and not is_boot2docker():
        raise Exception('Only Ubuntu or boot2docker hosts are supported')
    if os.path.exists('dot_dockercfg') and not fabric.contrib.files.exists('~/.dockercfg'):
        put('dot_dockercfg', '~/.dockercfg')
    if not fabric.contrib.files.exists('~/.ssh/id_rsa'):
        fab.run('ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa')
    if docker_is_installed() and not force:
        return
    for attempt in range(3):
        sudo('apt-get update')
        sudo('apt-get install -y curl sshpass')
        with settings(warn_only=True):
            sudo('apt-get remove -y docker.io')
        failed = sudo('curl -sSL https://get.docker.com/ | sh').failed
        if not failed:
            break
    if docker_mount:
        create_docker_mount(docker_mount)
Prepare a vanilla server by installing docker, curl, and sshpass.

If a file called ``dot_dockercfg`` exists in the current working directory,
it is uploaded as ``~/.dockercfg``.

Args:
    * docker_mount=None: Partition that will be mounted as /var/lib/docker
14,063
def new_dxworkflow(title=None, summary=None, description=None,
                   output_folder=None, init_from=None, **kwargs):
    dxworkflow = DXWorkflow()
    dxworkflow.new(title=title, summary=summary, description=description,
                   output_folder=output_folder, init_from=init_from, **kwargs)
    return dxworkflow
:param title: Workflow title (optional)
:type title: string
:param summary: Workflow summary (optional)
:type summary: string
:param description: Workflow description (optional)
:type description: string
:param output_folder: Default output folder of the workflow (optional)
:type output_folder: string
:param init_from: Another analysis workflow object handler or an analysis
    (string or handler) from which to initialize the metadata (optional)
:type init_from: :class:`~dxpy.bindings.dxworkflow.DXWorkflow`,
    :class:`~dxpy.bindings.dxanalysis.DXAnalysis`, or string (for analysis
    IDs only)
:rtype: :class:`DXWorkflow`

Additional optional parameters not listed: all those under
:func:`dxpy.bindings.DXDataObject.new`, except `details`.

Creates a new remote workflow object with project set to *project* and
returns the appropriate handler.

Example:

    r = dxpy.new_dxworkflow(title="My Workflow", description="This workflow contains...")

Note that this function is shorthand for::

    dxworkflow = DXWorkflow()
    dxworkflow.new(**kwargs)
14,064
def telnet_config(self, status):
    # CGI endpoint reconstructed as a plausible configManager call; the
    # original literal was lost in extraction.
    ret = self.command(
        'configManager.cgi?action=setConfig&Telnet.Enable={0}'.format(status)
    )
    return ret.content.decode()
status:
    false - Telnet is disabled
    true - Telnet is enabled
14,065
def incr(self, name, amount=1):
    amount = get_integer('amount', amount)
    return self.execute_command('incr', name, amount)
Increase the value at key ``name`` by ``amount``. If no key exists, the
value will be initialized as ``amount``. Like **Redis.INCR**.

:param string name: the key name
:param int amount: increments
:return: the integer value at key ``name``
:rtype: int

>>> ssdb.incr('set_count', 3)
13
>>> ssdb.incr('set_count', 1)
14
>>> ssdb.incr('set_count', -2)
12
>>> ssdb.incr('temp_count', 42)
42
14,066
def ping(dest_ip=None, **kwargs):
    # dict keys and messages reconstructed as plausible values; the original
    # string literals were lost in extraction.
    conn = __proxy__['junos.conn']()
    ret = {}
    if dest_ip is None:
        ret['message'] = 'Required parameter dest_ip is missing.'
        ret['out'] = False
        return ret
    op = {'host': dest_ip}
    if '__pub_arg' in kwargs:
        if kwargs['__pub_arg']:
            if isinstance(kwargs['__pub_arg'][-1], dict):
                op.update(kwargs['__pub_arg'][-1])
    else:
        op.update(kwargs)
    op['count'] = six.text_type(op.pop('count', 5))
    if 'ttl' in op:
        op['ttl'] = six.text_type(op['ttl'])
    ret['out'] = True
    try:
        ret['message'] = jxmlease.parse(etree.tostring(conn.rpc.ping(**op)))
    except Exception as exception:
        ret['message'] = 'Execution failed due to "{0}"'.format(exception)
        ret['out'] = False
    return ret
Send a ping RPC to a device.

dest_ip
    The IP of the device to ping

dev_timeout : 30
    The NETCONF RPC timeout (in seconds)

rapid : False
    When ``True``, executes ping at 100pps instead of 1pps

ttl
    Maximum number of IP routers (IP hops) allowed between source and
    destination

routing_instance
    Name of the routing instance to use to send the ping

interface
    Interface used to send traffic

count : 5
    Number of packets to send

CLI Examples:

.. code-block:: bash

    salt 'device_name' junos.ping '8.8.8.8' count=5
    salt 'device_name' junos.ping '8.8.8.8' ttl=1 rapid=True
14,067
def from_custom_template(cls, searchpath, name):
    loader = ChoiceLoader([
        FileSystemLoader(searchpath),
        cls.loader,
    ])

    class MyStyler(cls):
        env = Environment(loader=loader)
        template = env.get_template(name)

    return MyStyler
Factory function for creating a subclass of ``Styler`` with a custom
template and Jinja environment.

Parameters
----------
searchpath : str or list
    Path or paths of directories containing the templates
name : str
    Name of your custom template to use for rendering

Returns
-------
MyStyler : subclass of Styler
    Has the correct ``env`` and ``template`` class attributes set.
14,068
def load(self, config_data):
    if not isinstance(config_data, dict):
        raise ConfigurationError(
            "Configuration data is %s instead of dict." % (
                type(config_data),
            )
        )
    self.load_addon_packages(config_data)
    self.load_sanitizers(config_data)
Loads sanitizers according to rulesets defined in the given, already parsed configuration file. :param config_data: Already parsed configuration data, as dictionary. :type config_data: dict[str,any]
14,069
def next(self):
    if not self._filestream:
        if not self._zip:
            self._zip = zipfile.ZipFile(self._reader(self._blob_key))
            self._entries = self._zip.infolist()[self._start_file_index:
                                                 self._end_file_index]
            self._entries.reverse()
        if not self._entries:
            raise StopIteration()
        entry = self._entries.pop()
        value = self._zip.read(entry.filename)
        self._filestream = StringIO.StringIO(value)
        if self._initial_offset:
            self._filestream.seek(self._initial_offset)
            self._filestream.readline()
    start_position = self._filestream.tell()
    line = self._filestream.readline()
    if not line:
        self._filestream.close()
        self._filestream = None
        self._start_file_index += 1
        self._initial_offset = 0
        return self.next()
    return ((self._blob_key, self._start_file_index, start_position),
            line.rstrip("\n"))
Returns the next line from this input reader as (lineinfo, line) tuple.

Returns:
    The next input from this input reader, in the form of a 2-tuple. The
    first element of the tuple describes the source; it is itself a tuple
    (blobkey, filenumber, byteoffset). The second element of the tuple is
    the line found at that offset.
14,070
def read_ttl(path):
    warnings.warn("Document.read_ttl() is deprecated and will be removed in near future. Use read() instead",
                  DeprecationWarning)
    doc_path = os.path.dirname(path)
    doc_name = os.path.basename(path)
    return Document(doc_name, doc_path).read()
Helper function to read Document in TTL-TXT format (i.e. ${docname}_*.txt) E.g. Document.read_ttl('~/data/myfile') is the same as Document('myfile', '~/data/').read()
14,071
def create(self, fname, lname, group, type, group_api):
    self.__username(fname, lname)
    self.client.add(
        self.__distinguished_name(type, fname=fname, lname=lname),
        API.__object_class(),
        self.__ldap_attr(fname, lname, type, group, group_api))
Create an LDAP User.
14,072
def get_environ(self, sock):
    cipher = sock.cipher()
    ssl_environ = {
        "wsgi.url_scheme": "https",
        "HTTPS": "on",
        # key names reconstructed from common WSGI SSL conventions
        'SSL_PROTOCOL': cipher[1],
        'SSL_CIPHER': cipher[0]
    }
    return ssl_environ
Create WSGI environ entries to be merged into each request.
14,073
def process_results(self):
    for result in self._results:
        provider = result.provider
        self.providers.append(provider)
        if result.error:
            self.failed_providers.append(provider)
            continue
        if not result.response:
            continue
        self.blacklisted = True
        provider_categories = provider.process_response(result.response)
        assert provider_categories.issubset(DNSBL_CATEGORIES)
        self.categories = self.categories.union(provider_categories)
        self.detected_by[provider.host] = list(provider_categories)
Process the results returned by the providers.
14,074
def lpc(blk, order=None):
    phi = lag_matrix(blk, order)
    order = len(phi) - 1

    def inner(a, b):  # inner product for filters based on their coefficients
        return sum(phi[i][j] * ai * bj
                   for i, ai in enumerate(a.numlist)
                   for j, bj in enumerate(b.numlist))

    A = ZFilter(1)
    B = [z ** -1]
    beta = [inner(B[0], B[0])]
    m = 1
    while True:
        try:
            k = -inner(A, z ** -m) / beta[m - 1]
        except ZeroDivisionError:
            raise ZeroDivisionError("Can't find next coefficient")
        if k >= 1 or k <= -1:
            raise ValueError("Unstable filter")
        A += k * B[m - 1]
        if m >= order:
            A.error = inner(A, A)
            return A
        gamma = [inner(z ** -(m + 1), B[q]) / beta[q] for q in xrange(m)]
        B.append(z ** -(m + 1) - sum(gamma[q] * B[q] for q in xrange(m)))
        beta.append(inner(B[m], B[m]))
        m += 1
Find the Linear Predictive Coding (LPC) coefficients as a ZFilter object, the analysis whitening filter. This implementation is based on the covariance method, assuming a zero-mean stochastic process, finding the coefficients iteratively and greedily like the lattice implementation in the Levinson-Durbin algorithm, although the lag matrix found from the given block doesn't have to be Toeplitz. Slow, but this strategy doesn't need NumPy.
14,075
def transpose(self):
    kraus_l, kraus_r = self._data
    kraus_l = [k.T for k in kraus_l]
    if kraus_r is not None:
        kraus_r = [k.T for k in kraus_r]
    return Kraus((kraus_l, kraus_r),
                 input_dims=self.output_dims(),
                 output_dims=self.input_dims())
Return the transpose of the QuantumChannel.
14,076
def arc(pRA, pDecl, sRA, sDecl, mcRA, lat):
    pDArc, pNArc = utils.dnarcs(pDecl, lat)
    sDArc, sNArc = utils.dnarcs(sDecl, lat)
    mdRA = mcRA
    sArc = sDArc
    pArc = pDArc
    if not utils.isAboveHorizon(sRA, sDecl, mcRA, lat):
        mdRA = angle.norm(mcRA + 180)
        sArc = sNArc
        pArc = pNArc
    pDist = angle.closestdistance(mdRA, pRA)
    sDist = angle.closestdistance(mdRA, sRA)
    if pDist < sDist:
        pDist += 360
    sPropDist = sDist / (sArc / 2.0)
    pPropDist = pDist / (pArc / 2.0)
    return (pPropDist - sPropDist) * (pArc / 2.0)
Returns the arc of direction between a Promissor and Significator. It uses the generic proportional semi-arc method.
14,077
def put(self, request, bot_id, id, format=None):
    return super(MessengerBotDetail, self).put(request, bot_id, id, format)
Update existing MessengerBot
---
serializer: MessengerBotUpdateSerializer
responseMessages:
    - code: 401
      message: Not authenticated
    - code: 400
      message: Not valid request
14,078
def editText(self, y, x, w, record=True, **kwargs):
    # hook names and the 'display' option reconstructed from the docstring
    v = self.callHook('preedit') if record else None
    if not v or v[0] is None:
        with EnableCursor():
            v = editText(self.scr, y, x, w, **kwargs)
    else:
        v = v[0]
    if kwargs.get('display', True):
        status('"%s"' % v)
    self.callHook('postedit', v) if record else None
    return v
Wrap global editText with `preedit` and `postedit` hooks.
14,079
def DragDrop(x1: int, y1: int, x2: int, y2: int, moveSpeed: float = 1,
             waitTime: float = OPERATION_WAIT_TIME) -> None:
    PressMouse(x1, y1, 0.05)
    MoveTo(x2, y2, moveSpeed, 0.05)
    ReleaseMouse(waitTime)
Simulate mouse left button drag from point x1, y1 drop to point x2, y2.

x1: int.
y1: int.
x2: int.
y2: int.
moveSpeed: float, 1 normal speed, < 1 move slower, > 1 move faster.
waitTime: float.
14,080
def ordered_storage(config, name=None):
    tp = config['type']
    if tp == 'dict':
        return DictListStorage(config)
    if tp == 'redis':
        return RedisListStorage(config, name=name)
Return ordered storage system based on the specified config.

The canonical example of such a storage container is ``defaultdict(list)``.
Thus, the return value of this method contains keys and values. The values
are ordered lists with the last added item at the end.

Args:
    config (dict): Defines the configurations for the storage. For
        in-memory storage, the config ``{'type': 'dict'}`` will suffice.
        For Redis storage, the type should be ``'redis'`` and the
        configurations for the Redis database should be supplied under the
        key ``'redis'``. These parameters should be in a form suitable for
        `redis.Redis`. The parameters may alternatively contain references
        to environment variables, in which case literal configuration
        values should be replaced by dicts of the form::

            {'env': 'REDIS_HOSTNAME', 'default': 'localhost'}

        For a full example, see :ref:`minhash_lsh_at_scale`
    name (bytes, optional): A reference name for this storage container.
        For dict-type containers, this is ignored. For Redis containers,
        this name is used to prefix keys pertaining to this storage
        container within the database.
14,081
def compact(self):
    self.docFactory = webtheme.getLoader(self.compactFragmentName)
    for param in self.parameters:
        param.compact()
Switch to the compact variant of the live form template. By default, this will simply create a loader for the C{self.compactFragmentName} template and compact all of this form's parameters.
14,082
def get_memory_info(self):
    rss, vms = _psutil_osx.get_process_memory_info(self.pid)[:2]
    return nt_meminfo(rss, vms)
Return a tuple with the process' RSS and VMS size.
14,083
def delete(self, name):
    if name in self._cache:
        del self._cache[name]
        self.writeCache()
        return True
    return False
Deletes the named entry in the cache. :param name: the name. :return: true if it is deleted.
14,084
def update(self, portfolio, date, perfs=None):
    self.portfolio = portfolio
    self.perfs = perfs
    self.date = date
Actualizes the portfolio universe with the algo state.
14,085
def remove_item(self, item_id, assessment_part_id):
    # the namespace string and map key are reconstructed as plausible
    # values; the original literals were lost in extraction.
    if (not isinstance(assessment_part_id, ABCId) and
            assessment_part_id.get_identifier_namespace() != 'assessment_authoring.AssessmentPart'):
        raise errors.InvalidArgument()
    assessment_part_map, collection = self._get_assessment_part_collection(assessment_part_id)
    try:
        assessment_part_map['itemIds'].remove(str(item_id))
    except (KeyError, ValueError):
        raise errors.NotFound()
    collection.save(assessment_part_map)
Removes an ``Item`` from an ``AssessmentPartId``.

arg:    item_id (osid.id.Id): ``Id`` of the ``Item``
arg:    assessment_part_id (osid.id.Id): ``Id`` of the ``AssessmentPartId``
raise:  NotFound - ``item_id`` not found in ``assessment_part_id``
raise:  NullArgument - ``item_id`` or ``assessment_part_id`` is ``null``
raise:  OperationFailed - unable to complete request
raise:  PermissionDenied - authorization failure

*compliance: mandatory -- This method must be implemented.*
14,086
def endpoint_from_model_data(self, model_s3_location, deployment_image,
                             initial_instance_count, instance_type,
                             name=None, role=None, wait=True,
                             model_environment_vars=None,
                             model_vpc_config=None, accelerator_type=None):
    # error message reconstructed; the original literal was lost in extraction
    model_environment_vars = model_environment_vars or {}
    name = name or name_from_image(deployment_image)
    model_vpc_config = vpc_utils.sanitize(model_vpc_config)
    if _deployment_entity_exists(
            lambda: self.sagemaker_client.describe_endpoint(EndpointName=name)):
        raise ValueError(
            'Endpoint with name "{}" already exists; please pick a different name.'.format(name))
    if not _deployment_entity_exists(
            lambda: self.sagemaker_client.describe_model(ModelName=name)):
        primary_container = container_def(image=deployment_image,
                                          model_data_url=model_s3_location,
                                          env=model_environment_vars)
        self.create_model(name=name, role=role,
                          container_defs=primary_container,
                          vpc_config=model_vpc_config)
    if not _deployment_entity_exists(
            lambda: self.sagemaker_client.describe_endpoint_config(EndpointConfigName=name)):
        self.create_endpoint_config(name=name, model_name=name,
                                    initial_instance_count=initial_instance_count,
                                    instance_type=instance_type,
                                    accelerator_type=accelerator_type)
    self.create_endpoint(endpoint_name=name, config_name=name, wait=wait)
    return name
Create and deploy to an ``Endpoint`` using existing model data stored in S3.

Args:
    model_s3_location (str): S3 URI of the model artifacts to use for the
        endpoint.
    deployment_image (str): The Docker image which defines the runtime code
        to be used as the entry point for accepting prediction requests.
    initial_instance_count (int): Minimum number of EC2 instances to
        launch. The actual number of active instances for an endpoint at
        any given time varies due to autoscaling.
    instance_type (str): Type of EC2 instance to deploy to an endpoint for
        prediction, e.g. 'ml.c4.xlarge'.
    name (str): Name of the ``Endpoint`` to create. If not specified, uses
        a name generated by combining the image name with a timestamp.
    role (str): An AWS IAM role (either name or full ARN). The Amazon
        SageMaker training jobs and APIs that create Amazon SageMaker
        endpoints use this role to access training data and model
        artifacts. You must grant sufficient permissions to this role.
    wait (bool): Whether to wait for the endpoint deployment to complete
        before returning (default: True).
    model_environment_vars (dict[str, str]): Environment variables to set
        on the model container (default: None).
    model_vpc_config (dict[str, list[str]]): The VpcConfig set on the model
        (default: None)
        * 'Subnets' (list[str]): List of subnet ids.
        * 'SecurityGroupIds' (list[str]): List of security group ids.
    accelerator_type (str): Type of Elastic Inference accelerator to attach
        to the instance. For example, 'ml.eia1.medium'. For more
        information: https://docs.aws.amazon.com/sagemaker/latest/dg/ei.html

Returns:
    str: Name of the ``Endpoint`` that is created.
14,087
def check_slice_perms(self, slice_id):
    form_data, slc = get_form_data(slice_id, use_slice_data=True)
    datasource_type = slc.datasource.type
    datasource_id = slc.datasource.id
    viz_obj = get_viz(
        datasource_type=datasource_type,
        datasource_id=datasource_id,
        form_data=form_data,
        force=False,
    )
    security_manager.assert_datasource_permission(viz_obj.datasource)
Check if user can access a cached response from slice_json. This function takes `self` since it must have the same signature as the decorated method.
14,088
def expand_config(d, dirs):
    # context keys per the docstring's list of XDG variables
    context = {
        'user_cache_dir': dirs.user_cache_dir,
        'user_config_dir': dirs.user_config_dir,
        'user_data_dir': dirs.user_data_dir,
        'user_log_dir': dirs.user_log_dir,
        'site_config_dir': dirs.site_config_dir,
        'site_data_dir': dirs.site_data_dir,
    }
    for k, v in d.items():
        if isinstance(v, dict):
            expand_config(v, dirs)
        if isinstance(v, string_types):
            d[k] = os.path.expanduser(os.path.expandvars(d[k]))
            d[k] = d[k].format(**context)
Expand configuration XDG variables, environment variables, and tildes.

Parameters
----------
d : dict
    config information
dirs : appdirs.AppDirs
    XDG application mapping

Notes
-----
*Environment variables* are expanded via :py:func:`os.path.expandvars`.
So ``${PWD}`` would be replaced by the current PWD in the shell,
``${USER}`` would be the user running the app.

*XDG variables* are expanded via :py:meth:`str.format`. These do not have
a dollar sign. They are:

- ``{user_cache_dir}``
- ``{user_config_dir}``
- ``{user_data_dir}``
- ``{user_log_dir}``
- ``{site_config_dir}``
- ``{site_data_dir}``

See Also
--------
os.path.expanduser, os.path.expandvars : Standard library functions for
    expanding variables. Same concept, used inside.
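For example, with a real AppDirs instance (the paths shown are illustrative):

    import appdirs
    dirs = appdirs.AppDirs('myapp')
    cfg = {'cache': '{user_cache_dir}/objects', 'home': '${HOME}/data'}
    expand_config(cfg, dirs)
    # cfg['cache'] -> e.g. '/home/me/.cache/myapp/objects'
    # cfg['home']  -> e.g. '/home/me/data'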
14,089
def next_token(self, tok, include_extra=False):
    i = tok.index + 1
    if not include_extra:
        while is_non_coding_token(self._tokens[i].type):
            i += 1
    return self._tokens[i]
Returns the next token after the given one. If include_extra is True, includes non-coding tokens from the tokenize module, such as NL and COMMENT.
14,090
def write_int8(self, value, little_endian=True):
    if little_endian:
        endian = "<"
    else:
        endian = ">"
    return self.pack('%sb' % endian, value)
Pack the value as a signed byte and write 1 byte to the stream.

Args:
    value:
    little_endian (bool): specify the endianness. (Default) Little endian.

Returns:
    int: the number of bytes written.
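Equivalently, the underlying struct format is 'b' with an endianness prefix (byte order is moot for a single byte):

    import struct
    struct.pack('<b', -5)  # b'\xfb'
    struct.pack('>b', -5)  # b'\xfb' -- same bytes; one byte has no byte order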
14,091
def rewrite_elife_authors_json(json_content, doi):
    article_doi = elifetools.utils.convert_testing_doi(doi)
    if article_doi == "10.7554/eLife.06956":
        for i, ref in enumerate(json_content):
            if ref.get("orcid") and ref.get("orcid") == "0000-0001-6798-0064":
                json_content[i]["affiliations"][0]["name"] = ["Cambridge"]
    if article_doi == "10.7554/eLife.09376":
        for i, ref in enumerate(json_content):
            if ref.get("orcid") and ref.get("orcid") == "000-0001-7224-925X":
                json_content[i]["orcid"] = "0000-0001-7224-925X"
    if article_doi == "10.7554/eLife.00102":
        for i, ref in enumerate(json_content):
            if not ref.get("competingInterests"):
                if ref["name"]["index"].startswith("Chen,"):
                    json_content[i]["competingInterests"] = "ZJC: Reviewing Editor, <i>eLife</i>"
                elif ref["name"]["index"].startswith("Li,"):
                    json_content[i]["competingInterests"] = "The remaining authors have no competing interests to declare."
    if article_doi == "10.7554/eLife.00270":
        for i, ref in enumerate(json_content):
            if not ref.get("competingInterests"):
                if ref["name"]["index"].startswith("Patterson,"):
                    json_content[i]["competingInterests"] = "MP: Managing Executive Editor, <i>eLife</i>"
    # Default competing-interests statements for specific articles
    no_conflict = "The authors declare that no competing interests exist."
    others_no_conflict = "The other authors declare that no competing interests exist."
    elife_author_competing_interests = {}
    for suffix in ["00133", "00190", "00288", "00362", "00633", "02935",
                   "04126", "04878", "06011", "06416", "08421", "08494",
                   "08924", "09102", "09460", "09591", "09600", "10113",
                   "10230", "10453", "10635", "11407", "11473", "11750",
                   "12217", "12620", "12724", "13023", "13732", "14116",
                   "14258", "14694", "16011", "16940", "17023", "17092",
                   "17218", "17267", "17523", "17556", "17769", "17834",
                   "18101", "18515", "18544", "18648", "19071", "19334",
                   "19510", "20183", "20242", "20375", "21454", "22187"]:
        elife_author_competing_interests["10.7554/eLife." + suffix] = no_conflict
    for suffix in ["00592", "02725", "05322", "07383", "08648", "15085",
                   "15312", "20797", "21491"]:
        elife_author_competing_interests["10.7554/eLife." + suffix] = others_no_conflict
    elife_author_competing_interests["10.7554/eLife.00230"] = (
        "The authors have declared that no competing interests exist")
    elife_author_competing_interests["10.7554/eLife.00352"] = (
        "The author declares that no competing interest exist")
    elife_author_competing_interests["10.7554/eLife.00475"] = (
        "The remaining authors have no competing interests to declare.")
    elife_author_competing_interests["10.7554/eLife.09083"] = (
        "The other authors declare that no competing interests exists.")
    if article_doi in elife_author_competing_interests:
        for i, ref in enumerate(json_content):
            if not ref.get("competingInterests"):
                json_content[i]["competingInterests"] = elife_author_competing_interests[article_doi]
    # Normalize "other/remaining author(s)" style statements
    for i, ref in enumerate(json_content):
        if (ref.get("competingInterests") and (
                ref.get("competingInterests").startswith("The other author") or
                ref.get("competingInterests").startswith("The others author") or
                ref.get("competingInterests").startswith("The remaining authors") or
                ref.get("competingInterests").startswith("The remaining have declared"))):
            json_content[i]["competingInterests"] = "No competing interests declared."
    return json_content
Rewrite the eLife authors JSON for specific articles: fix known-bad ORCID and affiliation values, and fill in missing competingInterests statements keyed by article DOI.
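A minimal sketch of the fill-in behaviour for one of the hard-coded DOIs. The author entries below are invented for illustration, and it assumes elifetools is importable so convert_testing_doi resolves the DOI:

# Hypothetical author list; the second entry lacks a competingInterests key,
# so it picks up the DOI-keyed default statement.
authors = [
    {"name": {"index": "Doe, Jane"}, "competingInterests": "JD: consults for X."},
    {"name": {"index": "Roe, Richard"}},
]
fixed = rewrite_elife_authors_json(authors, "10.7554/eLife.00133")
assert fixed[0]["competingInterests"] == "JD: consults for X."
assert fixed[1]["competingInterests"] == \
    "The authors declare that no competing interests exist."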
14,092
def remove_server_data(server_id):
    logger.debug("Removing server from serverdata")
    data = datatools.get_data()
    if server_id in data["discord"]["servers"]:
        data["discord"]["servers"].pop(server_id)
    datatools.write_data(data)
Remove a server from the server data.

Args:
    server_id (int): The server to remove from the server data
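The datatools module is project-specific; the nested shape it persists is inferred from the pops above. A self-contained illustration of the same mutation:

# Illustration only: what remove_server_data does to the structure that
# datatools.get_data() is assumed to return.
data = {"discord": {"servers": {"1234": {"prefix": "!"}, "5678": {"prefix": "?"}}}}
server_id = "1234"
if server_id in data["discord"]["servers"]:
    data["discord"]["servers"].pop(server_id)
assert "1234" not in data["discord"]["servers"]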
14,093
def force_invalidate(self, vts):
    for vt in vts.versioned_targets:
        self._invalidator.force_invalidate(vt.cache_key)
        vt.valid = False
    self._invalidator.force_invalidate(vts.cache_key)
    vts.valid = False
Force invalidation of a VersionedTargetSet.
14,094
def cmd_link(self, args):
    if len(args) < 1:
        self.show_link()
    elif args[0] == "list":
        self.cmd_link_list()
    elif args[0] == "add":
        if len(args) != 2:
            print("Usage: link add LINK")
            return
        self.cmd_link_add(args[1:])
    elif args[0] == "ports":
        self.cmd_link_ports()
    elif args[0] == "remove":
        if len(args) != 2:
            print("Usage: link remove LINK")
            return
        self.cmd_link_remove(args[1:])
    else:
        # "ports" added to the usage string; the handler above supports it
        print("usage: link <list|add|ports|remove>")
handle link commands
14,095
def name_variants(self):
    out = []
    # Literal arguments were stripped in extraction; the field names and the
    # "$" / "@doc-count" keys below are reconstructed from the shape of the
    # Scopus JSON response.
    variant = namedtuple('Variant', 'name doc_count')
    for var in chained_get(self._json, ['name-variants', 'name-variant'], []):
        new = variant(name=var['$'], doc_count=var.get('@doc-count'))
        out.append(new)
    return out
A list of namedtuples representing variants of the affiliation name with number of documents referring to this variant.
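Since the Scopus JSON keys above are reconstructed, here is a self-contained sketch of the same parsing pattern with a stand-in chained_get and a sample payload shaped like the assumed API response:

from collections import namedtuple
from functools import reduce

def chained_get(container, path, default=None):
    # Minimal stand-in for the project's helper: walk nested dict keys.
    try:
        return reduce(lambda d, k: d[k], path, container)
    except (KeyError, TypeError):
        return default

_json = {"name-variants": {"name-variant": [
    {"$": "MIT", "@doc-count": "120"},
    {"$": "Massachusetts Inst. of Technology"},
]}}

Variant = namedtuple('Variant', 'name doc_count')
variants = [Variant(v['$'], v.get('@doc-count'))
            for v in chained_get(_json, ['name-variants', 'name-variant'], [])]
print(variants)
# [Variant(name='MIT', doc_count='120'),
#  Variant(name='Massachusetts Inst. of Technology', doc_count=None)]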
14,096
def assign_taxonomy(dataPath, reference_sequences_fp, id_to_taxonomy_fp,
                    read_1_seqs_fp, read_2_seqs_fp, single_ok=False,
                    no_single_ok_generic=False, header_id_regex=None,
                    read_id_regex="\S+\s+(\S+)",
                    amplicon_id_regex="(\S+)\s+(\S+?)\/",
                    output_fp=None, log_path=None, HALT_EXEC=False,
                    base_tmp_dir='/tmp'):
    usearch_command = "usearch"
    if not (exists(usearch_command) or app_path(usearch_command)):
        # Python 2 raise syntax in the source updated to the call form
        raise ApplicationNotFoundError(
            "Cannot find %s. Is it installed? Is it in your path?"
            % usearch_command)

    # The prefix/suffix literals were stripped in extraction; 'rtax_' and ''
    # are plausible reconstructions.
    my_tmp_dir = get_tmp_filename(tmp_dir=base_tmp_dir, prefix='rtax_',
                                  suffix='', result_constructor=str)
    os.makedirs(my_tmp_dir)

    try:
        assignments = {}
        # ... the rtax invocation and the loop over its result lines were
        # lost in extraction; each parsed result yields rtax_id and lineage,
        # and the id maps used below are built from the input fasta headers ...
        confidence = 1.0
        read_1_id = amplicon_to_read_1_id[rtax_id]
        orig_id = read_1_id_to_orig_id[read_1_id]
        if lineage:
            # ';' and 'Unassigned' are reconstructed literals
            assignments[orig_id] = (';'.join(lineage), confidence)
        else:
            assignments[orig_id] = ('Unassigned', 1.0)

        if output_fp:
            try:
                output_file = open(output_fp, 'w')
            except OSError:
                raise OSError("Can't open output file for writing: %s"
                              % output_fp)
            for seq_id, (lineage, confidence) in assignments.items():
                output_file.write('%s\t%s\t%1.3f\n'
                                  % (seq_id, lineage, confidence))
            output_file.close()
            return None
        else:
            return assignments
    finally:
        try:
            rmtree(my_tmp_dir)
        except OSError:
            pass
Assign taxonomy to each sequence in a fasta file with the RTAX classifier.

dataPath: path to a fasta file
output_fp: path to write output; if not provided, the result is returned
    as a dict of {seq_id: (taxonomy_assignment, confidence)}
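The two regex parameters do the header bookkeeping. A small, runnable illustration of what they extract from a typical paired-read FASTA header (the header string is invented):

import re

read_id_regex = r"\S+\s+(\S+)"           # capture the original id after the cluster id
amplicon_id_regex = r"(\S+)\s+(\S+?)\/"  # capture amplicon id and read id before '/'

header = "cluster_42 HWI-ST830:1:1101:1234:5678/1"
print(re.match(read_id_regex, header).group(1))
# -> HWI-ST830:1:1101:1234:5678/1
print(re.match(amplicon_id_regex, header).groups())
# -> ('cluster_42', 'HWI-ST830:1:1101:1234:5678')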
14,097
def wait_time(departure, now=None):
    now = now or datetime.datetime.now()
    yn, mn, dn = now.year, now.month, now.day
    # The split separator was stripped in extraction; 'HH:MM' implies ':'
    hour, minute = map(int, departure.split(':'))
    dt = datetime.datetime(yn, mn, dn, hour=hour, minute=minute)
    delta = (dt - now).seconds
    if (dt - now).days < 0:
        # departure already passed today
        delta = 0
    if delta < 3600:
        # format strings reconstructed from the 'MM:SS' contract in the docstring
        return '%02d:%02d' % (delta // 60, delta % 60)
    else:
        delta_hh = delta // 3600
        delta_rest = delta - delta_hh * 3600
        return '%d:%02d:%02d' % (delta_hh, delta_rest // 60, delta_rest % 60)
Calculate the waiting time until the next departure, given in 'HH:MM' format. Returns the delta from now as 'MM:SS', or 'H:MM:SS' for waits of an hour or more. Departure times that have already passed today yield a zero wait.
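Assuming the format strings reconstructed above, a quick check of the behaviour with a fixed now:

import datetime

now = datetime.datetime(2024, 5, 1, 18, 0, 0)
print(wait_time('18:30', now=now))   # '30:00'   (1800 s -> MM:SS)
print(wait_time('20:15', now=now))   # '2:15:00' (>= 1 hour -> H:MM:SS)
print(wait_time('17:00', now=now))   # '00:00'   (already departed today)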
14,098
def _write_cvvr(self, f, data):
    f.seek(0, 2)
    byte_loc = f.tell()
    cSize = len(data)
    block_size = CDF.CVVR_BASE_SIZE64 + cSize
    section_type = CDF.CVVR_
    rfuA = 0
    # struct format strings were stripped in extraction; CDF records are
    # big-endian with 8-byte sizes and 4-byte type/reserved fields, so
    # '>q' and '>i' are the assumed formats.
    cvvr1 = bytearray(24)
    cvvr1[0:8] = struct.pack('>q', block_size)
    cvvr1[8:12] = struct.pack('>i', section_type)
    cvvr1[12:16] = struct.pack('>i', rfuA)
    cvvr1[16:24] = struct.pack('>q', cSize)
    f.write(cvvr1)
    f.write(data)
    return byte_loc
Write compressed "data" variable to the end of the file in a CVVR
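To illustrate the assumed 24-byte header layout (big-endian, reconstructed formats; 13 is the CVVR record type id in the CDF internal format, assumed here), a self-contained round trip through an in-memory file:

import io
import struct

f = io.BytesIO()
payload = b"\x00" * 10                       # pretend-compressed variable data
block_size = 24 + len(payload)               # header + data
f.write(struct.pack('>q', block_size))       # 8-byte record size
f.write(struct.pack('>i', 13))               # 4-byte section type (CVVR)
f.write(struct.pack('>i', 0))                # 4-byte reserved field
f.write(struct.pack('>q', len(payload)))     # 8-byte compressed size
f.write(payload)

f.seek(0)
print(struct.unpack('>qiiq', f.read(24)))    # (34, 13, 0, 10)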
14,099
def gdf_from_places(queries, gdf_name='unnamed', buffer_dist=None):
    # create an empty GeoDataFrame, then append each result as a new row
    gdf = gpd.GeoDataFrame()
    for query in queries:
        gdf = gdf.append(gdf_from_place(query, buffer_dist=buffer_dist))

    # reset the index and name the GeoDataFrame
    gdf = gdf.reset_index().drop(labels='index', axis=1)
    gdf.gdf_name = gdf_name

    # set the CRS and return; the default name, drop label, and log message
    # literals were stripped in extraction and are reconstructed here
    gdf.crs = settings.default_crs
    log('Created GeoDataFrame with {} rows from {} queries'.format(len(gdf), len(queries)))
    return gdf
Create a GeoDataFrame from a list of place names to query.

Parameters
----------
queries : list
    list of query strings or structured query dicts to geocode/download,
    one at a time
gdf_name : string
    name attribute metadata for GeoDataFrame (this is used to save
    shapefile later)
buffer_dist : float
    distance to buffer around the place geometry, in meters

Returns
-------
GeoDataFrame
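A usage sketch; this needs osmnx installed with network access to the geocoder, and the place names are only examples:

places = ['Berkeley, California, USA', 'Oakland, California, USA']
gdf = gdf_from_places(places, gdf_name='east_bay')
print(gdf.shape)        # one row per successfully geocoded place
print(gdf.gdf_name)     # 'east_bay'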