Unnamed: 0: int64, values 0 to 389k
code: string, lengths 26 to 79.6k
docstring: string, lengths 1 to 46.9k
17,900
def calculate_size(name, id):
    data_size = 0
    data_size += calculate_size_str(name)
    data_size += calculate_size_str(id)
    return data_size
Calculates the request payload size
17,901
def clear_list_value(self, value):
    if not value:
        return self.empty_value
    if self.clean_empty:
        value = [v for v in value if v]
    return value or self.empty_value
Clean the argument value to eliminate None or Falsy values if needed.
17,902
def _evaluate(self,*args,**kwargs): if len(args) == 5: R,vR,vT, z, vz= args elif len(args) == 6: R,vR,vT, z, vz, phi= args else: self._parse_eval_args(*args) R= self._eval_R vR= self._eval_vR vT= self._eval_vT z= self._eval_z vz= self._eval_vz Phi= _evaluatePotentials(self._pot,R,z) try: Phio= _evaluatePotentials(self._pot,R,numpy.zeros(len(R))) except TypeError: Phio= _evaluatePotentials(self._pot,R,0.) Ez= Phi-Phio+vz**2./2. thisEzZmax= numpy.exp(self._EzZmaxsInterp(R)) if isinstance(R,numpy.ndarray): indx= (R > self._Rmax) indx+= (R < self._Rmin) indx+= (Ez != 0.)*(numpy.log(Ez) > thisEzZmax) indxc= True^indx jz= numpy.empty(R.shape) if numpy.sum(indxc) > 0: jz[indxc]= (self._jzInterp.ev(R[indxc],Ez[indxc]/thisEzZmax[indxc])\ *(numpy.exp(self._jzEzmaxInterp(R[indxc]))-10.**-5.)) if numpy.sum(indx) > 0: jz[indx]= self._aA(R[indx], numpy.zeros(numpy.sum(indx)), numpy.ones(numpy.sum(indx)), numpy.zeros(numpy.sum(indx)), numpy.sqrt(2.*Ez[indx]), _justjz=True, **kwargs)[2] else: if R > self._Rmax or R < self._Rmin or (Ez != 0 and numpy.log(Ez) > thisEzZmax): if _PRINTOUTSIDEGRID: print("Outside of grid in Ez", R > self._Rmax , R < self._Rmin , (Ez != 0 and numpy.log(Ez) > thisEzZmax)) jz= self._aA(R,0.,1., 0.,math.sqrt(2.*Ez), _justjz=True, **kwargs)[2] else: jz= (self._jzInterp(R,Ez/thisEzZmax)\ *(numpy.exp(self._jzEzmaxInterp(R))-10.**-5.))[0][0] ERLz= numpy.fabs(R*vT)+self._gamma*jz ER= Phio+vR**2./2.+ERLz**2./2./R**2. thisRL= self._RLInterp(ERLz) thisERRL= -numpy.exp(self._ERRLInterp(ERLz))+self._ERRLmax thisERRa= -numpy.exp(self._ERRaInterp(ERLz))+self._ERRamax if isinstance(R,numpy.ndarray): indx= ((ER-thisERRa)/(thisERRL-thisERRa) > 1.)\ *(((ER-thisERRa)/(thisERRL-thisERRa)-1.) < 10.**-2.) ER[indx]= thisERRL[indx] indx= ((ER-thisERRa)/(thisERRL-thisERRa) < 0.)\ *((ER-thisERRa)/(thisERRL-thisERRa) > -10.**-2.) ER[indx]= thisERRa[indx] indx= (ERLz < self._Lzmin) indx+= (ERLz > self._Lzmax) indx+= ((ER-thisERRa)/(thisERRL-thisERRa) > 1.) indx+= ((ER-thisERRa)/(thisERRL-thisERRa) < 0.) indxc= True^indx jr= numpy.empty(R.shape) if numpy.sum(indxc) > 0: jr[indxc]= (self._jrInterp.ev(ERLz[indxc], (ER[indxc]-thisERRa[indxc])/(thisERRL[indxc]-thisERRa[indxc]))\ *(numpy.exp(self._jrERRaInterp(ERLz[indxc]))-10.**-5.)) if numpy.sum(indx) > 0: jr[indx]= self._aA(thisRL[indx], numpy.sqrt(2.*(ER[indx]-_evaluatePotentials(self._pot,thisRL[indx],0.))-ERLz[indx]**2./thisRL[indx]**2.), ERLz[indx]/thisRL[indx], numpy.zeros(len(thisRL)), numpy.zeros(len(thisRL)), _justjr=True, **kwargs)[0] else: if (ER-thisERRa)/(thisERRL-thisERRa) > 1. \ and ((ER-thisERRa)/(thisERRL-thisERRa)-1.) < 10.**-2.: ER= thisERRL elif (ER-thisERRa)/(thisERRL-thisERRa) < 0. \ and (ER-thisERRa)/(thisERRL-thisERRa) > -10.**-2.: ER= thisERRa if ERLz < self._Lzmin or ERLz > self._Lzmax \ or (ER-thisERRa)/(thisERRL-thisERRa) > 1. \ or (ER-thisERRa)/(thisERRL-thisERRa) < 0.: if _PRINTOUTSIDEGRID: print("Outside of grid in ER/Lz", ERLz < self._Lzmin , ERLz > self._Lzmax \ , (ER-thisERRa)/(thisERRL-thisERRa) > 1. \ , (ER-thisERRa)/(thisERRL-thisERRa) < 0., ER, thisERRL, thisERRa, (ER-thisERRa)/(thisERRL-thisERRa)) jr= self._aA(thisRL[0], numpy.sqrt(2.*(ER-_evaluatePotentials(self._pot,thisRL,0.))-ERLz**2./thisRL**2.)[0], (ERLz/thisRL)[0], 0.,0., _justjr=True, **kwargs)[0] else: jr= (self._jrInterp(ERLz, (ER-thisERRa)/(thisERRL-thisERRa))\ *(numpy.exp(self._jrERRaInterp(ERLz))-10.**-5.))[0][0] return (jr,R*vT,jz)
NAME:
   __call__ (_evaluate)
PURPOSE:
   evaluate the actions (jr,lz,jz)
INPUT:
   Either:
      a) R,vR,vT,z,vz[,phi]:
         1) floats: phase-space value for single object (phi is optional) (each can be a Quantity)
         2) numpy.ndarray: [N] phase-space values for N objects (each can be a Quantity)
      b) Orbit instance: initial condition used if that's it, orbit(t) if there is a time given as well as the second argument
   scipy.integrate.quadrature keywords (used when directly evaluating a point off the grid)
OUTPUT:
   (jr,lz,jz)
HISTORY:
   2012-07-27 - Written - Bovy (IAS@MPIA)
NOTE:
   For a Miyamoto-Nagai potential, this seems accurate to 0.1% and takes ~0.13 ms
   For a MWPotential, this takes ~0.17 ms
17,903
def post(method, hmc, uri, uri_parms, body, logon_required, wait_for_completion): assert wait_for_completion is True partition_oid = uri_parms[0] partition_uri = + partition_oid try: partition = hmc.lookup_by_uri(partition_uri) except KeyError: raise InvalidResourceError(method, uri) cpc = partition.manager.parent assert cpc.dpm_enabled check_valid_cpc_status(method, uri, cpc) check_partition_status(method, uri, partition, invalid_statuses=[, ]) query_parms = parse_query_parms(method, uri, uri_parms[1]) try: image_name = query_parms[] except KeyError: raise BadRequestError( method, uri, reason=1, message="Missing required URI query parameter ") try: ins_file_name = query_parms[] except KeyError: raise BadRequestError( method, uri, reason=1, message="Missing required URI query parameter ") partition.properties[] = image_name partition.properties[] = ins_file_name return {}
Operation: Mount ISO Image (requires DPM mode).
17,904
def kill_zombies(self, zombies, session=None): from airflow.models.taskinstance import TaskInstance for zombie in zombies: if zombie.dag_id in self.dags: dag = self.dags[zombie.dag_id] if zombie.task_id in dag.task_ids: task = dag.get_task(zombie.task_id) ti = TaskInstance(task, zombie.execution_date) ti.start_date = zombie.start_date ti.end_date = zombie.end_date ti.try_number = zombie.try_number ti.state = zombie.state ti.test_mode = configuration.getboolean(, ) ti.handle_failure("{} detected as zombie".format(ti), ti.test_mode, ti.get_template_context()) self.log.info( , ti, ti.state) Stats.incr() session.commit()
Fail given zombie tasks, which are tasks that haven't had a heartbeat for too long, in the current DagBag. :param zombies: zombie task instances to kill. :type zombies: airflow.utils.dag_processing.SimpleTaskInstance :param session: DB session. :type session: sqlalchemy.orm.session.Session
17,905
def iri_to_uri(value, normalize=False): if not isinstance(value, str_cls): raise TypeError(unwrap( , type_name(value) )) scheme = None if sys.version_info < (2, 7) and not value.startswith() and not value.startswith(): real_prefix = None prefix_match = re.match(, value) if prefix_match: real_prefix = prefix_match.group(0) value = + value[len(real_prefix):] parsed = urlsplit(value) if real_prefix: value = real_prefix + value[7:] scheme = _urlquote(real_prefix[:-3]) else: parsed = urlsplit(value) if scheme is None: scheme = _urlquote(parsed.scheme) hostname = parsed.hostname if hostname is not None: hostname = hostname.encode() username = _urlquote(parsed.username, safe=()*+,;=!$&\) port = parsed.port if port is not None: port = str_cls(port).encode() netloc = b if username is not None: netloc += username if password: netloc += b + password netloc += b if hostname is not None: netloc += hostname if port is not None: default_http = scheme == b and port == b default_https = scheme == b and port == b if not normalize or (not default_http and not default_https): netloc += b + port path = _urlquote(parsed.path, safe=()*+,;=@:/?!$&\) fragment = _urlquote(parsed.fragment, safe=()*+,;=@:/latin1') return output
Encodes a unicode IRI into an ASCII byte string URI :param value: A unicode string of an IRI :param normalize: A bool that controls URI normalization :return: A byte string of the ASCII-encoded URI
17,906
def get_directories_re( self, directory_re, full_path=False, ignorecase=False): if ignorecase: compiled_re = re.compile(directory_re, re.I) else: compiled_re = re.compile(directory_re) found = set() if self.handle: for member in self.handle.getmembers(): if isinstance(member, ZipInfo): to_match = os.path.dirname(member.name) elif isinstance(member, TarInfo) and member.isdir(): to_match = member.name else: to_match = None if to_match: if ((full_path and compiled_re.search(to_match)) or ( not full_path and compiled_re.search( os.path.basename(to_match)))): found.add(to_match) return list(found)
Same as get_files_re, but for directories
17,907
def reset(self):
    animation_gen = self._frame_function(*self._animation_args,
                                         **self._animation_kwargs)
    self._current_generator = itertools.cycle(
        util.concatechain(animation_gen, self._back_up_generator))
Reset the current animation generator.
17,908
def main(): parser = optparse.OptionParser() parser.add_option( , , help=, metavar=) parser.add_option( , , help=, metavar=) parser.add_option( , , help=, metavar=) parser.add_option( , , help=, metavar=) options, _ = parser.parse_args() myrc = pypirc.PyPiRC() if options.server: if myrc.servers:
Main loop.
17,909
def monthdayscalendar(cls, year, month):
    weeks = []
    week = []
    for day in NepCal.itermonthdays(year, month):
        week.append(day)
        if len(week) == 7:
            weeks.append(week)
            week = []
    if len(week) > 0:
        weeks.append(week)
    return weeks
Return a list of the weeks in the given month of the given year as full weeks. Weeks are lists of seven day numbers.
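A runnable sketch of the same grouping logic. It uses the standard library's Gregorian calendar.Calendar.itermonthdays as a stand-in for NepCal.itermonthdays (not available here); the grouping into seven-day weeks is identical.

import calendar

def monthdayscalendar(year, month):
    # Group the month's day numbers into seven-day weeks; days outside
    # the month are reported as 0, just like itermonthdays above.
    weeks, week = [], []
    for day in calendar.Calendar().itermonthdays(year, month):
        week.append(day)
        if len(week) == 7:
            weeks.append(week)
            week = []
    if week:
        weeks.append(week)
    return weeks

print(monthdayscalendar(2024, 2))
# [[0, 0, 0, 1, 2, 3, 4], ..., [26, 27, 28, 29, 0, 0, 0]]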
17,910
def abort_io(self, iocb, err):
    if _debug:
        IOChainMixIn._debug("abort_io %r %r", iocb, err)
    if iocb is not self.ioChain:
        raise RuntimeError("broken chain")
    self.abort(err)
Forward the abort downstream.
17,911
def list_resource_record_sets(self, max_results=None, page_token=None, client=None):
    client = self._require_client(client)
    path = "/projects/%s/managedZones/%s/rrsets" % (self.project, self.name)
    iterator = page_iterator.HTTPIterator(
        client=client,
        api_request=client._connection.api_request,
        path=path,
        item_to_value=_item_to_resource_record_set,
        items_key="rrsets",
        page_token=page_token,
        max_results=max_results,
    )
    iterator.zone = self
    return iterator
List resource record sets for this zone. See https://cloud.google.com/dns/api/v1/resourceRecordSets/list :type max_results: int :param max_results: Optional. The maximum number of resource record sets to return. Defaults to a sensible value set by the API. :type page_token: str :param page_token: Optional. If present, return the next batch of resource record sets, using the value, which must correspond to the ``nextPageToken`` value returned in the previous response. Deprecated: use the ``pages`` property of the returned iterator instead of manually passing the token. :type client: :class:`google.cloud.dns.client.Client` :param client: (Optional) the client to use. If not passed, falls back to the ``client`` stored on the current zone. :rtype: :class:`~google.api_core.page_iterator.Iterator` :returns: Iterator of :class:`~.resource_record_set.ResourceRecordSet` belonging to this zone.
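A hedged usage sketch, assuming the google-cloud-dns client; the project id, zone name, and DNS name are hypothetical.

from google.cloud import dns

client = dns.Client(project="my-project")            # hypothetical project id
zone = client.zone("example-zone", "example.com.")   # hypothetical zone

# The returned HTTPIterator handles paging transparently.
for record_set in zone.list_resource_record_sets(max_results=50):
    print(record_set.name, record_set.record_type, record_set.ttl)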
17,912
def _check_ipcidr_minions(self, expr, greedy): cache_enabled = self.opts.get(, False) if greedy: minions = self._pki_minions() elif cache_enabled: minions = self.cache.list() else: return {: [], : []} if cache_enabled: if greedy: cminions = self.cache.list() else: cminions = minions if cminions is None: return {: minions, : []} tgt = expr try: tgt = ipaddress.ip_address(tgt) except Exception: try: tgt = ipaddress.ip_network(tgt) except Exception: log.error(, tgt) return {: [], : []} proto = .format(tgt.version) minions = set(minions) for id_ in cminions: mdata = self.cache.fetch(.format(id_), ) if mdata is None: if not greedy: minions.remove(id_) continue grains = mdata.get() if grains is None or proto not in grains: match = False elif isinstance(tgt, (ipaddress.IPv4Address, ipaddress.IPv6Address)): match = six.text_type(tgt) in grains[proto] else: match = salt.utils.network.in_subnet(tgt, grains[proto]) if not match and id_ in minions: minions.remove(id_) return {: list(minions), : []}
Return the minions found by looking via ipcidr
17,913
def get_section_metrics(cls): return { "overview": { "activity_metrics": [Closed, Submitted], "author_metrics": None, "bmi_metrics": [BMI], "time_to_close_metrics": [DaysToMergeMedian], "projects_metrics": [Projects], }, "com_channels": { "activity_metrics": [], "author_metrics": [] }, "project_activity": { "metrics": [Submitted, Closed] }, "project_community": { "author_metrics": [], "people_top_metrics": [], "orgs_top_metrics": [], }, "project_process": { "bmi_metrics": [BMI], "time_to_close_metrics": [], "time_to_close_title": "", "time_to_close_review_metrics": [DaysToMergeAverage, DaysToMergeMedian], "time_to_close_review_title": "Days to close review (median and average)", "patchsets_metrics": [PatchsetsMedian, PatchsetsAverage], "patchsets_title": "Number of patchsets per review (median and average)" } }
Get the mapping between metrics and sections in Manuscripts report :return: a dict with the mapping between metrics and sections in Manuscripts report
17,914
def get_device(self):
    addr = self.address
    servers = [server for server in pyrax.cloudservers.list()
               if addr in server.networks.get("private", "")]
    try:
        return servers[0]
    except IndexError:
        return None
Returns a reference to the device that is represented by this node. Returns None if no such device can be determined.
17,915
def get_impala_queries(self, start_time, end_time, filter_str="", limit=100, offset=0): params = { : start_time.isoformat(), : end_time.isoformat(), : filter_str, : limit, : offset, } return self._get("impalaQueries", ApiImpalaQueryResponse, params=params, api_version=4)
Returns a list of queries that satisfy the filter @type start_time: datetime.datetime. Note that the datetime must either be time zone aware or specified in the server time zone. See the python datetime documentation for more details about python's time zone handling. @param start_time: Queries must have ended after this time @type end_time: datetime.datetime. Note that the datetime must either be time zone aware or specified in the server time zone. See the python datetime documentation for more details about python's time zone handling. @param end_time: Queries must have started before this time @param filter_str: A filter to apply to the queries. For example: 'user = root and queryDuration > 5s' @param limit: The maximum number of results to return @param offset: The offset into the return list @since: API v4
17,916
def post(self, request, key): try: hook = Hook.objects.get(key=key, enabled=True) except Hook.DoesNotExist: msg = _("Key %s not associated to an enabled hook or bot") % key logger.warning(msg) return Response(msg, status=status.HTTP_404_NOT_FOUND) if hook.bot.owner != request.user: raise exceptions.AuthenticationFailed() try: parsed_data = request.data logger.debug("Hook %s attending request %s" % (hook, parsed_data)) handle_hook.delay(hook.id, parsed_data) except ParseError as e: return Response(str(e), status=status.HTTP_400_BAD_REQUEST) except: exc_info = sys.exc_info() traceback.print_exception(*exc_info) msg = _("Error processing %s for key %s") % (request.data, key) logger.error(msg) return Response(msg, status=status.HTTP_500_INTERNAL_SERVER_ERROR) else: return Response(status=status.HTTP_200_OK)
Process notification hooks: 1. Obtain Hook 2. Check Auth 3. Delay processing to a task 4. Respond to requester
17,917
def bind(self, **config):
    while self.unbound_types:
        typedef = self.unbound_types.pop()
        try:
            load, dump = typedef.bind(self, **config)
            self.bound_types[typedef] = {
                "load": load,
                "dump": dump
            }
        except Exception:
            self.unbound_types.add(typedef)
            raise
Bind all unbound types to the engine. Bind each unbound typedef to the engine, passing in the engine and :attr:`config`. The resulting ``load`` and ``dump`` functions can be found under ``self.bound_types[typedef]["load"]`` and ``self.bound_types[typedef]["dump"]``, respectively. Parameters ---------- config : dict, optional Engine-binding configuration to pass to each typedef that will be bound. Examples include floating-point precision values, maximum lengths for strings, or any other translation constraints/settings that a typedef needs to construct a load/dump function pair.
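A minimal, self-contained sketch of the bind contract described above. The Engine and StringTypedef classes here are illustrative stand-ins, not the library's real types.

class Engine(object):
    # Mirrors the bind() loop above: pop unbound typedefs, store (load, dump).
    def __init__(self):
        self.unbound_types = set()
        self.bound_types = {}

    def bind(self, **config):
        while self.unbound_types:
            typedef = self.unbound_types.pop()
            try:
                load, dump = typedef.bind(self, **config)
                self.bound_types[typedef] = {"load": load, "dump": dump}
            except Exception:
                # Put the typedef back so a failed bind can be retried.
                self.unbound_types.add(typedef)
                raise


class StringTypedef(object):
    # A typedef's bind() receives the engine plus config and returns (load, dump).
    def bind(self, engine, **config):
        max_len = config.get("max_string_length", 64)
        return (lambda value: str(value)), (lambda value: str(value)[:max_len])


engine = Engine()
typedef = StringTypedef()
engine.unbound_types.add(typedef)
engine.bind(max_string_length=8)
print(engine.bound_types[typedef]["dump"]("a very long string"))  # 'a very l'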
17,918
def delete(self, primary_key): title = % self.__class__.__name__ delete_statement = self.table.delete(self.table.c.id==primary_key) self.session.execute(delete_statement) exit_msg = % primary_key return exit_msg
a method to delete a record in the table :param primary_key: string with primary key of record :return: string with status message
17,919
def copy_ssh_keys_to_hosts(self, hosts, known_hosts=DEFAULT_KNOWN_HOSTS, dry=False): exceptions = [] for host in hosts: logger.info(, host.hostname, self.sshcopyid.pub_key) if not dry: try: self.copy_ssh_keys_to_host(host, known_hosts=known_hosts) except (paramiko.ssh_exception.SSHException, socket.error) as ex: logger.error(format_error(format_exception(ex))) logger.debug(traceback.format_exc()) exceptions.append(CopySSHKeyError(host=host, exception=ex)) if exceptions: raise CopySSHKeysError(exceptions=exceptions)
Copy the SSH keys to the given hosts. :param hosts: the list of `Host` objects to copy the SSH keys to. :param known_hosts: the `known_hosts` file to store the SSH public keys. :param dry: perform a dry run. :raise msshcopyid.errors.CopySSHKeysError:
17,920
def alterar(self, id_user_group, name, read, write, edit, remove): if not is_valid_int_param(id_user_group): raise InvalidParameterError( u) url = + str(id_user_group) + ugroup_map = dict() ugroup_map[] = name ugroup_map[] = read ugroup_map[] = write ugroup_map[] = edit ugroup_map[] = remove code, xml = self.submit({: ugroup_map}, , url) return self.response(code, xml)
Edit user group data from its identifier. :param id_user_group: User group id. :param name: User group name. :param read: If user group has read permission ('S' or 'N'). :param write: If user group has write permission ('S' or 'N'). :param edit: If user group has edit permission ('S' or 'N'). :param remove: If user group has remove permission ('S' or 'N'). :return: None :raise NomeGrupoUsuarioDuplicadoError: User group name already exists. :raise ValorIndicacaoPermissaoInvalidoError: Read, write, edit or remove value is invalid. :raise GrupoUsuarioNaoExisteError: User Group not found. :raise InvalidParameterError: At least one of the parameters is invalid or none. :raise DataBaseError: Networkapi failed to access database. :raise XMLError: Networkapi fails generating response XML.
17,921
def exit_success(jid, ext_source=None): ret = dict() data = list_job( jid, ext_source=ext_source ) minions = data.get(, []) result = data.get(, {}) for minion in minions: if minion in result and in result[minion]: ret[minion] = True if result[minion][] else False else: ret[minion] = False for minion in result: if in result[minion] and result[minion][]: ret[minion] = True return ret
Check if a job has been executed and exit successfully jid The jid to look up. ext_source The external job cache to use. Default: `None`. CLI Example: .. code-block:: bash salt-run jobs.exit_success 20160520145827701627
17,922
def list_nodes_full(**kwargs): nodes = _query() ret = {} for node in nodes: name = nodes[node][] ret[name] = nodes[node].copy() ret[name][] = node ret[name][] = nodes[node][] ret[name][] = nodes[node][] ret[name][] = nodes[node][] ret[name][] = nodes[node][] ret[name][] = nodes[node][] return ret
Return all data on nodes
17,923
def gen_sponsor_schedule(user, sponsor=None, num_blocks=6, surrounding_blocks=None, given_date=None): r no_attendance_today = None acts = [] if sponsor is None: sponsor = user.get_eighth_sponsor() if surrounding_blocks is None: surrounding_blocks = EighthBlock.objects.get_upcoming_blocks(num_blocks) activities_sponsoring = (EighthScheduledActivity.objects.for_sponsor(sponsor).select_related("block").filter(block__in=surrounding_blocks)) sponsoring_block_map = {} for sa in activities_sponsoring: bid = sa.block.id if bid in sponsoring_block_map: sponsoring_block_map[bid] += [sa] else: sponsoring_block_map[bid] = [sa] num_acts = 0 for b in surrounding_blocks: num_added = 0 sponsored_for_block = sponsoring_block_map.get(b.id, []) for schact in sponsored_for_block: acts.append(schact) if schact.block.is_today(): if not schact.attendance_taken and schact.block.locked: no_attendance_today = True num_added += 1 if num_added == 0: acts.append({"block": b, "id": None, "fake": True}) else: num_acts += 1 logger.debug(acts) cur_date = surrounding_blocks[0].date if acts else given_date if given_date else datetime.now().date() last_block = surrounding_blocks[len(surrounding_blocks) - 1] if surrounding_blocks else None last_block_date = last_block.date + timedelta(days=1) if last_block else cur_date next_blocks = list(last_block.next_blocks(1)) if last_block else None next_date = next_blocks[0].date if next_blocks else last_block_date first_block = surrounding_blocks[0] if surrounding_blocks else None if cur_date and not first_block: first_block = EighthBlock.objects.filter(date__lte=cur_date).last() first_block_date = first_block.date + timedelta(days=-7) if first_block else cur_date prev_blocks = list(first_block.previous_blocks(num_blocks - 1)) if first_block else None prev_date = prev_blocks[0].date if prev_blocks else first_block_date return { "sponsor_schedule": acts, "no_attendance_today": no_attendance_today, "num_attendance_acts": num_acts, "sponsor_schedule_cur_date": cur_date, "sponsor_schedule_next_date": next_date, "sponsor_schedule_prev_date": prev_date }
Return a list of :class:`EighthScheduledActivity`\s in which the given user is sponsoring. Returns: Dictionary with: activities, no_attendance_today, num_acts
17,924
def get_localhost(): t work properly and takes a lot of time (had this issue on the pyunit server). Using the IP directly solves the problem. 127.0.0.1127.0.0.1t have getaddrinfo or SOL_TCP... Just consider it 127.0.0.1 in this case. _cache = else: _cache = return _cache
Should return 127.0.0.1 for IPv4 and ::1 for IPv6. 'localhost' is not used because on Windows Vista/Windows 7 there can be issues where resolving it doesn't work properly and takes a lot of time (had this issue on the pyunit server). Using the IP directly solves the problem.
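A minimal sketch of the caching behaviour described above, assuming a module-level cache; this is an approximation, not the original implementation.

_cache = None

def get_localhost():
    # Return a cached loopback address. The literal IP is used instead of
    # resolving 'localhost', which can be slow on some Windows systems.
    # (Returns the IPv4 loopback; an IPv6-only setup would use "::1".)
    global _cache
    if _cache is None:
        _cache = "127.0.0.1"
    return _cache

print(get_localhost())  # -> 127.0.0.1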
17,925
def _table_sort_by(table, sort_exprs):
    result = table.op().sort_by(table, sort_exprs)
    return result.to_expr()
Sort table by the indicated column expressions and sort orders (ascending/descending) Parameters ---------- sort_exprs : sorting expressions Must be one of: - Column name or expression - Sort key, e.g. desc(col) - (column name, True (ascending) / False (descending)) Examples -------- >>> import ibis >>> t = ibis.table([('a', 'int64'), ('b', 'string')]) >>> ab_sorted = t.sort_by([('a', True), ('b', False)]) Returns ------- sorted : TableExpr
17,926
def set_language(self): try: self.language = self.soup.find().string except AttributeError: self.language = None
Parses the feed language and sets its value.
17,927
def output_results(results, mvdelim = , output = sys.stdout): fields = set() for result in results: for key in result.keys(): if(isinstance(result[key], list)): result[ + key] = encode_mv(result[key]) result[key] = mvdelim.join(result[key]) fields.update(list(result.keys())) fields = sorted(list(fields)) writer = csv.DictWriter(output, fields) writer.writerow(dict(list(zip(fields, fields)))) writer.writerows(results)
Given a list of dictionaries, each representing a single result, and an optional list of fields, output those results to stdout for consumption by the Splunk pipeline
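A self-contained sketch of the multivalue flattening described above. The encode_mv helper and the "__mv_" field prefix are assumptions modelled on Splunk's multivalue conventions, not taken from the original module.

import csv
import sys

def encode_mv(values):
    # Assumed encoding: each value wrapped in '$', values joined with ';'.
    return ";".join("$%s$" % v.replace("$", "$$") for v in values)

def output_results(results, mvdelim="\n", output=sys.stdout):
    fields = set()
    for result in results:
        for key in list(result.keys()):
            if isinstance(result[key], list):
                result["__mv_" + key] = encode_mv(result[key])   # assumed prefix
                result[key] = mvdelim.join(result[key])
        fields.update(result.keys())
    fields = sorted(fields)
    writer = csv.DictWriter(output, fields)
    writer.writerow(dict(zip(fields, fields)))   # header row
    writer.writerows(results)

output_results([{"host": "web01", "ports": ["80", "443"]}])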
17,928
def dump( state, host, remote_filename, database=None, postgresql_user=None, postgresql_password=None, postgresql_host=None, postgresql_port=None, ): yield .format(make_psql_command( executable=, database=database, user=postgresql_user, password=postgresql_password, host=postgresql_host, port=postgresql_port, ), remote_filename)
Dump a PostgreSQL database into a ``.sql`` file. Requires ``pg_dump``. + database: name of the database to dump + remote_filename: name of the file to dump the SQL to + postgresql_*: global module arguments, see above
17,929
def clean(self: , *, atol: float=1e-9) -> : negligible = [v for v, c in self._terms.items() if abs(c) <= atol] for v in negligible: del self._terms[v] return self
Remove terms with coefficients of absolute value atol or less.
17,930
def safe_filename(self, otype, oid): permitted = set([, , , ]) oid = .join([c for c in oid if c.isalnum() or c in permitted]) while oid.find() != -1: oid = oid.replace(, ) ext = ts = datetime.now().strftime("%Y%m%dT%H%M%S") fname = is_new = False while not is_new: oid_len = 255 - len( % (otype, ts, ext)) fname = % (otype, oid[:oid_len], ts, ext) is_new = True if os.path.exists(fname): is_new = False ts += return fname
Sanitize the object name into a filename and verify it doesn't already exist
17,931
def _resetID(self):
    self._setID((None,) * len(self._sqlPrimary))
    self._new = True
Reset all ID fields.
17,932
def load_from_cache(path=user_path): if not path: return try: with open(path, ) as f: dversion, mversion, data = pickle.load(f) if dversion == data_version and mversion == module_version: return data except (FileNotFoundError, ValueError, EOFError): pass
Try to load category ranges from userlevel cache file. :param path: path to userlevel cache file :type path: str :returns: category ranges dict or None :rtype: None or dict of RangeGroup
17,933
def evaluate(self, sequence, transformations): result = sequence parallel = partial( parallelize, processes=self.processes, partition_size=self.partition_size) staged = [] for transform in transformations: strategies = transform.execution_strategies or {} if ExecutionStrategies.PARALLEL in strategies: staged.insert(0, transform.function) else: if staged: result = parallel(compose(*staged), result) staged = [] if ExecutionStrategies.PRE_COMPUTE in strategies: result = list(result) result = transform.function(result) if staged: result = parallel(compose(*staged), result) return iter(result)
Execute the sequence of transformations in parallel :param sequence: Sequence to evaluate :param transformations: Transformations to apply :return: Resulting sequence or value
17,934
def support_jsonp(api_instance, callback_name_source=): output_json = api_instance.representations[] @api_instance.representation() def handle_jsonp(data, code, headers=None): resp = output_json(data, code, headers) if code == 200: callback = request.args.get(callback_name_source, False) if not callable(callback_name_source) \ else callback_name_source() if callback: resp.set_data(str(callback) + + resp.get_data().decode("utf-8") + ) return resp
Let an API instance respond to JSONP requests automatically. `callback_name_source` can be a string or a callback. If it is a string, the system looks for a query-string argument with that name; if found, the request is treated as a JSONP request and the argument's value is used as the JS callback name. If `callback_name_source` is a callback, it should return the JS callback name when the request is a JSONP request and False otherwise, and the system handles the request according to its return value. Default supported format: url?callback=js_callback_name
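A hedged usage sketch with Flask-RESTful, assuming support_jsonp from above is importable in scope; the resource and endpoint are illustrative.

from flask import Flask
from flask_restful import Api, Resource

app = Flask(__name__)
api = Api(app)

class Hello(Resource):
    def get(self):
        return {"msg": "hi"}

api.add_resource(Hello, "/hello")

# After this call, GET /hello?callback=cb returns:  cb({"msg": "hi"})
support_jsonp(api, callback_name_source="callback")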
17,935
def text(self):
    if callable(self._text):
        return str(self._text())
    return str(self._text)
Return the string to render.
17,936
def get_processes(self): procs = set() try: p = sp.Popen([, , , ], stdout=sp.PIPE) output, error = p.communicate() if sys.version_info > (3, 0): output = output.decode() for comm in output.split(): name = comm.split()[-1] if name and len(name) >= 2 and not in name: procs.add(name) finally: proc_list = list(procs) random.shuffle(proc_list) return proc_list
Grab a shuffled list of all currently running process names
17,937
def decode(self, encoded): encoded = super().decode(encoded) if encoded.numel() > 1: raise ValueError( ) return self.itos[encoded.squeeze().item()]
Decodes ``encoded`` label. Args: encoded (torch.Tensor): Encoded label. Returns: object: Label decoded from ``encoded``.
17,938
def _validate(self, all_valid_addresses): for dependency, dependents in iteritems(self._dependent_address_map): if dependency not in all_valid_addresses: raise AddressLookupError( .format( dependency.spec, .join(d.spec for d in dependents) ) )
Validate that all of the dependencies in the graph exist in the given addresses set.
17,939
def plot_gos(fout_png, goids, obo_dag, *args, **kws): engine = kws[] if in kws else godagsmall = OboToGoDagSmall(goids=goids, obodag=obo_dag).godag godagplot = GODagSmallPlot(godagsmall, *args, **kws) godagplot.plt(fout_png, engine)
Given GO ids and the obo_dag, create a plot of paths from GO ids.
17,940
def format_header_cell(val): return re.sub(, , re.sub(r, , re.sub(r, , str(val) )))
Formats given header column. This involves changing '_Px_' to '(', '_xP_' to ')' and all other '_' to spaces.
17,941
def get_stops(records, group_dist): def traverse(start, next): position_prev = records[next - 1].position.location position_next = records[next].position.location dist = 1000 * great_circle_distance(position_prev, position_next) return dist <= group_dist groups = _groupwhile(records, traverse) def median(x): return sorted(x)[len(x) // 2] stops = [] for g in groups: _lat = median([gv.position.location[0] for gv in g]) _lon = median([gv.position.location[1] for gv in g]) stops.append({ : (_lat, _lon), : g, }) return stops
Group records arounds stop locations and returns a list of dict(location, records) for each stop. Parameters ---------- records : list A list of Record objects ordered by non-decreasing datetime group_dist : float Minimum distance (in meters) to switch to a new stop.
17,942
def round(self, multiple=1):
    if not isinstance(multiple, int):
        raise TypeError("The round multiple must be an int not %s."
                        % multiple.__class__.__name__)
    self._round(multiple)
Rounds the kerning values to increments of **multiple**, which will be an ``int``. The default behavior is to round to increments of 1.
17,943
def popitem(self): with self.__timer as time: self.expire(time) try: key = next(iter(self.__links)) except StopIteration: raise KeyError( % self.__class__.__name__) else: return (key, self.pop(key))
Remove and return the `(key, value)` pair least recently used that has not already expired.
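The behaviour described above matches cachetools' TTLCache.popitem; a usage sketch under that assumption:

from cachetools import TTLCache

cache = TTLCache(maxsize=10, ttl=300)
cache["a"] = 1
cache["b"] = 2
_ = cache["a"]             # touch "a" so "b" becomes least recently used
print(cache.popitem())     # -> ('b', 2), assuming neither entry has expired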
17,944
def methods(self, methods): self._methods = NocaseDict() if methods: try: iterator = methods.items() except AttributeError: iterator = methods for item in iterator: if isinstance(item, CIMMethod): key = item.name value = item elif isinstance(item, tuple): key, value = item else: raise TypeError( _format("Input object for methods has invalid item in " "iterable: {0!A}", item)) self.methods[key] = _cim_method(key, value)
Setter method; for a description see the getter method.
17,945
def runner(self): printtime(, self.starttime) if not self.pipeline: objects = Objectprep(self) objects.objectprep() self.runmetadata = objects.samples Mash(self, self.analysistype)
Run the necessary methods in the correct order
17,946
def GpsSecondsFromPyUTC( pyUTC, leapSecs=14 ): t = t=gpsFromUTC(*ymdhmsFromPyUTC( pyUTC )) return int(t[0] * 60 * 60 * 24 * 7 + t[1])
Converts the Python epoch to GPS seconds. pyUTC = the Python epoch from time.time()
17,947
def _combine(self, other, conn=): f = F() self_filters = copy.deepcopy(self.filters) other_filters = copy.deepcopy(other.filters) if not self.filters: f.filters = other_filters elif not other.filters: f.filters = self_filters elif conn in self.filters[0]: f.filters = self_filters f.filters[0][conn].extend(other_filters) elif conn in other.filters[0]: f.filters = other_filters f.filters[0][conn].extend(self_filters) else: f.filters = [{conn: self_filters + other_filters}] return f
OR and AND will create a new F, with the filters from both F objects combined with the connector `conn`.
17,948
def init_app(self, app): app.config.setdefault(, self.defaults[]) app.config.setdefault(, self.defaults[]) app.config.setdefault(, self.defaults[]) app.config.setdefault(, self.defaults[]) app.before_request(self.redirect_to_ssl) app.after_request(self.set_hsts_header)
Configures the specified Flask app to enforce SSL.
17,949
def getlist(self, section, option, *, raw=False, vars=None, fallback=None):
    val = self.get(section, option, raw=raw, vars=vars, fallback=fallback)
    values = []
    if val:
        for line in val.split("\n"):
            values += [s.strip() for s in line.split(",")]
    return values
Return the [section] option values as a list. The list items must be delimited with commas and/or newlines.
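A self-contained sketch: the same getlist grafted onto configparser.ConfigParser (the subclass name and config contents are illustrative).

import configparser

class ListConfigParser(configparser.ConfigParser):
    def getlist(self, section, option, *, raw=False, vars=None, fallback=None):
        # Split the raw value on newlines, then commas, stripping whitespace.
        val = self.get(section, option, raw=raw, vars=vars, fallback=fallback)
        values = []
        if val:
            for line in val.split("\n"):
                values += [s.strip() for s in line.split(",")]
        return values

cfg = ListConfigParser()
cfg.read_string("""
[server]
hosts = alpha, beta
    gamma,delta
""")
print(cfg.getlist("server", "hosts"))  # -> ['alpha', 'beta', 'gamma', 'delta']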
17,950
def get_search_engine(index=None): search_engine_class = _load_class(getattr(settings, "SEARCH_ENGINE", None), None) return search_engine_class(index=index) if search_engine_class else None
Returns the desired implementor (defined in settings)
17,951
def IsDirectory(self):
    if self._stat_object is None:
        self._stat_object = self._GetStat()
    if self._stat_object is not None:
        self.entry_type = self._stat_object.type
    return self.entry_type == definitions.FILE_ENTRY_TYPE_DIRECTORY
Determines if the file entry is a directory. Returns: bool: True if the file entry is a directory.
17,952
def _create_download_failed_message(exception, url): message = .format(url, exception.__class__.__name__, exception) if _is_temporal_problem(exception): if isinstance(exception, requests.ConnectionError): message += else: message += \ elif isinstance(exception, requests.HTTPError): try: server_message = for elem in decode_data(exception.response.content, MimeType.XML): if in elem.tag or in elem.tag: server_message += elem.text.strip() except ElementTree.ParseError: server_message = exception.response.text message += .format(server_message) return message
Creates message describing why download has failed :param exception: Exception raised during download :type exception: Exception :param url: An URL from where download was attempted :type url: str :return: Error message :rtype: str
17,953
def set_args(self, args, unknown_args=None): if unknown_args is None: unknown_args = [] self.logger.setLevel(getattr(logging, args.log_level)) parent = hdfs.path.dirname(hdfs.path.abspath(args.output.rstrip("/"))) self.remote_wd = hdfs.path.join( parent, utils.make_random_str(prefix="pydoop_submit_") ) self.remote_exe = hdfs.path.join(self.remote_wd, str(uuid.uuid4())) self.properties[JOB_NAME] = args.job_name or self.properties[IS_JAVA_RR] = ( if args.do_not_use_java_record_reader else ) self.properties[IS_JAVA_RW] = ( if args.do_not_use_java_record_writer else ) self.properties[JOB_REDUCES] = args.num_reducers if args.job_name: self.properties[JOB_NAME] = args.job_name self.properties.update(args.job_conf or {}) self.__set_files_to_cache(args) self.__set_archives_to_cache(args) self.requested_env = self._env_arg_to_dict(args.set_env or []) self.args = args self.unknown_args = unknown_args
Configure job, based on the arguments provided.
17,954
def execute( mapchete_files, zoom=None, bounds=None, point=None, wkt_geometry=None, tile=None, overwrite=False, multi=None, input_file=None, logfile=None, verbose=False, no_pbar=False, debug=False, max_chunksize=None, vrt=False, idx_out_dir=None ): multi = multi if multi else cpu_count() mode = "overwrite" if overwrite else "continue" if debug or not verbose: verbose_dst = open(os.devnull, ) else: verbose_dst = sys.stdout for mapchete_file in mapchete_files: tqdm.tqdm.write("preparing to process %s" % mapchete_file, file=verbose_dst) with click_spinner.spinner(disable=debug) as spinner: if tile: tile = raw_conf_process_pyramid(raw_conf(mapchete_file)).tile(*tile) with mapchete.open( mapchete_file, mode=mode, bounds=tile.bounds, zoom=tile.zoom, single_input_file=input_file ) as mp: spinner.stop() tqdm.tqdm.write("processing 1 tile", file=verbose_dst) for result in mp.batch_processor(tile=tile): utils.write_verbose_msg(result, dst=verbose_dst) tqdm.tqdm.write( "processing %s finished" % mapchete_file, file=verbose_dst ) if vrt: tqdm.tqdm.write("creating VRT", file=verbose_dst) for tile in tqdm.tqdm( zoom_index_gen( mp=mp, zoom=tile.zoom, out_dir=( idx_out_dir if idx_out_dir else mp.config.output.path ), vrt=vrt, ), total=mp.count_tiles(tile.zoom, tile.zoom), unit="tile", disable=debug or no_pbar ): logger.debug("%s indexed", tile) tqdm.tqdm.write( "VRT(s) creation for %s finished" % mapchete_file, file=verbose_dst ) else: with mapchete.open( mapchete_file, mode=mode, zoom=zoom, bounds=bounds_from_opts( wkt_geometry=wkt_geometry, point=point, bounds=bounds, raw_conf=raw_conf(mapchete_file) ), single_input_file=input_file ) as mp: spinner.stop() tiles_count = mp.count_tiles( min(mp.config.init_zoom_levels), max(mp.config.init_zoom_levels) ) tqdm.tqdm.write( "processing %s tile(s) on %s worker(s)" % (tiles_count, multi), file=verbose_dst ) for process_info in tqdm.tqdm( mp.batch_processor( multi=multi, zoom=zoom, max_chunksize=max_chunksize ), total=tiles_count, unit="tile", disable=debug or no_pbar ): utils.write_verbose_msg(process_info, dst=verbose_dst) tqdm.tqdm.write( "processing %s finished" % mapchete_file, file=verbose_dst ) if vrt: tqdm.tqdm.write("creating VRT(s)", file=verbose_dst) for tile in tqdm.tqdm( zoom_index_gen( mp=mp, zoom=mp.config.init_zoom_levels, out_dir=( idx_out_dir if idx_out_dir else mp.config.output.path ), vrt=vrt ), total=mp.count_tiles( min(mp.config.init_zoom_levels), max(mp.config.init_zoom_levels) ), unit="tile", disable=debug or no_pbar ): logger.debug("%s indexed", tile) tqdm.tqdm.write( "VRT(s) creation for %s finished" % mapchete_file, file=verbose_dst )
Execute a Mapchete process.
17,955
def write_text_files(args, infilenames, outfilename): if not outfilename.endswith(): outfilename = outfilename + outfilename = overwrite_file_check(args, outfilename) all_text = [] for i, infilename in enumerate(infilenames): parsed_text = get_parsed_text(args, infilename) if parsed_text: if args[]: if not args[]: print(.format(outfilename)) write_file(parsed_text, outfilename) elif args[]: all_text += parsed_text if len(infilenames) > 1 and i < len(infilenames) - 1: all_text.append() if args[] and all_text: if not args[]: print( .format(len(infilenames), outfilename)) write_file(all_text, outfilename)
Write text file(s) to disk. Keyword arguments: args -- program arguments (dict) infilenames -- names of user-inputted and/or downloaded files (list) outfilename -- name of output text file (str)
17,956
def _write_superbox(self, fptr, box_id): orig_pos = fptr.tell() fptr.write(struct.pack(, 0, box_id)) for box in self.box: box.write(fptr) end_pos = fptr.tell() fptr.seek(orig_pos) fptr.write(struct.pack(, end_pos - orig_pos)) fptr.seek(end_pos)
Write a superbox. Parameters ---------- fptr : file or file object Superbox (box of boxes) to be written to this file. box_id : bytes 4-byte sequence that identifies the superbox.
17,957
def _raise_glfw_errors_as_exceptions(error_code, description): global ERROR_REPORTING if ERROR_REPORTING: message = "(%d) %s" % (error_code, description) raise GLFWError(message)
Default error callback that raises GLFWError exceptions for glfw errors. Set an alternative error callback or set glfw.ERROR_REPORTING to False to disable this behavior.
17,958
def json_schema_type(schema_file: str, **kwargs) -> typing.Type: from doctor.resource import ResourceSchema schema = ResourceSchema.from_file(schema_file) kwargs[] = schema definition_key = kwargs.get() if definition_key: params = [definition_key] request_schema = schema._create_request_schema(params, params) try: definition = request_schema[][definition_key] except KeyError: raise TypeSystemError( .format( definition_key)) description = get_value_from_schema( schema, definition, , definition_key) example = get_value_from_schema( schema, definition, , definition_key) json_type = get_value_from_schema( schema, definition, , definition_key) json_type, native_type = get_types(json_type) kwargs[] = description kwargs[] = example kwargs[] = json_type kwargs[] = native_type else: try: kwargs[] = schema.schema[] except KeyError: raise TypeSystemError() try: json_type = schema.schema[] except KeyError: raise TypeSystemError() json_type, native_type = get_types(json_type) kwargs[] = json_type kwargs[] = native_type try: kwargs[] = schema.schema[] except KeyError: if schema.schema.get(): example = {} for prop, definition in schema.schema[].items(): example[prop] = get_value_from_schema( schema, definition, , ) kwargs[] = example else: raise TypeSystemError() return type(, (JsonSchema,), kwargs)
Create a :class:`~doctor.types.JsonSchema` type. This function will automatically load the schema and set it as an attribute of the class along with the description and example. :param schema_file: The full path to the json schema file to load. :param kwargs: Can include any attribute defined in :class:`~doctor.types.JsonSchema`
17,959
def closedopen(lower_value, upper_value): return Interval(Interval.CLOSED, lower_value, upper_value, Interval.OPEN)
Helper function to construct an interval object with a closed lower and open upper. For example: >>> closedopen(100.2, 800.9) [100.2, 800.9)
17,960
def detect_pattern_format(pattern_filename, encoding, on_word_boundaries): tsv = True boundaries = on_word_boundaries with open_file(pattern_filename) as input_file: for line in input_file: line = line.decode(encoding) if line.count() != 1: tsv = False if in line: boundaries = True if boundaries and not tsv: break return tsv, boundaries
Automatically detects the pattern file format, and determines whether the Aho-Corasick string matching should pay attention to word boundaries or not. Arguments: - `pattern_filename`: - `encoding`: - `on_word_boundaries`:
17,961
def get_couchdb_admins(): user_list = [] req = curl_couchdb() for user in req.json().keys(): user_list.append(user) return user_list
Return the actual CouchDB admins
17,962
def calc_effective_conductivity(self, inlets=None, outlets=None,
                                domain_area=None, domain_length=None):
    return self._calc_eff_prop(inlets=inlets, outlets=outlets,
                               domain_area=domain_area,
                               domain_length=domain_length)
r""" This calculates the effective electrical conductivity. Parameters ---------- inlets : array_like The pores where the inlet voltage boundary conditions were applied. If not given an attempt is made to infer them from the algorithm. outlets : array_like The pores where the outlet voltage boundary conditions were applied. If not given an attempt is made to infer them from the algorithm. domain_area : scalar, optional The area of the inlet (and outlet) boundary faces. If not given then an attempt is made to estimate it, but it is usually underestimated. domain_length : scalar, optional The length of the domain between the inlet and outlet boundary faces. If not given then an attempt is made to estimate it, but it is usually underestimated. Notes ----- The area and length of the domain are found using the bounding box around the inlet and outlet pores which do not necessarily lie on the edge of the domain, resulting in underestimation of sizes.
17,963
def iris(display=False):
    d = sklearn.datasets.load_iris()
    df = pd.DataFrame(data=d.data, columns=d.feature_names)
    if display:
        return df, [d.target_names[v] for v in d.target]
    else:
        return df, d.target
Return the classic iris data in a nice package.
17,964
def check_physical(self, line): self.physical_line = line for name, check, argument_names in self._physical_checks: self.init_checker_state(name, argument_names) result = self.run_check(check, argument_names) if result is not None: (offset, text) = result self.report_error(self.line_number, offset, text, check) if text[:4] == : self.indent_char = line[0]
Run all physical checks on a raw input line.
17,965
def get_resources_by_search(self, resource_query, resource_search): and_list = list() or_list = list() for term in resource_query._query_terms: and_list.append({term: resource_query._query_terms[term]}) for term in resource_query._keyword_terms: or_list.append({term: resource_query._keyword_terms[term]}) if resource_search._id_list is not None: identifiers = [ObjectId(i.identifier) for i in resource_search._id_list] and_list.append({: {: identifiers}}) if or_list: and_list.append({: or_list}) view_filter = self._view_filter() if view_filter: and_list.append(view_filter) if and_list: query_terms = {: and_list} collection = JSONClientValidated(, collection=, runtime=self._runtime) if resource_search.start is not None and resource_search.end is not None: result = collection.find(query_terms)[resource_search.start:resource_search.end] else: result = collection.find(query_terms) return searches.ResourceSearchResults(result, dict(resource_query._query_terms), runtime=self._runtime)
Gets the search results matching the given search query using the given search. arg: resource_query (osid.resource.ResourceQuery): the resource query arg: resource_search (osid.resource.ResourceSearch): the resource search return: (osid.resource.ResourceSearchResults) - the resource search results raise: NullArgument - ``resource_query`` or ``resource_search`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure raise: Unsupported - ``resource_query`` or ``resource_search`` is not of this service *compliance: mandatory -- This method must be implemented.*
17,966
def __replace_capitalise(sentence): if sentence is not None: while sentence.find() != -1: cap_index = _sentence.find() part1 = sentence[:cap_index] part2 = sentence[cap_index + 12:cap_index + 13] part3 = sentence[cap_index + 13:] if part2 in "abcdefghijklmnopqrstuvwxyz": sentence = part1 + part2.capitalize() + part3 else: sentence = part1 + part2 + part3 if sentence.find() == -1: return sentence else: return sentence
Here we replace all instances of #CAPITALISE and capitalize the next word. NOTE: Buggy as hell, as it doesn't account for words that are already capitalized. :param sentence:
17,967
def _inject_cookie_message(self, msg): if isinstance(msg, unicode): msg = msg.encode(, ) try: self.request._cookies = Cookie.SimpleCookie(msg) except: logging.warn("couldn't parse cookie string: %s",msg, exc_info=True)
Inject the first message, which is the document cookie, for authentication.
17,968
def find_family(self, pattern=r".*", flags=0, node=None): return [node for node in foundations.walkers.nodes_walker(node or self) if re.search(pattern, node.family, flags)]
Returns the Nodes from given family. :param pattern: Matching pattern. :type pattern: unicode :param flags: Matching regex flags. :type flags: int :param node: Node to start walking from. :type node: AbstractNode or AbstractCompositeNode or Object :return: Family nodes. :rtype: list
17,969
def rest_put(url, data, timeout, show_error=False): try: response = requests.put(url, headers={: , : },\ data=data, timeout=timeout) return response except Exception as exception: if show_error: print_error(exception) return None
Call rest put method
17,970
def _gen_shuffles(self): si = 0 self.shuffles = [] for song in self.loop: for i in range(song[1]): self.shuffles.append(si) si += 1
Used internally to build a list for mapping between a random number and a song index.
17,971
def write_string(self, s, codec):
    for i in range(0, len(s), self.bufsize):
        chunk = s[i:i + self.bufsize]
        buf, consumed = codec.encode(chunk)
        assert consumed == len(chunk)
        self.write(buf)
Write a string to the stream, encoding it with the given codec.
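A standalone sketch of the chunked write, using a stateless codec from the standard codecs module; codecs.lookup(...).encode returns the (bytes, consumed) pair the assertion expects. This assumes a codec that is safe to apply chunk by chunk over a str, such as UTF-8.

import codecs
import io

def write_string(stream, s, codec, bufsize=4):
    # Encode and write the string in bufsize-character chunks.
    for i in range(0, len(s), bufsize):
        chunk = s[i:i + bufsize]
        buf, consumed = codec.encode(chunk)
        assert consumed == len(chunk)
        stream.write(buf)

out = io.BytesIO()
write_string(out, "h\u00e9llo w\u00f6rld", codecs.lookup("utf-8"))
print(out.getvalue())  # b'h\xc3\xa9llo w\xc3\xb6rld'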
17,972
def bbox(self, out_crs=None): return reproject_geometry( box(*self._bounds), src_crs=self.td_pyramid.crs, dst_crs=self.pyramid.crs if out_crs is None else out_crs )
Return data bounding box. Parameters ---------- out_crs : ``rasterio.crs.CRS`` rasterio CRS object (default: CRS of process pyramid) Returns ------- bounding box : geometry Shapely geometry object
17,973
def query(self, sql, timeout=10): if not sql: raise QueryError() result = ResultQuery(*self.perform_request(**{ : , : , : { "queryType": "SQL", "query": sql }, : { : timeout } })) return result
Submit a query and return results. :param sql: string :param timeout: int :return: pydrill.client.ResultQuery
17,974
def stop(self): self._response[] = True self._response[][][] = self._response[][][] = [] return self
Send signal to stop the current stream playback
17,975
def members(self):
    all_members = []
    for page in range(1, self.max_page() + 1):
        all_members.extend(self.single_page_members(page))
    return all_members
Get a list with the information of all members in the group.
17,976
def get_cards(self, **query_params): cards = self.get_cards_json(self.base_uri, query_params=query_params) cards_list = [] for card_json in cards: cards_list.append(self.create_card(card_json)) return cards_list
Get all cards this member is attached to. Return a list of Card objects. Returns: list(Card): Return all cards this member is attached to
17,977
def Main(): argument_parser = argparse.ArgumentParser(description=( )) argument_parser.add_argument( , , dest=, action=, metavar=, default=None, help=( )) argument_parser.add_argument( , nargs=, action=, metavar=, default=None, help=) options = argument_parser.parse_args() if not options.source: print() print() argument_parser.print_help() print() return False logging.basicConfig( level=logging.INFO, format=) if options.output_file: output_writer = FileOutputWriter(options.output_file) else: output_writer = StdoutWriter() try: output_writer.Open() except IOError as exception: print(.format( exception)) print() return False return_value = True mediator = command_line.CLIVolumeScannerMediator() recursive_hasher = RecursiveHasher(mediator=mediator) try: base_path_specs = recursive_hasher.GetBasePathSpecs(options.source) if not base_path_specs: print() print() return False recursive_hasher.CalculateHashes(base_path_specs, output_writer) print() print() except errors.ScannerError as exception: return_value = False print() print(.format(exception)) except errors.UserAbort as exception: return_value = False print() print() output_writer.Close() return return_value
The main program function. Returns: bool: True if successful or False if not.
17,978
def format_attributes_json(self): attributes_json = {} for key, value in self.attributes.items(): key = utils.check_str_length(key)[0] value = _format_attribute_value(value) if value is not None: attributes_json[key] = value result = { : attributes_json } return result
Convert the Attributes object to json format.
17,979
def check(self): self._validate_settings() r = self.local_renderer r.env.alias = r.env.aliases[0] r.sudo(r.env.check_command_template)
Run inadyn from the commandline to test the configuration. To be run like: fab role inadyn.check
17,980
def execute(self, input_data): output = input_data[] output[] = {} for data in [input_data[key] for key in ViewMemoryDeep.dependencies]: for name,table in data[].iteritems(): output[].update({name: table}) return output
Execute the ViewMemoryDeep worker
17,981
def load_and_preprocess_imdb_data(n_gram=None): X_train, y_train, X_test, y_test = tl.files.load_imdb_dataset(nb_words=VOCAB_SIZE) if n_gram is not None: X_train = np.array([augment_with_ngrams(x, VOCAB_SIZE, N_BUCKETS, n=n_gram) for x in X_train]) X_test = np.array([augment_with_ngrams(x, VOCAB_SIZE, N_BUCKETS, n=n_gram) for x in X_test]) return X_train, y_train, X_test, y_test
Load IMDb data and augment with hashed n-gram features.
17,982
def available(name): * cmd = .format(name) name = __salt__[](cmd, python_shell=False) return name in get_all()
Returns ``True`` if the specified service is available, otherwise returns ``False``. We look up the name with the svcs command to get back the FMRI This allows users to use simpler service names CLI Example: .. code-block:: bash salt '*' service.available net-snmp
17,983
def drag_and_drop_by_offset(self, source, xoffset, yoffset):
    self.click_and_hold(source)
    self.move_by_offset(xoffset, yoffset)
    self.release()
    return self
Holds down the left mouse button on the source element, then moves to the target offset and releases the mouse button. :Args: - source: The element to mouse down. - xoffset: X offset to move to. - yoffset: Y offset to move to.
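A usage sketch with Selenium's ActionChains, which provides this method; the URL and element id are hypothetical.

from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.by import By

driver = webdriver.Chrome()
driver.get("https://example.com/sortable")          # hypothetical page
source = driver.find_element(By.ID, "draggable")    # hypothetical element

# Drag the element 100px right and 50px down, then release.
ActionChains(driver).drag_and_drop_by_offset(source, 100, 50).perform()
driver.quit()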
17,984
def _parse_configs(self, config): for config_dict in config: label = config_dict.keys()[0] cfg = config_dict[label] dbpath = cfg[] pattern = self._parse_dbpath(dbpath) read_preference = cfg.get(, ).upper() read_preference = self._get_read_preference(read_preference) cluster_config = { : { : cfg[], : cfg[], : read_preference, : cfg.get() }, : pattern, : label } self._clusters.append(cluster_config)
Builds a dict with information to connect to Clusters. Parses the list of configuration dictionaries passed by the user and builds an internal dict (_clusters) that holds information for creating Clients connecting to Clusters and matching database names. Args: config: A list of dictionaries containing connecting and identification information about Clusters. A dict has the following structure: {label: {host, port, read_preference, dbpath}}. Raises: Exception('No configuration provided'): no configuration provided.
17,985
def _get_ema(cls, df, column, windows): window = cls.get_only_one_positive_int(windows) column_name = .format(column, window) if len(df[column]) > 0: df[column_name] = df[column].ewm( ignore_na=False, span=window, min_periods=0, adjust=True).mean() else: df[column_name] = []
get exponential moving average :param df: data :param column: column to calculate :param windows: collection of window of exponential moving average :return: None
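A small pandas example of the same span-based EWM call; the resulting column name is illustrative since the original name template is elided above.

import pandas as pd

df = pd.DataFrame({"close": [10.0, 10.5, 11.0, 10.8, 11.2]})
window = 3
# Same call as above: span-based exponential weighting over the 'close' column.
df["close_3_ema"] = df["close"].ewm(
    ignore_na=False, span=window, min_periods=0, adjust=True).mean()
print(df)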
17,986
def update_port_statuses_cfg(self, context, port_ids, status): self._l3plugin.update_router_port_statuses(context, port_ids, status)
Update the operational statuses of a list of router ports. This is called by the Cisco cfg agent to update the status of a list of ports. :param context: contains user information :param port_ids: list of ids of all the ports for the given status :param status: PORT_STATUS_ACTIVE/PORT_STATUS_DOWN.
17,987
def create(): if not all(map(os.path.isdir, ARGS.directory)): exit() with sqlite3.connect(ARGS.database) as connection: connection.text_factory = str cursor = connection.cursor() cursor.execute() cursor.execute() for dir in ARGS.directory: cursor.executemany(, local_data(dir))
Create a new database with information about the films in the specified directory or directories.
17,988
def convert(word): if six.PY2: if isinstance(word, unicode): return word.encode() else: return word.decode().encode() else: if isinstance(word, bytes): return word.decode() return word
This method converts given `word` to UTF-8 encoding and `bytes` type for the SWIG wrapper.
17,989
def begin(self): variables_to_restore = tf.contrib.framework.get_variables_to_restore( include=self._include, exclude=self._exclude) assignment_map = {variable.name[len(self._new_model_scope):]: variable for variable in variables_to_restore if variable.name.startswith(self._new_model_scope)} assignment_map = {name.split(":")[0]: variable for name, variable in six.iteritems(assignment_map) if name.startswith(self._old_model_scope)} self._assignment_map = assignment_map tf.logging.info("restoring %d variables from checkpoint %s"%( len(assignment_map), self._checkpoint_path)) tf.train.init_from_checkpoint(self._checkpoint_path, self._assignment_map)
Load variables from checkpoint. New model variables have the following name format: new_model_scope/old_model_scope/xxx/xxx:0 To find the map of name to variable, we need to strip the new_model_scope, then match the old_model_scope and remove the suffix :0.
17,990
def has_entities(status): try: if sum(len(v) for v in status.entities.values()) > 0: return True except AttributeError: if sum(len(v) for v in status[].values()) > 0: return True return False
Returns true if a Status object has entities. Args: status: either a tweepy.Status object or a dict returned from Twitter API
17,991
def token_is_correct(self, token):
    if self.is_rus_word(token):
        return True
    elif self.ONLY_MARKS.search(token):
        return True
    elif self.END_TOKENS.search(token):
        return True
    elif token in "$^":
        return True
    return False
Whether the token is suitable for text generation. Russian words, punctuation marks, and the start/end symbols are allowed.
17,992
def _get_bound_pressure_height(pressure, bound, heights=None, interpolate=True): sort_inds = np.argsort(pressure)[::-1] pressure = pressure[sort_inds] if heights is not None: heights = heights[sort_inds] if bound.dimensionality == {: -1.0, : 1.0, : -2.0}: if bound in pressure: bound_pressure = bound if heights is not None: bound_height = heights[pressure == bound_pressure] else: bound_height = pressure_to_height_std(bound_pressure) else: if interpolate: bound_pressure = bound if heights is not None: bound_height = log_interpolate_1d(bound_pressure, pressure, heights) else: bound_height = pressure_to_height_std(bound_pressure) else: idx = (np.abs(pressure - bound)).argmin() bound_pressure = pressure[idx] if heights is not None: bound_height = heights[idx] else: bound_height = pressure_to_height_std(bound_pressure) elif bound.dimensionality == {: 1.0}: if heights is not None: if bound in heights: bound_height = bound bound_pressure = pressure[heights == bound] else: if interpolate: bound_height = bound bound_pressure = np.interp(np.atleast_1d(bound), heights, pressure).astype(bound.dtype) * pressure.units else: idx = (np.abs(heights - bound)).argmin() bound_pressure = pressure[idx] bound_height = heights[idx] else: if not (_greater_or_close(bound_pressure, np.nanmin(pressure) * pressure.units) and _less_or_close(bound_pressure, np.nanmax(pressure) * pressure.units)): raise ValueError() if heights is not None: if not (_less_or_close(bound_height, np.nanmax(heights) * heights.units) and _greater_or_close(bound_height, np.nanmin(heights) * heights.units)): raise ValueError() return bound_pressure, bound_height
Calculate the bounding pressure and height in a layer. Given pressure, optional heights, and a bound, return either the closest pressure/height or interpolated pressure/height. If no heights are provided, a standard atmosphere is assumed. Parameters ---------- pressure : `pint.Quantity` Atmospheric pressures bound : `pint.Quantity` Bound to retrieve (in pressure or height) heights : `pint.Quantity`, optional Atmospheric heights associated with the pressure levels. Defaults to using heights calculated from ``pressure`` assuming a standard atmosphere. interpolate : boolean, optional Interpolate the bound or return the nearest. Defaults to True. Returns ------- `pint.Quantity` The bound pressure and height.
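A hedged usage sketch, assuming MetPy's `units` registry is available and the helper is called from inside its own module (the leading underscore marks it as internal, so the values and the call are illustrative only):

import numpy as np
from metpy.units import units

pressure = np.array([1000., 850., 700., 500.]) * units.hPa
heights = np.array([0., 1500., 3000., 5500.]) * units.meter

# Interpolated pressure/height pair bounding 800 hPa
p_bound, z_bound = _get_bound_pressure_height(pressure, 800. * units.hPa,
                                              heights=heights, interpolate=True)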
17,993
def within_radians(self, key, point, max_distance, min_distance=None):
    self.near(key, point)
    self._add_condition(key, '$maxDistanceInRadians', max_distance)
    if min_distance is not None:
        self._add_condition(key, '$minDistanceInRadians', min_distance)
    return self
Add a query constraint that restricts results to objects whose value for the given field lies within a certain distance (in radians) of the given point. :param key: field name for the constraint :param point: the geographic point to measure from :param max_distance: maximum distance limit (radians) :param min_distance: minimum distance limit (radians) :rtype: Query
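A hedged usage sketch, assuming the LeanCloud Python SDK with a hypothetical `Place` class whose `location` field stores a GeoPoint:

import leancloud

point = leancloud.GeoPoint(39.9, 116.4)
query = leancloud.Query('Place')
# objects whose `location` lies within 0.1 radians of `point`
nearby = query.within_radians('location', point, 0.1).find()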
17,994
def mean_imls(self):
    # Midpoints of consecutive IMLs, extended by half an interval below the
    # first value (clipped at zero) and above the last value.
    return numpy.array(
        [max(0, self.imls[0] - (self.imls[1] - self.imls[0]) / 2.)] +
        [numpy.mean(pair) for pair in pairwise(self.imls)] +
        [self.imls[-1] + (self.imls[-1] - self.imls[-2]) / 2.])
Compute the mean IMLs (Intensity Measure Levels) for the given vulnerability function. :param vulnerability_function: the vulnerability function from which the IMLs (Intensity Measure Levels) are taken. :type vuln_function: :py:class:`openquake.risklib.vulnerability_function.VulnerabilityFunction`
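A worked example of the formula, using a hypothetical IML list [0.1, 0.3, 0.5]:

imls = [0.1, 0.3, 0.5]
# first:  max(0, 0.1 - (0.3 - 0.1) / 2)        -> 0.0
# middle: (0.1 + 0.3) / 2 and (0.3 + 0.5) / 2  -> 0.2, 0.4
# last:   0.5 + (0.5 - 0.3) / 2                -> 0.6
# mean_imls() therefore returns array([0.0, 0.2, 0.4, 0.6])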
17,995
def compute_checksum(self):
    if self._filename.startswith("s3://"):
        print("Warning: Did not perform client-side checksumming for file in S3. To be implemented.")
    else:
        checksumCalculator = self.ChecksumCalculator(self._filename)
        self._checksums = checksumCalculator.compute()
Calculates checksums for a given file.
17,996
def mode_run_common_obs(args, extra_args):
    loaded_obs = []
    sessions = []
    if args.session:
        for obfile in args.obsresult:
            _logger.info("session file from %r", obfile)
            with open(obfile) as fd:
                sess = yaml.load(fd)
                sessions.append(sess['session'])
    else:
        for obfile in args.obsresult:
            _logger.info("Loading observation results from %r", obfile)
            with open(obfile) as fd:
                sess = []
                for doc in yaml.load_all(fd):
                    enabled = doc.get('enabled', True)
                    docid = doc['id']
                    requirements = doc.get('requirements', {})
                    sess.append(dict(id=docid, enabled=enabled,
                                     requirements=requirements))
                    if enabled:
                        _logger.debug("load observation result with id %s", docid)
                    else:
                        _logger.debug("skip observation result with id %s", docid)
                    loaded_obs.append(doc)
                sessions.append(sess)
    if args.reqs:
        _logger.info(, args.reqs)
        with open(args.reqs, ) as fd:
            loaded_data = yaml.load(fd)
    else:
        _logger.info()
        loaded_data = {}
    if extra_args.extra_control:
        _logger.info(, extra_args.extra_control)
        loaded_data_extra = parse_as_yaml(extra_args.extra_control)
    else:
        loaded_data_extra = None
    control_format = loaded_data.get('version', 1)
    _logger.info(, control_format)
    if control_format == 1:
        _backend = process_format_version_1(loaded_obs, loaded_data,
                                            loaded_data_extra, args.profilepath)
        datamanager = DataManager(args.basedir, args.datadir, _backend)
        datamanager.workdir_tmpl = "obsid{obsid}_work"
        datamanager.resultdir_tmpl = "obsid{obsid}_results"
        datamanager.serial_format =
        datamanager.result_file =
        datamanager.task_file =
    elif control_format == 2:
        _backend = process_format_version_2(loaded_obs, loaded_data,
                                            loaded_data_extra, args.profilepath)
        datamanager = DataManager(args.basedir, args.datadir, _backend)
    else:
        print(, control_format, , args.reqs)
        sys.exit(1)
    # Collect the enabled jobs of every session
    jobs = []
    for session in sessions:
        for job in session:
            if job['enabled']:
                jobs.append(job)
    for job in jobs:
        request =
        request_params = {}
        obid = job['id']
        request_params[] = obid
        request_params["pipeline"] = args.pipe_name
        request_params["instrument_configuration"] = args.insconf
        logger_control = dict(
            default=DEFAULT_RECIPE_LOGGER,
            logfile=,
            format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
            enabled=True
        )
        request_params[] = logger_control
        task = datamanager.backend.new_task(request, request_params)
        task.request = request
        task.request_params = request_params
        task.request_runinfo[] =
        task.request_runinfo[] = __version__
        _logger.info("procesing OB with id={}".format(obid))
        # Prepare the working environment of the task
        workenv = datamanager.create_workenv(task)
        task.request_runinfo["results_dir"] = workenv.resultsdir_rel
        task.request_runinfo["work_dir"] = workenv.workdir_rel
        with working_directory(workenv.datadir):
            obsres = datamanager.backend.obsres_from_oblock_id(
                obid, configuration=args.insconf)
        _logger.debug("pipeline from CLI is %r", args.pipe_name)
        pipe_name = args.pipe_name
        obsres.pipeline = pipe_name
        recipe = datamanager.backend.search_recipe_from_ob(obsres)
        _logger.debug(, recipe.__class__)
        _logger.debug()
        recipe.intermediate_results = True
        _logger.debug()
        recipe.runinfo[] =
        recipe.runinfo[] = __version__
        recipe.runinfo[] = task.id
        recipe.runinfo[] = workenv.datadir
        recipe.runinfo[] = workenv.workdir
        recipe.runinfo[] = workenv.resultsdir
        _logger.debug()
        try:
            rinput = recipe.build_recipe_input(obsres, datamanager.backend)
        except (ValueError, numina.exceptions.ValidationError) as err:
            _logger.error("During recipe input construction")
            _logger.error("%s", err)
            sys.exit(0)
        _logger.debug()
        for key in recipe.requirements():
            v = getattr(rinput, key)
            _logger.debug("recipe requires %r, value is %s", key, v)
        for req in recipe.products().values():
            _logger.debug(, req.type.__class__.__name__, req.description)
        task.request_runinfo[] = obsres.instrument
        task.request_runinfo[] = obsres.mode
        task.request_runinfo[] = recipe.__class__.__name__
        task.request_runinfo[] = fully_qualified_name(recipe.__class__)
        task.request_runinfo[] = recipe.__version__
        if args.copy_files:
            _logger.debug()
            workenv.sane_work()
            workenv.copyfiles_stage1(obsres)
            workenv.copyfiles_stage2(rinput)
            workenv.adapt_obsres(obsres)
        completed_task = run_recipe(recipe=recipe, task=task, rinput=rinput,
                                    workenv=workenv, logger_control=logger_control)
        datamanager.store_task(completed_task)
    if args.dump_control:
        _logger.debug()
        with open(, ) as fp:
            datamanager.backend.dump(fp)
Observing-mode processing mode of numina: load observation results and requirements, build the recipe inputs, and run the recipes.
17,997
def _invert(color, **kwargs):
    col = ColorValue(color)
    args = [
        255.0 - col.value[0],
        255.0 - col.value[1],
        255.0 - col.value[2],
        col.value[3],  # opacity is left untouched
    ]
    inverted = ColorValue(args)
    return inverted
Returns the inverse (negative) of a color. The red, green, and blue values are inverted, while the opacity is left alone.
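For instance, pure red inverts to cyan while its opacity is preserved (the hex input form is an assumption about what ColorValue accepts):

inverted = _invert('#ff0000')
# channels: (255 - 255, 255 - 0, 255 - 0) -> (0, 255, 255), i.e. cyan
# the fourth component (opacity) is copied through unchanged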
17,998
def list_types_poi(self, **kwargs):
    url_args = {: util.language_code(kwargs.get('lang'))}
    result = self.make_request(, url_args)
    if not util.check_result(result):
        return False, result.get(, )
    values = util.response_list(result, )
    return True, [emtype.ParkingPoiType(**a) for a in values]
Obtain a list of families, types and categories of POI. Args: lang (str): Language code (*es* or *en*). Returns: Status boolean and parsed response (list[ParkingPoiType]), or message string in case of error.
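A hedged usage sketch, assuming an already-configured client object that exposes this method (the variable name `client` is illustrative):

ok, data = client.list_types_poi(lang='en')
if ok:
    for poi_type in data:            # list of emtype.ParkingPoiType
        print(poi_type)
else:
    print('request failed:', data)   # data is the error message string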
17,999
def force_process_ordered(self):
    for instance_id, messages in self.replicas.take_ordereds_out_of_turn():
        num_processed = 0
        for message in messages:
            self.try_processing_ordered(message)
            num_processed += 1
        logger.info(.format(self, num_processed, instance_id))
Take any messages from the replica that have been ordered and process them. This should be done rarely, e.g. before catchup starts, so a more current LedgerStatus can be sent. It can be called either 1. when the node is participating — this happens just before catchup starts so the node can have the latest ledger status, or 2. when the node is not participating but a round of catchup is about to be started — here it forces all the replica's ordered messages to be appended to the stashed ordered requests, and the stashed ordered requests are processed with appropriate checks.