Dataset columns: Unnamed: 0 (int64, 0–389k) · code (string, lengths 26–79.6k) · docstring (string, lengths 1–46.9k)
387,300
def vms(nictag):
    # Quoted strings below were lost in extraction; restored from salt
    # SmartOS module conventions.
    ret = {}
    cmd = 'nictagadm vms {0}'.format(nictag)
    res = __salt__['cmd.run_all'](cmd)
    retcode = res['retcode']
    if retcode != 0:
        ret['Error'] = res['stderr'] if 'stderr' in res else 'Failed to get list of vms.'
    else:
        ret = res['stdout'].splitlines()
    return ret
List all vms connected to nictag nictag : string name of nictag CLI Example: .. code-block:: bash salt '*' nictagadm.vms admin
387,301
def getShocks(self):
    super(self.__class__, self).getShocks()
    newborns = self.t_age == 0
    self.TranShkNow[newborns] = self.TranShkAggNow * self.wRteNow
    self.PermShkNow[newborns] = self.PermShkAggNow
    self.getUpdaters()
    pLvlErrNew = self.getpLvlError()
    self.pLvlErrNow *= pLvlErrNew
    PermShkPcvd = self.PermShkNow / pLvlErrNew
    PermShkPcvd[self.update] *= self.pLvlErrNow[self.update]
    self.pLvlErrNow[self.update] = 1.0
    self.PermShkNow = PermShkPcvd
Gets permanent and transitory shocks (combining idiosyncratic and aggregate shocks), but only consumers who update their macroeconomic beliefs this period incorporate all previously unnoticed aggregate permanent shocks. Agents correctly observe the level of all real variables (market resources, consumption, assets, etc.), but misperceive the aggregate productivity level. Parameters ---------- None Returns ------- None
387,302
def verify_checksum(file_id, pessimistic=False, chunk_size=None, throws=True,
                    checksum_kwargs=None):
    f = FileInstance.query.get(uuid.UUID(file_id))
    if pessimistic:
        f.clear_last_check()
        db.session.commit()
    f.verify_checksum(progress_callback=progress_updater,
                      chunk_size=chunk_size,
                      throws=throws,
                      checksum_kwargs=checksum_kwargs)
    db.session.commit()
Verify checksum of a file instance. :param file_id: The file ID.
387,303
def save_file(self, filename='index'):
    # The default filename and the quoted literals were lost in extraction;
    # '.html' and mode 'w' are implied by the docstring, the default is assumed.
    filename = filename + '.html'
    with open(filename, 'w') as f:
        f.write(self.htmlcontent)
    f.closed
save htmlcontent as .html file
387,304
def trades(self, cursor=None, order='asc', limit=10, sse=False):
    # Default for `order` restored by inference ("asc" or "desc" per docstring).
    return self.horizon.account_trades(
        self.address, cursor=cursor, order=order, limit=limit, sse=sse)
Retrieve the trades JSON from this instance's Horizon server. Retrieve the trades JSON response for the account associated with this :class:`Address`. :param cursor: A paging token, specifying where to start returning records from. When streaming this can be set to "now" to stream object created since your request time. :type cursor: int, str :param str order: The order in which to return rows, "asc" or "desc". :param int limit: Maximum number of records to return. :param bool sse: Use the SSE client for connecting to Horizon.
387,305
def min_or(a, b, c, d, w):
    m = (1 << (w - 1))
    while m != 0:
        if ((~a) & c & m) != 0:
            temp = (a | m) & -m
            if temp <= b:
                a = temp
                break
        elif (a & (~c) & m) != 0:
            temp = (c | m) & -m
            if temp <= d:
                c = temp
                break
        m >>= 1
    return a | c
Lower bound of result of ORing 2-intervals. :param a: Lower bound of first interval :param b: Upper bound of first interval :param c: Lower bound of second interval :param d: Upper bound of second interval :param w: bit width :return: Lower bound of ORing 2-intervals
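A quick sanity check of the bound, assuming the min_or above is in scope (interval values are hypothetical): for x in [4, 6] and y in [1, 2] at an 8-bit width, exhaustive search agrees with the computed lower bound.

lo = min_or(4, 6, 1, 2, 8)
assert lo == min(x | y for x in range(4, 7) for y in range(1, 3))
print(lo)  # 5, attained at 4 | 1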
387,306
def expand(self, basedir, config, sourcedir, targetdir, cwd):
    expanded_basedir = os.path.expanduser(basedir)
    expanded_config = os.path.expanduser(config)
    expanded_sourcedir = os.path.expanduser(sourcedir)
    expanded_targetdir = os.path.expanduser(targetdir)
    if not os.path.isabs(expanded_basedir):
        expanded_basedir = os.path.join(cwd, expanded_basedir)
    if not os.path.isabs(expanded_config):
        expanded_config = os.path.join(expanded_basedir, expanded_config)
    if not os.path.isabs(expanded_sourcedir):
        expanded_sourcedir = os.path.join(expanded_basedir, expanded_sourcedir)
    if not os.path.isabs(expanded_targetdir):
        expanded_targetdir = os.path.join(expanded_basedir, expanded_targetdir)
    expanded_basedir = os.path.normpath(expanded_basedir)
    expanded_config = os.path.normpath(expanded_config)
    expanded_sourcedir = os.path.normpath(expanded_sourcedir)
    expanded_targetdir = os.path.normpath(expanded_targetdir)
    return (expanded_basedir, expanded_config, expanded_sourcedir,
            expanded_targetdir)
Expand and normalize the given paths: expand home directories and resolve relative paths against the base directory. Args: basedir (string): Project base directory used to prepend relative paths. If empty or equal to '.', it will be filled with current directory path. config (string): Settings file path. sourcedir (string): Source directory path. targetdir (string): Compiled files target directory path. cwd (string): Current directory path to prepend base dir if empty. Returns: tuple: Expanded arguments in the same order
387,307
def set_alias(self, alias_hosted_zone_id, alias_dns_name):
    self.alias_hosted_zone_id = alias_hosted_zone_id
    self.alias_dns_name = alias_dns_name
Make this an alias resource record set
387,308
def get_trackrs(self):
    trackrs = []
    for trackr in self.state:
        trackrs.append(trackrDevice(trackr, self))
    return trackrs
Extract each Trackr device from the trackrApiInterface state. Return a list of all Trackr objects from the account.
387,309
def run(self, mod):
    if not mod.body:
        # Nothing to do.
        return
    aliases = [ast.alias(py.builtin.builtins.__name__, "@py_builtins"),
               ast.alias("dessert.rewrite", "@dessert_ar")]
    expect_docstring = True
    pos = 0
    lineno = 0
    for item in mod.body:
        if (expect_docstring and isinstance(item, ast.Expr) and
                isinstance(item.value, ast.Str)):
            doc = item.value.s
            if "PYTEST_DONT_REWRITE" in doc:
                # The module has disabled assertion rewriting.
                return
            lineno += len(doc) - 1
            expect_docstring = False
        elif (not isinstance(item, ast.ImportFrom) or item.level > 0 or
                item.module != "__future__"):
            lineno = item.lineno
            break
        pos += 1
    imports = [ast.Import([alias], lineno=lineno, col_offset=0)
               for alias in aliases]
    mod.body[pos:pos] = imports
    # Walk the tree, rewriting asserts as we go.
    nodes = [mod]
    while nodes:
        node = nodes.pop()
        for name, field in ast.iter_fields(node):
            if isinstance(field, list):
                new = []
                for i, child in enumerate(field):
                    if isinstance(child, ast.Assert):
                        new.extend(self.visit(child))
                    else:
                        new.append(child)
                        if isinstance(child, ast.AST):
                            nodes.append(child)
                setattr(node, name, new)
            elif (isinstance(field, ast.AST) and
                    not isinstance(field, ast.expr)):
                nodes.append(field)
Find all assert statements in *mod* and rewrite them.
387,310
def translate_doc(self, d, field_mapping=None, map_identifiers=None, **kwargs):
    # NB: the dict-key string literals below were lost in extraction and are
    # restored by inference from how the fields are used; exact names may differ.
    if field_mapping is not None:
        self.map_doc(d, field_mapping)
    subject = self.translate_obj(d, M.SUBJECT)
    obj = self.translate_obj(d, M.OBJECT)
    if map_identifiers is not None:
        if M.SUBJECT_CLOSURE in d:
            subject['id'] = self.map_id(subject, map_identifiers,
                                        d[M.SUBJECT_CLOSURE])
        else:
            logging.info("NO SUBJECT CLOSURE IN: " + str(d))
    if M.SUBJECT_TAXON in d:
        subject['taxon'] = self.translate_obj(d, M.SUBJECT_TAXON)
    if M.OBJECT_TAXON in d:
        obj['taxon'] = self.translate_obj(d, M.OBJECT_TAXON)
    qualifiers = []
    if M.RELATION in d and isinstance(d[M.RELATION], list):
        relation = None
        for rel in d[M.RELATION]:
            if rel.lower() == 'not':
                qualifiers.append(rel)
            else:
                relation = rel
        if relation is not None:
            d[M.RELATION] = relation
        else:
            d[M.RELATION] = None
    negated = 'not' in qualifiers
    assoc = {'id': d.get(M.ID),
             'subject': subject,
             'object': obj,
             'negated': negated,
             'relation': self.translate_obj(d, M.RELATION),
             'provided_by': self.translate_objs(d, M.SOURCE)}
    if self.invert_subject_object and assoc['relation'] is not None:
        assoc['relation']['inverse'] = True
    if len(qualifiers) > 0:
        assoc['qualifiers'] = qualifiers
    if M.OBJECT_CLOSURE in d:
        assoc['object_closure'] = d.get(M.OBJECT_CLOSURE)
    if M.IS_DEFINED_BY in d:
        if isinstance(d[M.IS_DEFINED_BY], list):
            assoc['provided_by'] = d[M.IS_DEFINED_BY]
        else:
            assoc['provided_by'] = [d[M.IS_DEFINED_BY]]
    if M.EVIDENCE_OBJECT in d:
        assoc['evidence'] = d[M.EVIDENCE_OBJECT]
        assoc['types'] = [t for t in d[M.EVIDENCE_OBJECT] if t.startswith('ECO:')]
    if self._use_amigo_schema(self.object_category):
        for f in M.AMIGO_SPECIFIC_FIELDS:
            if f in d:
                assoc[f] = d[f]
    if M.EVIDENCE_GRAPH in d:
        assoc[M.EVIDENCE_GRAPH] = json.loads(d[M.EVIDENCE_GRAPH])
    return assoc
Translate a solr document (i.e. a single result row)
387,311
def mod_watch(name, url='http://localhost:8080/manager', timeout=180):
    # Quoted strings restored from salt tomcat module conventions; the
    # default manager URL is inferred.
    msg = __salt__['tomcat.reload'](name, url, timeout)
    result = msg.startswith('OK')
    ret = {'name': name,
           'result': result,
           'changes': {name: result},
           'comment': msg}
    return ret
The tomcat watcher, called to invoke the watch command. When called, it will reload the webapp in question .. note:: This state exists to support special handling of the ``watch`` :ref:`requisite <requisites>`. It should not be called directly. Parameters for this function should be set by the state being triggered.
387,312
def pathIndex(self, path):
    if path == self.root.path:
        return QModelIndex()
    if not path.startswith(self.root.path):
        return QModelIndex()
    # Split the path into parts relative to the root.
    parts = []
    while True:
        if path == self.root.path:
            break
        head, tail = os.path.split(path)
        if head == path:
            if path:
                parts.append(path)
            break
        parts.append(tail)
        path = head
    parts.reverse()
    if parts:
        item = self.root
        count = 0
        for count, part in enumerate(parts):
            matched = False
            for child in item.children:
                if child.name == part:
                    item = child
                    matched = True
                    break
            if not matched:
                break
        if count + 1 == len(parts):
            return self.createIndex(item.row, 0, item)
    return QModelIndex()
Return index of item with *path*.
387,313
def list_asgs(access_token, subscription_id, resource_group):
    # URL path segments restored by inference from the azurerm endpoint pattern.
    endpoint = ''.join([get_rm_endpoint(),
                        '/subscriptions/', subscription_id,
                        '/resourceGroups/', resource_group,
                        '/providers/Microsoft.Network/applicationSecurityGroups',
                        '?api-version=', NETWORK_API])
    return do_get(endpoint, access_token)
Get details about the application security groups for a resource group. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. Returns: HTTP response. ASG JSON body.
387,314
def _bind_parameters(operation, parameters):
    # Restored literals: NULL for None, single-quote wrapping/escaping for strings.
    string_parameters = {}
    for (name, value) in iteritems(parameters):
        if value is None:
            string_parameters[name] = 'NULL'
        elif isinstance(value, basestring):
            string_parameters[name] = "'" + value.replace("'", "''") + "'"
        else:
            string_parameters[name] = str(value)
    return operation % string_parameters
Helper method that binds parameters to a SQL query.
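A minimal illustration of the substitution, assuming the Python 2 helpers (iteritems, basestring) resolve as in the surrounding module; the query uses pyformat-style placeholders:

sql = "SELECT * FROM users WHERE name = %(name)s AND age > %(age)s"
_bind_parameters(sql, {"name": "alice", "age": 30})
# "SELECT * FROM users WHERE name = 'alice' AND age > 30"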
387,315
def ets(self):
    r = (self.table[0, 0] + self.table[0, 1]) * \
        (self.table[0, 0] + self.table[1, 0]) / self.N
    return (self.table[0, 0] - r) / \
        (self.table[0, 0] + self.table[0, 1] + self.table[1, 0] - r)
Equitable Threat Score (Gilbert Skill Score): ETS = (a - R)/(a + b + c - R), where R = (a+b)(a+c)/N.
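A worked example of the score, using a hypothetical 2x2 contingency table (a = hits, b = false alarms, c = misses, d = correct negatives):

a, b, c, d = 50.0, 20.0, 10.0, 20.0
N = a + b + c + d                # 100.0
R = (a + b) * (a + c) / N        # hits expected by chance: 70 * 60 / 100 = 42.0
ets = (a - R) / (a + b + c - R)  # (50 - 42) / (80 - 42) ~= 0.21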
387,316
def json_2_text(inp, out, verbose=False):
    for root, dirs, filenames in os.walk(inp):
        for f in filenames:
            # Open mode restored by inference; the original literal was lost.
            log = codecs.open(os.path.join(root, f), 'r')
            j_obj = json.load(log)
            j_obj = json_format(j_obj)
            textWriter(j_obj, out, verbose)
Convert a Wikipedia article to Text object. Concatenates the sections in wikipedia file and rearranges other information so it can be interpreted as a Text object. Links and other elements with start and end positions are annotated as layers. Parameters ---------- inp: directory of parsed et.wikipedia articles in json format out: output directory of .txt files verbose: if True, prints every article title and total count of converted files if False prints every 50th count Returns ------- estnltk.text.Text The Text object.
387,317
def raise_error(error_type: str) -> None:
    try:
        error = next((v for k, v in ERROR_CODES.items() if k in error_type))
    except StopIteration:
        error = AirVisualError
    raise error(error_type)
Raise the appropriate error based on error message.
387,318
def remove_scene(self, scene_id):
    if self.state.activeSceneId == scene_id:
        err_msg = ("Requested to delete scene {sceneNum}, which is currently "
                   "active. Cannot delete active scene.").format(sceneNum=scene_id)
        logging.info(err_msg)
        return (False, 0, err_msg)
    try:
        del self.state.scenes[scene_id]
        logging.debug("Deleted scene {sceneNum}".format(sceneNum=scene_id))
    except KeyError:
        err_msg = ("Requested to delete scene {sceneNum}, which does not "
                   "exist").format(sceneNum=scene_id)
        logging.info(err_msg)
        return (False, 0, err_msg)
    sequence_number = self.zmq_publisher.publish_scene_remove(scene_id)
    logging.debug("Removed scene {sceneNum}".format(sceneNum=scene_id))
    return (True, sequence_number, "OK")
remove a scene by Scene ID
387,319
def __set_title(self, value):
    self._target.setPropertyValue(self._has_axis_title_property, True)
    target = self._get_title_target()
    # Property name restored by inference (UNO title objects expose 'String').
    target.setPropertyValue('String', text_type(value))
Sets title of this axis.
387,320
def check_process_counts(self):
    # Log-message strings restored by inference.
    LOGGER.debug('Checking minimum consumer process levels')
    for name in self.consumers:
        processes_needed = self.process_spawn_qty(name)
        if processes_needed:
            LOGGER.info('Need to spawn %i processes for %s',
                        processes_needed, name)
            self.start_processes(name, processes_needed)
Check for the minimum consumer process levels and start up new processes needed.
387,321
def check_for_errors(self):
    if not self.exceptions:
        if not self.is_closed:
            return
        # Error message restored by inference.
        why = AMQPConnectionError('connection was closed')
        self.exceptions.append(why)
    self.set_state(self.CLOSED)
    self.close()
    raise self.exceptions[0]
Check Connection for errors. :raises AMQPConnectionError: Raises if the connection encountered an error. :return:
387,322
def key_exists(hive, key, use_32bit_registry=False):
    local_hive = _to_unicode(hive)
    local_key = _to_unicode(key)
    registry = Registry()
    try:
        hkey = registry.hkeys[local_hive]
    except KeyError:
        # Error message restored by inference from salt win_reg conventions.
        raise CommandExecutionError('Invalid Hive: {0}'.format(local_hive))
    access_mask = registry.registry_32[use_32bit_registry]
    handle = None
    try:
        handle = win32api.RegOpenKeyEx(hkey, local_key, 0, access_mask)
        return True
    except pywintypes.error as exc:
        if exc.winerror == 2:
            return False
        raise
    finally:
        if handle:
            win32api.RegCloseKey(handle)
Check that the key is found in the registry. This refers to keys and not value/data pairs. To check value/data pairs, use ``value_exists`` Args: hive (str): The hive to connect to key (str): The key to check use_32bit_registry (bool): Look in the 32bit portion of the registry Returns: bool: True if exists, otherwise False Usage: .. code-block:: python import salt.utils.win_reg as reg reg.key_exists(hive='HKLM', key='SOFTWARE\\Microsoft')
387,323
def reset(self, indices=None):
    if indices is None:
        indices = np.arange(len(self._envs))
    if self._blocking:
        observs = [self._envs[index].reset() for index in indices]
    else:
        observs = [self._envs[index].reset(blocking=False) for index in indices]
        observs = [observ() for observ in observs]
    observ = np.stack(observs)
    return observ
Reset the environment and convert the resulting observation. Args: indices: The batch indices of environments to reset; defaults to all. Returns: Batch of observations.
387,324
def args(parsed_args, name=None):
    strings = parsed_args.arg_strings(name)
    files = [s for s in strings if os.path.isfile(s)]
    if files:
        streams = [open(f) for f in files]
    else:
        streams = []
    # Attribute names restored by inference from the surrounding options.
    if getattr(parsed_args, 'paste', not files):
        streams.append(clipboard_stream())
    if getattr(parsed_args, 'stdin', False):
        streams.append(sys.stdin)
    elif not streams:
        streams = [sys.stdin]
    return streams
Interpret parsed args to streams
387,325
def molmz(df, noise=10000):
    # Trace name restored by inference.
    d = ((df.values > noise) * df.columns).max(axis=1)
    return Trace(d, df.index, name='molmz')
The mz of the molecular ion.
387,326
def get_current_user(self):
    url = self.current_user_url
    result = self.get(url)
    return result
Get data from the current user endpoint
387,327
def write(gctoo, out_fname, data_null="NaN", metadata_null="-666",
          filler_null="-666", data_float_format="%.4f"):
    if not out_fname.endswith(".gct"):
        out_fname += ".gct"
    f = open(out_fname, "w")
    dims = [str(gctoo.data_df.shape[0]), str(gctoo.data_df.shape[1]),
            str(gctoo.row_metadata_df.shape[1]),
            str(gctoo.col_metadata_df.shape[1])]
    write_version_and_dims(VERSION, dims, f)
    write_top_half(f, gctoo.row_metadata_df, gctoo.col_metadata_df,
                   metadata_null, filler_null)
    write_bottom_half(f, gctoo.row_metadata_df, gctoo.data_df,
                      data_null, data_float_format, metadata_null)
    f.close()
    logger.info("GCT has been written to {}".format(out_fname))
Write a gctoo object to a gct file. Args: gctoo (gctoo object) out_fname (string): filename for output gct file data_null (string): how to represent missing values in the data (default = "NaN") metadata_null (string): how to represent missing values in the metadata (default = "-666") filler_null (string): what value to fill the top-left filler block with (default = "-666") data_float_format (string): how many decimal points to keep in representing data (default = 4 digits; None will keep all digits) Returns: None
387,328
def compile_insert(self, query, values):
    table = self.wrap_table(query.from__)
    if not isinstance(values, list):
        values = [values]
    if len(values) == 1:
        return super(SQLiteQueryGrammar, self).compile_insert(query, values)
    names = self.columnize(values[0].keys())
    columns = []
    for column in values[0].keys():
        columns.append("%s AS %s" % (self.get_marker(), self.wrap(column)))
    columns = [", ".join(columns)] * len(values)
    return "INSERT INTO %s (%s) SELECT %s" % (
        table, names, " UNION ALL SELECT ".join(columns))
Compile insert statement into SQL :param query: A QueryBuilder instance :type query: QueryBuilder :param values: The insert values :type values: dict or list :return: The compiled insert :rtype: str
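For intuition, the grammar turns a multi-row insert into a SELECT/UNION ALL chain, since older SQLite versions lack multi-row VALUES. A hypothetical two-row insert on columns (email, name), assuming get_marker() yields "?" and identifiers are double-quoted, would compile to:

# INSERT INTO "users" ("email", "name")
#     SELECT ? AS "email", ? AS "name"
#     UNION ALL SELECT ? AS "email", ? AS "name"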
387,329
def to_fmt(self):
    # Punctuation literals restored approximately; the exact glyphs were lost
    # in extraction.
    params = ""
    txt = fmt.sep(" ", [])
    name = self.show_name()
    if name != "":
        txt.lsdata.append(name)
    tparams = []
    if self.tparams is not None:
        tparams = list(self.tparams)
        if self.variadic:
            tparams.append('...')
        params = '(' + ", ".join(tparams) + ')'
    txt.lsdata.append(': ' + params)
    txt.lsdata.append('-> ' + self.tret)
    return txt
Return an Fmt representation for pretty-printing
387,330
def get_phenotype(self, individual_id):
    phenotype = 0
    if individual_id in self.individuals:
        phenotype = self.individuals[individual_id].phenotype
    return phenotype
Return the phenotype of an individual If individual does not exist return 0 Arguments: individual_id (str): Represents the individual id Returns: int : Integer that represents the phenotype
387,331
def read(database, table, key):
    with database.snapshot() as snapshot:
        # SQL text restored by inference from the YCSB-style Spanner sample.
        result = snapshot.execute_sql(
            'SELECT u.* FROM %s u WHERE u.id="%s"' % (table, key))
        for row in result:
            key = row[0]
            for i in range(NUM_FIELD):
                field = row[i + 1]
Does a single read operation.
387,332
def parse_yaml(self, y):
    # YAML key names restored by inference from rtsprofile conventions.
    self.connector_id = y['connectorId']
    self.name = y['name']
    if 'transMethod' in y:
        self.trans_method = y['transMethod']
    else:
        self.trans_method = ''
    if RTS_EXT_NS_YAML + 'comment' in y:
        self.comment = y[RTS_EXT_NS_YAML + 'comment']
    else:
        self.comment = ''
    if RTS_EXT_NS_YAML + 'visible' in y:
        visible = y[RTS_EXT_NS_YAML + 'visible']
        if visible == True or visible == 'true' or visible == 'True':
            self.visible = True
        else:
            self.visible = False
    if 'sourceServicePort' not in y:
        raise InvalidServicePortConnectorNodeError
    self.source_service_port = \
        TargetPort().parse_yaml(y['sourceServicePort'])
    if 'targetServicePort' not in y:
        raise InvalidServicePortConnectorNodeError
    self.target_service_port = \
        TargetPort().parse_yaml(y['targetServicePort'])
    if RTS_EXT_NS_YAML + 'properties' in y:
        for p in y[RTS_EXT_NS_YAML + 'properties']:
            if 'value' in p:
                value = p['value']
            else:
                value = None
            self._properties[p['name']] = value
    return self
Parse a YAML specification of a service port connector into this object.
387,333
def trigger(self, id, **kwargs):
    # Swagger-generated client boilerplate; key names restored by inference.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('callback'):
        return self.trigger_with_http_info(id, **kwargs)
    else:
        (data) = self.trigger_with_http_info(id, **kwargs)
        return data
Triggers a build of a specific Build Configuration This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.trigger(id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int id: Build Configuration id (required) :param str callback_url: Optional Callback URL :param bool temporary_build: Is it a temporary build or a standard build? :param bool force_rebuild: DEPRECATED: Use RebuildMode. :param bool build_dependencies: Should we build also dependencies of this BuildConfiguration? :param bool keep_pod_on_failure: Should we keep the build container running, if the build fails? :param bool timestamp_alignment: Should we add a timestamp during the alignment? Valid only for temporary builds. :param str rebuild_mode: Rebuild Modes: FORCE: always rebuild the configuration; EXPLICIT_DEPENDENCY_CHECK: check if any of user defined dependencies has been update; IMPLICIT_DEPENDENCY_CHECK: check if any captured dependency has been updated; :return: BuildRecordSingleton If the method is called asynchronously, returns the request thread.
387,334
def lock_file(path, maxdelay=.1, lock_cls=LockFile, timeout=10.0):
    lock = lock_cls(path)
    max_t = time.time() + timeout
    while True:
        if time.time() >= max_t:
            raise LockTimeout("Timeout waiting to acquire lock for %s" % (path,))
        try:
            lock.acquire(timeout=0)
        except AlreadyLocked:
            sleep(maxdelay)
        else:
            try:
                yield lock
                break
            finally:
                lock.release()
Cooperative file lock. Uses `lockfile.LockFile` polling under the hood. `maxdelay` defines the interval between individual polls.
387,335
def write_markdown_to_file(self, f):
    print("---", file=f)
    print("---", file=f)
    print("<!-- This file is machine generated: DO NOT EDIT! -->", file=f)
    print("", file=f)
    # A title print statement was garbled in extraction at this point.
    if self._prefix:
        print(self._prefix, file=f)
    print("[TOC]", file=f)
    print("", file=f)
    if self._module is not None:
        self._write_module_markdown_to_file(f, self._module)
Prints this library to file `f`. Args: f: File to write to. Returns: Dictionary of documented members.
387,336
def show_replace(self):
    self.show(hide_replace=False)
    for widget in self.replace_widgets:
        widget.show()
Show replace widgets
387,337
def extension(network, session, version, scn_extension, start_snapshot,
              end_snapshot, **kwargs):
    # String literals restored by inference from eTraGo naming conventions.
    if version is None:
        ormcls_prefix = 'EgoGridPfHvExtension'
    else:
        ormcls_prefix = 'EgoPfHvExtension'
    scenario = NetworkScenario(session,
                               version=version,
                               prefix=ormcls_prefix,
                               method=kwargs.get('method', 'lopf'),
                               start_snapshot=start_snapshot,
                               end_snapshot=end_snapshot,
                               scn_name='extension_' + scn_extension)
    network = scenario.build_network(network)
    network.links.loc[network.links.efficiency == 1.0, 'efficiency'] = -1
    # Set coordinates for the newly added buses.
    extension_buses = network.buses[
        network.buses.scn_name == 'extension_' + scn_extension]
    for idx, row in extension_buses.iterrows():
        wkt_geom = to_shape(row['geom'])
        network.buses.loc[idx, 'x'] = wkt_geom.x
        network.buses.loc[idx, 'y'] = wkt_geom.y
    return network
Function that adds an additional network to the existing network container. The new network can include every PyPSA-component (e.g. buses, lines, links). To connect it to the existing network, transformers are needed. All components and its timeseries of the additional scenario need to be inserted in the fitting 'model_draft.ego_grid_pf_hv_extension_' table. The scn_name in the tables have to be labelled with 'extension_' + scn_name (e.g. 'extension_nep2035'). Until now, the tables include three additional scenarios: 'nep2035_confirmed': all new lines and needed transformers planned in the 'Netzentwicklungsplan 2035' (NEP2035) that have been confirmed by the Bundesnetzagentur (BNetzA) 'nep2035_b2': all new lines and needed transformers planned in the NEP 2035 in the scenario 2035 B2 'BE_NO_NEP 2035': DC-lines and transformers to connect the upcoming electrical neighbours Belgium and Norway Generation, loads and its timeseries in Belgium and Norway for scenario 'NEP 2035' Parameters ----- network : The existing network container (e.g. scenario 'NEP 2035') session : session-data overlay_scn_name : Name of the additional scenario (WITHOUT 'extension_') start_snapshot, end_snapshot: Simulation time Returns ------ network : Network container including existing and additional network
387,338
def get_document_models():
    mappings = {}
    for i in get_index_names():
        for m in get_index_models(i):
            key = "%s.%s" % (i, m._meta.model_name)
            mappings[key] = m
    return mappings
Return dict of index.doc_type: model.
387,339
def parse_seconds(value):
    svalue = str(value)
    colons = svalue.count(':')
    if colons == 2:
        hours, minutes, seconds = [int(v) for v in svalue.split(':')]
    elif colons == 1:
        hours, minutes = [int(v) for v in svalue.split(':')]
        seconds = 0
    elif colons == 0:
        hours = 0
        minutes = 0
        seconds = int(svalue)
    else:
        # Error message restored by inference.
        raise ValueError('Unsupported format')
    return Seconds.from_hms(hours, minutes, seconds)
Parse string into Seconds instances. Handled formats: HH:MM:SS, HH:MM, SS.
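Usage sketch (assuming Seconds.from_hms(h, m, s) returns a Seconds instance, as the code implies):

parse_seconds('01:30:15')   # 1 h, 30 min, 15 s
parse_seconds('01:30')      # 1 h, 30 min, 0 s
parse_seconds(90)           # 90 s; the value is str()-ed first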
387,340
def get_resource_uri(self, obj):
    # Route-name pattern and attribute names restored by inference.
    url = '%s:%s-detail' % (
        self.api_version,
        getattr(self, 'resource_name', self.Meta.model._meta.model_name))
    return reverse(url,
                   request=self.context.get('request', None),
                   kwargs={self.lookup_field: getattr(obj, self.lookup_field)})
Return the uri of the given object.
387,341
def get_orderbook(self):
    if self in self.parent.books.keys():
        return self.parent.books[self]
    return {
        "bid": [0], "bidsize": [0],
        "ask": [0], "asksize": [0]
    }
Get orderbook for the instrument :Returns: orderbook : dict orderbook dict for the instrument
387,342
def _match_data_to_parameter(cls, data):
    in_value = data["in"]
    for cls in [QueryParameter, HeaderParameter, FormDataParameter,
                PathParameter, BodyParameter]:
        if in_value == cls.IN:
            return cls
    return None
find the appropriate parameter for a parameter field
387,343
def absent(email, profile="splunk", **kwargs):
    # Dict keys and message strings restored by inference from salt state
    # conventions.
    user_identity = kwargs.get('name')
    ret = {
        'name': user_identity,
        'changes': {},
        'result': None,
        'comment': 'User {0} is absent.'.format(user_identity)
    }
    target = __salt__['splunk.get_user'](email, profile=profile)
    if not target:
        ret['comment'] = 'User {0} is not present'.format(user_identity)
        ret['result'] = True
        return ret
    if __opts__['test']:
        ret['comment'] = "User {0} is all set to be deleted".format(user_identity)
        ret['result'] = None
        return ret
    result = __salt__['splunk.delete_user'](email, profile=profile)
    if result:
        ret['comment'] = 'Deleted user {0}'.format(user_identity)
        ret['changes'].setdefault('old', 'User {0} exists'.format(user_identity))
        ret['changes'].setdefault('new', 'User {0} deleted'.format(user_identity))
        ret['result'] = True
    else:
        ret['comment'] = 'Failed to delete user {0}'.format(user_identity)
        ret['result'] = False
    return ret
Ensure a splunk user is absent .. code-block:: yaml ensure example test user 1: splunk.absent: - email: '[email protected]' - name: 'exampleuser' The following parameters are required: email This is the email of the user in splunk name This is the splunk username used to identify the user.
387,344
def save_new_environment(name, datadir, srcdir, ckan_version,
                         deploy_target=None, always_prod=False):
    # File names, section and option strings restored by inference from the
    # datacats config layout.
    with open(datadir + '/.version', 'w') as f:
        f.write('2')
    cp = ConfigParser.SafeConfigParser()
    cp.read(srcdir + '/.datacats-environment')
    if not cp.has_section('datacats'):
        cp.add_section('datacats')
    cp.set('datacats', 'name', name)
    cp.set('datacats', 'ckan_version', ckan_version)
    if deploy_target:
        if not cp.has_section('deploy'):
            cp.add_section('deploy')
        cp.set('deploy', 'target', deploy_target)
    if always_prod:
        cp.set('datacats', 'always_prod', 'true')
    with open(srcdir + '/.datacats-environment', 'w') as config:
        cp.write(config)
    save_srcdir_location(datadir, srcdir)
Save an environment's configuration to the source dir and data dir
387,345
def count_curves(self, keys=None, alias=None):
    if keys is None:
        keys = [k for k, v in self.data.items() if isinstance(v, Curve)]
    else:
        keys = utils.flatten_list(keys)
    return len(list(filter(None, [self.get_mnemonic(k, alias=alias)
                                  for k in keys])))
Counts the number of curves in the well that will be selected with the given key list and the given alias dict. Used by Project's curve table.
387,346
def save_as(self, new_filename):
    xfile._save_file(self._filename, self._datasourceTree, new_filename)
Save our file with the name provided. Args: new_filename: New name for the workbook file. String. Returns: Nothing.
387,347
def wipe_cfg_vals_from_git_cfg(*cfg_opts):
    for cfg_key_suffix in cfg_opts:
        # The f-string body was lost in extraction; the key prefix is inferred.
        cfg_key = f'cherry-picker.{cfg_key_suffix}'
        cmd = "git", "config", "--local", "--unset-all", cfg_key
        subprocess.check_call(cmd, stderr=subprocess.STDOUT)
Remove a set of options from Git config.
387,348
def hkeys(self, name, key_start, key_end, limit=10):
    limit = get_positive_integer('limit', limit)
    return self.execute_command('hkeys', name, key_start, key_end, limit)
Return a list of the top ``limit`` keys between ``key_start`` and ``key_end`` in hash ``name`` Similar to **Redis.HKEYS** .. note:: The range is (``key_start``, ``key_end``]. The ``key_start`` isn't in the range, but ``key_end`` is. :param string name: the hash name :param string key_start: The lower bound(not included) of keys to be returned, empty string ``''`` means -inf :param string key_end: The upper bound(included) of keys to be returned, empty string ``''`` means +inf :param int limit: number of elements will be returned. :return: a list of keys :rtype: list >>> ssdb.hkeys('hash_1', 'a', 'g', 10) ['b', 'c', 'd', 'e', 'f', 'g'] >>> ssdb.hkeys('hash_2', 'key ', 'key4', 3) ['key1', 'key2', 'key3'] >>> ssdb.hkeys('hash_1', 'f', '', 10) ['g'] >>> ssdb.hkeys('hash_2', 'keys', '', 10) []
387,349
def get_string_from_data(self, offset, data):
    s = self.get_bytes_from_data(offset, data)
    # Truncate at the first NUL terminator (the byte literal was lost in
    # extraction; a NUL is implied by "ASCII string").
    end = s.find(b'\x00')
    if end >= 0:
        s = s[:end]
    return s
Get an ASCII string from data.
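The extraction stops at the first NUL byte; a standalone illustration of the same logic:

data = b'KERNEL32.dll\x00\x00junk'
end = data.find(b'\x00')
print(data[:end] if end >= 0 else data)  # b'KERNEL32.dll'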
387,350
def _add_encoded(self, encoded):
    if self.public_key != encoded.public_key:
        raise ValueError("Attempted to add numbers encoded against "
                         "different public keys!")
    a, b = self, encoded
    # Align exponents before adding.
    if a.exponent > b.exponent:
        a = self.decrease_exponent_to(b.exponent)
    elif a.exponent < b.exponent:
        b = b.decrease_exponent_to(a.exponent)
    encrypted_scalar = a.public_key.raw_encrypt(b.encoding, 1)
    sum_ciphertext = a._raw_add(a.ciphertext(False), encrypted_scalar)
    return EncryptedNumber(a.public_key, sum_ciphertext, a.exponent)
Returns E(a + b), given self=E(a) and b. Args: encoded (EncodedNumber): an :class:`EncodedNumber` to be added to `self`. Returns: EncryptedNumber: E(a + b), calculated by encrypting b and taking the product of E(a) and E(b) modulo :attr:`~PaillierPublicKey.n` ** 2. Raises: ValueError: if scalar is out of range or precision.
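The alignment step can be illustrated outside the cryptosystem: two fixed-point encodings (mantissa, exponent) in a base-16 system must share an exponent before their mantissas may be added. A minimal sketch with made-up numbers:

BASE = 16
def decrease_exponent_to(mantissa, exponent, new_exp):
    # Multiplying the mantissa preserves the represented value.
    assert new_exp <= exponent
    return mantissa * BASE ** (exponent - new_exp), new_exp

m1, e1 = 3, -1                              # 3 * 16**-1
m2, e2 = 5, -3                              # 5 * 16**-3
m1, e1 = decrease_exponent_to(m1, e1, e2)   # (768, -3)
print(m1 + m2, e1)                          # 773 -3, i.e. 773 * 16**-3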
387,351
def msg_curse(self, args=None, max_width=None):
    # NB: the message-format strings below were lost in extraction and are
    # restored approximately from Glances' processcount plugin.
    ret = []
    if args.disable_process:
        msg = "PROCESSES DISABLED (press 'z' to display)"
        ret.append(self.curse_add_line(msg))
        return ret
    if not self.stats:
        return ret
    if glances_processes.process_filter is not None:
        msg = 'Processes filter:'
        ret.append(self.curse_add_line(msg, "TITLE"))
        msg = ' {} '.format(glances_processes.process_filter)
        if glances_processes.process_filter_key is not None:
            msg += 'on column {} '.format(glances_processes.process_filter_key)
        ret.append(self.curse_add_line(msg, "FILTER"))
        msg = "('ENTER' to edit, 'E' to reset)"
        ret.append(self.curse_add_line(msg))
        ret.append(self.curse_new_line())
    msg = 'TASKS'
    ret.append(self.curse_add_line(msg, "TITLE"))
    other = self.stats['total']
    msg = ' {}'.format(self.stats['total'])
    ret.append(self.curse_add_line(msg))
    if 'thread' in self.stats:
        msg = ' ({} thr),'.format(self.stats['thread'])
        ret.append(self.curse_add_line(msg))
    if 'running' in self.stats:
        other -= self.stats['running']
        msg = ' {} run,'.format(self.stats['running'])
        ret.append(self.curse_add_line(msg))
    if 'sleeping' in self.stats:
        other -= self.stats['sleeping']
        msg = ' {} slp,'.format(self.stats['sleeping'])
        ret.append(self.curse_add_line(msg))
    msg = ' {} oth '.format(other)
    ret.append(self.curse_add_line(msg))
    try:
        sort_human = self.sort_for_human[glances_processes.sort_key]
    except KeyError:
        sort_human = glances_processes.sort_key
    if glances_processes.auto_sort:
        msg = 'sorted automatically'
        ret.append(self.curse_add_line(msg))
        msg = ' by {}'.format(sort_human)
    else:
        msg = 'sorted by {}'.format(sort_human)
    ret.append(self.curse_add_line(msg))
    return ret
Return the dict to display in the curse interface.
387,352
def _release_info():
    # URL and header values restored by inference from the PyPI JSON API.
    pypi_url = 'https://pypi.python.org/pypi/fastfood/json'
    headers = {'Accept': 'application/json'}
    request = urllib.Request(pypi_url, headers=headers)
    response = urllib.urlopen(request).read().decode()
    data = json.loads(response)
    return data
Check latest fastfood release info from PyPI.
387,353
def sync_matchers(saltenv=None, refresh=False, extmod_whitelist=None,
                  extmod_blacklist=None):
    ret = _sync('matchers', saltenv, extmod_whitelist, extmod_blacklist)
    if refresh:
        refresh_modules()
    return ret
.. versionadded:: 2019.2.0 Sync matcher modules from ``salt://_matchers`` to the minion saltenv : The fileserver environment from which to sync. To sync from more than one environment, pass a comma-separated list. If not passed, then all environments configured in the :ref:`top files <states-top>` will be checked for matchers to sync. If no top files are found, then the ``base`` environment will be synced. refresh : True If ``True``, refresh the available execution modules on the minion. This refresh will be performed even if no new matcher modules are synced. Set to ``False`` to prevent this refresh. extmod_whitelist : None comma-separated list of modules to sync extmod_blacklist : None comma-separated list of modules to blacklist based on type CLI Examples: .. code-block:: bash salt '*' saltutil.sync_matchers salt '*' saltutil.sync_matchers saltenv=base,dev
387,354
def pvariance(data, mu=None):
    if iter(data) is data:
        data = list(data)
    n = len(data)
    if n < 1:
        raise StatisticsError('pvariance requires at least one data point')
    ss = _ss(data, mu)
    return ss / n
Return the population variance of ``data``. data should be an iterable of Real-valued numbers, with at least one value. The optional argument mu, if given, should be the mean of the data. If it is missing or None, the mean is automatically calculated. Use this function to calculate the variance from the entire population. To estimate the variance from a sample, the ``variance`` function is usually a better choice. If you have already calculated the mean of the data, you can pass it as the optional second argument to avoid recalculating it: This function does not check that ``mu`` is actually the mean of ``data``. Giving arbitrary values for ``mu`` may lead to invalid or impossible results. Decimals and Fractions are supported:
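The examples from the stdlib statistics documentation (which this function mirrors) show the supported types:

>>> pvariance([0.0, 0.25, 0.25, 1.25, 1.5, 1.75, 2.75, 3.25])
1.25
>>> from decimal import Decimal as D
>>> pvariance([D("27.5"), D("30.25"), D("30.25"), D("34.5"), D("41.75")])
Decimal('24.815')
>>> from fractions import Fraction as F
>>> pvariance([F(1, 4), F(5, 4), F(1, 2)])
Fraction(13, 72)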
387,355
def _create_update_tracking_related_event(instance):
    # Format strings and attribute names restored by inference.
    events = {}
    for field, related_fields in instance._tracked_related_fields.items():
        if not isinstance(instance._meta.get_field(field), ManyToManyField):
            if isinstance(instance._meta.get_field(field), ForeignKey):
                # Compare the raw id to avoid a database query.
                value = getattr(instance, '{0}_id'.format(field))
            else:
                value = getattr(instance, field)
            if instance._original_fields[field] != value:
                for related_field in related_fields:
                    events.setdefault(related_field, []).append(field)
    for related_field, fields in events.items():
        try:
            related_instances = getattr(instance, related_field[1])
        except ObjectDoesNotExist:
            continue
        if hasattr(related_instances, 'all'):
            related_instances = related_instances.all()
        else:
            related_instances = [related_instances]
        for related_instance in related_instances:
            event = _create_event(related_instance, UPDATE)
            for field in fields:
                fieldname = '{0}__{1}'.format(related_field[0], field)
                _create_tracked_field(
                    event, instance, field, fieldname=fieldname
                )
Create a TrackingEvent and TrackedFieldModification for an UPDATE event for each related model.
387,356
def transformer_base_v1():
    hparams = common_hparams.basic_params1()
    hparams.norm_type = "layer"
    hparams.hidden_size = 512
    hparams.batch_size = 4096
    hparams.max_length = 256
    hparams.clip_grad_norm = 0.
    hparams.optimizer_adam_epsilon = 1e-9
    hparams.learning_rate_schedule = "legacy"
    hparams.learning_rate_decay_scheme = "noam"
    hparams.learning_rate = 0.1
    hparams.learning_rate_warmup_steps = 4000
    hparams.initializer_gain = 1.0
    hparams.num_hidden_layers = 6
    hparams.initializer = "uniform_unit_scaling"
    hparams.weight_decay = 0.0
    hparams.optimizer_adam_beta1 = 0.9
    hparams.optimizer_adam_beta2 = 0.98
    hparams.num_sampled_classes = 0
    hparams.label_smoothing = 0.1
    hparams.shared_embedding_and_softmax_weights = True
    hparams.symbol_modality_num_shards = 16
    hparams.add_hparam("filter_size", 2048)
    hparams.add_hparam("num_encoder_layers", 0)
    hparams.add_hparam("num_decoder_layers", 0)
    hparams.add_hparam("num_heads", 8)
    hparams.add_hparam("attention_key_channels", 0)
    hparams.add_hparam("attention_value_channels", 0)
    hparams.add_hparam("ffn_layer", "dense_relu_dense")
    hparams.add_hparam("parameter_attention_key_channels", 0)
    hparams.add_hparam("parameter_attention_value_channels", 0)
    hparams.add_hparam("attention_dropout", 0.0)
    hparams.add_hparam("attention_dropout_broadcast_dims", "")
    hparams.add_hparam("relu_dropout", 0.0)
    hparams.add_hparam("relu_dropout_broadcast_dims", "")
    hparams.add_hparam("pos", "timing")
    hparams.add_hparam("nbr_decoder_problems", 1)
    hparams.add_hparam("proximity_bias", False)
    hparams.add_hparam("causal_decoder_self_attention", True)
    hparams.add_hparam("use_pad_remover", True)
    hparams.add_hparam("self_attention_type", "dot_product")
    hparams.add_hparam("conv_first_kernel", 3)
    hparams.add_hparam("attention_variables_3d", False)
    hparams.add_hparam("use_target_space_embedding", True)
    hparams.add_hparam("moe_overhead_train", 1.0)
    hparams.add_hparam("moe_overhead_eval", 2.0)
    hparams.moe_num_experts = 16
    hparams.moe_loss_coef = 1e-3
    hparams.add_hparam("overload_eval_metric_name", "")
    hparams.add_hparam("unidirectional_encoder", False)
    hparams.add_hparam("hard_attention_k", 0)
    return hparams
Set of hyperparameters.
387,357
def str_is_well_formed(xml_str):
    try:
        str_to_etree(xml_str)
    except xml.etree.ElementTree.ParseError:
        return False
    else:
        return True
Args: xml_str : str DataONE API XML doc. Returns: bool: **True** if XML doc is well formed.
387,358
def resolve_addresses(self, node):
    # Property/message strings restored by inference from SystemRDL semantics.
    prop_alignment = self.alignment_stack[-1]
    if prop_alignment is None:
        prop_alignment = 1
    prev_node = None
    for child_node in node.children(skip_not_present=False):
        if not isinstance(child_node, AddressableNode):
            continue
        if child_node.inst.addr_offset is not None:
            prev_node = child_node
            continue
        if node.env.chk_implicit_addr:
            node.env.msg.message(
                node.env.chk_implicit_addr,
                "Address offset of component '%s' is not explicitly set"
                % child_node.inst.inst_name,
                child_node.inst.inst_src_ref
            )
        alloc_alignment = child_node.inst.addr_align
        if alloc_alignment is None:
            alloc_alignment = 1
        if self.addressing_mode_stack[-1] == rdltypes.AddressingType.compact:
            if isinstance(child_node, RegNode):
                mode_alignment = child_node.get_property('accesswidth') // 8
            else:
                mode_alignment = 1
        elif self.addressing_mode_stack[-1] == rdltypes.AddressingType.regalign:
            mode_alignment = child_node.size
            mode_alignment = roundup_pow2(mode_alignment)
        elif self.addressing_mode_stack[-1] == rdltypes.AddressingType.fullalign:
            mode_alignment = child_node.total_size
            mode_alignment = roundup_pow2(mode_alignment)
        else:
            raise RuntimeError
        alignment = max(prop_alignment, alloc_alignment, mode_alignment)
        if prev_node is None:
            next_offset = 0
        else:
            next_offset = prev_node.inst.addr_offset + prev_node.total_size
        child_node.inst.addr_offset = roundup_to(next_offset, alignment)
        prev_node = child_node

    def get_child_sort_key(inst):
        if not isinstance(inst, comp.AddressableComponent):
            return -1
        else:
            return inst.addr_offset

    node.inst.children.sort(key=get_child_sort_key)
Resolve addresses of children of Addrmap and Regfile components
387,359
def GetTSKFileByPathSpec(self, path_spec):
    inode = getattr(path_spec, 'inode', None)
    location = getattr(path_spec, 'location', None)
    if inode is not None:
        tsk_file = self._tsk_file_system.open_meta(inode=inode)
    elif location is not None:
        tsk_file = self._tsk_file_system.open(location)
    else:
        raise errors.PathSpecError(
            'Path specification missing inode and location.')
    return tsk_file
Retrieves the SleuthKit file object for a path specification. Args: path_spec (PathSpec): path specification. Returns: pytsk3.File: TSK file. Raises: PathSpecError: if the path specification is missing inode and location.
387,360
def get_rdataset(self, rdclass, rdtype, covers=dns.rdatatype.NONE,
                 create=False):
    try:
        rds = self.find_rdataset(rdclass, rdtype, covers, create)
    except KeyError:
        rds = None
    return rds
Get an rdataset matching the specified properties in the current node. None is returned if an rdataset of the specified type and class does not exist and I{create} is not True. @param rdclass: The class of the rdataset @type rdclass: int @param rdtype: The type of the rdataset @type rdtype: int @param covers: The covered type. @type covers: int @param create: If True, create the rdataset if it is not found. @type create: bool @rtype: dns.rdataset.Rdataset object or None
387,361
def info(torrent_path):
    # Output labels and colors restored approximately; the originals were lost.
    my_torrent = Torrent.from_file(torrent_path)
    size = my_torrent.total_size
    click.secho('Name: %s' % my_torrent.name, fg='blue')
    click.secho('Files:')
    for file_tuple in my_torrent.files:
        click.secho(file_tuple.name)
    click.secho('Hash: %s' % my_torrent.info_hash, fg='blue')
    click.secho('Size: %s (%s)' % (humanize_filesize(size), size), fg='blue')
    click.secho('Magnet: %s' % my_torrent.get_magnet(), fg='yellow')
Print out information from .torrent file.
387,362
def fromutc(self, dt):
    if dt.tzinfo is not None and dt.tzinfo is not self:
        # Error message restored by inference.
        raise ValueError('fromutc: dt.tzinfo is not self')
    return (dt + self._utcoffset).replace(tzinfo=self)
See datetime.tzinfo.fromutc
387,363
def get_rate_limits():
    client = get_rates_api()
    with catch_raise_api_exception():
        data, _, headers = client.rates_limits_list_with_http_info()
    ratelimits.maybe_rate_limit(client, headers)
    return {
        k: RateLimitsInfo.from_dict(v)
        for k, v in six.iteritems(data.to_dict().get("resources", {}))
    }
Retrieve status (and optionally) version from the API.
387,364
def monitoring_problems(self):
    # Status keys/values restored by inference from Alignak conventions.
    if self.app.type != 'scheduler':
        return {'_status': u'ERR',
                '_message': u"This service is only available for a scheduler daemon"}
    res = self.identity()
    res.update(self.app.get_monitoring_problems())
    return res
Get Alignak scheduler monitoring status Returns an object with the scheduler livesynthesis and the known problems :return: scheduler live synthesis :rtype: dict
387,365
def cublasZhpmv(handle, uplo, n, alpha, AP, x, incx, beta, y, incy):
    status = _libcublas.cublasZhpmv_v2(
        handle, _CUBLAS_FILL_MODE[uplo], n,
        ctypes.byref(cuda.cuDoubleComplex(alpha.real, alpha.imag)),
        int(AP), int(x), incx,
        ctypes.byref(cuda.cuDoubleComplex(beta.real, beta.imag)),
        int(y), incy)
    cublasCheckStatus(status)
Matrix-vector product for Hermitian-packed matrix.
387,366
def run_compute(self, compute=None, model=None, detach=False, times=None,
                **kwargs):
    # NB: several string literals and intermediate statements of this function
    # were lost in extraction; gaps are marked with `...` and restored strings
    # are inferred from the surrounding PHOEBE code.
    if isinstance(detach, str):
        # detach is the server location: temporarily enter client mode,
        # submit the job, then leave client mode.
        self.as_client(server=detach)
        self.run_compute(compute=compute, model=model, time=time, **kwargs)
        self.as_client(False)
        return self.get_model(model)
    if 'protomesh' in kwargs.keys():
        raise ValueError("protomesh is no longer a valid option")
    if 'pbmesh' in kwargs.keys():
        raise ValueError("pbmesh is no longer a valid option")
    if model is None:
        model = 'latest'
    if model in self.models:
        logger.warning("overwriting model: {}".format(model))
        self.remove_model(model)
    self._check_label(model)
    if isinstance(times, float) or isinstance(times, int):
        times = [times]
    if compute is None:
        computes = self.get_compute(**kwargs).computes
        if len(computes) == 0:
            ...
    elif isinstance(compute, str):
        computes = [compute]
    else:
        computes = compute
    for k in parameters._meta_fields_filter:
        if k in kwargs.keys():
            dump = kwargs.pop(k)
    datasets = []
    ...
    if detach:
        logger.warning("overriding time is not supported within detach - ignoring")
        script_fname = "_{}.py".format(jobid)
        f = open(script_fname, 'w')
        f.write("import os; os.environ['PHOEBE_ENABLE_PLOTTING'] = 'FALSE'; "
                "os.environ['PHOEBE_ENABLE_SYMPY'] = 'FALSE'; "
                "os.environ['PHOEBE_ENABLE_ONLINE_PASSBANDS'] = 'FALSE';\n")
        f.write("import phoebe; import json\n")
        f.write("bdict = json.loads(\"\"\"{}\"\"\")\n".format(json.dumps(self.to_json())))
        f.write("b = phoebe.Bundle(bdict)\n")
        compute_kwargs = kwargs.items() + [('compute', compute), ('model', model)]
        compute_kwargs_string = ','.join(
            ["{}={}".format(k, "'{}'".format(v) if isinstance(v, str) else v)
             for k, v in compute_kwargs])
        f.write("model_ps = b.run_compute({})\n".format(compute_kwargs_string))
        f.write("model_ps.save('_{}.out', incl_uniqueid=True)\n".format(jobid))
        f.close()
        script_fname = os.path.abspath(script_fname)
        cmd = mpi.detach_cmd.format(script_fname)
        subprocess.call(cmd, shell=True)
        job_param = JobParameter(self,
                                 location=os.path.dirname(script_fname),
                                 status_method='exists',
                                 retrieve_method='loadjson',
                                 uniqueid=jobid)
        metawargs = {'context': 'model', 'model': model}
        self._attach_params([job_param], **metawargs)
        if isinstance(detach, str):
            self.save(detach)
        if not detach:
            return job_param.attach()
        else:
            logger.info("detaching from run_compute. Call get_model('{}')"
                        ".attach() to re-attach".format(model))
        return job_param
    for compute in computes:
        computeparams = self.get_compute(compute=compute)
        if not computeparams.kind:
            raise KeyError("could not recognize backend from compute: {}".format(compute))
        logger.info("running {} backend to create '{}' model".format(
            computeparams.kind, model))
        compute_class = getattr(backends, '{}Backend'.format(computeparams.kind.title()))
        metawargs = {'compute': compute, 'model': model, 'context': 'model'}
        params = compute_class().run(self, compute, times=times, **kwargs)
        if computeparams.kind == 'phoebe':  # backend name inferred; lost in extraction
            # Handle exposure-time oversampling for applicable datasets.
            for dataset in params.datasets:
                if len(self.filter(dataset=dataset, qualifier='exptime')):
                    exptime = self.get_value(qualifier='exptime', dataset=dataset,
                                             context='dataset', unit=u.d)
                    if exptime > 0:
                        if self.get_value(qualifier='fti_method', dataset=dataset,
                                          compute=compute, context='compute',
                                          **kwargs) == 'oversample':
                            times_ds = self.get_value(qualifier='times',
                                                      dataset=dataset,
                                                      context='dataset')
                            fti_oversample = self.get_value(
                                qualifier='fti_oversample', dataset=dataset,
                                compute=compute, context='compute',
                                check_visible=False, **kwargs)
                            fluxes = np.zeros(times_ds.shape)
                            ...
    return self.get_model(model)
Run a forward model of the system on the enabled dataset using a specified set of compute options. To attach and set custom values for compute options, including choosing which backend to use, see: * :meth:`add_compute` To define the dataset types and times at which the model should be computed see: * :meth:`add_dataset` To disable or enable existing datasets see: * :meth:`enable_dataset` * :meth:`disable_dataset` :parameter str compute: (optional) name of the compute options to use. If not provided or None, run_compute will use an existing set of attached compute options if only 1 exists. If more than 1 exist, then compute becomes a required argument. If no compute options exist, then this will use default options and create and attach a new set of compute options with a default label. :parameter str model: (optional) name of the resulting model. If not provided this will default to 'latest'. NOTE: existing models with the same name will be overwritten - including 'latest' :parameter bool detach: [EXPERIMENTAL] whether to detach from the computation run, or wait for computations to complete. If detach is True, see :meth:`get_model` and :meth:`phoebe.parameters.parameters.JobParameter` for details on how to check the job status and retrieve the results. Alternatively, you can provide the server location (host and port) as a string to detach and the bundle will temporarily enter client mode, submit the job to the server, and leave client mode. The resulting :meth:`phoebe.parameters.parameters.JobParameter` will then contain the necessary information to pull the results from the server at anytime in the future. :parameter list times: [EXPERIMENTAL] override the times at which to compute the model. NOTE: this only (temporarily) replaces the time array for datasets with times provided (ie empty time arrays are still ignored). So if you attach a rv to a single component, the model will still only compute for that single component. ALSO NOTE: this option is ignored if detach=True (at least for now). :parameter **kwargs: any values in the compute options to temporarily override for this single compute run (parameter values will revert after run_compute is finished) :return: :class:`phoebe.parameters.parameters.ParameterSet` of the newly-created model containing the synthetic data.
387,367
def get(key, default=-1):
    if isinstance(key, int):
        return OptionNumber(key)
    if key not in OptionNumber._member_map_:
        extend_enum(OptionNumber, key, default)
    return OptionNumber[key]
Backport support for original codes.
387,368
def can_see_members(self, user):
    if self.privacy_policy == PrivacyPolicy.PUBLIC:
        return True
    elif self.privacy_policy == PrivacyPolicy.MEMBERS:
        return self.is_member(user) or self.is_admin(user)
    elif self.privacy_policy == PrivacyPolicy.ADMINS:
        return self.is_admin(user)
Determine if given user can see other group members. :param user: User to be checked. :returns: True or False.
387,369
def assemble_oligos(dna_list, reference=None):
    # NB: most of this function body was lost in extraction; only the surviving
    # fragments are shown, re-indented, with gaps marked by `...`.
    # Find, for each oligo, a unique 3'-end binding partner on the assembly.
    match_3 = [bind_unique(seq, dna_list, right=True)
               for i, seq in enumerate(dna_list)]
    flip = False
    ...
    current_index, matchlen = zipped[last_index][0]
    next_oligo = dna_list[current_index].to_ds()
    flip = True
    next_oligo = next_oligo[(matchlen - 1):]
    ...
    return assembly
Given a list of DNA sequences, assemble into a single construct. :param dna_list: List of DNA sequences - they must be single-stranded. :type dna_list: coral.DNA list :param reference: Expected sequence - once assembly completed, this will be used to reorient the DNA (assembly could potentially occur from either side of a linear DNA construct if oligos are in a random order). If this fails, an AssemblyError is raised. :type reference: coral.DNA :raises: AssemblyError if it can't assemble for any reason. :returns: A single assembled DNA sequence :rtype: coral.DNA
387,370
def bar(self, width, **_):
    width -= self._width_offset
    self._position += self._direction
    # Bounce at either edge.
    if self._position <= 0 and self._direction < 0:
        self._position = 0
        self._direction = 1
    elif self._position > width:
        self._position = width - 1
        self._direction = -1
    final_bar = (
        self.CHAR_LEFT_BORDER +
        self.CHAR_EMPTY * self._position +
        self.CHAR_ANIMATED +
        self.CHAR_EMPTY * (width - self._position) +
        self.CHAR_RIGHT_BORDER
    )
    return final_bar
Returns the completed progress bar. Every time this is called the animation moves. Positional arguments: width -- the width of the entire bar (including borders).
387,371
def taskfile_user_data(file_, role):
    if role == QtCore.Qt.DisplayRole or role == QtCore.Qt.EditRole:
        return file_.user.username
Return the data for user :param file_: the file that holds the data :type file_: :class:`jukeboxcore.djadapter.models.File` :param role: item data role :type role: QtCore.Qt.ItemDataRole :returns: data for the user :rtype: depending on role :raises: None
387,372
def _get_requirement_attr(self, attr, path):
    for req_file in self.requirements:
        if path.strip("/") == req_file.path.strip("/"):
            return getattr(req_file, attr)
    return getattr(self, attr)
Gets the attribute for a given requirement file in path :param attr: string, attribute :param path: string, path :return: The attribute for the requirement, or the global default
387,373
def update(self, environments):
    # Payload key and URI pattern restored by inference from
    # networkapi-client conventions.
    data = {'environments_vip': environments}
    environments_ids = [str(env.get('id')) for env in environments]
    uri = 'api/v3/environment-vip/%s/' % ';'.join(environments_ids)
    return super(ApiEnvironmentVip, self).put(uri, data)
Method to update environments vip :param environments vip: List containing environments vip desired to be updated :return: None
387,374
def dropKey(self, key):
    result = []
    for row in self.table:
        result.append(internal.remove_member(row, key))
    self.table = result
    return self
Drop an attribute/element/key-value pair from all the dictionaries. If the dictionary key does not exist in a particular dictionary, then that dictionary is left unchanged. Side effect: if the key is a number and it matches a list (interpreted as a dictionary), it will cause the "keys" to shift just as a list would be expected to. Example of use: >>> test = [ ... {"name": "Jim", "age": 18, "income": 93000, "wigs": 68 }, ... {"name": "Larry", "age": 18, "wigs": [3, 2, 9]}, ... {"name": "Joe", "age": 20, "income": 15000, "wigs": [1, 2, 3]}, ... {"name": "Jim", "age": 29, "zim": {"zam": "99"} }, ... {"name": "Bill", "age": 19, "income": 29000 }, ... ] >>> print PLOD(test).dropKey("income").returnString() [ {age: 18, name: 'Jim' , wigs: 68, zim: None }, {age: 18, name: 'Larry', wigs: [3, 2, 9], zim: None }, {age: 20, name: 'Joe' , wigs: [1, 2, 3], zim: None }, {age: 29, name: 'Jim' , wigs: None , zim: {'zam': '99'}}, {age: 19, name: 'Bill' , wigs: None , zim: None } ] .. versionadded:: 0.1.2 :param key: The dictionary key (or cascading list of keys point to final key) that should be removed. :returns: self
387,375
def fit(self, X, y=None, init=None):
    self.fit_transform(X, init=init)
    return self
Computes the position of the points in the embedding space Parameters ---------- X : array, shape=[n_samples, n_features], or [n_samples, n_samples] \ if dissimilarity='precomputed' Input data. init : {None or ndarray, shape (n_samples,)}, optional If None, randomly chooses the initial configuration if ndarray, initialize the SMACOF algorithm with this array.
387,376
def owned_expansions(self):
    owned = {}
    for el in self.expansion_locations:
        def is_near_to_expansion(t):
            return t.position.distance_to(el) < self.EXPANSION_GAP_THRESHOLD
        th = next((x for x in self.townhalls if is_near_to_expansion(x)), None)
        if th:
            owned[el] = th
    return owned
List of expansions owned by the player.
387,377
def annihilate(predicate: tuple, stack: tuple) -> tuple:
    extra = tuple(filter(lambda x: x not in predicate, stack))
    head = reduce(lambda x, y: y if y in predicate else x, stack, None)
    return extra + (head,) if head else extra
Squash and reduce the input stack. Removes the elements of input that match predicate and only keeps the last match at the end of the stack.
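Two quick calls showing the behavior (inputs are hypothetical): matching elements are squashed to the single last occurrence, appended after the survivors.

annihilate(('a', 'b'), ('x', 'a', 'y', 'b'))  # ('x', 'y', 'b')
annihilate(('a', 'b'), ('x', 'y'))            # ('x', 'y') -- no match, no head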
387,378
def followingPrefix(prefix):
    # Array typecode restored by inference (unsigned bytes).
    prefixBytes = array('B', prefix)
    changeIndex = len(prefixBytes) - 1
    while changeIndex >= 0 and prefixBytes[changeIndex] == 0xff:
        changeIndex = changeIndex - 1
    if changeIndex < 0:
        return None
    newBytes = array('B', prefix[0:changeIndex + 1])
    newBytes[changeIndex] = newBytes[changeIndex] + 1
    return newBytes.tostring()
Returns a String that sorts just after all Strings beginning with a prefix
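Example outputs (byte strings chosen for illustration): the last non-0xFF byte is incremented and everything after it is dropped; an all-0xFF prefix has no successor.

followingPrefix(b'abc')       # b'abd'
followingPrefix(b'a\xff')     # b'b'
followingPrefix(b'\xff\xff')  # None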
387,379
def set_circuit_breakers(mv_grid, mode='load', debug=False):
    # NB: config keys, the mode strings, and the tail of this function were
    # lost in extraction; restored values are inferred and the truncation is
    # marked with `...`.
    cos_phi_load = cfg_ding0.get('assumptions', 'cos_phi_load')
    cos_phi_feedin = cfg_ding0.get('assumptions', 'cos_phi_gen')
    for ring, circ_breaker in zip(mv_grid.rings_nodes(include_root_node=False),
                                  mv_grid.circuit_breakers()):
        nodes_peak_load = []
        nodes_peak_generation = []
        for node in ring:
            if isinstance(node, LVStationDing0):
                nodes_peak_load.append(node.peak_load / cos_phi_load)
                nodes_peak_generation.append(
                    node.peak_generation / cos_phi_feedin)
            elif isinstance(node, CableDistributorDing0):
                # Add up load and generation of the subtree hanging off this
                # cable distributor.
                nodes_subtree = mv_grid.graph_nodes_from_subtree(node)
                nodes_subtree_peak_load = 0
                nodes_subtree_peak_generation = 0
                for node_subtree in nodes_subtree:
                    if isinstance(node_subtree, LVStationDing0):
                        nodes_subtree_peak_load += \
                            node_subtree.peak_load / cos_phi_load
                        nodes_subtree_peak_generation += \
                            node_subtree.peak_generation / cos_phi_feedin
                    if isinstance(node_subtree, GeneratorDing0):
                        nodes_subtree_peak_generation += \
                            node_subtree.capacity / cos_phi_feedin
                nodes_peak_load.append(nodes_subtree_peak_load)
                nodes_peak_generation.append(nodes_subtree_peak_generation)
            else:
                raise ValueError('Ring node has invalid type.')
        if mode == 'load':
            node_peak_data = nodes_peak_load
        elif mode == 'generation':
            ...
Calculates the optimal position of a circuit breaker on all routes of mv_grid, adds and connects them to graph. Args ---- mv_grid: MVGridDing0 Description#TODO debug: bool, defaults to False If True, information is printed during process Notes ----- According to planning principles of MV grids, a MV ring is run as two strings (half-rings) separated by a circuit breaker which is open at normal operation [#]_, [#]_. Assuming a ring (route which is connected to the root node at either sides), the optimal position of a circuit breaker is defined as the position (virtual cable) between two nodes where the conveyed current is minimal on the route. Instead of the peak current, the peak load is used here (assuming a constant voltage). If a ring is dominated by loads (peak load > peak capacity of generators), only loads are used for determining the location of circuit breaker. If generators are prevailing (peak load < peak capacity of generators), only generator capacities are considered for relocation. The core of this function (calculation of the optimal circuit breaker position) is the same as in ding0.grid.mv_grid.models.Route.calc_circuit_breaker_position but here it is 1. applied to a different data type (NetworkX Graph) and it 2. adds circuit breakers to all rings. The re-location of circuit breakers is necessary because the original position (calculated during routing with method mentioned above) shifts during the connection of satellites and therefore it is no longer valid. References ---------- .. [#] X. Tao, "Automatisierte Grundsatzplanung von Mittelspannungsnetzen", Dissertation, 2006 .. [#] FGH e.V.: "Technischer Bericht 302: Ein Werkzeug zur Optimierung der Störungsbeseitigung für Planung und Betrieb von Mittelspannungsnetzen", Tech. rep., 2008
387,380
def extract_run_id(key):
    filename = key.split('/')[-2]          # e.g. 'run=2012-12-11-01-31-33'
    run_id = filename.lstrip('run=')
    try:
        datetime.strptime(run_id, '%Y-%m-%d-%H-%M-%S')
        return key
    except ValueError:
        return None
Extract date part from run id Arguments: key - full key name, such as shredded-archive/run=2012-12-11-01-31-33/ (trailing slash is required) >>> extract_run_id('shredded-archive/run=2012-12-11-01-11-33/') 'shredded-archive/run=2012-12-11-01-11-33/' >>> extract_run_id('shredded-archive/run=2012-12-11-01-11-33') >>> extract_run_id('shredded-archive/run=2012-13-11-01-11-33/')
387,381
def set_activate_user_form(self, card_id, **kwargs):
    kwargs['card_id'] = card_id
    # Endpoint path restored by inference from the WeChat membercard API.
    return self._post(
        'card/membercard/activateuserform/set',
        data=kwargs
    )
Set the fields shown on the membership-card activation form. For details see https://mp.weixin.qq.com/wiki?t=resource/res_main&id=mp1451025283, section "6 激活会员卡" -> "6.2 一键激活" -> "步骤二:设置开卡字段接口" ("Step two: set activation form fields"). Example parameters: { "card_id": "pbLatjnrwUUdZI641gKdTMJzHGfc", "service_statement": { "name": "会员守则", "url": "https://www.qq.com" }, "bind_old_card": { "name": "老会员绑定", "url": "https://www.qq.com" }, "required_form": { "can_modify": false, "rich_field_list": [ { "type": "FORM_FIELD_RADIO", "name": "兴趣", "values": [ "钢琴", "舞蹈", "足球" ] }, { "type": "FORM_FIELD_SELECT", "name": "喜好", "values": [ "郭敬明", "韩寒", "南派三叔" ] }, { "type": "FORM_FIELD_CHECK_BOX", "name": "职业", "values": [ "赛车手", "旅行家" ] } ], "common_field_id_list": [ "USER_FORM_INFO_FLAG_MOBILE" ] }, "optional_form": { "can_modify": false, "common_field_id_list": [ "USER_FORM_INFO_FLAG_LOCATION", "USER_FORM_INFO_FLAG_BIRTHDAY" ], "custom_field_list": [ "喜欢的电影" ] } } Valid common_field_id_list values are listed in the constants `wechatpy.constants.UserFormInfoFlag`. :param card_id: card (coupon) ID :param kwargs: other optional parameters; see the WeChat documentation
387,382
def fcat(*fs):
    items = list()
    for f in fs:
        if isinstance(f, boolfunc.Function):
            items.append(f)
        elif isinstance(f, farray):
            items.extend(f.flat)
        else:
            raise TypeError("expected Function or farray")
    return farray(items)
Concatenate a sequence of farrays. The variadic *fs* input is a homogeneous sequence of functions or arrays.
387,383
def artifact_filename(self):
    def maybe_component(component):
        return '-{}'.format(component) if component else ''
    # NOTE: the exact format string was lost in extraction; the layout below
    # is inferred from the keyword arguments and the maven-style convention.
    return '{org}-{name}{rev}{classifier}.{ext}'.format(
        org=self.org,
        name=self.name,
        rev=maybe_component(self.rev),
        classifier=maybe_component(self.classifier),
        ext=self.ext)
Returns the canonical maven-style filename for an artifact pointed at by this coordinate. :API: public :rtype: string
387,384
def set_presence(self, state, status={}, priority=0):
    if not isinstance(priority, numbers.Integral):
        raise TypeError(
            "invalid priority: got {}, expected integer".format(
                type(priority)
            )
        )

    if not isinstance(state, aioxmpp.PresenceState):
        raise TypeError(
            "invalid state: got {}, expected aioxmpp.PresenceState".format(
                type(state),
            )
        )

    if isinstance(status, str):
        new_status = {None: status}
    else:
        new_status = dict(status)
    new_priority = int(priority)

    emit_state_event = self._state != state
    emit_overall_event = (
        emit_state_event or
        self._priority != new_priority or
        self._status != new_status
    )

    self._state = state
    self._status = new_status
    self._priority = new_priority

    if emit_state_event:
        self.on_presence_state_changed()
    if emit_overall_event:
        self.on_presence_changed()

    return self.resend_presence()
Change the presence broadcast by the client.

:param state: New presence state to broadcast
:type state: :class:`aioxmpp.PresenceState`
:param status: New status information to broadcast
:type status: :class:`dict` or :class:`str`
:param priority: New priority for the resource
:type priority: :class:`int`
:return: Stanza token of the presence stanza or :data:`None` if the presence is unchanged or the stream is not connected.
:rtype: :class:`~.stream.StanzaToken`

If the client is currently connected, the new presence is broadcast immediately.

`status` must be either a string or something which can be passed to the :class:`dict` constructor. If it is a string, it is wrapped into a dict using ``{None: status}``. The mapping must map :class:`~.LanguageTag` objects (or :data:`None`) to strings. The information will be used to generate internationalised presence status information. If you do not need internationalisation, simply use the string version of the argument.
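A sketch of a typical call, assuming `presence_server` stands in for the service instance this method belongs to (e.g. a summoned aioxmpp.PresenceServer):

import aioxmpp

presence_server.set_presence(
    aioxmpp.PresenceState(available=True, show=aioxmpp.PresenceShow.DND),
    status={None: "in a meeting"},   # the None key carries the default language
    priority=10,
)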
387,385
def is_ancestor_of_bank(self, id_, bank_id):
    if self._catalog_session is not None:
        return self._catalog_session.is_ancestor_of_catalog(id_=id_, catalog_id=bank_id)
    return self._hierarchy_session.is_ancestor(id_=id_, ancestor_id=bank_id)
Tests if an ``Id`` is an ancestor of a bank.

arg:    id (osid.id.Id): an ``Id``
arg:    bank_id (osid.id.Id): the ``Id`` of a bank
return: (boolean) - ``true`` if this ``id`` is an ancestor of ``bank_id``, ``false`` otherwise
raise:  NotFound - ``bank_id`` is not found
raise:  NullArgument - ``bank_id`` is ``null``
raise:  OperationFailed - unable to complete request
raise:  PermissionDenied - authorization failure

*compliance: mandatory -- This method must be implemented.*
*implementation notes*: If ``id`` is not found, return ``false``.
387,386
def get_clusters_representation(chromosome, count_clusters=None):
    if count_clusters is None:
        count_clusters = ga_math.calc_count_centers(chromosome)
    clusters = [[] for _ in range(count_clusters)]
    for _idx_data in range(len(chromosome)):
        clusters[chromosome[_idx_data]].append(_idx_data)
    return clusters
Convert chromosome to cluster representation: chromosome : [0, 1, 1, 0, 2, 3, 3] clusters: [[0, 3], [1, 2], [4], [5, 6]]
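The mapping can be demonstrated without the ga_math dependency by passing count_clusters explicitly; a dependency-free equivalent of the loop above:

chromosome = [0, 1, 1, 0, 2, 3, 3]
clusters = [[] for _ in range(4)]
for idx, cluster_id in enumerate(chromosome):
    clusters[cluster_id].append(idx)
assert clusters == [[0, 3], [1, 2], [4], [5, 6]]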
387,387
def get_sections_2d_nts(self, sortby=None):
    sections_2d_nts = []
    for section_name, hdrgos_actual in self.get_sections_2d():
        hdrgo_nts = self.gosubdag.get_nts(hdrgos_actual, sortby=sortby)
        sections_2d_nts.append((section_name, hdrgo_nts))
    return sections_2d_nts
Get the high GO IDs that are actually used to group the current set of GO IDs, as (section name, GO namedtuple list) pairs.
387,388
def check_key(data_object, key, cardinal=False):
    itype = (int, np.int32, np.int64)
    if not isinstance(key, itype + (slice, tuple, list, np.ndarray)):
        raise KeyError("Unknown key type {} for key {}".format(type(key), key))
    keys = data_object.index.values
    if cardinal and data_object._cardinal is not None:
        keys = data_object[data_object._cardinal[0]].unique()
    elif isinstance(key, itype) and key in keys:
        key = list(sorted(data_object.index.values[key]))
    elif isinstance(key, itype) and key < 0:
        key = list(sorted(data_object.index.values[key]))
    elif isinstance(key, itype):
        key = [key]
    elif isinstance(key, slice):
        key = list(sorted(data_object.index.values[key]))
    elif isinstance(key, (tuple, list, pd.Index)) and not all(k in keys for k in key):
        # builtin all() instead of the original np.all(), which is always
        # truthy when handed a generator argument
        key = list(sorted(data_object.index.values[key]))
    return key
Normalize an index key (integer, slice, tuple, list, or array) into a list of index values, by matching values or resolving positionals against the object's index.
387,389
def text(self, path, wholetext=False, lineSep=None):
    self._set_opts(wholetext=wholetext, lineSep=lineSep)
    if isinstance(path, basestring):
        return self._df(self._jreader.text(path))
    else:
        raise TypeError("path can be only a single string")
Loads a text file stream and returns a :class:`DataFrame` whose schema starts with a string column named "value", followed by partitioned columns if there are any. The text files must be encoded as UTF-8.

By default, each line in the text file is a new row in the resulting DataFrame.

.. note:: Evolving.

:param path: string for the input path.
:param wholetext: if true, read each file from the input path as a single row.
:param lineSep: defines the line separator that should be used for parsing. If None is set, it covers all ``\\r``, ``\\r\\n`` and ``\\n``.

>>> text_sdf = spark.readStream.text(tempfile.mkdtemp())
>>> text_sdf.isStreaming
True
>>> "value" in str(text_sdf.schema)
True
387,390
def field_function(self, type_code, func_name):
    # NOTE: the two permitted names were stripped during extraction;
    # 'to_sql' / 'to_python' below is a hypothetical reconstruction.
    assert func_name in ('to_sql', 'to_python')
    name = "field_%s_%s" % (type_code.lower(), func_name)
    return getattr(self, name)
Return the field function, i.e. the bound method named ``field_<type_code>_<func_name>``.
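The getattr-based dispatch above means supporting a new type code is just adding a method; a hypothetical, self-contained illustration (the class and method names are invented):

class Converter:
    def field_int_decode(self, raw):
        return int(raw)

    def field_function(self, type_code, func_name):
        # look up field_<type_code>_<func_name> on the instance
        name = "field_%s_%s" % (type_code.lower(), func_name)
        return getattr(self, name)

conv = Converter()
assert conv.field_function("INT", "decode")("42") == 42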
387,391
def script(name,
           source,
           saltenv='base',
           args=None,
           template=None,
           exec_driver=None,
           stdin=None,
           python_shell=True,
           output_loglevel='debug',
           ignore_retcode=False,
           use_vt=False,
           keep_env=None):
    # NOTE: the docstring fragments that leaked into this body during
    # extraction were removed; they duplicate the docstring below. The
    # 'base'/'debug' defaults follow Salt conventions and the docstring.
    return _script(name,
                   source,
                   saltenv=saltenv,
                   args=args,
                   template=template,
                   exec_driver=exec_driver,
                   stdin=stdin,
                   python_shell=python_shell,
                   output_loglevel=output_loglevel,
                   ignore_retcode=ignore_retcode,
                   use_vt=use_vt,
                   keep_env=keep_env)
Run :py:func:`cmd.script <salt.modules.cmdmod.script>` within a container

.. note:: While the command is run within the container, it is initiated from the host. Therefore, the PID in the return dict is from the host, not from the container.

name
    Container name or ID
source
    Path to the script. Can be a local path on the Minion or a remote file from the Salt fileserver.
args
    A string containing additional command-line options to pass to the script.
template : None
    Templating engine to use on the script before running.
exec_driver : None
    If not passed, the execution driver will be detected as described :ref:`above <docker-execution-driver>`.
stdin : None
    Standard input to be used for the script
output_loglevel : debug
    Level at which to log the output from the script. Set to ``quiet`` to suppress logging.
use_vt : False
    Use SaltStack's utils.vt to stream output to console.
keep_env : None
    If not passed, only a sane default PATH environment variable will be set. If ``True``, all environment variables from the container's host will be kept. Otherwise, a comma-separated list (or Python list) of environment variable names can be passed, and those environment variables will be kept.

CLI Example:

.. code-block:: bash

    salt myminion docker.script mycontainer salt://docker_script.py
    salt myminion docker.script mycontainer salt://scripts/runme.sh 'arg1 arg2 "arg 3"'
    salt myminion docker.script mycontainer salt://scripts/runme.sh stdin='one\\ntwo\\nthree\\nfour\\nfive\\n' output_loglevel=quiet
387,392
def _extract_models(cls, apis):
    # NOTE: the dict-key string literals were stripped during extraction;
    # the Swagger 1.2 field names below are a best-guess reconstruction.
    models = set()
    for api in apis:
        for op in api.get('operations', []):
            models.add(op['type'])
            for param in op.get('parameters', []):
                models.add(param.get('type', 'void'))
            for msg in op['responseMessages']:
                models.add(msg.get('responseModel', 'void'))
    models = map(lambda m: Model.name_to_cls[m], models)
    ret = {}
    for model in models:
        if model.native_type:
            continue
        obj = model.schema.copy()
        obj['id'] = model.name
        ret[model.name] = obj
    return ret
A helper function to extract all models used by the given APIs.
387,393
async def disconnect(self, requested=True):
    if self.state == PlayerState.DISCONNECTING:
        return

    await self.update_state(PlayerState.DISCONNECTING)

    if not requested:
        log.debug(
            f"Forcing player disconnect for guild {self.channel.guild.id}"
            f" due to player manager request."
        )

    guild_id = self.channel.guild.id
    voice_ws = self.node.get_voice_ws(guild_id)
    if not voice_ws.closed:
        await voice_ws.voice_state(guild_id, None)

    await self.node.destroy_guild(guild_id)
    await self.close()
    self.manager.remove_player(self)
Disconnects this player from its voice channel.
387,394
def download(self, age=None, metallicity=None, outdir=None, force=False):
    try:
        from urllib.error import URLError
    except ImportError:
        from urllib2 import URLError

    if age is None:
        age = float(self.age)
    if metallicity is None:
        metallicity = float(self.metallicity)
    if outdir is None:
        outdir = './'   # NOTE: the original default was stripped; cwd assumed

    basename = self.params2filename(age, metallicity)
    outfile = os.path.join(outdir, basename)

    if os.path.exists(outfile) and not force:
        try:
            self.verify(outfile, self.survey, age, metallicity)
            logger.info("Found %s; skipping..." % (outfile))
            return
        except Exception as e:
            msg = "Overwriting corrupted %s..." % (outfile)
            logger.warn(msg)
            os.remove(outfile)

    mkdir(outdir)

    self.print_info(age, metallicity)
    self.query_server(outfile, age, metallicity)

    if not os.path.exists(outfile):
        # NOTE: the original error message was stripped during extraction
        raise RuntimeError("Download failed: %s" % outfile)

    try:
        self.verify(outfile, self.survey, age, metallicity)
    except Exception as e:
        msg = "Output file is corrupted."
        logger.error(msg)
        msg = "Removing %s." % outfile
        logger.info(msg)
        os.remove(outfile)
        raise e

    return outfile
Check valid parameter range and download isochrones from: http://stev.oapd.inaf.it/cgi-bin/cmd
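A hedged usage sketch; `iso` stands in for an instance of the isochrone class this method belongs to, with the survey and the age/metallicity grids defined by the subclass:

outfile = iso.download(age=12.0, metallicity=0.0002,
                       outdir='isochrones', force=False)
# outfile is the verified local path, e.g. 'isochrones/<basename>'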
387,395
def to_copy(self, column_names=None, selection=None, strings=True, virtual=False, selections=True):
    if column_names:
        column_names = _ensure_strings_from_expressions(column_names)
    df = vaex.from_items(*self.to_items(column_names=column_names, selection=selection, strings=strings, virtual=False))
    if virtual:
        for name, value in self.virtual_columns.items():
            df.add_virtual_column(name, value)
    if selections:
        for key, value in self.selection_histories.items():
            if key != FILTER_SELECTION_NAME:
                df.selection_histories[key] = list(value)
        for key, value in self.selection_history_indices.items():
            if key != FILTER_SELECTION_NAME:
                df.selection_history_indices[key] = value
    df.functions.update(self.functions)
    df.copy_metadata(self)
    return df
Return a copy of the DataFrame. If selection is None, the data itself is not copied; the copy only holds a reference to it.

:param column_names: list of column names to copy; when None, DataFrame.get_column_names(strings=strings, virtual=virtual) is used
:param selection: {selection}
:param strings: argument passed to DataFrame.get_column_names when column_names is None
:param virtual: argument passed to DataFrame.get_column_names when column_names is None
:param selections: copy selections to the new DataFrame
:return: DataFrame
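A short example; per the docstring, the copy references rather than duplicates the underlying data:

import vaex

df = vaex.from_arrays(x=[1.0, 2.0, 3.0])
df['y'] = df.x ** 2                # virtual column
copy = df.to_copy(virtual=True)    # carries the virtual column over
print(copy.get_column_names())     # ['x', 'y'] (pass virtual=True on older vaex)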
387,396
def _get_button_label(self):
    # NOTE: the prompt string was stripped during extraction; the label
    # below is a guess.
    dlg = wx.TextEntryDialog(self, _("Button label:"))
    if dlg.ShowModal() == wx.ID_OK:
        label = dlg.GetValue()
    else:
        label = ""
    dlg.Destroy()
    return label
Prompts the user for a button label and returns it as a string (empty if the dialog is cancelled)
387,397
def set_outputs(self, *outputs):
    self._outputs = OrderedDict()
    for output in outputs:
        out_name = None
        type_or_serialize = None
        # each output may be a bare name or a (name[, type_or_serialize])
        # tuple; the isinstance arguments were reversed in the original
        if isinstance(output, (list, tuple)):
            if len(output) == 1:
                out_name = output[0]
            elif len(output) == 2:
                out_name = output[0]
                type_or_serialize = output[1]
            else:
                raise ValueError("invalid output format")
        else:
            out_name = output
        self.add_output(out_name, type_or_serialize)
Set the outputs of the view
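Given the branches above, each element may be a bare name, a 1-tuple, or a (name, type_or_serialize) pair; a hypothetical call (the `view` object and output names are assumptions):

view.set_outputs(
    'token',               # bare name
    ('user_id',),          # 1-tuple: name only
    ('expires_at', str),   # (name, type_or_serialize)
)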
387,398
def _PrintEventLabelsCounter(
        self, event_labels_counter, session_identifier=None):
    # NOTE: the string literals were stripped during extraction and are
    # restored here following plaso's pinfo tool conventions.
    if not event_labels_counter:
        return

    title = 'Event tags generated per label'
    if session_identifier:
        title = '{0:s}: {1:s}'.format(title, session_identifier)

    table_view = views.ViewsFactory.GetTableView(
        self._views_format_type,
        column_names=['Label', 'Number of event tags'],
        title=title)

    for key, value in sorted(event_labels_counter.items()):
        if key == 'total':
            continue
        table_view.AddRow([key, value])

    try:
        total = event_labels_counter['total']
    except KeyError:
        total = 'N/A'
    table_view.AddRow(['Total', total])

    table_view.Write(self._output_writer)
Prints the event labels counter.

Args:
    event_labels_counter (collections.Counter): number of event tags per label.
    session_identifier (Optional[str]): session identifier.
387,399
def create_contract(self, price=0, address=None, caller=None, balance=0, init=None, gas=None):
    expected_address = self.create_account(self.new_address(sender=caller))

    if address is None:
        address = expected_address
    elif caller is not None and address != expected_address:
        raise EthereumError(
            f"Error: contract created from address {hex(caller)} with nonce {self.get_nonce(caller)} "
            f"was expected to be at address {hex(expected_address)}, but create_contract was called "
            f"with address={hex(address)}")

    self.start_transaction('CREATE', address, price, init, caller, balance, gas=gas)
    self._process_pending_transaction()
    return address
Create a contract account. Sends a transaction to initialize the contract.

:param address: the address of the new account, if known. If omitted, a new address will be generated as closely to the Yellow Paper as possible.
:param balance: the initial balance of the account in Wei
:param init: the initialization code of the contract

The way that the Solidity compiler expects the constructor arguments to be passed is by appending the arguments to the byte code produced by the Solidity compiler. The arguments are formatted as defined in the Ethereum ABI2. The arguments are then copied from the init byte array to the EVM memory through the CODECOPY opcode with appropriate values on the stack. This is done when the byte code in the init byte array is actually run on the network.
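A hedged low-level sketch; `world` and `init_bytecode` are assumptions standing in for the EVM world object this method lives on and the constructor bytecode (with ABI-encoded args appended, as the docstring describes):

deployer = world.create_account(balance=10**18)
contract_address = world.create_contract(
    price=0,
    caller=deployer,
    balance=0,
    init=init_bytecode,
    gas=3000000,
)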