Unnamed: 0 (int64): 0 – 389k
code (string lengths): 26 – 79.6k
docstring (string lengths): 1 – 46.9k
385,400
def build_command_tree(pattern, cmd_params): from docopt import Either, Optional, OneOrMore, Required, Option, Command, Argument if type(pattern) in [Either, Optional, OneOrMore]: for child in pattern.children: build_command_tree(child, cmd_params) elif type(pattern) in [Required]: for child in pattern.children: cmd_params = build_command_tree(child, cmd_params) elif type(pattern) in [Option]: suffix = "=" if pattern.argcount else "" if pattern.short: cmd_params.options.append(pattern.short + suffix) if pattern.long: cmd_params.options.append(pattern.long + suffix) elif type(pattern) in [Command]: cmd_params = cmd_params.get_subcommand(pattern.name) elif type(pattern) in [Argument]: cmd_params.arguments.append(pattern.name) return cmd_params
Recursively fill in a command tree in cmd_params according to a docopt-parsed "pattern" object.
385,401
def append_columns(self, colnames, values, **kwargs): n = len(self) if np.isscalar(values): values = np.full(n, values) values = np.atleast_1d(values) if not isinstance(colnames, str) and len(colnames) > 1: values = np.atleast_2d(values) self._check_column_length(values, n) if values.ndim == 1: if len(values) > n: raise ValueError("New Column is longer than existing table!") elif len(values) > 1 and len(values) < n: raise ValueError( "New Column is shorter than existing table, " "but not just one element!" ) elif len(values) == 1: values = np.full(n, values[0]) new_arr = rfn.append_fields( self, colnames, values, usemask=False, asrecarray=True, **kwargs ) return self.__class__( new_arr, h5loc=self.h5loc, split_h5=self.split_h5, name=self.name, h5singleton=self.h5singleton )
Append new columns to the table. When appending a single column, ``values`` can be a scalar or an array of either length 1 or the same length as this array (the one it's appended to). In case of multiple columns, values must have the shape ``list(arrays)``, and the dimension of each array has to match the length of this array. See the docs for ``numpy.lib.recfunctions.append_fields`` for an explanation of the remaining options.
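For illustration, a minimal sketch of appending columns, assuming ``tab`` is an existing 3-row instance of the table class above (names and values are purely illustrative):

    import numpy as np

    # Single new column from a scalar: broadcast to the table length.
    tab2 = tab.append_columns("b", 42)

    # Multiple new columns: one array per column, each matching the table length.
    tab3 = tab.append_columns(["c", "d"], [np.arange(3), np.zeros(3)])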
385,402
def getPeer(self, url): peers = list(models.Peer.select().where(models.Peer.url == url)) if len(peers) == 0: raise exceptions.PeerNotFoundException(url) return peers[0]
Finds a peer by URL and returns the first peer record with that URL.
385,403
def _parse_table( self, parent_name=None ): if self._current != "[": raise self.parse_error( InternalParserError, "_parse_table() called on non-bracket character." ) indent = self.extract() self.inc() if self.end(): raise self.parse_error(UnexpectedEofError) is_aot = False if self._current == "[": if not self.inc(): raise self.parse_error(UnexpectedEofError) is_aot = True self.mark() while self._current != "]" and self.inc(): if self.end(): raise self.parse_error(UnexpectedEofError) pass name = self.extract() if not name.strip(): raise self.parse_error(EmptyTableNameError) key = Key(name, sep="") name_parts = tuple(self._split_table_name(name)) missing_table = False if parent_name: parent_name_parts = tuple(self._split_table_name(parent_name)) else: parent_name_parts = tuple() if len(name_parts) > len(parent_name_parts) + 1: missing_table = True name_parts = name_parts[len(parent_name_parts) :] values = Container(True) self.inc() if is_aot: self.inc() cws, comment, trail = self._parse_comment_trail() result = Null() if len(name_parts) > 1: if missing_table: table = Table( Container(True), Trivia(indent, cws, comment, trail), is_aot and name_parts[0].key in self._aot_stack, is_super_table=True, name=name_parts[0].key, ) result = table key = name_parts[0] for i, _name in enumerate(name_parts[1:]): if _name in table: child = table[_name] else: child = Table( Container(True), Trivia(indent, cws, comment, trail), is_aot and i == len(name_parts[1:]) - 1, is_super_table=i < len(name_parts[1:]) - 1, name=_name.key, display_name=name if i == len(name_parts[1:]) - 1 else None, ) if is_aot and i == len(name_parts[1:]) - 1: table.append(_name, AoT([child], name=table.name, parsed=True)) else: table.append(_name, child) table = child values = table.value else: if name_parts: key = name_parts[0] while not self.end(): item = self._parse_item() if item: _key, item = item if not self._merge_ws(item, values): if _key is not None and _key.is_dotted(): self._handle_dotted_key(values, _key, item) else: values.append(_key, item) else: if self._current == "[": is_aot_next, name_next = self._peek_table() if self._is_child(name, name_next): key_next, table_next = self._parse_table(name) values.append(key_next, table_next) while not self.end(): _, name_next = self._peek_table() if not self._is_child(name, name_next): break key_next, table_next = self._parse_table(name) values.append(key_next, table_next) break else: raise self.parse_error( InternalParserError, "_parse_item() returned None on a non-bracket character.", ) if isinstance(result, Null): result = Table( values, Trivia(indent, cws, comment, trail), is_aot, name=name, display_name=name, ) if is_aot and (not self._aot_stack or name != self._aot_stack[-1]): result = self._parse_aot(result, name) return key, result
Parses a table element.
385,404
def addresses_for_key(gpg, key): fingerprint = key["fingerprint"] addresses = [] for key in gpg.list_keys(): if key["fingerprint"] == fingerprint: addresses.extend([address.split("<")[-1].strip(">") for address in key["uids"] if address]) return addresses
Takes a key and extracts the email addresses for it.
385,405
def put(self, key, value, minutes): minutes = self._get_minutes(minutes) if minutes is not None: return self._store.put(self.tagged_item_key(key), value, minutes)
Store an item in the cache for a given number of minutes. :param key: The cache key :type key: str :param value: The cache value :type value: mixed :param minutes: The lifetime in minutes of the cached value :type minutes: int or datetime
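A short usage sketch, assuming ``cache`` is an instance of the store above:

    # Cache a value for 10 minutes; with minutes=None the call becomes a no-op.
    cache.put("user:42", {"name": "Ada"}, 10)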
385,406
def execute(self, command): try: if self.ssh.get_transport() is not None: logger.debug(.format(self.target_address, command)) stdin, stdout, stderr = self.ssh.exec_command(command) return dict(zip([, , ], [stdin, stdout, stderr])) else: raise SSHConnectionError(self.target_address, "ssh transport is closed") except (AuthenticationException, SSHException, ChannelException, SocketError) as ex: logger.critical(("{0} execution failed on {1} with exception:" "{2}".format(command, self.target_address, ex))) raise SSHCommandError(self.target_address, command, ex)
Executes command on remote hosts :type command: str :param command: command to be run on remote host
385,407
def load_tf_weights_in_transfo_xl(model, config, tf_path): try: import numpy as np import tensorflow as tf except ImportError: print("Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see " "https://www.tensorflow.org/install/ for installation instructions.") raise tf_to_pt_map = build_tf_to_pytorch_map(model, config) init_vars = tf.train.list_variables(tf_path) tf_weights = {} for name, shape in init_vars: print("Loading TF weight {} with shape {}".format(name, shape)) array = tf.train.load_variable(tf_path, name) tf_weights[name] = array for name, pointer in tf_to_pt_map.items(): assert name in tf_weights array = tf_weights[name] if in name or in name: array = np.transpose(array) if ( in name or in name) and len(pointer) > 1: assert len(pointer) == array.shape[0] for i, p_i in enumerate(pointer): arr_i = array[i, ...] try: assert p_i.shape == arr_i.shape except AssertionError as e: e.args += (p_i.shape, arr_i.shape) raise print("Initialize PyTorch weight {} for layer {}".format(name, i)) p_i.data = torch.from_numpy(arr_i) else: try: assert pointer.shape == array.shape except AssertionError as e: e.args += (pointer.shape, array.shape) raise print("Initialize PyTorch weight {}".format(name)) pointer.data = torch.from_numpy(array) tf_weights.pop(name, None) tf_weights.pop(name + , None) tf_weights.pop(name + , None) print("Weights not copied to PyTorch model: {}".format(.join(tf_weights.keys()))) return model
Load tf checkpoints in a pytorch model
385,408
def strains(self): with open(os.path.join(self.path, )) as strains: next(strains) for line in strains: oln, seqid = line.split() self.straindict[oln] = seqid.rstrip() self.strainset.add(oln) logging.debug(oln) if self.debug: break
Create a dictionary of SEQID: OLNID from the supplied strain file.
385,409
async def get_messages(self, name): resp = await self.send_command(OPERATIONS.CMD_QUERY_MESSAGES, {'name': name}, MESSAGES.QueryMessagesResponse, timeout=5.0) return [states.ServiceMessage.FromDictionary(x) for x in resp]
Get stored messages for a service. Args: name (string): The name of the service to get messages from. Returns: list(ServiceMessage): A list of the messages stored for this service
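A possible call site, assuming ``client`` is an instance of the class defining this coroutine and "logger" is a registered service name (both assumptions):

    import asyncio

    async def show_messages(client):
        # "logger" is a hypothetical service name.
        messages = await client.get_messages("logger")
        for msg in messages:
            print(msg)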
385,410
def encode_request(name, expected, updated): client_message = ClientMessage(payload_size=calculate_size(name, expected, updated)) client_message.set_message_type(REQUEST_TYPE) client_message.set_retryable(RETRYABLE) client_message.append_str(name) client_message.append_long(expected) client_message.append_long(updated) client_message.update_frame_length() return client_message
Encode request into client_message
385,411
def defaulted_config(modules, params=None, yaml=None, filename=None, config=None, validate=True): with _temporary_config(): set_default_config(modules, params=params, yaml=yaml, filename=filename, config=config, validate=validate) yield get_global_config()
Context manager version of :func:`set_default_config()`. Use this with a Python 'with' statement, like >>> config_yaml = ''' ... toplevel: ... param: value ... ''' >>> with yakonfig.defaulted_config([toplevel], yaml=config_yaml) as config: ... assert 'param' in config['toplevel'] ... assert yakonfig.get_global_config('toplevel', 'param') == 'value' On exit the global configuration is restored to its previous state (if any). :param modules: modules or Configurable instances to use :type modules: iterable of :class:`~yakonfig.Configurable` :param dict params: dictionary of command-line argument key to values :param str yaml: global configuration file :param str filename: location of global configuration file :param dict config: global configuration object :param bool validate: check configuration after creating :return: the new global configuration
385,412
def from_coeff(self, chebcoeff, domain=None, prune=True, vscale=1.): coeffs = np.asarray(chebcoeff) if prune: N = self._cutoff(coeffs, vscale) pruned_coeffs = coeffs[:N] else: pruned_coeffs = coeffs values = self.polyval(pruned_coeffs) return self(values, domain, vscale)
Initialise from provided coefficients prune: Whether to prune the negligible coefficients vscale: the scale to use when pruning
385,413
def get_random(min_pt, max_pt): result = Point(random.random(), random.random()) return result.get_component_product(max_pt - min_pt) + min_pt
Returns a random vector in the given range.
385,414
def list_subcommand(vcard_list, parsable): if not vcard_list: if not parsable: print("Found no contacts") sys.exit(1) elif parsable: contact_line_list = [] for vcard in vcard_list: if config.display_by_name() == "first_name": name = vcard.get_first_name_last_name() else: name = vcard.get_last_name_first_name() contact_line_list.append("\t".join([vcard.get_uid(), name, vcard.address_book.name])) print("\n".join(contact_line_list)) else: list_contacts(vcard_list)
Print a user friendly contacts table. :param vcard_list: the vcards to print :type vcard_list: list of carddav_object.CarddavObject :param parsable: machine readable output: columns divided by tabulator (\t) :type parsable: bool :returns: None :rtype: None
385,415
def _init(self, run_conf, run_number=None): self.stop_run.clear() self.abort_run.clear() self._run_status = run_status.running self._write_run_number(run_number) self._init_run_conf(run_conf)
Initialization before a new run.
385,416
def interpolate(self, factor, minKerning, maxKerning, round=True, suppressError=True): factor = normalizers.normalizeInterpolationFactor(factor) if not isinstance(minKerning, BaseKerning): raise TypeError(("Interpolation to an instance of %r can not be " "performed from an instance of %r.") % ( self.__class__.__name__, minKerning.__class__.__name__)) if not isinstance(maxKerning, BaseKerning): raise TypeError(("Interpolation to an instance of %r can not be " "performed from an instance of %r.") % ( self.__class__.__name__, maxKerning.__class__.__name__)) round = normalizers.normalizeBoolean(round) suppressError = normalizers.normalizeBoolean(suppressError) self._interpolate(factor, minKerning, maxKerning, round=round, suppressError=suppressError)
Interpolates all pairs between two :class:`BaseKerning` objects: **minKerning** and **maxKerning**. The interpolation occurs on a 0 to 1.0 range where **minKerning** is located at 0 and **maxKerning** is located at 1.0. The kerning data is replaced by the interpolated kerning. * **factor** is the interpolation value. It may be less than 0 and greater than 1.0. It may be an :ref:`type-int-float`, ``tuple`` or ``list``. If it is a ``tuple`` or ``list``, the first number indicates the x factor and the second number indicates the y factor. * **round** is a ``bool`` indicating if the result should be rounded to ``int``\s. The default behavior is to round interpolated kerning. * **suppressError** is a ``bool`` indicating if incompatible data should be ignored or if an error should be raised when such incompatibilities are found. The default behavior is to ignore incompatible data. >>> myKerning.interpolate(kerningOne, kerningTwo)
385,417
def construct(self, request, service=None, http_args=None, **kwargs): if in kwargs: request["client_assertion"] = kwargs[] if in kwargs: request[ ] = kwargs[] else: request["client_assertion_type"] = JWT_BEARER elif in request: if not in request: request["client_assertion_type"] = JWT_BEARER else: algorithm = None _context = service.service_context request["client_assertion"] = assertion_jwt( _context.client_id, signing_key, audience, algorithm, **_args) request["client_assertion_type"] = JWT_BEARER try: del request["client_secret"] except KeyError: pass if not request.c_param["client_id"][VREQUIRED]: try: del request["client_id"] except KeyError: pass return {}
Constructs a client assertion and signs it with a key. The request is modified as a side effect. :param request: The request :param service: A :py:class:`oidcservice.service.Service` instance :param http_args: HTTP arguments :param kwargs: Extra arguments :return: Constructed HTTP arguments, in this case none
385,418
def getAccountInfo(self, fields=None): return self.make_method("getAccountInfo", { "access_token": self.access_token, "fields": json.dumps(fields) if fields else None })
Use this method to get information about a Telegraph account. :param fields: List of account fields to return. Available fields: short_name, author_name, author_url, auth_url, page_count. :type fields: list :returns: Account object on success.
385,419
def path(self, path): self._path = self.manager.get_abs_image_path(path) log.info(.format(name=self._name, id=self._id, path=self._path))
Path of the IOU executable. :param path: path to the IOU image executable
385,420
def run(self): try: proxy = config_ini.engine.open() self.LOG.info("Stats for %s - up %s, %s" % ( config_ini.engine.engine_id, fmt.human_duration(proxy.system.time() - config_ini.engine.startup, 0, 2, True).strip(), proxy )) except (error.LoggableError, xmlrpc.ERRORS), exc: self.LOG.warn(str(exc))
Statistics logger job callback.
385,421
async def chat_send(self, message: str, team_only: bool): ch = ChatChannel.Team if team_only else ChatChannel.Broadcast await self._execute( action=sc_pb.RequestAction( actions=[sc_pb.Action(action_chat=sc_pb.ActionChat(channel=ch.value, message=message))] ) )
Writes a message to the chat
385,422
def _msg(self, label, *msg): if self.quiet is False: txt = self._unpack_msg(*msg) print("[" + label + "] " + txt)
Prints a message with a label
385,423
def random_filtered_sources(sources, srcfilter, seed): random.seed(seed) while sources: src = random.choice(sources) if srcfilter.get_close_sites(src) is not None: return [src] sources.remove(src) return []
:param sources: a list of sources :param srcfilter: a SourceFilter instance :param seed: a random seed :returns: an empty list or a list with a single filtered source
385,424
def _schema(self, path, obj, app): if path.startswith(): last_token = jp_split(path)[-1] if app.version == : obj.update_field(, scope_split(last_token)[-1]) else: obj.update_field(, last_token)
fulfill 'name' field for objects under '#/definitions' and with 'properties'
385,425
def _get_api_content(self): if GITHUB_TOKEN is not None: self.add_params_to_url({ "access_token": GITHUB_TOKEN }) api_content_response = requests.get(self.api_url) self.api_content = json.loads( api_content_response.text )
Updates class api content by calling Github api and storing result
385,426
def category(self, value): if value is not None: assert type(value) is unicode, " attribute: type is not !".format( "category", value) self.__category = value
Setter for **self.__category** attribute. :param value: Attribute value. :type value: unicode
385,427
def character_to_vk(self, character): for vk in self.non_layout_keys: if self.non_layout_keys[vk] == character.lower(): return (vk, []) for vk in self.layout_specific_keys: if self.layout_specific_keys[vk][0] == character: return (vk, []) elif self.layout_specific_keys[vk][1] == character: return (vk, []) raise ValueError("Unrecognized character: {}".format(character))
Returns a tuple of (scan_code, modifiers) where ``scan_code`` is a numeric scan code and ``modifiers`` is an array of string modifier names (like 'shift')
385,428
def sky_bbox_ll(self): if self._wcs is not None: return pixel_to_skycoord(self.xmin.value - 0.5, self.ymin.value - 0.5, self._wcs, origin=0) else: return None
The sky coordinates of the lower-left vertex of the minimal bounding box of the source segment, returned as a `~astropy.coordinates.SkyCoord` object. The bounding box encloses all of the source segment pixels in their entirety, thus the vertices are at the pixel *corners*.
385,429
def _refresh_channel(self): self.channel = salt.transport.client.ReqChannel.factory(self.opts) return self.channel
Reset the channel, in the event of an interruption
385,430
def column_types(self): column_types = {} for c in self.sqla_columns: column_types[c.name] = c.type return column_types
Return a dict mapping column name to type for all columns in table
385,431
def delete(method, hmc, uri, uri_parms, logon_required): try: adapter = hmc.lookup_by_uri(uri) except KeyError: raise InvalidResourceError(method, uri) cpc = adapter.manager.parent assert cpc.dpm_enabled adapter.manager.remove(adapter.oid)
Operation: Delete Hipersocket (requires DPM mode).
385,432
def convert_descriptor_and_rows(self, descriptor, rows): primary_key = None schema = tableschema.Schema(descriptor) if len(schema.primary_key) == 1: primary_key = schema.primary_key[0] elif len(schema.primary_key) > 1: message = raise tableschema.exceptions.StorageError(message) data_rows = [] index_rows = [] jtstypes_map = {} for row in rows: values = [] index = None for field, value in zip(schema.fields, row): try: if isinstance(value, float) and np.isnan(value): value = None if value and field.type == : value = int(value) value = field.cast_value(value) except tableschema.exceptions.CastError: value = json.loads(value) if value is None and field.type in (, ): jtstypes_map[field.name] = value = np.NaN if field.name == primary_key: index = value else: values.append(value) data_rows.append(tuple(values)) index_rows.append(index) dtypes = [] for field in schema.fields: if field.name != primary_key: field_name = field.name if six.PY2: field_name = field.name.encode() dtype = self.convert_type(jtstypes_map.get(field.name, field.type)) dtypes.append((field_name, dtype)) index = None columns = schema.headers array = np.array(data_rows, dtype=dtypes) if primary_key: index_field = schema.get_field(primary_key) index_dtype = self.convert_type(index_field.type) index_class = pd.Index if index_field.type in [, ]: index_class = pd.DatetimeIndex index = index_class(index_rows, name=primary_key, dtype=index_dtype) columns = filter(lambda column: column != primary_key, schema.headers) dataframe = pd.DataFrame(array, index=index, columns=columns) return dataframe
Convert descriptor and rows to Pandas
385,433
def _set_neighbor_route_map_name_direction_out(self, v, load=False): if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={: u}), is_leaf=True, yang_name="neighbor-route-map-name-direction-out", rest_name="neighbor-route-map-name-direction-out", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u: {u: u, u: None}}, namespace=, defining_module=, yang_type=, is_config=True) except (TypeError, ValueError): raise ValueError({ : , : "common-def:name-string64", : , }) self.__neighbor_route_map_name_direction_out = t if hasattr(self, ): self._set()
Setter method for neighbor_route_map_name_direction_out, mapped from YANG variable /rbridge_id/router/router_bgp/address_family/ipv6/ipv6_unicast/af_ipv6_vrf/neighbor/af_ipv6_vrf_neighbor_address_holder/af_ipv6_neighbor_addr/neighbor_route_map/neighbor_route_map_direction_out/neighbor_route_map_name_direction_out (common-def:name-string64) If this variable is read-only (config: false) in the source YANG file, then _set_neighbor_route_map_name_direction_out is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_neighbor_route_map_name_direction_out() directly.
385,434
def event(self, event_data, priority="normal", event_method="EVENT"): logger.debug("event: " + str(event_data)) euuid = uuid.uuid1() logger.debug("<%s> <euuid:%s> Sending event data to server: " "%s" % (str(self.cuuid), str(euuid), str(self.server))) if not self.listener.listening: logger.warning("Neteria client is not listening.") if not self.registered: logger.warning("<%s> <euuid:%s> Client is currently not registered. " "Event not sent." % (str(self.cuuid), str(euuid))) return False packet = {"method": event_method, "cuuid": str(self.cuuid), "euuid": str(euuid), "event_data": event_data, "timestamp": str(datetime.now()), "retry": 0, "priority": priority} self.listener.send_datagram( serialize_data(packet, self.compression, self.encryption, self.server_key), self.server) logger.debug("<%s> Sending EVENT Packet: %s" % (str(self.cuuid), pformat(packet))) self.event_uuids[str(euuid)] = packet logger.debug("<%s> Scheduling retry in %s seconds" % (str(self.cuuid), str(self.timeout))) self.listener.call_later(self.timeout, self.retransmit, packet) return euuid
This function will send event packets to the server. This is the main method you would use to send data from your application to the server. Whenever an event is sent to the server, a universally unique event id (euuid) is created for each event and stored in the "event_uuids" dictionary. This dictionary contains a list of all events that are currently waiting for a response from the server. The event will only be removed from this dictionary if the server responds with LEGAL or ILLEGAL or if the request times out. Args: event_data (dict): The event data to send to the server. This data will be passed through the server's middleware to determine if the event is legal or not, and then processed by the server if it is legal. priority (string): The event's priority informs the server of whether or not the client is going to wait for a confirmation message from the server indicating whether its event was LEGAL or ILLEGAL. Setting this to "normal" informs the server that the client will wait for a response from the server before processing the event. Setting this to "high" informs the server that the client will NOT wait for a response. Defaults to "normal". event_method (string): The type of event to send to the server. Valid methods are "EVENT", "AUTH". Defaults to "EVENT". Returns: A universally unique identifier (uuid) of the event. Examples: >>> event_data >>> priority
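As a hedged usage sketch (assuming ``client`` is an already-registered instance of the client class above, and the payload is purely illustrative):

    event_data = {"action": "move", "x": 10, "y": 4}
    # "normal" waits for the server's LEGAL/ILLEGAL reply; "high" does not.
    euuid = client.event(event_data, priority="normal")
    if euuid:
        print("event queued with id", euuid)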
385,435
def filter_kepler_lcdict(lcdict, filterflags=True, nanfilter=, timestoignore=None): sappdcsap,pdct caught by the quality flags. Returns ------- lcdict Returns an `lcdict` (this is useable by most astrobase functions for LC processing). The `lcdict` is filtered IN PLACE! columnstimesap_quality..timeapplied quality flag filter, ndet before = %s, ndet after = %ssap,pdcsapsap_fluxpdcpdcsap_fluxtimesapsapsap_fluxtimepdcpdcpdcsap_fluxtimetime..timeremoved nans, ndet before = %s, ndet after = %stimetimetime..timeremoved timestoignore, ndet before = %s, ndet after = %s' % (nbefore, nafter)) return lcdict
This filters the Kepler `lcdict`, removing nans and bad observations. By default, this function removes points in the Kepler LC that have ANY quality flags set. Parameters ---------- lcdict : lcdict An `lcdict` produced by `consolidate_kepler_fitslc` or `read_kepler_fitslc`. filterflags : bool If True, will remove any measurements that have non-zero quality flags present. This usually indicates an issue with the instrument or spacecraft. nanfilter : {'sap','pdc','sap,pdc'} Indicates the flux measurement type(s) to apply the filtering to. timestoignore : list of tuples or None This is of the form:: [(time1_start, time1_end), (time2_start, time2_end), ...] and indicates the start and end times to mask out of the final lcdict. Use this to remove anything that wasn't caught by the quality flags. Returns ------- lcdict Returns an `lcdict` (this is useable by most astrobase functions for LC processing). The `lcdict` is filtered IN PLACE!
385,436
def get_selections(pattern=None, state=None): ret = {} cmd = ['dpkg', '--get-selections'] cmd.append(pattern if pattern else '*') stdout = __salt__['cmd.run_stdout'](cmd, output_loglevel='trace', python_shell=False) ret = _parse_selections(stdout) if state: return {state: ret.get(state, [])} return ret
View package state from the dpkg database. Returns a dict of dicts containing the state, and package names: .. code-block:: python {'<host>': {'<state>': ['pkg1', ... ] }, ... } CLI Example: .. code-block:: bash salt '*' pkg.get_selections salt '*' pkg.get_selections 'python-*' salt '*' pkg.get_selections state=hold salt '*' pkg.get_selections 'openssh*' state=hold
385,437
def hard_equals(a, b): if type(a) != type(b): return False return a == b
Implements the '===' operator.
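Given the definition above, the strict type check is what distinguishes it from Python's ``==``:

    hard_equals(1, 1)     # True
    hard_equals(1, 1.0)   # False: differing types, even though 1 == 1.0
    hard_equals("1", 1)   # False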
385,438
def set_parameter(self, name, value): if name == "valuesToForecast": self._forecastUntil = None return super(BaseForecastingMethod, self).set_parameter(name, value)
Sets a parameter for the BaseForecastingMethod. :param string name: Name of the parameter. :param numeric value: Value of the parameter.
385,439
def from_bma_history(cls: Type[TransactionType], currency: str, tx_data: Dict) -> TransactionType: tx_data = tx_data.copy() tx_data["currency"] = currency for data_list in (, , , , ): tx_data[.format(data_list)] = .join(tx_data[data_list]) if tx_data["version"] >= 3: signed_raw = .format(**tx_data) else: signed_raw = .format(**tx_data) return cls.from_signed_raw(signed_raw)
Get the transaction instance from json :param currency: the currency of the tx :param tx_data: json data of the transaction :return:
385,440
def authorized_tenants(self): if self.is_authenticated and self._authorized_tenants is None: endpoint = self.endpoint try: self._authorized_tenants = utils.get_project_list( user_id=self.id, auth_url=endpoint, token=self.unscoped_token, is_federated=self.is_federated) except (keystone_exceptions.ClientException, keystone_exceptions.AuthorizationFailure): LOG.exception() return self._authorized_tenants or []
Returns a memoized list of tenants this user may access.
385,441
def img(self): SlipThumbnail.img(self) if self.rotation: mat = cv.CreateMat(2, 3, cv.CV_32FC1) cv.GetRotationMatrix2D((self.width/2,self.height/2), -self.rotation, 1.0, mat) self._rotated = cv.CloneImage(self._img) cv.WarpAffine(self._img, self._rotated, mat) else: self._rotated = self._img return self._rotated
return a cv image for the icon
385,442
def point_in_circle(pt, center, radius): d = np.linalg.norm(np.asarray(pt) - np.asarray(center)) return d <= radius
Returns true if a given point is located inside (or on the border) of a circle. >>> point_in_circle((0, 0), (0, 0), 1) True >>> point_in_circle((1, 0), (0, 0), 1) True >>> point_in_circle((1, 1), (0, 0), 1) False
385,443
def GetBaseFiles(self, diff): files = {} for line in diff.splitlines(True): if line.startswith('Index:') or line.startswith('Property changes on:'): unused, filename = line.split(':', 1) filename = to_slash(filename.strip()) files[filename] = self.GetBaseFile(filename) return files
Helper that calls GetBase file for each file in the patch. Returns: A dictionary that maps from filename to GetBaseFile's tuple. Filenames are retrieved based on lines that start with "Index:" or "Property changes on:".
385,444
def argdistincts(self, nested=False): out = self._argdistincts(absolute=False) if nested: out = self.JaggedArray.fromcounts(self.numpy.maximum(0, self.counts - 1), self.JaggedArray.fromcounts(self.index[:, :0:-1].flatten(), out._content)) return out
Return all unique combinations (up to permutation) of two elements, taken without replacement from the indices of the jagged dimension. Combinations are ordered lexicographically. nested: Return a doubly-jagged array where the first jagged dimension matches the shape of this array
385,445
def write_defaults(self): self.defaults.write() self.reset_defaults(self.defaults.filename)
Create default config file and reload.
385,446
def term_symbols(self): L_symbols = L, v_e = self.valence ml = list(range(-L, L + 1)) ms = [1 / 2, -1 / 2] ml_ms = list(product(ml, ms)) n = (2 * L + 1) * 2 e_config_combs = list(combinations(range(n), v_e)) TL = [sum([ml_ms[comb[e]][0] for e in range(v_e)]) for comb in e_config_combs] TS = [sum([ml_ms[comb[e]][1] for e in range(v_e)]) for comb in e_config_combs] comb_counter = Counter([r for r in zip(TL, TS)]) term_symbols = [] while sum(comb_counter.values()) > 0: L, S = min(comb_counter) J = list(np.arange(abs(L - S), abs(L) + abs(S) + 1)) term_symbols.append([str(int(2 * (abs(S)) + 1)) + L_symbols[abs(L)] + str(j) for j in J]) for ML in range(-L, L - 1, -1): for MS in np.arange(S, -S + 1, 1): if (ML, MS) in comb_counter: comb_counter[(ML, MS)] -= 1 if comb_counter[(ML, MS)] == 0: del comb_counter[(ML, MS)] return term_symbols
All possible Russell-Saunders term symbols of the Element, e.g. L = 1, n_e = 2 (s2) returns [['1D2'], ['3P0', '3P1', '3P2'], ['1S0']]
385,447
def parse_results(fields, data): master = [] for record in data[]: row = [None] * len(fields) for obj, value in record.iteritems(): if not isinstance(value, (dict, list, tuple)): if obj in fields: row[fields.index(obj)] = ensure_utf(value) elif isinstance(value, dict) and obj != : path = obj _traverse_results(value, fields, row, path) master.append(row) return master
Traverses ordered dictionary, calls _traverse_results() to recursively read into the dictionary depth of data
385,448
def deprecated(message=None): def deco(func): @functools.wraps(func) def wrapped(*args, **kwargs): warnings.warn( message or .format(func.__name__), category=PubChemPyDeprecationWarning, stacklevel=2 ) return func(*args, **kwargs) return wrapped return deco
Decorator to mark functions as deprecated. A warning will be emitted when the function is used.
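Typical decorator usage, assuming the factory above (and its PubChemPyDeprecationWarning class) is importable from its module; the decorated function is a made-up example:

    @deprecated("get_cid() is deprecated, use get_compounds() instead")
    def get_cid(name):
        return None

    get_cid("aspirin")   # emits the deprecation warning, then runs normally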
385,449
def np2model_tensor(a): "Transform numpy array `a` to a tensor of the same type." dtype = model_type(a.dtype) res = as_tensor(a) if not dtype: return res return res.type(dtype)
Transform numpy array `a` to a tensor of the same type.
385,450
def main(): parser = create_parser() args = parser.parse_args() if hasattr(args, 'handler'): args.handler(args) else: parser.print_help()
main.
385,451
def _system_config_file(): if sys.platform == : config_path = os.path.sep.join([_windows_system_appdata(), _APP_DIRNAME, _CONFIG_FILENAME]) elif sys.platform.startswith(): config_path = os.path.sep.join([os.path.sep + , , _APP_DIRNAME, _CONFIG_FILENAME]) else: config_path = os.path.sep.join([, , , _APP_DIRNAME, _CONFIG_FILENAME]) return config_path
Returns the path to the settings.cfg file. On Windows the file is located in the AppData/Local/envipyengine directory. On Unix, the file will be located in the ~/.envipyengine directory. :return: String specifying the full path to the settings.cfg file
385,452
def circuit_to_tensorflow_runnable( circuit: circuits.Circuit, initial_state: Union[int, np.ndarray] = 0, ) -> ComputeFuncAndFeedDict: if not circuit.are_all_measurements_terminal(): raise ValueError() t = _TensorCircuit(circuit, initial_state) return ComputeFuncAndFeedDict(t.compute, t.feed_dict)
Returns a compute function and feed_dict for a `cirq.Circuit`'s output. `result.compute()` will return a `tensorflow.Tensor` with `tensorflow.placeholder` objects to be filled in by `result.feed_dict`, at which point it will evaluate to the output state vector of the circuit. You can apply further operations to the tensor returned by `result.compute`. This allows, for example, for the final result to be a small amount of computed data (e.g. an expectation value) instead of the gigantic raw state vector. The tensor returned by `result.compute` is intended to run efficiently on cloud TPUs. It will have dtype complex64 and a shape of (2**n,) where n is the number of qubits. Examples: To simulate the circuit with tensorflow in a normal session, forward this method's output into `tensorflow.Session.run` as follows: import tensorflow as tf r = circuit_to_tensorflow_runnable(...) with tf.Session() as session: output = session.run(r.compute(), feed_dict=r.feed_dict) print(output) Note that you can use the returned tensor in further computations. For example, to compute the chance of the system ending up in the first 128 computational basis states you can use `tf.norm(tensor[:128], 2)`: import tensorflow as tf r = circuit_to_tensorflow_runnable(...) expectation = lambda: tf.norm(r.compute()[:128], 2) with tf.Session() as session: output = session.run(expectation, feed_dict=r.feed_dict) print(output) For documentation on running against cloud TPUs, see https://cloud.google.com/tpu/docs/quickstart#run_example Generally speaking, from within a cloud instance, you use `tf.contrib.tpu.rewrite` to convert the tensor into a TPU compatible form, initialize the TPU system, then run the rewritten tensor: import tensorflow as tf TPU_TARGET = ??????? r = circuit_to_tensorflow_runnable(...YOUR_CIRCUIT...) rewritten_for_tpu = tf.contrib.tpu.rewrite(r.compute) with tf.Session(target=TPU_TARGET) as session: session.run(tf.contrib.tpu.initialize_system()) output = session.run(rewritten_for_tpu, feed_dict=r.feed_dict) print(output) Args: circuit: The circuit to apply to `initial_state` to produce an output state vector. initial_state: The input into the circuit. If this is an integer, it indicates that the input state is a computational basis state where the k'th qubit is set by the k'th bit of the integer. If this is a numpy array, it should directly encode a normalized wavefunction. Returns: A ComputeFuncAndFeedDict, which is a named tuple whose first element is a function that returns a Tensor representing the output state vector that results from applying the given circuit to the given, and whose second element is a feed_dict containing important parameters describing that tensor.
385,453
def midnight(arg=None): if arg: _arg = parse(arg) if isinstance(_arg, datetime.date): return datetime.datetime.combine(_arg, datetime.datetime.min.time()) elif isinstance(_arg, datetime.datetime): return datetime.datetime.combine(_arg.date(), datetime.datetime.min.time()) else: return datetime.datetime.combine(datetime.date.today(), datetime.datetime.min.time())
convert date to datetime as midnight or get current day's midnight :param arg: string or date/datetime :return: datetime at 00:00:00
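For example, assuming the string parser accepts ISO-like input:

    midnight("2021-06-01 14:30")   # -> datetime.datetime(2021, 6, 1, 0, 0)
    midnight()                     # -> today's date at 00:00:00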
385,454
def compose(layers, bbox=None, layer_filter=None, color=None, **kwargs): from PIL import Image if not hasattr(layers, ): layers = [layers] def _default_filter(layer): return layer.is_visible() layer_filter = layer_filter or _default_filter valid_layers = [x for x in layers if layer_filter(x)] if len(valid_layers) == 0: return None if bbox is None: bbox = extract_bbox(valid_layers) if bbox == (0, 0, 0, 0): return None mode = get_pil_mode(valid_layers[0]._psd.color_mode, True) result = Image.new( mode, (bbox[2] - bbox[0], bbox[3] - bbox[1]), color=color if color is not None else , ) result.putalpha(0) for layer in valid_layers: if intersect(layer.bbox, bbox) == (0, 0, 0, 0): continue image = layer.compose(**kwargs) if image is None: continue logger.debug( % layer) offset = (layer.left - bbox[0], layer.top - bbox[1]) result = _blend(result, image, offset) return result
Compose layers to a single :py:class:`PIL.Image`. If the layers do not have visible pixels, the function returns `None`. Example:: image = compose([layer1, layer2]) In order to skip some layers, pass `layer_filter` function which should take `layer` as an argument and return `True` to keep the layer or return `False` to skip:: image = compose( layers, layer_filter=lambda x: x.is_visible() and x.kind == 'type' ) By default, visible layers are composed. .. note:: This function is experimental and does not guarantee Photoshop-quality rendering. Currently the following are ignored: - Adjustments layers - Layer effects - Blending mode (all blending modes become normal) Shape drawing is inaccurate if the PSD file is not saved with maximum compatibility. :param layers: a layer, or an iterable of layers. :param bbox: (left, top, bottom, right) tuple that specifies a region to compose. By default, all the visible area is composed. The origin is at the top-left corner of the PSD document. :param layer_filter: a callable that takes a layer and returns `bool`. :param color: background color in `int` or `tuple`. :return: :py:class:`PIL.Image` or `None`.
385,455
def trimult(U, x, uplo=, transa=, alpha=1., inplace=False): if inplace: b = x else: b = x.copy() dtrmm_wrap(a=U, b=b, uplo=uplo, transa=transa, alpha=alpha) return b
b = trimult(U,x, uplo='U') Multiplies U x, where U is upper triangular if uplo='U' or lower triangular if uplo = 'L'.
385,456
def create_item(self, name): elem = self.controlled_list.create_item(name) if elem: return TodoElementUX(parent=self, controlled_element=elem)
create a new todo list item
385,457
def batches(dataset): seq_lengths = dataset.variables[].data seq_begins = np.concatenate(([0], np.cumsum(seq_lengths)[:-1])) def sample(): chosen = np.random.choice( list(range(len(seq_lengths))), BATCH_SIZE, replace=False) return batch_at(dataset.variables[].data, dataset.variables[].data, seq_begins[chosen], seq_lengths[chosen]) return sample
Returns a callable that chooses sequences from netcdf data.
385,458
def _find_types(pkgs): return sorted({pkg.split(, 1)[0] for pkg in pkgs if len(pkg.split(, 1)) == 2})
From a list of package names, find the prefixes of package types.
385,459
def get_annotation(self, key, result_format=): value = self.get(, {}).get(key) if not value: return value if result_format == : return value[0] return value
A convenience method for accessing annotations on models that have them.
385,460
def _reconstruct(self, path_to_root): item_pattern = re.compile() dot_pattern = re.compile() path_segments = dot_pattern.split(path_to_root) schema_endpoint = self.schema if path_segments[1]: for i in range(1,len(path_segments)): if item_pattern.match(path_segments[i]): schema_endpoint = schema_endpoint[0] else: schema_endpoint = schema_endpoint[path_segments[i]] return schema_endpoint
a helper method for finding the schema endpoint from a path to root :param path_to_root: string with dot path to root from :return: list, dict, string, number, or boolean at path to root
385,461
def GroupBy(self: dict, f=None): if f and is_to_destruct(f): f = destruct_func(f) return _group_by(self.items(), f)
[ { 'self': [1, 2, 3], 'f': lambda x: x%2, 'assert': lambda ret: ret[0] == [2] and ret[1] == [1, 3] } ]
385,462
def _parse_raw_members( self, leaderboard_name, members, members_only=False, **options): if members_only: return [{self.MEMBER_KEY: m} for m in members] if members: return self.ranked_in_list_in(leaderboard_name, members, **options) else: return []
Parse the raw leaders data as returned from a given leader board query. Do associative lookups with the member to rank, score and potentially sort the results. @param leaderboard_name [String] Name of the leaderboard. @param members [List] A list of members as returned from a sorted set range query @param members_only [bool] Set True to return the members as is, Default is False. @param options [Hash] Options to be used when retrieving the page from the named leaderboard. @return a list of members.
385,463
def offset(self): if callable(self._offset): return util.WatchingList(self._offset(*(self.widget.pos+self.widget.size)),self._wlredraw_offset) else: return util.WatchingList(self._offset,self._wlredraw_offset)
Property to be used for setting and getting the offset of the layer. Note that setting this property causes an immediate redraw.
385,464
def __disambiguate_proper_names_1(self, docs, lexicon): for doc in docs: for word in doc[WORDS]: if len(word[ANALYSIS]) > 1: highestFreq = 0 properNameAnalyses = [] for analysis in word[ANALYSIS]: if analysis[POSTAG] == : if analysis[ROOT] in lexicon: properNameAnalyses.append( analysis ) if lexicon[analysis[ROOT]] > highestFreq: highestFreq = lexicon[analysis[ROOT]] else: raise Exception(,analysis[ROOT], \ ) if highestFreq > 0: toDelete = [] for analysis in properNameAnalyses: if lexicon[analysis[ROOT]] < highestFreq: toDelete.append(analysis) for analysis in toDelete: word[ANALYSIS].remove(analysis)
Perform an initial removal of redundant analyses: if a word has several proper-name analyses with different frequencies, keep only the analysis (or analyses) with the highest frequency ...
385,465
def scramble(name, **kwargs): name = Path(name) mdf = MDF(name) texts = {} callback = kwargs.get("callback", None) if callback: callback(0, 100) count = len(mdf.groups) if mdf.version >= "4.00": ChannelConversion = ChannelConversionV4 stream = mdf._file if mdf.header.comment_addr: stream.seek(mdf.header.comment_addr + 8) size = UINT64_u(stream.read(8))[0] - 24 texts[mdf.header.comment_addr] = randomized_string(size) for fh in mdf.file_history: addr = fh.comment_addr if addr and addr not in texts: stream.seek(addr + 8) size = UINT64_u(stream.read(8))[0] - 24 texts[addr] = randomized_string(size) for ev in mdf.events: for addr in (ev.comment_addr, ev.name_addr): if addr and addr not in texts: stream.seek(addr + 8) size = UINT64_u(stream.read(8))[0] - 24 texts[addr] = randomized_string(size) for idx, gp in enumerate(mdf.groups, 1): addr = gp.data_group.comment_addr if addr and addr not in texts: stream.seek(addr + 8) size = UINT64_u(stream.read(8))[0] - 24 texts[addr] = randomized_string(size) cg = gp.channel_group for addr in (cg.acq_name_addr, cg.comment_addr): if cg.flags & v4c.FLAG_CG_BUS_EVENT: continue if addr and addr not in texts: stream.seek(addr + 8) size = UINT64_u(stream.read(8))[0] - 24 texts[addr] = randomized_string(size) source = cg.acq_source_addr if source: source = SourceInformation(address=source, stream=stream) for addr in ( source.name_addr, source.path_addr, source.comment_addr, ): if addr and addr not in texts: stream.seek(addr + 8) size = UINT64_u(stream.read(8))[0] - 24 texts[addr] = randomized_string(size) for ch in gp.channels: for addr in (ch.name_addr, ch.unit_addr, ch.comment_addr): if addr and addr not in texts: stream.seek(addr + 8) size = UINT64_u(stream.read(8))[0] - 24 texts[addr] = randomized_string(size) source = ch.source_addr if source: source = SourceInformation(address=source, stream=stream) for addr in ( source.name_addr, source.path_addr, source.comment_addr, ): if addr and addr not in texts: stream.seek(addr + 8) size = UINT64_u(stream.read(8))[0] - 24 texts[addr] = randomized_string(size) conv = ch.conversion_addr if conv: conv = ChannelConversion(address=conv, stream=stream) for addr in (conv.name_addr, conv.unit_addr, conv.comment_addr): if addr and addr not in texts: stream.seek(addr + 8) size = UINT64_u(stream.read(8))[0] - 24 texts[addr] = randomized_string(size) if conv.conversion_type == v4c.CONVERSION_TYPE_ALG: addr = conv.formula_addr if addr and addr not in texts: stream.seek(addr + 8) size = UINT64_u(stream.read(8))[0] - 24 texts[addr] = randomized_string(size) if conv.referenced_blocks: for key, block in conv.referenced_blocks.items(): if block: if block.id == b" addr = block.address if addr not in texts: stream.seek(addr + 8) size = block.block_len - 24 texts[addr] = randomized_string(size) if callback: callback(int(idx/count*66), 100) mdf.close() dst = name.with_suffix(".scrambled.mf4") copy(name, dst) with open(dst, "rb+") as mdf: count = len(texts) chunk = max(count // 34, 1) idx = 0 for index, (addr, bts) in enumerate(texts.items()): mdf.seek(addr + 24) mdf.write(bts) if index % chunk == 0: if callback: callback(66 + idx, 100) if callback: callback(100, 100) else: ChannelConversion = ChannelConversionV3 stream = mdf._file if mdf.header.comment_addr: stream.seek(mdf.header.comment_addr + 2) size = UINT16_u(stream.read(2))[0] - 4 texts[mdf.header.comment_addr + 4] = randomized_string(size) texts[36 + 0x40] = randomized_string(32) texts[68 + 0x40] = randomized_string(32) texts[100 + 0x40] = randomized_string(32) texts[132 + 0x40] = 
randomized_string(32) for idx, gp in enumerate(mdf.groups, 1): cg = gp.channel_group addr = cg.comment_addr if addr and addr not in texts: stream.seek(addr + 2) size = UINT16_u(stream.read(2))[0] - 4 texts[addr + 4] = randomized_string(size) if gp.trigger: addr = gp.trigger.text_addr if addr: stream.seek(addr + 2) size = UINT16_u(stream.read(2))[0] - 4 texts[addr + 4] = randomized_string(size) for ch in gp.channels: for key in ("long_name_addr", "display_name_addr", "comment_addr"): if hasattr(ch, key): addr = getattr(ch, key) else: addr = 0 if addr and addr not in texts: stream.seek(addr + 2) size = UINT16_u(stream.read(2))[0] - 4 texts[addr + 4] = randomized_string(size) texts[ch.address + 26] = randomized_string(32) texts[ch.address + 58] = randomized_string(128) source = ch.source_addr if source: source = ChannelExtension(address=source, stream=stream) if source.type == v23c.SOURCE_ECU: texts[source.address + 12] = randomized_string(80) texts[source.address + 92] = randomized_string(32) else: texts[source.address + 14] = randomized_string(36) texts[source.address + 50] = randomized_string(36) conv = ch.conversion_addr if conv: texts[conv + 22] = randomized_string(20) conv = ChannelConversion(address=conv, stream=stream) if conv.conversion_type == v23c.CONVERSION_TYPE_FORMULA: texts[conv + 36] = randomized_string(conv.block_len - 36) if conv.referenced_blocks: for key, block in conv.referenced_blocks.items(): if block: if block.id == b"TX": addr = block.address if addr and addr not in texts: stream.seek(addr + 2) size = UINT16_u(stream.read(2))[0] - 4 texts[addr + 4] = randomized_string(size) if callback: callback(int(idx/count*66), 100) mdf.close() dst = name.with_suffix(".scrambled.mf4") copy(name, dst) with open(dst, "rb+") as mdf: chunk = count // 34 idx = 0 for index, (addr, bts) in enumerate(texts.items()): mdf.seek(addr) mdf.write(bts) if chunk and index % chunk == 0: if callback: callback(66 + idx, 100) if callback: callback(100, 100) return dst
scramble text blocks and keep original file structure Parameters ---------- name : str | pathlib.Path file name Returns ------- name : str scrambled file name
385,466
def remove_vectored_io_slice_suffix_from_name(name, slice): suffix = .format(slice) if name.endswith(suffix): return name[:-len(suffix)] else: return name
Remove vectored io (stripe) slice suffix from a given name :param str name: entity name :param int slice: slice num :rtype: str :return: name without suffix
385,467
def get(self, param=None, must=[APIKEY]): param = {} if param is None else param r = self.verify_param(param, must) if not r.is_succ(): return r handle = CommonResultHandler(lambda rsp: {VERSION_V1:rsp.get(USER), VERSION_V2:rsp}[self.version()]) return self.path().post(param, handle, r)
Query account information. Parameter (name / type / required / description / example): apikey, String, yes, unique user identifier, 9b11127a9701975c734b8aee81ee3526. Args: param: (Optional) Results: Result
385,468
def plot(self, feature): if isinstance(feature, gffutils.Feature): feature = asinterval(feature) self.make_fig() axes = [] for ax, method in self.panels(): feature = method(ax, feature) axes.append(ax) return axes
Spawns a new figure showing data for `feature`. :param feature: A `pybedtools.Interval` object Using the pybedtools.Interval `feature`, creates figure specified in :meth:`BaseMiniBrowser.make_fig` and plots data on panels according to `self.panels()`.
385,469
def parse(self, buffer, inlineparent = None): size = 0 v = [] for i in range(0, self.size): r = self.innerparser.parse(buffer[size:], None) if r is None: return None v.append(r[0]) size += r[1] return (v, size)
Compatible to Parser.parse()
385,470
def get_distances(rupture, mesh, param): if param == 'rrup': dist = rupture.surface.get_min_distance(mesh) elif param == 'rx': dist = rupture.surface.get_rx_distance(mesh) elif param == 'ry0': dist = rupture.surface.get_ry0_distance(mesh) elif param == 'rjb': dist = rupture.surface.get_joyner_boore_distance(mesh) elif param == 'rhypo': dist = rupture.hypocenter.distance_to_mesh(mesh) elif param == 'repi': dist = rupture.hypocenter.distance_to_mesh(mesh, with_depths=False) elif param == 'rcdpp': dist = rupture.get_cdppvalue(mesh) elif param == 'azimuth': dist = rupture.surface.get_azimuth(mesh) elif param == "rvolc": dist = numpy.zeros_like(mesh.lons) else: raise ValueError('Unknown distance measure %r' % param) return dist
:param rupture: a rupture :param mesh: a mesh of points or a site collection :param param: the kind of distance to compute (default rjb) :returns: an array of distances from the given mesh
385,471
def get_learning_objectives_metadata(self): metadata = dict(self._mdata[]) metadata.update({: self._my_map[]}) return Metadata(**metadata)
Gets the metadata for learning objectives. return: (osid.Metadata) - metadata for the learning objectives *compliance: mandatory -- This method must be implemented.*
385,472
def _virtualenv_sys(venv_path): "obtain version and path info from a virtualenv." executable = os.path.join(venv_path, env_bin_dir, ) p = subprocess.Popen([executable, , ], env={}, stdout=subprocess.PIPE) stdout, err = p.communicate() assert not p.returncode and stdout lines = stdout.decode().splitlines() return lines[0], list(filter(bool, lines[1:]))
obtain version and path info from a virtualenv.
385,473
def cleanup(self): for future in self.futures: future.cancel() self.executor.shutdown(wait=10) if self.ssh.get_transport() != None: self.ssh.close()
Release resources used during shell execution
385,474
def get_results(self, title_prefix="", title_override="", rnd_dig=2): if not self.arr_res: raise ValueError("Call roll_mc before getting results.") if title_override: title = title_override else: ctitle = PBE._construct_title(self.num_dice, self.dice_type, self.add_val, self.num_attribute, self.keep_attribute, self.keep_dice, self.reroll, self.num_arrays) title = title_prefix + ctitle typ_arr = "; ".join([str(round(x, rnd_dig)) for x in self.arr_res["means"]]) res_row = [title, typ_arr, round(self.pbe_res["means"], rnd_dig), round(self.pbe_res["stds"], rnd_dig), round(self.pbe_res["5percentile"], rnd_dig), round(self.pbe_res["95percentile"], rnd_dig)] return res_row
Constructs a summary of the results as an array, which might be useful for writing the results of multiple algorithms to a table. NOTE- This method must be called AFTER "roll_mc". :param title_prefix: If desired, prefix the title (such as "Alg 1 ") :param title_override: Override the title string entirely :param rnd_dig: the number of digits to round to :return: A tuple of the raw array results and PBE results, as: [Description, Typical Array, Mean, Std, 5%, 95%]
385,475
def status(self): if isinstance(self.attrs['State'], dict): return self.attrs['State']['Status'] return self.attrs['State']
The status of the container. For example, ``running``, or ``exited``.
385,476
def main(): DATABASE.load_contents() continue_flag = False while not continue_flag: DATABASE.print_contents() try: command = raw_input(">>> ") for stmnt_unformated in sqlparse.parse(command): statement = sqlparse.parse( sqlparse.format( str( stmnt_unformated ), reindent=True ) )[0] type = statement.tokens[0] if str(type).lower() == "drop": if str(statement.tokens[2]).lower() == "table": tablename = str(statement.tokens[4]) table = DATABASE.get_table(tablename) table.rows = [] table.store_contents() DATABASE.delete_table(tablename) DATABASE.store_contents() else: raise Exception( "Invalid Syntax of DROP TABLE t" ) elif str(type).lower() == "truncate": if str(statement.tokens[2]).lower() == "table": tablename = str(statement.tokens[4]) table = DATABASE.get_table(tablename) table.rows = [] table.store_contents() else: raise Exception( "Invalid Syntax of TRUNCATE TABLE t" ) elif str(type).lower() == "delete": if str(statement.tokens[2]).lower() == "from": tablename = str(statement.tokens[4]) table = DATABASE.get_table(tablename) whereclause = statement.tokens[6] if str(whereclause.tokens[0]).lower() == "where": comparison = whereclause.tokens[2] key = str(comparison.tokens[0]) value = int(str(comparison.tokens[4])) table.delete_row(key, value) table.store_contents() else: raise Exception( "Invalid Syntax of DELETE FROM t where k = v" ) else: raise Exception( "Invalid Syntax of DELETE FROM t WHERE k = v" ) elif str(type).lower() == "insert": if str(statement.tokens[2]).lower() == "into": tablename = str(statement.tokens[4]) table = DATABASE.get_table(tablename) if str(statement.tokens[6]).lower() == "values": parenthesis = statement.tokens[8] idlist = parenthesis.tokens[1] values_list = map( lambda x: int(str(x)), idlist.get_identifiers() ) table.put_row_raw(values_list) table.store_contents() else: raise Exception( "Invalid Syntax of INSERT INTO t VALUES (v...)" ) else: raise Exception( "Invalid Syntax of INSERT INTO t VALUES (v...)" ) elif str(type).lower() == "create": if str(statement.tokens[2]).lower() == "table": sublist = list(statement.tokens[4].get_sublists()) tablename = str(sublist[0]) garbage = str(sublist[1]) column_list = map( lambda x: x.strip(" ()",).split()[0], garbage.split(",") ) DATABASE.create_table_raw( tablename=tablename, columns=column_list[:], ) DATABASE.store_contents() elif str(type).lower() == "select": col_list_or_single = statement.tokens[2] if "," not in str(col_list_or_single): if str(col_list_or_single) == "*": column_list = [] else: column_list = [str(col_list_or_single)] else: column_list = map( lambda x: str(x), col_list_or_single.get_identifiers() ) if str(statement.tokens[4]).lower() == "from": tab_list_or_single = statement.tokens[6] if "," not in str(tab_list_or_single): table_list = [str(tab_list_or_single)] else: table_list = map( lambda x: str(x), tab_list_or_single.get_identifiers() ) cross_columns = reduce( lambda x, y: x + y, map( lambda x: DATABASE.get_table( x ).get_column_list_prefixed(), table_list ) ) cross_table = parthsql.Table( name="temp", columns=cross_columns, rows=[] ) for i in itertools.product( *map( lambda x: DATABASE.get_table(x).get_all_rows(), table_list ) ): cross_table.put_row_raw( reduce( lambda x, y: x + y, i ) ) if len(statement.tokens) >= 9: whereclause = statement.tokens[8] if str(whereclause.tokens[0]).lower() == "where": comparison = whereclause.tokens[2] key = str(comparison.tokens[0]) try: value = int(str(comparison.tokens[4])) cross_table.invert_delete_row(key, value) except: value = str(comparison.tokens[4]) 
cross_table.invert_delete_row2(key, value) else: raise Exception( "Invalid Syntax of DELETE FROM t where k = v" ) if "*" in column_list: cross_table.print_contents() else: temp_list = [] for i in column_list: temp_list.append(cross_table.get_column(i)) print "\t\t\t".join(column_list) for i in zip(*(temp_list)): print "\t\t\t".join(map(str, i)) else: raise Exception( "Invalid Syntax of SELECT c... FROM t... WHERE k = v" ) else: raise Exception( "Unsupported Operation" ) except ValueError: print("¯\_(ツ)_/¯") except IOError: print("¯\_(ツ)_/¯") except IndexError: print("¯\_(ツ)_/¯") except AttributeError: print("¯\_(ツ)_/¯") except Exception, e: print e.message
The main loop for the commandline parser.
385,477
def _read_openjp2_common(self): with ExitStack() as stack: filename = self.filename stream = opj2.stream_create_default_file_stream(filename, True) stack.callback(opj2.stream_destroy, stream) codec = opj2.create_decompress(self._codec_format) stack.callback(opj2.destroy_codec, codec) opj2.set_error_handler(codec, _ERROR_CALLBACK) opj2.set_warning_handler(codec, _WARNING_CALLBACK) if self._verbose: opj2.set_info_handler(codec, _INFO_CALLBACK) else: opj2.set_info_handler(codec, None) opj2.setup_decoder(codec, self._dparams) raw_image = opj2.read_header(stream, codec) stack.callback(opj2.image_destroy, raw_image) if self._dparams.nb_tile_to_decode: opj2.get_decoded_tile(codec, stream, raw_image, self._dparams.tile_index) else: opj2.set_decode_area(codec, raw_image, self._dparams.DA_x0, self._dparams.DA_y0, self._dparams.DA_x1, self._dparams.DA_y1) opj2.decode(codec, stream, raw_image) opj2.end_decompress(codec, stream) image = self._extract_image(raw_image) return image
Read a JPEG 2000 image using libopenjp2. Returns ------- ndarray or list Either the image as an ndarray or a list of ndarrays, each item corresponding to one band.
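The `ExitStack` cleanup pattern above is worth calling out; here is a toy illustration using only the standard library (not the glymur/openjp2 API): every C-style "create" is paired with a "destroy" callback, and the callbacks run in reverse order when the block exits, even on error.

from contextlib import ExitStack

def destroy(name):
    print("destroying", name)

with ExitStack() as stack:
    stack.callback(destroy, "stream")   # registered first, runs last
    stack.callback(destroy, "codec")    # registered last, runs first
    print("decoding...")
# prints: decoding... / destroying codec / destroying stream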
385,478
def get_fd(file_or_fd, default=None): fd = file_or_fd if fd is None: fd = default if hasattr(fd, "fileno"): fd = fd.fileno() return fd
Helper function for getting a file descriptor.
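A quick usage sketch, assuming `get_fd` from the entry above is in scope:

import sys

assert get_fd(sys.stdout) == sys.stdout.fileno()                 # file-like object
assert get_fd(5) == 5                                            # already a raw descriptor
assert get_fd(None, default=sys.stderr) == sys.stderr.fileno()   # falls back to the default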
385,479
def deleteNetworkVisualProp(self, networkId, viewId, visualProperty, verbose=None):
    # NOTE: the URL string literals were stripped from the extracted source; the path
    # segments below are reconstructed from the documented CyREST bypass endpoint and
    # may differ from the generated original.
    response = api(url=self.___url + 'networks/' + str(networkId) + '/views/' + str(viewId) +
                       '/network/' + str(visualProperty) + '/bypass',
                   method="DELETE", verbose=verbose)
    return response
Deletes the bypass Visual Property specified by the `visualProperty`, `viewId`, and `networkId` parameters. When this is done, the Visual Property will be defined by the Visual Style. Additional details on common Visual Properties can be found in the [Basic Visual Lexicon JavaDoc API](http://chianti.ucsd.edu/cytoscape-3.6.1/API/org/cytoscape/view/presentation/property/BasicVisualLexicon.html) :param networkId: SUID of the Network :param viewId: SUID of the Network View :param visualProperty: Name of the Visual Property :param verbose: print more :returns: 200: successful operation
385,480
def to_struct(self, value): if self.str_format: return value.strftime(self.str_format) return value.strftime(self.default_format)
Cast `date` object to string.
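A standalone sketch of the same idea with illustrative names (not the actual field class): serialize a date with an optional explicit format, otherwise fall back to a default.

import datetime

DEFAULT_FORMAT = "%Y-%m-%d"

def date_to_struct(value, str_format=None):
    # use the explicit format when given, else the default
    return value.strftime(str_format or DEFAULT_FORMAT)

print(date_to_struct(datetime.date(2020, 1, 31)))              # 2020-01-31
print(date_to_struct(datetime.date(2020, 1, 31), "%d/%m/%Y"))  # 31/01/2020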
385,481
def make_naive(value, timezone):
    value = value.astimezone(timezone)
    if hasattr(timezone, 'normalize'):
        # available on pytz time zones
        value = timezone.normalize(value)
    return value.replace(tzinfo=None)
Makes an aware datetime.datetime naive in a given time zone.
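Usage sketch with a fixed-offset zone from the standard library; pytz zones (which provide `normalize()`) go through the `hasattr` branch the same way. Assumes `make_naive` from the entry above is in scope.

from datetime import datetime, timezone, timedelta

aware = datetime(2021, 6, 1, 12, 0, tzinfo=timezone.utc)
paris_summer = timezone(timedelta(hours=2))  # stand-in for a real tz database zone

print(make_naive(aware, paris_summer))  # 2021-06-01 14:00:00 (naive)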
385,482
def get_metric_values(self, group_name): if group_name not in self._metric_values: raise ValueError("Metric values for this group name do not " "exist: {}".format(group_name)) return self._metric_values[group_name]
Get the faked metric values for a metric group, by its metric group name. The result includes all metric object values added earlier for that metric group name, using :meth:`~zhmcclient.FakedMetricsContextManager.add_metric_object_values`, i.e. the metric values for all resources and all points in time that were added. Parameters: group_name (:term:`string`): Name of the metric group. Returns: iterable of :class:`~zhmcclient.FakedMetricObjectValues`: The metric values for that metric group, in the order they had been added. Raises: ValueError: Metric values for this group name do not exist.
385,483
def _SendTerminationMessage(self, status=None): if not self.runner_args.request_state.session_id: return if status is None: status = rdf_flows.GrrStatus() client_resources = self.context.client_resources user_cpu = client_resources.cpu_usage.user_cpu_time sys_cpu = client_resources.cpu_usage.system_cpu_time status.cpu_time_used.user_cpu_time = user_cpu status.cpu_time_used.system_cpu_time = sys_cpu status.network_bytes_sent = self.context.network_bytes_sent status.child_session_id = self.session_id request_state = self.runner_args.request_state request_state.response_count += 1 msg = rdf_flows.GrrMessage( session_id=request_state.session_id, request_id=request_state.id, response_id=request_state.response_count, auth_state=rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED, type=rdf_flows.GrrMessage.Type.STATUS, payload=status) self.queue_manager.QueueResponse(msg) self.QueueNotification(session_id=request_state.session_id)
This notifies the parent flow of our termination.
385,484
def get_per_identity_records(self, events: Iterable, data_processor: DataProcessor
                             ) -> Generator[Tuple[str, TimeAndRecord], None, None]:
    schema_loader = SchemaLoader()
    stream_bts_name = schema_loader.add_schema_spec(self._stream_bts)
    stream_transformer_schema: StreamingTransformerSchema = schema_loader.get_schema_object(
        stream_bts_name)
    for event in events:
        try:
            for record in data_processor.process_data(event):
                try:
                    id = stream_transformer_schema.get_identity(record)
                    time = stream_transformer_schema.get_time(record)
                    yield (id, (time, record))
                except Exception as err:
                    # NOTE: the original log message literal was stripped; reconstructed.
                    logging.error('{} in parsing Record {}.'.format(err, record))
        except Exception as err:
            # NOTE: the original log message literal was stripped; reconstructed.
            logging.error('{} in parsing Event {}.'.format(err, event))
Uses the given iterable of events and the data processor to convert each event into a list of Records along with its identity and time. :param events: iterable of events. :param data_processor: DataProcessor to process each event in events. :return: yields Tuple[Identity, TimeAndRecord] for all Records in events.
385,485
def profile_different_methods(search_file, screen_file, method_list, dir_path, file_name): profiler = ProfileRecorder(0.05) profiler.load_images(search_file, screen_file) profiler.profile_methods(method_list) profiler.wite_to_json(dir_path, file_name)
Run a performance test of the given matching methods on the specified images.
385,486
def serialize(self, queryset, **options):
    self.options = options
    self.stream = options.get("stream", StringIO())
    self.selected_fields = options.get("fields")
    self.use_natural_keys = options.get("use_natural_keys", True)
    self.xml = options.get("xml", None)
    self.root = (self.xml is None)
    self.start_serialization()
    for obj in queryset:
        # objects may provide their own XML serialization hook
        if hasattr(obj, '__serialize__'):
            obj.__serialize__(self.xml)
        else:
            self.serialize_object(obj)
    self.end_serialization()
    return self.getvalue()
Serialize a queryset. THE OUTPUT OF THIS SERIALIZER IS NOT MEANT TO BE SERIALIZED BACK INTO THE DB.
385,487
def _compute_distance(self, rup, dists, C):
    mref = 3.6
    rref = 1.0
    # NOTE: the coefficient-table keys were stripped from the extracted source;
    # 'h' (pseudo-depth) and 'c1', 'c2', 'c3' are assumed names for the
    # distance-scaling coefficients.
    rval = np.sqrt(dists.rhypo ** 2 + C['h'] ** 2)
    return (C['c1'] + C['c2'] * (rup.mag - mref)) * \
        np.log10(rval / rref) + C['c3'] * (rval - rref)
Compute the distance function, equation (9):
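A purely numeric illustration of that distance term with made-up coefficient values (the real values come from the GMPE's coefficient table):

import numpy as np

c1, c2, c3, h = -1.5, 0.2, -0.001, 6.0   # hypothetical coefficients
mag, rhypo = 5.5, 30.0                    # magnitude and hypocentral distance in km
mref, rref = 3.6, 1.0

rval = np.sqrt(rhypo ** 2 + h ** 2)
term = (c1 + c2 * (mag - mref)) * np.log10(rval / rref) + c3 * (rval - rref)
print(term)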
385,488
def _are_safety_checks_disabled(self, caller=u"unknown_function"): if self.rconf.safety_checks: return False self.log_warn([u"Safety checks disabled => %s passed", caller]) return True
Return ``True`` if safety checks are disabled. :param string caller: the name of the caller function :rtype: bool
385,489
def build_package(team, username, package, subpath, yaml_path,
                  checks_path=None, dry_run=False, env='default'):
    def find(key, value):
        # find matching nodes recursively; only descend iterables that aren't strings
        if isinstance(value, Iterable) and not isinstance(value, string_types):
            for k, v in iteritems(value):
                if k == key:
                    yield v
                elif isinstance(v, dict):
                    for result in find(key, v):
                        yield result
                elif isinstance(v, list):
                    for item in v:
                        for result in find(key, item):
                            yield result

    build_data = load_yaml(yaml_path)
    # NOTE: the guarding condition and the default for ``env`` were lost in extraction;
    # the branch below is reconstructed as "the build file references checks", which is
    # what the `find` helper above exists for, with 'checks.yml' as an assumed default path.
    if list(find('checks', build_data)):
        if checks_path is None:
            checks_path = 'checks.yml'
        checks_contents = load_yaml(checks_path, optional=True)
    elif checks_path is not None:
        checks_contents = load_yaml(checks_path)
    else:
        checks_contents = None

    build_package_from_contents(team, username, package, subpath, os.path.dirname(yaml_path),
                                build_data, checks_contents=checks_contents,
                                dry_run=dry_run, env=env)
Builds a package from a given Yaml file and installs it locally. Returns the name of the package.
385,490
def get_value(self, section, option, default=None):
    try:
        valuestr = self.get(section, option)
    except Exception:
        if default is not None:
            return default
        raise

    types = (int, float)
    for numtype in types:
        try:
            val = numtype(valuestr)
            # truncated value, e.g. int parse of "35.5"?
            if val != float(valuestr):
                continue
            return val
        except (ValueError, TypeError):
            continue

    vl = valuestr.lower()
    if vl == 'false':
        return False
    if vl == 'true':
        return True

    if not isinstance(valuestr, string_types):
        raise TypeError("Invalid value type: only int, long, float and str are allowed", valuestr)
    return valuestr
:param default: If not None, the given default value will be returned in case the option did not exist :return: a properly typed value, either int, float or string :raise TypeError: in case the value could not be understood Otherwise the exceptions known to the ConfigParser will be raised.
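A standalone sketch of the conversion order described here (int first, then float, then boolean literals, otherwise the raw string); illustrative helper, not the actual config class.

def coerce(valuestr):
    for numtype in (int, float):
        try:
            val = numtype(valuestr)
            if val != float(valuestr):   # truncated value, e.g. int of "35.5"
                continue
            return val
        except (ValueError, TypeError):
            continue
    if valuestr.lower() == 'false':
        return False
    if valuestr.lower() == 'true':
        return True
    return valuestr

print([coerce(s) for s in ("42", "3.5", "true", "hello")])
# [42, 3.5, True, 'hello']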
385,491
def configure(self, viewport=None, fbo_size=None, fbo_rect=None, canvas=None): if canvas is not None: self.canvas = canvas canvas = self._canvas if canvas is None: raise RuntimeError("No canvas assigned to this TransformSystem.") map_from = [(0, 0), canvas.size] map_to = [(0, canvas.physical_size[1]), (canvas.physical_size[0], 0)] self._canvas_transform.transforms[1].set_mapping(map_from, map_to) if fbo_rect is None: self._canvas_transform.transforms[0].scale = (1, 1, 1) self._canvas_transform.transforms[0].translate = (0, 0, 0) else: map_from = [(fbo_rect[0], fbo_rect[1]), (fbo_rect[0] + fbo_rect[2], fbo_rect[1] + fbo_rect[3])] map_to = [(0, 0), fbo_size] self._canvas_transform.transforms[0].set_mapping(map_from, map_to) if viewport is None: if fbo_size is None: map_from = [(0, 0), canvas.physical_size] else: map_from = [(0, 0), fbo_size] else: map_from = [viewport[:2], (viewport[0] + viewport[2], viewport[1] + viewport[3])] map_to = [(-1, -1), (1, 1)] self._framebuffer_transform.transforms[0].set_mapping(map_from, map_to)
Automatically configure the TransformSystem: * canvas_transform maps from the Canvas logical pixel coordinate system to the framebuffer coordinate system, taking into account the logical/physical pixel scale factor, current FBO position, and y-axis inversion. * framebuffer_transform maps from the current GL viewport on the framebuffer coordinate system to clip coordinates (-1 to 1). Parameters ========== viewport : tuple or None The GL viewport rectangle (x, y, w, h). If None, then it is assumed to cover the entire canvas. fbo_size : tuple or None The size of the active FBO. If None, then it is assumed to have the same size as the canvas's framebuffer. fbo_rect : tuple or None The position and size (x, y, w, h) of the FBO in the coordinate system of the canvas's framebuffer. If None, then the bounds are assumed to cover the entire active framebuffer. canvas : Canvas instance Optionally set the canvas for this TransformSystem. See the `canvas` property.
385,492
def weather_history_at_id(self, id, start=None, end=None):
    # NOTE: several string literals were stripped from the extracted source; the
    # messages, dict keys and parser name below are reconstructed from context.
    assert type(id) is int, "'id' must be an int"
    if id < 0:
        raise ValueError("'id' value must be greater than 0")
    params = {'id': id, 'lang': self._language}
    if start is None and end is None:
        pass
    elif start is not None and end is not None:
        unix_start = timeformatutils.to_UNIXtime(start)
        unix_end = timeformatutils.to_UNIXtime(end)
        if unix_start >= unix_end:
            raise ValueError("Error: the start time boundary must "
                             "precede the end time!")
        current_time = time()
        if unix_start > current_time:
            raise ValueError("Error: the start time boundary must "
                             "precede the current time!")
        params['start'] = str(unix_start)
        params['end'] = str(unix_end)
    else:
        raise ValueError("Error: one of the time boundaries is None, "
                         "while the other is not!")
    uri = http_client.HttpClient.to_url(CITY_WEATHER_HISTORY_URL,
                                        self._API_key,
                                        self._subscription_type,
                                        self._use_ssl)
    _, json_data = self._wapi.cacheable_get_json(uri, params=params)
    return self._parsers['weather_history'].parse_JSON(json_data)
Queries the OWM Weather API for weather history for the specified city ID. A list of *Weather* objects is returned. It is possible to query for weather history in a closed time period, whose boundaries can be passed as optional parameters. :param id: the city ID :type id: int :param start: the object conveying the time value for the start query boundary (defaults to ``None``) :type start: int, ``datetime.datetime`` or ISO8601-formatted string :param end: the object conveying the time value for the end query boundary (defaults to ``None``) :type end: int, ``datetime.datetime`` or ISO8601-formatted string :returns: a list of *Weather* instances or ``None`` if history data is not available for the specified location :raises: *ParseResponseException* when OWM Weather API responses' data cannot be parsed, *APICallException* when OWM Weather API can not be reached, *ValueError* if the time boundaries are not in the correct chronological order, if one of the time boundaries is not ``None`` and the other is or if one or both of the time boundaries are after the current time
385,493
def _prepare_data_dir(self, data):
    logger.debug(__("Preparing data directory for Data with id {}.", data.id))

    with transaction.atomic():
        temporary_location_string = uuid.uuid4().hex[:10]
        data_location = DataLocation.objects.create(subpath=temporary_location_string)
        data_location.subpath = str(data_location.id)
        data_location.save()
        data_location.data.add(data)

    # NOTE: the settings keys below were stripped from the extracted source and are
    # reconstructed as plausible names.
    output_path = self._get_per_data_dir('DATA_DIR', data_location.subpath)
    dir_mode = self.settings_actual.get('FLOW_EXECUTOR', {}).get('DATA_DIR_MODE', 0o755)
    os.mkdir(output_path, mode=dir_mode)
    os.chmod(output_path, dir_mode)
    return output_path
Prepare destination directory where the data will live. :param data: The :class:`~resolwe.flow.models.Data` object for which to prepare the private execution directory. :return: The prepared data directory path. :rtype: str
385,494
def omitted_parcov(self): if self.__omitted_parcov is None: self.log("loading omitted_parcov") self.__load_omitted_parcov() self.log("loading omitted_parcov") return self.__omitted_parcov
get the omitted prior parameter covariance matrix Returns ------- omitted_parcov : pyemu.Cov Note ---- returns a reference. If ErrorVariance.__omitted_parcov is None, the attribute is dynamically loaded.
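The lazy-loading property pattern used here, shown as a generic standalone sketch: the private attribute starts as None, is computed on first access, and the cached reference is returned afterwards.

class LazyExample:
    def __init__(self):
        self.__cov = None

    def _load_cov(self):
        print("expensive load runs once")
        self.__cov = [[1.0, 0.0], [0.0, 1.0]]

    @property
    def cov(self):
        if self.__cov is None:
            self._load_cov()
        return self.__cov

e = LazyExample()
e.cov   # triggers the load
e.cov   # cached reference, no reload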
385,495
def stylesheet_declarations(string, is_merc=False, scale=1):
    # NOTE: several string literals were stripped from the extracted source; the
    # selector element, property name, value and sort key below are reconstructed
    # as plausible defaults.
    display_map = Declaration(Selector(SelectorElement(['*'], [])),
                              Property('display'), Value('map', False),
                              (False, (0, 0, 0), (0, 0)))
    declarations = [display_map]

    tokens = cssTokenizer().tokenize(string)
    variables = {}

    while True:
        try:
            for declaration in parse_rule(tokens, variables, [], [], is_merc):
                if scale != 1:
                    declaration.scaleBy(scale)
                declarations.append(declaration)
        except StopIteration:
            break

    return sorted(declarations, key=operator.attrgetter('sort_key'))
Parse a string representing a stylesheet into a list of declarations. Required boolean is_merc indicates whether the projection should be interpreted as spherical mercator, so we know what to do with zoom/scale-denominator in parse_rule().
385,496
def add_infos(self, *keyvals, **kwargs):
    # NOTE: the string literals in this method were stripped from the extracted
    # source; the messages, file mode and kwarg name below are reconstructed.
    kv_pairs = []
    for key, val in keyvals:
        key = key.strip()
        val = str(val).strip()
        if ':' in key:
            raise ValueError('Keys must not contain ":": {}'.format(key))
        kv_pairs.append((key, val))

    for k, v in kv_pairs:
        if k in self._info:
            raise ValueError('{} is already recorded with value {}, cannot add value {}'.format(
                k, self._info[k], v))
        self._info[k] = v

    try:
        with open(self._info_file, 'a') as outfile:
            for k, v in kv_pairs:
                outfile.write('{}: {}\n'.format(k, v))
    except IOError:
        if not kwargs.get('ignore_errors', False):
            raise
Adds the given info and returns a dict composed of just this added info.
385,497
def write(models, out=None, base=None, propertybase=None, shorteners=None, logger=logging):
    # NOTE: the literal strings written to the output were stripped from the extracted
    # source; the Markdown-style templates below are reconstructed and only indicative
    # of the Versa literate format.
    assert out is not None
    if not isinstance(models, list):
        models = [models]
    shorteners = shorteners or {}

    all_propertybase = [propertybase] if propertybase else []
    all_propertybase.append(VERSA_BASEIRI)

    if any((base, propertybase, shorteners)):
        out.write('# @docheader\n\n* @iri:\n')
    if base:
        out.write('    * @base: {0}'.format(base))
    out.write('\n\n')

    origin_space = set()
    for m in models:
        origin_space.update(all_origins(m))

        for o in origin_space:
            out.write('# {0}\n\n'.format(o))
            for o_, r, t, a in m.match(o):
                abbr_r = abbreviate(r, all_propertybase)
                value_format(t)
                out.write('* {0}: {1}\n'.format(abbr_r, value_format(t)))
                for k, v in a.items():
                    abbr_k = abbreviate(k, all_propertybase)
                    out.write('    * {0}: {1}\n'.format(k, value_format(v)))
            out.write('\n')
    return
models - input Versa models from which output is generated. Must be a sequence object, not an iterator
385,498
def mandel(x, y, max_iters): i = 0 c = complex(x,y) z = 0.0j for i in range(max_iters): z = z*z + c if (z.real*z.real + z.imag*z.imag) >= 4: return i return 255
Given the real and imaginary parts of a complex number, determine if it is a candidate for membership in the Mandelbrot set given a fixed number of iterations.
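Quick usage check of `mandel`, assuming the function above is in scope: points inside the set reach the iteration cap and return 255, points that escape return the iteration at which they escaped.

print(mandel(0.0, 0.0, 20))   # 255 (origin never escapes within 20 iterations)
print(mandel(2.0, 2.0, 20))   # 0 (escapes on the first iteration)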
385,499
def nvmlUnitGetCount():
    c_count = c_uint()
    fn = _nvmlGetFunctionPointer("nvmlUnitGetCount")
    ret = fn(byref(c_count))
    _nvmlCheckReturn(ret)
    return bytes_to_str(c_count.value)
r""" /** * Retrieves the number of units in the system. * * For S-class products. * * @param unitCount Reference in which to return the number of units * * @return * - \ref NVML_SUCCESS if \a unitCount has been set * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized * - \ref NVML_ERROR_INVALID_ARGUMENT if \a unitCount is NULL * - \ref NVML_ERROR_UNKNOWN on any unexpected error */ nvmlReturn_t DECLDIR nvmlUnitGetCount
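A hedged usage sketch: this requires an NVIDIA driver on the machine and assumes the `nvmlInit`/`nvmlShutdown` wrappers from the same bindings module; on systems without S-class units the count is simply 0.

nvmlInit()
try:
    print("units:", nvmlUnitGetCount())
finally:
    nvmlShutdown()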