Dataset schema: `Unnamed: 0` (int64, 0 to 389k), `code` (string, 26 to 79.6k chars), `docstring` (string, 1 to 46.9k chars).
15,600
def run(edges, iterations=1000, force_strength=5.0, dampening=0.01,
        max_velocity=2.0, max_distance=50, is_3d=True):
    # Requires: from itertools import repeat, combinations
    nodes = set(e['source'] for e in edges) | set(e['target'] for e in edges)
    d = 3 if is_3d else 2
    nodes = {n: {'velocity': [0.0] * d, 'force': [0.0] * d} for n in nodes}
    for _ in repeat(None, iterations):
        # Repulsion between every pair of nodes
        for node1, node2 in combinations(nodes.values(), 2):
            _coulomb(node1, node2, force_strength, max_distance)
        # Attraction along every edge
        for edge in edges:
            _hooke(nodes[edge['source']], nodes[edge['target']],
                   force_strength * edge.get('size', 1), max_distance)
        # Apply dampened, velocity-capped forces
        for node in nodes.values():
            force = [_constrain(dampening * f, -max_velocity, max_velocity)
                     for f in node['force']]
            node['velocity'] = [v + dv for v, dv in zip(node['velocity'], force)]
            node['force'] = [0] * d
    for node in nodes.values():
        del node['force']
        node['location'] = node['velocity']
        del node['velocity']
        if not is_3d:
            node['location'] += [0.0]
    return nodes
Runs a force-directed-layout algorithm on the input graph.

iterations - Number of FDL iterations to run in coordinate generation
force_strength - Strength of Coulomb and Hooke forces (edit this to scale the distance between nodes)
dampening - Multiplier to reduce force applied to nodes
max_velocity - Maximum distance a node can move in one step
max_distance - The maximum distance considered for interactions
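A minimal usage sketch, assuming the module's `_coulomb`, `_hooke`, and `_constrain` helpers are importable alongside `run`, and that edges are dicts with 'source'/'target' keys (node names here are hypothetical):

    # Lay out a small triangle graph in 2D.
    edges = [
        {'source': 'a', 'target': 'b'},
        {'source': 'b', 'target': 'c'},
        {'source': 'c', 'target': 'a'},
    ]
    positions = run(edges, iterations=200, is_3d=False)
    for node, props in positions.items():
        print(node, props['location'])  # [x, y, 0.0] when is_3d=False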
15,601
def get_event_attendees(self, id, **data):
    return self.get("/events/{0}/attendees/".format(id), data=data)
GET /events/:id/attendees/ Returns a :ref:`paginated <pagination>` response with a key of ``attendees``, containing a list of :format:`attendee`.
15,602
def get_context_data(self, **kwargs):
    context = super(FriendListView, self).get_context_data(**kwargs)
    friends = []
    for friend_list in self.social_friend_lists:
        fs = friend_list.existing_social_friends()
        for f in fs:
            friends.append(f)
    context['friends'] = friends
    connected_providers = []
    for sa in self.social_auths:
        connected_providers.append(sa.provider)
    context['connected_providers'] = connected_providers
    return context
Checks if there is a SocialFriend model record for the user; if not, attempts to create one. If all fail, redirects to the next page.
15,603
def _paths_must_exists(path):
    path = to_unicode(path)
    if not os.path.exists(path):
        raise argparse.ArgumentTypeError("{} is not a valid file/folder.".format(path))
    return path
Raises an error if path doesn't exist.

:param path: str path to check
:return: str same path passed in
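Since the helper raises `argparse.ArgumentTypeError`, it is shaped to plug in as an argparse `type=` callback; a minimal sketch:

    import argparse

    parser = argparse.ArgumentParser()
    # Each --input value is validated before the parser returns it.
    parser.add_argument('--input', type=_paths_must_exists)
    args = parser.parse_args(['--input', '/etc/hosts'])
    print(args.input)  # the same path, now known to exist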
15,604
def column_spec_path(cls, project, location, dataset, table_spec, column_spec):
    return google.api_core.path_template.expand(
        "projects/{project}/locations/{location}/datasets/{dataset}/tableSpecs/{table_spec}/columnSpecs/{column_spec}",
        project=project,
        location=location,
        dataset=dataset,
        table_spec=table_spec,
        column_spec=column_spec,
    )
Return a fully-qualified column_spec string.
15,605
def check_update():
    r = requests.get("https://pypi.python.org/pypi/prof/json")
    data = r.json()
    # PyPI's JSON API exposes the latest release under info.version
    if versiontuple(data['info']['version']) > versiontuple(__version__):
        return True
    return False
Return True if an update is available on pypi
15,606
def _update_assignment_email_status(offer_assignment_id, send_id, status, site_code=None):
    # String literals below are reconstructed from the docstring; exact values were elided.
    api = get_ecommerce_client(url_postfix='assignment-email/', site_code=site_code)
    post_data = {
        'offer_assignment_id': offer_assignment_id,
        'send_id': send_id,
        'status': status,
    }
    try:
        api_response = api.status().post(post_data)
    except RequestException:
        logger.exception(
            'Exception raised while updating email status for offer_assignment_id: '
            '{token_offer} and send_id: {token_send_id}'.format(
                token_offer=offer_assignment_id,
                token_send_id=send_id
            )
        )
        return False
    return True if api_response.get('status') == 'updated' else False
Update the offer_assignment and offer_assignment_email model using the Ecommerce assignmentemail api.

Arguments:
    offer_assignment_id (str): Key of the entry in the offer_assignment model.
    send_id (str): Unique message id from Sailthru
    status (str): status to be sent to the api
    site_code (str): site code

Returns:
    True or False based on model update status from Ecommerce api
15,607
def msg_curse(self, args=None, max_width=None):
    ret = []
    if not self.stats or self.is_disable():
        return ret
    name_max_width = max_width - 7
    # Literal strings reconstructed; header/format templates were elided in the source.
    msg = '{:{width}}'.format('FOLDERS', width=name_max_width)
    ret.append(self.curse_add_line(msg, "TITLE"))
    for i in self.stats:
        ret.append(self.curse_new_line())
        if len(i['path']) > name_max_width:
            path = '_' + i['path'][-name_max_width + 1:]
        else:
            path = i['path']
        msg = '{:{width}}'.format(nativestr(path), width=name_max_width)
        ret.append(self.curse_add_line(msg))
        try:
            msg = '{:>9}'.format(self.auto_unit(i['size']))
        except (TypeError, ValueError):
            msg = '{:>9}'.format(i['size'])
        ret.append(self.curse_add_line(msg, self.get_alert(i, header='folder_' + i['indice'])))
    return ret
Return the dict to display in the curse interface.
15,608
def list_available_tools(self):
    benchmarks = []
    if self.alternative_config_dir:
        for n in glob.glob(os.path.join(self.alternative_config_dir,
                                        self.BENCHMARKS_DIR, '...')):  # glob pattern elided in source
            benchmarks.append(BenchmarkToolConfiguration(n))
    for n in glob.glob(os.path.join(self.default_config_dir,
                                    self.BENCHMARKS_DIR, '...')):
        benchmarks.append(BenchmarkToolConfiguration(n))
    return benchmarks
Lists all the Benchmark configuration files found in the configuration folders.
15,609
def inDignities(self, idA, idB):
    objA = self.chart.get(idA)
    info = essential.getInfo(objA.sign, objA.signlon)
    return [dign for (dign, ID) in info.items() if ID == idB]
Returns the dignities of A which belong to B.
15,610
def _find_key_cols(df):
    keys = []
    for col in df:
        # A column is a candidate key when every value is unique
        if len(df[col].unique()) == len(df[col]):
            keys.append(col)
    return keys
Identify columns in a DataFrame that could be a unique key
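A quick sketch of the helper on a toy DataFrame (pandas assumed):

    import pandas as pd

    df = pd.DataFrame({
        'id': [1, 2, 3],           # all values unique -> candidate key
        'group': ['a', 'a', 'b'],  # duplicates -> not a key
    })
    print(_find_key_cols(df))  # ['id']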
15,611
def is_seq_of(seq, expected_type, seq_type=None):
    if seq_type is None:
        exp_seq_type = collections_abc.Sequence
    else:
        assert isinstance(seq_type, type)
        exp_seq_type = seq_type
    if not isinstance(seq, exp_seq_type):
        return False
    for item in seq:
        if not isinstance(item, expected_type):
            return False
    return True
Check whether it is a sequence of some type.

Args:
    seq (Sequence): The sequence to be checked.
    expected_type (type): Expected type of sequence items.
    seq_type (type, optional): Expected sequence type.

Returns:
    bool: Whether the sequence is valid.
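For example (using the function as defined above, with `collections_abc` standing for `collections.abc`):

    print(is_seq_of([1, 2, 3], int))               # True: a list of ints
    print(is_seq_of((1, 'a'), int))                # False: 'a' is not an int
    print(is_seq_of([1, 2], int, seq_type=tuple))  # False: a list is not a tuple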
15,612
def _publish_queue_grpc(self):
    messages = EventHub_pb2.Messages(msg=self._tx_queue)
    publish_request = EventHub_pb2.PublishRequest(messages=messages)
    self.grpc_manager.send_message(publish_request)
Send the messages in the tx queue to the GRPC manager.

:return: None
15,613
def build_api_struct(self):
    self.clean()
    data = {"type": self.measurement_type}
    for option in self.used_options:
        option_key, option_value = self.v2_translator(option)
        data.update({option_key: option_value})
    return data
Calls the clean method of the class and returns the info in a structure that the Atlas API accepts.
15,614
def attach_attachment(self, analysis, attachment):
    if not attachment:
        return
    if isinstance(attachment, list):
        for attach in attachment:
            self.attach_attachment(analysis, attach)
        return
    an_atts = analysis.getAttachment()
    atts_filenames = [att.getAttachmentFile().filename for att in an_atts]
    if attachment.getAttachmentFile().filename not in atts_filenames:
        an_atts.append(attachment)
        logger.info("Attaching %s to %s" % (attachment.UID(), analysis))
        analysis.setAttachment([att.UID() for att in an_atts])
        analysis.reindexObject()
    else:
        self.warn("Attachment %s was not linked to analysis %s"
                  % (attachment.UID(), analysis))
Attach a file or a given set of files to an analysis.

:param analysis: analysis where the files are to be attached
:param attachment: files to be attached. This can be either a single file or a list of files
:return: None
15,615
def setup(self, phase=None, quantity='', conductance='', **kwargs):
    if phase:
        self.settings['phase'] = phase.name
    if quantity:
        self.settings['quantity'] = quantity
    if conductance:
        self.settings['conductance'] = conductance
    self.settings.update(**kwargs)
r""" This method takes several arguments that are essential to running the algorithm and adds them to the settings. Parameters ---------- phase : OpenPNM Phase object The phase on which the algorithm is to be run. quantity : string The name of the physical quantity to be calculated. conductance : string The name of the pore-scale transport conductance values. These are typically calculated by a model attached to a *Physics* object associated with the given *Phase*. solver : string To use the default scipy solver, set this value to `spsolve` or `umfpack`. To use an iterative solver or a non-scipy solver, additional arguments are required as described next. solver_family : string The solver package to use. OpenPNM currently supports ``scipy``, ``pyamg`` and ``petsc`` (if you have it installed). The default is ``scipy``. solver_type : string The specific solver to use. For instance, if ``solver_family`` is ``scipy`` then you can specify any of the iterative solvers such as ``cg`` or ``gmres``. [More info here] (https://docs.scipy.org/doc/scipy/reference/sparse.linalg.html) solver_preconditioner : string This is used by the PETSc solver to specify which preconditioner to use. The default is ``jacobi``. solver_atol : scalar Used to control the accuracy to which the iterative solver aims. The default is 1e-6. solver_rtol : scalar Used by PETSc as an additional tolerance control. The default is 1e-6. solver_maxiter : scalar Limits the number of iterations to attempt before quiting when aiming for the specified tolerance. The default is 5000.
15,616
def _clip_line(self, line_pt_1, line_pt_2):
    x_min = min(line_pt_1[0], line_pt_2[0])
    x_max = max(line_pt_1[0], line_pt_2[0])
    y_min = min(line_pt_1[1], line_pt_2[1])
    y_max = max(line_pt_1[1], line_pt_2[1])
    extent = self.extent()
    if line_pt_1[0] == line_pt_2[0]:
        return ((line_pt_1[0], max(y_min, extent[1])),
                (line_pt_1[0], min(y_max, extent[3])))
    if line_pt_1[1] == line_pt_2[1]:
        return ((max(x_min, extent[0]), line_pt_1[1]),
                (min(x_max, extent[2]), line_pt_1[1]))
    if ((extent[0] <= line_pt_1[0] < extent[2]) and
            (extent[1] <= line_pt_1[1] < extent[3]) and
            (extent[0] <= line_pt_2[0] < extent[2]) and
            (extent[1] <= line_pt_2[1] < extent[3])):
        return line_pt_1, line_pt_2
    ts = [0.0,
          1.0,
          float(extent[0] - line_pt_1[0]) / (line_pt_2[0] - line_pt_1[0]),
          float(extent[2] - line_pt_1[0]) / (line_pt_2[0] - line_pt_1[0]),
          float(extent[1] - line_pt_1[1]) / (line_pt_2[1] - line_pt_1[1]),
          float(extent[3] - line_pt_1[1]) / (line_pt_2[1] - line_pt_1[1])]
    ts.sort()
    # The original tested ts[2] >= 1 twice; the second occurrence is
    # presumably meant to be ts[3] >= 1.
    if (ts[2] < 0) or (ts[2] >= 1) or (ts[3] < 0) or (ts[3] >= 1):
        return None
    result = [(pt_1 + t * (pt_2 - pt_1))
              for t in (ts[2], ts[3])
              for (pt_1, pt_2) in zip(line_pt_1, line_pt_2)]
    return (result[:2], result[2:])
clip line to canvas
15,617
def get_stack_index(self, stack_index, plugin_index):
    other_plugins_count = sum([other_tabs[0].count()
                               for other_tabs in
                               self.plugins_tabs[:plugin_index]])
    real_index = stack_index - other_plugins_count
    return real_index
Get the real index of the selected item.
15,618
async def analog_write(self, command):
    pin = int(command[0])
    value = int(command[1])
    await self.core.analog_write(pin, value)
This method writes a value to an analog pin. It is used to set the output of a PWM pin or the angle of a Servo.

:param command: {"method": "analog_write", "params": [PIN, WRITE_VALUE]}
:returns: No return message.
15,619
def multiget(client, keys, **options):
    transient_pool = False
    outq = Queue()
    if 'pool' in options:
        pool = options['pool']
        del options['pool']
    else:
        pool = MultiGetPool()
        transient_pool = True
    try:
        pool.start()
        for bucket_type, bucket, key in keys:
            task = Task(client, outq, bucket_type, bucket, key, None, options)
            pool.enq(task)
        results = []
        for _ in range(len(keys)):
            if pool.stopped():
                raise RuntimeError('Multi-get operation did not complete: '
                                   'pool was stopped')
            results.append(outq.get())
            outq.task_done()
    finally:
        if transient_pool:
            pool.stop()
    return results
Executes a parallel-fetch across multiple threads. Returns a list containing :class:`~riak.riak_object.RiakObject` or :class:`~riak.datatypes.Datatype` instances, or 4-tuples of bucket-type, bucket, key, and the exception raised.

If a ``pool`` option is included, the request will use the given worker pool and not a transient :class:`~riak.client.multi.MultiGetPool`. This option will be passed by the client if the ``multiget_pool_size`` option was set on client initialization.

:param client: the client to use
:type client: :class:`~riak.client.RiakClient`
:param keys: the keys to fetch in parallel
:type keys: list of three-tuples -- bucket_type/bucket/key
:param options: request options to :meth:`RiakBucket.get <riak.bucket.RiakBucket.get>`
:type options: dict
:rtype: list
15,620
def info(self, message, domain=None):
    if domain is None:
        domain = self.extension_name
    info(message, domain)
Shortcut function for `utils.loggable.info`

Args:
    message: see `utils.loggable.info`
    domain: see `utils.loggable.info`
15,621
def add_pyspark_path():
    try:
        spark_home = os.environ['SPARK_HOME']
        sys.path.append(os.path.join(spark_home, 'python'))
        py4j_src_zip = glob(os.path.join(spark_home, 'python', 'lib',
                                         'py4j-*-src.zip'))
        if len(py4j_src_zip) == 0:
            raise ValueError('py4j source archive not found in %s'
                             % os.path.join(spark_home, 'python', 'lib'))
        else:
            # Pick the newest py4j archive
            py4j_src_zip = sorted(py4j_src_zip)[::-1]
            sys.path.append(py4j_src_zip[0])
    except KeyError:
        logging.error('SPARK_HOME was not set, please set it')
        exit(-1)
    except ValueError as e:
        logging.error(str(e))
        exit(-1)
Add PySpark to the library path based on the value of SPARK_HOME.
15,622
def result(self, wait=False):
    if wait:
        self._async_resp.wait()
    if not self.finished():
        raise RuntimeError("Result is not ready yet")
    raw_response = self._async_resp.get()
    return Result(result=raw_response["result"],
                  error=raw_response["error"],
                  id=raw_response["id"],
                  method_call=self.request)
Gets the result of the method call. If the call was successful, return the result; otherwise, reraise the exception.

:param wait: Block until the result is available, or just get the result.
:raises: RuntimeError when called and the result is not yet available.
15,623
def cmd_follow(self, args):
    if len(args) < 2:
        print("map follow 0|1")
        return
    follow = int(args[1])
    self.map.set_follow(follow)
control following of vehicle
15,624
def _serialize_uint(value, size=32, padding=0):
    if size <= 0 or size > 32:
        raise ValueError
    from .account import EVMAccount
    if not isinstance(value, (int, BitVec, EVMAccount)):
        raise ValueError
    if issymbolic(value):
        # Variable name template reconstructed; the literal was elided.
        bytes = ArrayVariable(index_bits=256, index_max=32, value_bits=8,
                              name='temp{}'.format(uuid.uuid1()))
        if value.size <= size * 8:
            value = Operators.ZEXTEND(value, size * 8)
        else:
            value = Operators.EXTRACT(value, 0, size * 8)
        bytes = ArrayProxy(bytes.write_BE(padding, value, size))
    else:
        value = int(value)
        bytes = bytearray()
        for _ in range(padding):
            bytes.append(0)
        for position in reversed(range(size)):
            bytes.append(Operators.EXTRACT(value, position * 8, 8))
    assert len(bytes) == size + padding
    return bytes
Translates a Python integer or a BitVec into a 32-byte string, MSB first.
15,625
def invalidate(self, key):
    if key not in self.data:
        return
    del self.data[key]
    for cname in self.components:
        if key in self.depends[cname]:
            for downstream_key in self.provides[cname]:
                self.invalidate(downstream_key)
Remove the given data item along with all items that depend on it in the graph.
15,626
def from_parmed(cls, path, *args, **kwargs):
    st = parmed.load_file(path, structure=True, *args, **kwargs)
    box = kwargs.pop('box', getattr(st, 'box', None))
    velocities = kwargs.pop('velocities', getattr(st, 'velocities', None))
    positions = kwargs.pop('positions', getattr(st, 'positions', None))
    return cls(master=st, topology=st.topology, positions=positions, box=box,
               velocities=velocities, path=path, **kwargs)
Try to load a file automatically with ParmEd. Not guaranteed to work, but might be useful if it succeeds.

Arguments
---------
path : str
    Path to file that ParmEd can load
15,627
def create_textfile_with_contents(filename, contents, encoding='utf-8'):
    ensure_directory_exists(os.path.dirname(filename))
    if os.path.exists(filename):
        os.remove(filename)
    outstream = codecs.open(filename, "w", encoding)
    outstream.write(contents)
    if contents and not contents.endswith("\n"):
        outstream.write("\n")
    outstream.flush()
    outstream.close()
    assert os.path.exists(filename), "ENSURE file exists: %s" % filename
Creates a textual file with the provided contents in the workdir. Overwrites an existing file.
15,628
def get_new_document(self, cursor_pos=None):
    lines = []
    if self.original_document.text_before_cursor:
        lines.append(self.original_document.text_before_cursor)
    for line_no in sorted(self.selected_lines):
        lines.append(self.history_lines[line_no])
    if self.original_document.text_after_cursor:
        lines.append(self.original_document.text_after_cursor)
    text = '\n'.join(lines)
    if cursor_pos is not None and cursor_pos > len(text):
        cursor_pos = len(text)
    return Document(text, cursor_pos)
Create a `Document` instance that contains the resulting text.
15,629
def volume_present(name, bricks, stripe=False, replica=False, device_vg=False,
                   transport='tcp', start=False, force=False, arbiter=False):
    # Dict keys and messages reconstructed from the salt state's conventions;
    # the literal strings were elided in the source.
    ret = {'name': name, 'changes': {}, 'comment': '', 'result': False}
    if suc.check_name(name, 'a-zA-Z0-9._-'):
        ret['comment'] = 'Invalid characters in volume name.'
        return ret
    volumes = __salt__['glusterfs.list_volumes']()
    if name not in volumes:
        if __opts__['test']:
            comment = 'Volume {0} will be created'.format(name)
            if start:
                comment += ' and started'
            ret['comment'] = comment
            ret['result'] = None
            return ret
        vol_created = __salt__['glusterfs.create_volume'](
            name, bricks, stripe, replica, device_vg,
            transport, start, force, arbiter)
        if not vol_created:
            ret['comment'] = 'Creation of volume {0} failed'.format(name)
            return ret
        old_volumes = volumes
        volumes = __salt__['glusterfs.list_volumes']()
        if name in volumes:
            ret['changes'] = {'new': volumes, 'old': old_volumes}
            ret['comment'] = 'Volume {0} is created'.format(name)
    else:
        ret['comment'] = 'Volume {0} already exists'.format(name)
    if start:
        if __opts__['test']:
            ret['comment'] = ret['comment'] + ' and will be started'
            ret['result'] = None
            return ret
        if int(__salt__['glusterfs.info']()[name]['status']) == 1:
            ret['result'] = True
            ret['comment'] = ret['comment'] + ' and is started'
        else:
            vol_started = __salt__['glusterfs.start_volume'](name)
            if vol_started:
                ret['result'] = True
                ret['comment'] = ret['comment'] + ' and is started'
                if not ret['changes']:
                    ret['changes'] = {'new': 'started', 'old': 'stopped'}
            else:
                ret['comment'] = ret['comment'] + ' but failed to start'
                return ret
    if __opts__['test']:
        ret['result'] = None
    else:
        ret['result'] = True
    return ret
Ensure that the volume exists.

name
    name of the volume
bricks
    list of brick paths
replica
    replica count for volume
arbiter
    use every third brick as arbiter (metadata only)

    .. versionadded:: 2019.2.0
start
    ensure that the volume is also started

.. code-block:: yaml

    myvolume:
      glusterfs.volume_present:
        - bricks:
            - host1:/srv/gluster/drive1
            - host2:/srv/gluster/drive2

    Replicated Volume:
      glusterfs.volume_present:
        - name: volume2
        - bricks:
          - host1:/srv/gluster/drive2
          - host2:/srv/gluster/drive3
        - replica: 2
        - start: True

    Replicated Volume with arbiter brick:
      glusterfs.volume_present:
        - name: volume3
        - bricks:
          - host1:/srv/gluster/drive2
          - host2:/srv/gluster/drive3
          - host3:/srv/gluster/drive4
        - replica: 3
        - arbiter: True
        - start: True
15,630
def get_user(self, login):
    return youtrack.User(self._get("/admin/user/" + urlquote(login.encode())), self)
http://confluence.jetbrains.net/display/YTD2/GET+user
15,631
def get_converter(rule):
    for converter, _, _ in parse_rule(str(rule)):
        if converter is not None:
            return converter
    return None
parse_rule extracts the converter from the rule as a generator. We iterate through the parse_rule results to find the converter: it yields the static rule part in the first iteration and, if the rule is dynamic, the dynamic part (with its converter) in the second.
15,632
def _name_to_index(self, channels):
    if hasattr(channels, '__iter__') \
            and not isinstance(channels, six.string_types):
        return [self._name_to_index(ch) for ch in channels]
    if isinstance(channels, six.string_types):
        if channels in self.channels:
            return self.channels.index(channels)
        else:
            raise ValueError("{} is not a valid channel name."
                             .format(channels))
    if isinstance(channels, int):
        if (channels < len(self.channels)
                and channels >= -len(self.channels)):
            return channels
        else:
            raise ValueError("index out of range")
    else:
        raise TypeError("input argument should be an integer, string or "
                        "list of integers or strings")
Return the channel indices for the specified channel names.

Integers contained in `channel` are returned unmodified, if they are within the range of ``self.channels``.

Parameters
----------
channels : int or str or list of int or list of str
    Name(s) of the channel(s) of interest.

Returns
-------
int or list of int
    Numerical index(ces) of the specified channels.
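Behaviour sketch, assuming `obj` is an instance whose `channels` attribute is `['FSC', 'SSC', 'FL1']` (a hypothetical object, for illustration):

    print(obj._name_to_index('SSC'))           # 1
    print(obj._name_to_index(['FSC', 'FL1']))  # [0, 2]
    print(obj._name_to_index(2))               # 2 (in-range ints pass through)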
15,633
def on_proposal(self, proposal, proto):
    "called to inform about synced peers"
    assert isinstance(proto, HDCProtocol)
    assert isinstance(proposal, Proposal)
    if proposal.height >= self.cm.height:
        assert proposal.lockset.is_valid
        self.last_active_protocol = proto
called to inform about synced peers
15,634
def buffer(self, frame):
    frame.buffer = self.temporary_identifier()
    self.writeline('%s = []' % frame.buffer)
Enable buffering for the frame from that point onwards.
15,635
def _create_variables(self, n_features, n_classes):
    # Variable names reconstructed; the literals were elided in the source.
    self.W_ = tf.Variable(
        tf.zeros([n_features, n_classes]), name='W')
    self.b_ = tf.Variable(
        tf.zeros([n_classes]), name='b')
Create the TensorFlow variables for the model.

:param n_features: number of features
:param n_classes: number of classes
:return: self
15,636
def list_cluster_role_binding(self, **kwargs):
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.list_cluster_role_binding_with_http_info(**kwargs)
    else:
        (data) = self.list_cluster_role_binding_with_http_info(**kwargs)
        return data
list_cluster_role_binding  # noqa: E501

list or watch objects of kind ClusterRoleBinding  # noqa: E501

This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True

>>> thread = api.list_cluster_role_binding(async_req=True)
>>> result = thread.get()

:param async_req bool
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1ClusterRoleBindingList
    If the method is called asynchronously, returns the request thread.
15,637
def _is_junction(arg):
    # The compared literal was elided in the source.
    return isinstance(arg, dict) and len(arg) == 1 and next(six.iterkeys(arg)) == '...'
Return True, if arg is a junction statement.
15,638
def _save_function_initial_state(self, function_key, function_address, state):
    # Log message reconstructed; the literal was elided in the source.
    l.debug('Saving the initial state for function %#08x with function key %s',
            function_address, function_key)
    if function_key in self._function_initial_states[function_address]:
        existing_state = self._function_initial_states[function_address][function_key]
        merged_state, _, _ = existing_state.merge(state)
        self._function_initial_states[function_address][function_key] = merged_state
    else:
        self._function_initial_states[function_address][function_key] = state
Save the initial state of a function, and merge it with existing ones if there are any.

:param FunctionKey function_key: The key to this function.
:param int function_address: Address of the function.
:param SimState state: Initial state of the function.
:return: None
15,639
def get_urlclass_from(scheme, assume_local_file=False):
    if scheme in ("http", "https"):
        klass = httpurl.HttpUrl
    elif scheme == "ftp":
        klass = ftpurl.FtpUrl
    elif scheme == "file":
        klass = fileurl.FileUrl
    elif scheme == "telnet":
        klass = telneturl.TelnetUrl
    elif scheme == "mailto":
        klass = mailtourl.MailtoUrl
    elif scheme in ("nntp", "news", "snews"):
        klass = nntpurl.NntpUrl
    elif scheme == "dns":
        klass = dnsurl.DnsUrl
    elif scheme == "itms-services":
        klass = itmsservicesurl.ItmsServicesUrl
    elif scheme and unknownurl.is_unknown_scheme(scheme):
        klass = unknownurl.UnknownUrl
    elif assume_local_file:
        klass = fileurl.FileUrl
    else:
        klass = unknownurl.UnknownUrl
    return klass
Return checker class for given URL scheme. If the scheme cannot be matched and assume_local_file is True, assume a local file.
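For instance, the dispatch above maps schemes to checker classes like this (module names as referenced in the function):

    print(get_urlclass_from('https'))  # <class 'httpurl.HttpUrl'>
    print(get_urlclass_from('ftp'))    # <class 'ftpurl.FtpUrl'>
    # A missing scheme falls back to FileUrl only when asked to:
    print(get_urlclass_from('', assume_local_file=True))  # fileurl.FileUrl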
15,640
def set_local_address(ams_netid):
    if isinstance(ams_netid, str):
        ams_netid_st = _parse_ams_netid(ams_netid)
    else:
        ams_netid_st = ams_netid
    assert isinstance(ams_netid_st, SAmsNetId)
    if linux:
        return adsSetLocalAddress(ams_netid_st)
    else:
        raise ADSError(
            text="SetLocalAddress is not supported for Windows clients."
        )
Set the local NetID (**Linux only**).

:param str: new AmsNetID
:rtype: None

**Usage:**

    >>> import pyads
    >>> pyads.open_port()
    >>> pyads.set_local_address('0.0.0.0.1.1')
15,641
def add_property(self, prop, objects=()):
    self._properties.add(prop)
    self._objects |= objects
    self._pairs.update((o, prop) for o in objects)
Add a property to the definition and add ``objects`` as related.
15,642
def pathparts(self):
    try:
        parts = self.parent.pathparts()
        parts.append(self.name)
        return parts
    except AttributeError:
        # The root node has no parent and contributes nothing
        return []
A list of the parts of the path, with the root node returning an empty list.
15,643
def read_message_handler(stream):
    # Packet keys and the message type reconstructed; literals were elided.
    while True:
        packet = yield from stream.get()
        session_id = packet.get('session_key')
        user_opponent = packet.get('username')
        message_id = packet.get('message_id')
        if session_id and user_opponent and message_id is not None:
            user_owner = get_user_from_session(session_id)
            if user_owner:
                message = models.Message.objects.filter(id=message_id).first()
                if message:
                    message.read = True
                    message.save()
                    logger.debug('Message ' + str(message_id) + ' is read')
                    opponent_socket = ws_connections.get(
                        (user_opponent, user_owner.username))
                    if opponent_socket:
                        yield from target_message(
                            opponent_socket,
                            {'type': 'opponent-read-message',
                             'username': user_opponent,
                             'message_id': message_id})
                else:
                    pass  # message not found
            else:
                pass  # invalid session id
        else:
            pass  # missing payload fields
Send message to user if the opponent has read the message
15,644
def predict(self, temp_type):
    # Branch literals inferred from the docstring's export kinds;
    # the template key is reconstructed.
    if temp_type == 'exported':
        temp = self.temp('exported.class')
        return temp.format(class_name=self.class_name,
                           method_name=self.method_name,
                           n_features=self.n_features)
    if temp_type == 'embedded':
        method = self.create_method_embedded()
        return self.create_class_embedded(method)
Transpile the predict method.

Parameters
----------
:param temp_type : string
    The kind of export type (embedded, separated, exported).

Returns
-------
:return : string
    The transpiled predict method as string.
15,645
def risk(self, domain, **kwargs):
    # Endpoint and items_path literals reconstructed; they were elided.
    return self._results('risk', '/v1/risk', items_path=('components',),
                         domain=domain, cls=Reputation, **kwargs)
Returns back the risk score for a given domain
15,646
def _generate_non_lastnames_variations(non_lastnames):
    if not non_lastnames:
        return []
    # For each name, allow the empty string, the initial, or the full name
    for idx, non_lastname in enumerate(non_lastnames):
        non_lastnames[idx] = (u'', non_lastname[0], non_lastname)
    return [
        (u' '.join(var_elem for var_elem in variation if var_elem)).strip()
        for variation in product(*non_lastnames)
    ]
Generate variations for all non-lastnames.

E.g. for 'John Richard', this method generates:
[
    'John', 'J', 'Richard', 'R',
    'John Richard', 'John R', 'J Richard', 'J R',
]
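The core trick is the `('', initial, full)` triple fed to `itertools.product`; a self-contained sketch of the same idea (note the empty string also appears, since both slots may be blank):

    from itertools import product

    non_lastnames = [('', 'J', 'John'), ('', 'R', 'Richard')]
    variations = [
        ' '.join(v for v in combo if v).strip()
        for combo in product(*non_lastnames)
    ]
    print(variations)
    # ['', 'R', 'Richard', 'J', 'J R', 'J Richard',
    #  'John', 'John R', 'John Richard']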
15,647
def api_auth(func):
    @wraps(func)
    def _decorator(request, *args, **kwargs):
        authentication = APIAuthentication(request)
        if authentication.authenticate():
            return func(request, *args, **kwargs)
        raise Http404
    return _decorator
If the user is not logged in, this decorator looks for basic HTTP auth data in the request header.
15,648
def exception_info(self, timeout=None):
    with self._condition:
        if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
            raise CancelledError()
        elif self._state == FINISHED:
            return self._exception, self._traceback
        self._condition.wait(timeout)
        if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
            raise CancelledError()
        elif self._state == FINISHED:
            return self._exception, self._traceback
        else:
            raise TimeoutError()
Return a tuple of (exception, traceback) raised by the call that the future represents.

Args:
    timeout: The number of seconds to wait for the exception if the future isn't done. If None, then there is no limit on the wait time.

Returns:
    The exception raised by the call that the future represents or None if the call completed without raising.

Raises:
    CancelledError: If the future was cancelled.
    TimeoutError: If the future didn't finish executing before the given timeout.
15,649
def insert(self, i, arg):
    arg = self.__coerce(arg)
    if isinstance(arg, Arg):
        super().insert(i, arg)
    if len(self) <= 1:
        self.all.append(arg)
    else:
        if i > len(self):
            i = len(self) - 1
        before = self[i - 1]
        index_before = self.all.index(before)
        self.all.insert(index_before + 1, arg)
r"""Insert whitespace, an unparsed argument string, or an argument object. :param int i: Index to insert argument into :param Arg arg: Argument to insert >>> arguments = TexArgs(['\n', RArg('arg0'), '[arg2]']) >>> arguments.insert(1, '[arg1]') >>> len(arguments) 3 >>> arguments [RArg('arg0'), OArg('arg1'), OArg('arg2')] >>> arguments.all ['\n', RArg('arg0'), OArg('arg1'), OArg('arg2')] >>> arguments.insert(10, '[arg3]') >>> arguments[3] OArg('arg3')
15,650
def _mkdirs(d):
    try:
        os.makedirs(d)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise
Make all directories up to d. No exception is raised if d exists.
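On Python 3.2+ the same race-tolerant behaviour is available directly; the helper above is the Python 2 compatible spelling:

    import os

    os.makedirs('a/b/c', exist_ok=True)  # no error if the tree already exists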
15,651
def normalize(u):
    u = np.asarray(u)
    unorm = np.sqrt(np.sum(u**2, axis=0))
    z = np.isclose(unorm, 0)
    # Zero-norm columns get a scale of 0 instead of dividing by zero
    c = np.logical_not(z) / (unorm + z)
    return u * c
normalize(u) yields a vector with the same direction as u but unit length, or, if u has zero length, yields u.
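Worked example (note the zero-vector guard: `z` masks out zero norms so the input is returned unscaled instead of dividing by zero):

    import numpy as np

    u = np.array([[3.0, 0.0],
                  [4.0, 0.0]])  # columns are vectors
    print(normalize(u))
    # [[0.6 0.]
    #  [0.8 0.]] -- the (3, 4) column becomes unit length,
    # the zero column is left as-is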
15,652
def fetch(self, minutes=values.unset, start_date=values.unset,
          end_date=values.unset, task_queue_sid=values.unset,
          task_queue_name=values.unset, friendly_name=values.unset,
          task_channel=values.unset):
    params = values.of({
        'Minutes': minutes,
        'StartDate': serialize.iso8601_datetime(start_date),
        'EndDate': serialize.iso8601_datetime(end_date),
        'TaskQueueSid': task_queue_sid,
        'TaskQueueName': task_queue_name,
        'FriendlyName': friendly_name,
        'TaskChannel': task_channel,
    })
    payload = self._version.fetch(
        'GET',
        self._uri,
        params=params,
    )
    return WorkersStatisticsInstance(
        self._version,
        payload,
        workspace_sid=self._solution['workspace_sid'],
    )
Fetch a WorkersStatisticsInstance

:param unicode minutes: Filter cumulative statistics by up to 'x' minutes in the past.
:param datetime start_date: Filter cumulative statistics by a start date.
:param datetime end_date: Filter cumulative statistics by an end date.
:param unicode task_queue_sid: Filter the real-time and cumulative statistics based on Workers tied to a particular queue
:param unicode task_queue_name: Filter the real-time and cumulative statistics based on Workers tied to a particular queue
:param unicode friendly_name: The friendly_name
:param unicode task_channel: Filter cumulative statistics by TaskChannel.

:returns: Fetched WorkersStatisticsInstance
:rtype: twilio.rest.taskrouter.v1.workspace.worker.workers_statistics.WorkersStatisticsInstance
15,653
def _create_and_add_parameters(params):
    global _current_parameter
    if _is_simple_type(params):
        _current_parameter = SimpleParameter(params)
        _current_option.add_parameter(_current_parameter)
    else:
        for i in params:
            if _is_simple_type(i):
                _current_parameter = SimpleParameter(i)
            else:
                _current_parameter = TypedParameter()
                _parse_typed_parameter(i)
            _current_option.add_parameter(_current_parameter)
Parses the configuration and creates Parameter instances.
15,654
def _load(self, scale=1.0):
    # Variable names reconstructed; the literals were elided in the source.
    LOG.debug("File: %s", str(self.requested_band_filename))
    ncf = Dataset(self.requested_band_filename, 'r')
    wvl = ncf.variables['wavelength'][:] * scale
    resp = ncf.variables['response'][:]
    self.rsr = {'wavelength': wvl, 'response': resp}
Load the SLSTR relative spectral responses
15,655
def update(self, auth_payload=values.unset):
    return self._proxy.update(auth_payload=auth_payload)
Update the ChallengeInstance :param unicode auth_payload: Optional payload to verify the Challenge :returns: Updated ChallengeInstance :rtype: twilio.rest.authy.v1.service.entity.factor.challenge.ChallengeInstance
15,656
def as_url(self):
    # Several literals below are reconstructed (scope key, error message,
    # scheme/port table, host separators); they were elided in the source.
    if '' in self._scope:
        return self._finalize().as_url
    if not self._is_endpoint:
        raise UrlBuildingError('Not an endpoint: {}'.format(repr(self)))
    if self._ready:
        path, host = self._path, self._host
    else:
        return self().as_url
    if ':' in host:
        domain, port = host.split(':')
    else:
        domain = host
        port = None
    if self._bound_env:
        request = self._bound_env.request
        scheme_port = {'http': '80', 'https': '443'}.get(request.scheme, '80')
        primary_domain = self._bound_env._route_state.primary_domain
        host_split = request.host.split(':')
        request_domain = host_split[0]
        request_port = host_split[1] if len(host_split) > 1 else scheme_port
        port = port or request_port
        return URL(path, host=domain or request_domain,
                   port=port if port != scheme_port else None,
                   scheme=request.scheme,
                   fragment=self._fragment,
                   show_host=host and (domain != primary_domain
                                       or port != request_port))
    return URL(path, host=domain, port=port,
               fragment=self._fragment, show_host=True)
Reverse object converted to `web.URL`.

If Reverse is bound to env:
* try to build relative URL,
* use current domain name, port and scheme as default
15,657
def pool(data, batch_size, key, batch_size_fn=lambda new, count, sofar: count,
         random_shuffler=None, shuffle=False, sort_within_batch=False):
    if random_shuffler is None:
        random_shuffler = random.shuffle
    # Partition into buckets of 100 batches, sort within each bucket,
    # batch, then optionally shuffle the resulting batches.
    for p in batch(data, batch_size * 100, batch_size_fn):
        p_batch = batch(sorted(p, key=key), batch_size, batch_size_fn) \
            if sort_within_batch \
            else batch(p, batch_size, batch_size_fn)
        if shuffle:
            for b in random_shuffler(list(p_batch)):
                yield b
        else:
            for b in list(p_batch):
                yield b
Sort within buckets, then batch, then shuffle batches. Partitions data into chunks of size 100*batch_size, sorts examples within each chunk using sort_key, then batch these examples and shuffle the batches.
15,658
def ellipsemode(self, mode=None):
    if mode in (self.CORNER, self.CENTER, self.CORNERS):
        self.ellipsemode = mode
        return self.ellipsemode
    elif mode is None:
        return self.ellipsemode
    else:
        raise ShoebotError(_("ellipsemode: invalid input"))
Set the current ellipse drawing mode.

:param mode: CORNER, CENTER, CORNERS
:return: ellipsemode if mode is None or valid.
15,659
def read_data(self, variable_instance):
    if self.inst is None:
        return
    # The original dispatches on six device_property values, each issuing a
    # different SCPI query; all string literals were elided in the source.
    if variable_instance.visavariable.device_property.upper() == '...':
        return self.parse_value(self.inst.query('...'))
    elif variable_instance.visavariable.device_property.upper() == '...':
        return self.parse_value(self.inst.query('...'))
    # ... four more analogous branches ...
    return None
read values from the device
15,660
def load(self, path=None):
    if not path:
        path = self._path
    if not os.path.exists(path):
        return
    words, synsets, labels = {}, {}, {}
    xml = cElementTree.parse(path)
    xml = xml.getroot()
    for w in xml.findall("word"):
        if self._confidence is None \
                or self._confidence <= float(w.attrib.get("confidence", 0.0)):
            w, pos, p, s, i, label, synset = (
                w.attrib.get("form"),
                w.attrib.get("pos"),
                w.attrib.get("polarity", 0.0),
                w.attrib.get("subjectivity", 0.0),
                w.attrib.get("intensity", 1.0),
                w.attrib.get("label"),
                w.attrib.get(self._synset)
            )
            psi = (float(p), float(s), float(i))
            if w:
                words.setdefault(w, {}).setdefault(pos, []).append(psi)
            if w and label:
                labels[w] = label
            if synset:
                synsets.setdefault(synset, []).append(psi)
    self._language = xml.attrib.get("language", self._language)
    # Average (polarity, subjectivity, intensity) per part of speech
    for w in words:
        words[w] = dict((pos, [avg(each) for each in zip(*psi)])
                        for pos, psi in words[w].items())
    for w, pos in list(words.items()):
        words[w][None] = [avg(each) for each in zip(*pos.values())]
    for id, psi in synsets.items():
        synsets[id] = [avg(each) for each in zip(*psi)]
    dict.update(self, words)
    dict.update(self.labeler, labels)
    dict.update(self._synsets, synsets)
Loads the XML-file (with sentiment annotations) from the given path. By default, Sentiment.path is lazily loaded.
15,661
def add_subnet(self, subnet_type, quantity=None, vlan_id=None, version=4,
               test_order=False):
    # Category codes and order keys reconstructed from SoftLayer conventions;
    # the literal strings were elided in the source.
    package = self.client['Product_Package']
    category = 'sov_sec_ip_addresses_priv'
    desc = ''
    if version == 4:
        if subnet_type == 'global':
            quantity = 0
            category = 'global_ipv4'
        elif subnet_type == 'public':
            category = 'sov_sec_ip_addresses_pub'
    else:
        category = 'static_ipv6_addresses'
        if subnet_type == 'global':
            quantity = 0
            category = 'global_ipv6'
            desc = 'Global'
        elif subnet_type == 'public':
            desc = 'Portable'
    price_id = None
    quantity_str = str(quantity)
    for item in package.getItems(id=0, mask='itemCategory'):
        category_code = utils.lookup(item, 'itemCategory', 'categoryCode')
        if all([category_code == category,
                item.get('capacity') == quantity_str,
                version == 4 or (version == 6 and desc in item['description'])]):
            price_id = item['prices'][0]['id']
            break
    order = {
        'packageId': 0,
        'prices': [{'id': price_id}],
        'quantity': 1,
        'complexType': 'SoftLayer_Container_Product_Order_Network_Subnet',
    }
    if subnet_type != 'global':
        order['endPointVlanId'] = vlan_id
    if test_order:
        return self.client['Product_Order'].verifyOrder(order)
    else:
        return self.client['Product_Order'].placeOrder(order)
Orders a new subnet

:param str subnet_type: Type of subnet to add: private, public, global
:param int quantity: Number of IPs in the subnet
:param int vlan_id: VLAN id for the subnet to be placed into
:param int version: 4 for IPv4, 6 for IPv6
:param bool test_order: If true, this will only verify the order.
15,662
def create_event_handler(event_type, handler):
    # Name and message templates reconstructed; the literals were elided.
    target_name = 'event_handler_{hash}_{event_type}'.format(
        hash=hash(handler), event_type=event_type)

    def handle_comm_opened(comm, msg):
        @comm.on_msg
        def _handle_msg(msg):
            data = msg['content']['data']
            event = json.loads(data)
            return_value = handler(event)
            if return_value:
                comm.send(return_value)
        comm.send('{target_name}'.format(target_name=target_name))

    if get_ipython():
        get_ipython().kernel.comm_manager.register_target(target_name,
                                                          handle_comm_opened)
    return target_name
Register a comm and return a serializable object with target name
15,663
def disconnect(self, connection):
    proto = self.getLocalProtocol(connection)
    proto.transport.loseConnection()
    return {}
The other side has asked us to disconnect.
15,664
def camera_position(self, camera_location):
    if camera_location is None:
        return
    if isinstance(camera_location, str):
        camera_location = camera_location.lower()
        if camera_location == 'xy':
            self.view_xy()
        elif camera_location == 'xz':
            self.view_xz()
        elif camera_location == 'yz':
            self.view_yz()
        elif camera_location == 'yx':
            self.view_xy(True)
        elif camera_location == 'zx':
            self.view_xz(True)
        elif camera_location == 'zy':
            self.view_yz(True)
        return
    if isinstance(camera_location[0], (int, float)):
        return self.view_vector(camera_location)
    # Everything set explicitly: position, focal point, view up
    self.camera.SetPosition(camera_location[0])
    self.camera.SetFocalPoint(camera_location[1])
    self.camera.SetViewUp(camera_location[2])
    self.ResetCameraClippingRange()
    self.camera_set = True
Set camera position of all active render windows
15,665
def get_unit_id(unit_name):
    # LDAP attribute, search pattern, and messages reconstructed;
    # the literal strings were elided in the source.
    unit_name = unit_name.lower()
    attribute = 'uniqueIdentifier'
    response = LDAP_search(
        pattern_search='(cn={})'.format(unit_name),
        attribute=attribute
    )
    unit_id = ""
    try:
        for element in response:
            if 'dn' in element and element['dn'].startswith('cn={}'.format(unit_name)):
                unit_id = element['attributes'][attribute][0]
    except Exception:
        raise EpflLdapException("The unit named '{}' was not found".format(unit_name))
    finally:
        if not unit_id:
            raise EpflLdapException("The unit named '{}' was not found".format(unit_name))
    return unit_id
Return the unit id of the unit 'unit_name'.
15,666
def clone(self, snapshot_name_or_id=None,
          mode=library.CloneMode.machine_state,
          options=None, name=None, uuid=None, groups=None,
          basefolder='', register=True):
    if options is None:
        options = [library.CloneOptions.link]
    if groups is None:
        groups = []
    vbox = virtualbox.VirtualBox()
    if snapshot_name_or_id is not None:
        if isinstance(snapshot_name_or_id, basestring):
            snapshot = self.find_snapshot(snapshot_name_or_id)
        else:
            snapshot = snapshot_name_or_id
        vm = snapshot.machine
    else:
        # Linked clones must come from a snapshot's machine state
        if library.CloneOptions.link in options:
            vm = self.current_snapshot.machine
        else:
            vm = self
    if name is None:
        name = "%s Clone" % vm.name
    create_flags = ''
    if uuid is not None:
        create_flags = "UUID=%s" % uuid
    primary_group = '/'
    if groups:
        primary_group = groups[0]
    # Find a unique name for the clone's settings file
    test_name = name
    settings_file = ''
    for i in range(1, 1000):
        settings_file = vbox.compose_machine_filename(test_name, primary_group,
                                                      create_flags, basefolder)
        if not os.path.exists(os.path.dirname(settings_file)):
            break
        test_name = "%s (%s)" % (name, i)
    name = test_name
    vm_clone = vbox.create_machine(settings_file, name, groups, '', create_flags)
    progress = vm.clone_to(vm_clone, mode, options)
    progress.wait_for_completion(-1)
    if register:
        vbox.register_machine(vm_clone)
    return vm_clone
Clone this Machine

Options:
    snapshot_name_or_id - value can be either ISnapshot, name, or id
    mode - set the CloneMode value
    options - define the CloneOptions options
    name - define a name of the new VM
    uuid - set the uuid of the new VM
    groups - specify which groups the new VM will exist under
    basefolder - specify which folder to set the VM up under
    register - register this VM with the server

Note: Default values create a linked clone from the current machine state

Return a IMachine object for the newly cloned vm
15,667
def is_stop(self):
    if len(self._processed_coordinators) > 0:
        self.free_processed_queue()
    return self._cancel_called or self._processing_stop
Returns True if either of the stop-processing flags has been set.
15,668
def get_transition(self, line, line_index, column, is_escaped,
                   comment_system_transitions, eof=False):
    parser_transition = {
        STATE_IN_COMMENT: InCommentParser,
        STATE_IN_QUOTE: InQuoteParser
    }
    (state, start_state_from, waiting_until) = comment_system_transitions.from_text(
        line, line_index, column, is_escaped)
    if state != STATE_IN_TEXT:
        return (parser_transition[state](start_state_from, waiting_until),
                start_state_from[1] - column,
                None)
    else:
        return (self, 1, None)
Get transition from InTextParser.
15,669
def write_summary(all_procs, summary_file):
    if not summary_file:
        return
    with summary_file:
        writer = csv.writer(summary_file, delimiter='\t', lineterminator='\n')
        # Header names reconstructed to mirror the row attributes;
        # the literal strings were elided in the source.
        writer.writerow(('working_dir', 'command', 'start_time', 'end_time',
                         'running_time', 'return_code', 'status'))
        rows = ((p.working_dir, ' '.join(p.command), p.start_time, p.end_time,
                 p.running_time, p.return_code, p.status) for p in all_procs)
        writer.writerows(rows)
Write a summary of all run processes to summary_file in tab-delimited format.
15,670
def check_theta(self):
    lower, upper = self.theta_interval
    if (not lower <= self.theta <= upper) or (self.theta in self.invalid_thetas):
        message = 'The computed theta value {} is out of limits for the given {} copula.'
        raise ValueError(message.format(self.theta, self.copula_type.name))
Validate the computed theta against the copula specification. This method is used to assert the computed theta is in the valid range for the copula.
15,671
def stacked_node_layout(self, EdgeAttribute=None, network=None, NodeAttribute=None,
                        nodeList=None, x_position=None, y_start_position=None,
                        verbose=None):
    network = check_network(self, network, verbose=verbose)
    PARAMS = set_param(['EdgeAttribute', 'network', 'NodeAttribute',
                        'nodeList', 'x_position', 'y_start_position'],
                       [EdgeAttribute, network, NodeAttribute,
                        nodeList, x_position, y_start_position])
    response = api(url=self.__url + "/stacked-node-layout", PARAMS=PARAMS,
                   method="POST", verbose=verbose)
    return response
Execute the Stacked Node Layout on a network.

:param EdgeAttribute (string, optional): The name of the edge column containing numeric values that will be used as weights in the layout algorithm. Only columns containing numeric values are shown
:param network (string, optional): Specifies a network by name, or by SUID if the prefix SUID: is used. The keyword CURRENT, or a blank value can also be used to specify the current network.
:param NodeAttribute (string, optional): The name of the node column containing numeric values that will be used as weights in the layout algorithm. Only columns containing numeric values are shown
:param nodeList (string, optional): Specifies a list of nodes. The keywords all, selected, or unselected can be used to specify nodes by their selection state. The pattern COLUMN:VALUE sets this parameter to any rows that contain the specified column value; if the COLUMN prefix is not used, the NAME column is matched by default. A list of COLUMN:VALUE pairs of the format COLUMN1:VALUE1,COLUMN2:VALUE2,... can be used to match multiple values.
:param x_position (string, optional): X start position, in numeric value
:param y_start_position (string, optional): Y start position, in numeric value
15,672
def create_api_stage(restApiId, stageName, deploymentId, description='',
                     cacheClusterEnabled=False, cacheClusterSize='0.5',
                     variables=None,
                     region=None, key=None, keyid=None, profile=None):
    try:
        variables = dict() if variables is None else variables
        conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
        stage = conn.create_stage(restApiId=restApiId, stageName=stageName,
                                  deploymentId=deploymentId, description=description,
                                  cacheClusterEnabled=cacheClusterEnabled,
                                  cacheClusterSize=cacheClusterSize,
                                  variables=variables)
        return {'created': True, 'stage': _convert_datetime_str(stage)}
    except ClientError as e:
        return {'created': False, 'error': __utils__['boto3.get_error'](e)}
Creates a new API stage for a given restApiId and deploymentId.

CLI Example:

.. code-block:: bash

    salt myminion boto_apigateway.create_api_stage restApiId stagename deploymentId \\
        description='' cacheClusterEnabled=True|False cacheClusterSize='0.5' \\
        variables='{"name": "value"}'
15,673
def get_all_service_user_objects(self, include_machine=False):
    # LDAP filters and log messages reconstructed; literals were elided.
    logger.debug('Polling AD for all user objects, include_machine = %s'
                 % include_machine)
    if include_machine == True:
        ldap_filter = r'(servicePrincipalName=*)'
    else:
        # Exclude machine accounts (sAMAccountName ending in $)
        ldap_filter = r'(&(servicePrincipalName=*)(!(sAMAccountName=*$)))'
    attributes = MSADUser.ATTRS
    for entry in self.pagedsearch(ldap_filter, attributes):
        yield MSADUser.from_ldap(entry, self._ldapinfo)
    logger.debug('Finished polling for entries!')
Fetches all service user objects from the AD and returns MSADUser objects. Service user refers to a user with the SPN (servicePrincipalName) attribute set.
15,674
def _extractall(self, path=".", members=None):
    import copy
    import operator
    from tarfile import ExtractError
    directories = []
    if members is None:
        members = self
    for tarinfo in members:
        if tarinfo.isdir():
            # Extract directories with a safe mode, fix them up afterwards
            directories.append(tarinfo)
            tarinfo = copy.copy(tarinfo)
            tarinfo.mode = 448  # 0o700, decimal for old-Python compatibility
        self.extract(tarinfo, path)
    # Reverse sort directories, deepest first
    if sys.version_info < (2, 4):
        def sorter(dir1, dir2):
            return cmp(dir1.name, dir2.name)
        directories.sort(sorter)
        directories.reverse()
    else:
        directories.sort(key=operator.attrgetter('name'), reverse=True)
    # Set correct owner, mtime and filemode on directories
    for tarinfo in directories:
        dirpath = os.path.join(path, tarinfo.name)
        try:
            self.chown(tarinfo, dirpath)
            self.utime(tarinfo, dirpath)
            self.chmod(tarinfo, dirpath)
        except ExtractError:
            e = sys.exc_info()[1]
            if self.errorlevel > 1:
                raise
            else:
                self._dbg(1, "tarfile: %s" % e)
Extract all members from the archive to the current working directory and set owner, modification time and permissions on directories afterwards. `path' specifies a different directory to extract to. `members' is optional and must be a subset of the list returned by getmembers().
15,675
def run_prepare(*data):
    out_dir = os.path.join(dd.get_work_dir(data[0][0]), "seqcluster", "prepare")
    out_dir = os.path.abspath(safe_makedir(out_dir))
    prepare_dir = os.path.join(out_dir, "prepare")
    tools = dd.get_expression_caller(data[0][0])
    if len(tools) == 0:
        logger.info("You didn't specify any expression caller tool.")
    # The middle of this function was garbled in the source; the body below is
    # a partial reconstruction. It builds one "collapsed-file<TAB>sample" line
    # per sample, then runs seqcluster's matrix preparation.
    fn = []
    for sample in data:
        name = sample[0]["rgnames"]["sample"]
        fn.append("%s\t%s" % (sample[0]["collapse"], name))
    args = namedtuple("args", "debug print_debug minc minl maxl out")
    args = args(False, False, 2, 17, 40, out_dir)
    ma_out = os.path.join(out_dir, "seqs.ma")
    seq_out = os.path.join(out_dir, "seqs.fastq")
    min_shared = max(int(len(fn) / 10.0), 2)
    if not file_exists(ma_out):
        seq_l, sample_l = prepare._read_fastq_files(fn, args)
        with open(ma_out, 'w') as ma_handle:
            with open(seq_out, 'w') as seq_handle:
                logger.info("Prepare seqs.fastq with -minl 17 -maxl 40 -minc 2 --min_shared 0.1")
                prepare._create_matrix_uniq_seq(sample_l, seq_l, ma_handle,
                                                seq_handle, min_shared)
    for sample in data:
        sample[0]["seqcluster_prepare_ma"] = ma_out
        sample[0]["seqcluster_prepare_fastq"] = seq_out
    return data
Run seqcluster prepare to merge all samples in one file
15,676
def _set_properties(self):
    self.set_icon(icons["PyspreadLogo"])
    self.minSizeSet = False
    post_command_event(self, self.SafeModeExitMsg)
Setup title, icon, size, scale, statusbar, main grid
15,677
def _validate_checksum(self):
    phrase = self.phrase.split(" ")
    if self.word_list.get_checksum(self.phrase) == phrase[-1]:
        return True
    raise ValueError("Invalid checksum")
Given a mnemonic word string, confirm seed checksum (last word) matches the computed checksum. :rtype: bool
15,678
def deleteAllStyles(self, verbose=None):
    # URL suffix reconstructed; the literal was elided in the source.
    response = api(url=self.___url + 'styles', method="DELETE", verbose=verbose)
    return response
Deletes all visual styles except for the default style.

:param verbose: print more
:returns: default: successful operation
15,679
def _setuie(self, i):
    if i < 0:
        raise CreationError("Cannot use negative initialiser for unsigned "
                            "interleaved exponential-Golomb.")
    self._setbin_unsafe('1' if i == 0 else '0' + '0'.join(bin(i + 1)[3:]) + '1')
Initialise bitstring with unsigned interleaved exponential-Golomb code for integer i. Raises CreationError if i < 0.
15,680
def is_done(self):
    return (not self.wait_for_output or self.eof) and \
        not (hasattr(self.to_stream, 'needs_write') and self.to_stream.needs_write())
Returns True if the read stream is done (either it's returned EOF or the pump doesn't have wait_for_output set), and the write side does not have pending bytes to send.
15,681
def POST(self):
    form = self.form()
    if not form.validates():
        todos = model.get_todos()
        return render.index(todos, form)
    model.new_todo(form.d.title)
    raise web.seeother('/')
Add new entry
15,682
def upgrade():
    # Table, column, and constraint names were elided in the source;
    # '...' marks each missing literal.
    op.create_table(
        '...',
        sa.Column('...', sa.DateTime(), nullable=False),
        sa.Column('...', sa.DateTime(), nullable=False),
        sa.Column('...', sa.Integer(), nullable=False),
        sa.Column('...', sa.Integer(), nullable=False),
        sa.Column('...', sa.SmallInteger(), nullable=False),
        sa.Column('...', sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(
            ['...'], ['...'], name=op.f('...'),
            onupdate='...', ondelete='...'
        ),
        sa.ForeignKeyConstraint(
            ['...'], ['...'], name=op.f('...'),
            onupdate='...', ondelete='...'),
        sa.PrimaryKeyConstraint(
            '...', '...', name=op.f('...')
        )
    )
Upgrade database.
15,683
def parse(cls, value, record_bytes):
    if isinstance(value, six.string_types):
        return cls(value, 0)
    if isinstance(value, list):
        if len(value) == 1:
            return cls(value[0], 0)
        if len(value) == 2:
            return cls(value[0], cls._parse_bytes(value[1], record_bytes))
        raise ValueError()
    return cls(None, cls._parse_bytes(value, record_bytes))
Parses the pointer label.

Parameters
----------
pointer_data
    Supported values for `pointer_data` are::

        ^PTR = nnn
        ^PTR = nnn <BYTES>
        ^PTR = "filename"
        ^PTR = ("filename")
        ^PTR = ("filename", nnn)
        ^PTR = ("filename", nnn <BYTES>)

record_bytes
    Record multiplier value

Returns
-------
Pointer object
15,684
def make_key_url(self, key):
    if type(key) is bytes:
        key = key.decode()
    buf = io.StringIO()
    buf.write(u'...')  # URL path prefix elided in the source
    if not key.startswith(u'/'):
        buf.write(u'/')
    buf.write(key)
    return self.make_url(buf.getvalue())
Gets a URL for a key.
15,685
def add(self, properties):
    new_hba = super(FakedHbaManager, self).add(properties)
    partition = self.parent
    assert 'hba-uris' in partition.properties
    partition.properties['hba-uris'].append(new_hba.uri)
    if 'device-number' not in new_hba.properties:
        devno = partition.devno_alloc()
        new_hba.properties['device-number'] = devno
    if 'wwpn' not in new_hba.properties:
        wwpn = partition.wwpn_alloc()
        new_hba.properties['wwpn'] = wwpn
    return new_hba
Add a faked HBA resource.

Parameters:
    properties (dict): Resource properties.

Special handling and requirements for certain properties:

* 'element-id' will be auto-generated with a unique value across all instances of this resource type, if not specified.
* 'element-uri' will be auto-generated based upon the element ID, if not specified.
* 'class' will be auto-generated to 'hba', if not specified.
* 'adapter-port-uri' identifies the backing FCP port for this HBA and is required to be specified.
* 'device-number' will be auto-generated with a unique value within the partition in the range 0x8000 to 0xFFFF, if not specified.

This method also updates the 'hba-uris' property in the parent faked Partition resource, by adding the URI for the faked HBA resource.

Returns:
    :class:`~zhmcclient_mock.FakedHba`: The faked HBA resource.

Raises:
    :exc:`zhmcclient_mock.InputError`: Some issue with the input properties.
15,686
def stop(self):
    self.child.terminate()
    self._cleanup()
    return self.child.exitcode
Stop this process. Once closed, it should not, and cannot be used again. :return: :py:attr:`~exitcode`.
15,687
def ping(self, params=None):
    try:
        return self.transport.perform_request("HEAD", "/", params=params)
    except TransportError:
        return False
Returns True if the cluster is up, False otherwise. `<http://www.elastic.co/guide/>`_
15,688
def ntp_authentication_key_encryption_type_md5_type_md5(self, **kwargs):
    config = ET.Element("config")
    ntp = ET.SubElement(config, "ntp", xmlns="urn:brocade.com:mgmt:brocade-ntp")
    authentication_key = ET.SubElement(ntp, "authentication-key")
    keyid_key = ET.SubElement(authentication_key, "keyid")
    keyid_key.text = kwargs.pop('keyid')
    encryption_type = ET.SubElement(authentication_key, "encryption-type")
    md5_type = ET.SubElement(encryption_type, "md5-type")
    md5 = ET.SubElement(md5_type, "md5")
    md5.text = kwargs.pop('md5')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
15,689
def parse(self):
    log.debug(self)
    self.parse_composite()
    self.split_line()
    self.convert_coordinates()
    self.convert_meta()
    self.make_shape()
    log.debug(self)
Convert line to shape object
15,690
def save_images(self):
    # Settings keys and status/log messages reconstructed; literals were elided.
    res_dict = self.treeview.get_selected()
    clobber = self.settings.get('clobber', False)
    self.treeview.clear_selection()
    if self.suffix:
        sfx = '_' + self.suffix
    else:
        sfx = ''
    if self.settings.get('include_chname', True):
        sfx += '_' + self.chname
    for infile in res_dict:
        f_pfx = os.path.splitext(infile)[0]
        f_ext = '.fits'
        oname = f_pfx + sfx + f_ext
        outfile = os.path.join(self.outdir, oname)
        self.w.status.set_text(
            'Writing out {0} to {1} ...'.format(shorten_name(infile, 10),
                                                shorten_name(oname, 10)))
        self.logger.debug(
            'Writing out {0} to {1} ...'.format(infile, oname))
        if os.path.exists(outfile) and not clobber:
            self.logger.error('{0} already exists'.format(outfile))
            continue
        bnch = res_dict[infile]
        if bnch.path is None or not os.path.isfile(bnch.path):
            self._write_mosaic(f_pfx, outfile)
        else:
            shutil.copyfile(bnch.path, outfile)
            self._write_mef(f_pfx, bnch.extlist, outfile)
        self.logger.info('{0} written'.format(outfile))
    self.w.status.set_text('Done')
Save selected images.

This uses the Astropy FITS package to save the outputs, no matter what the user chose to load the images with.
15,691
def get_custom_value(self, field_name):
    custom_field = self.get_custom_field(field_name)
    return CustomFieldValue.objects.get_or_create(
        field=custom_field, object_id=self.id)[0].value
Get a value for a specified custom field.

field_name - Name of the custom field you want.
15,692
def contingency_table(y, z):
    y = K.cast(K.round(y), K.floatx())
    z = K.cast(K.round(z), K.floatx())

    def count_matches(y, z):
        return K.sum(K.cast(y, K.floatx()) * K.cast(z, K.floatx()))

    ones = K.ones_like(y)
    zeros = K.zeros_like(y)
    y_ones = K.equal(y, ones)
    y_zeros = K.equal(y, zeros)
    z_ones = K.equal(z, ones)
    z_zeros = K.equal(z, zeros)
    tp = count_matches(y_ones, z_ones)
    tn = count_matches(y_zeros, z_zeros)
    fp = count_matches(y_zeros, z_ones)
    fn = count_matches(y_ones, z_zeros)
    return (tp, tn, fp, fn)
Note: if y and z are not rounded to 0 or 1, they are ignored
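The same counts in plain NumPy, for intuition (a sketch, not the Keras graph version above):

    import numpy as np

    y = np.array([1, 1, 0, 0])  # ground truth
    z = np.array([1, 0, 1, 0])  # predictions
    tp = np.sum((y == 1) & (z == 1))  # 1
    tn = np.sum((y == 0) & (z == 0))  # 1
    fp = np.sum((y == 0) & (z == 1))  # 1
    fn = np.sum((y == 1) & (z == 0))  # 1
    print(tp, tn, fp, fn)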
15,693
def add_variable(self, name):
    if name in self._variables:
        raise ValueError("A variable named " + name + " already exists.")
    self._variables[name] = len(self._variables)
    self.bounds[name] = (0, None)
    new_col = np.zeros(shape=[len(self._constraints), 1])
    self._add_col_to_A(new_col)
    self._reset_solution()
Add a variable to the problem
15,694
def matrix_to_marching_cubes(matrix, pitch, origin):
    from skimage import measure
    from .base import Trimesh
    matrix = np.asanyarray(matrix, dtype=np.bool)
    rev_matrix = np.logical_not(matrix)
    # Pad with a one-voxel border so surfaces at the edges close properly
    pad_width = 1
    rev_matrix = np.pad(rev_matrix, pad_width=(pad_width),
                        mode='constant', constant_values=(1))
    if hasattr(measure, 'marching_cubes_lewiner'):
        func = measure.marching_cubes_lewiner
    else:
        func = measure.marching_cubes
    meshed = func(volume=rev_matrix, level=.5, spacing=(pitch, pitch, pitch))
    if len(meshed) == 2:
        log.warning('using old marching cubes, may not be watertight!')
        vertices, faces = meshed
        normals = None
    elif len(meshed) == 4:
        vertices, faces, normals, vals = meshed
    # Shift back to the caller's coordinate frame, undoing the padding
    vertices = np.subtract(np.add(vertices, origin), pad_width * pitch)
    mesh = Trimesh(vertices=vertices, faces=faces, vertex_normals=normals)
    return mesh
Convert an (n,m,p) matrix into a mesh, using marching_cubes.

Parameters
-----------
matrix: (n,m,p) bool, voxel matrix
pitch: float, what pitch was the voxel matrix computed with
origin: (3,) float, what is the origin of the voxel matrix

Returns
----------
mesh: Trimesh object, generated by meshing voxels using the marching cubes algorithm in skimage
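A hedged usage sketch, assuming `trimesh` and `scikit-image` are installed and `matrix_to_marching_cubes` is importable from the surrounding voxel module:

    import numpy as np

    # A solid 3x3x3 block of voxels at pitch 1.0, origin at (0, 0, 0).
    matrix = np.ones((3, 3, 3), dtype=bool)
    mesh = matrix_to_marching_cubes(matrix, pitch=1.0, origin=np.zeros(3))
    print(mesh.is_watertight, len(mesh.faces))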
15,695
def find(self, cell_designation,
         cell_filter=lambda x, c: 'cell' in x and x['cell'] == c):
    # Metadata key reconstructed; the literal was elided in the source.
    res = [i for i, sc in enumerate(self.spike_containers)
           if cell_filter(sc.meta, cell_designation)]
    if len(res) > 0:
        return res[0]
Finds spike containers in a multi-spike-container collection.
15,696
def rosen_nesterov(self, x, rho=100):
    f = 0.25 * (x[0] - 1)**2
    f += rho * sum((x[1:] - 2 * x[:-1]**2 + 1)**2)
    return f
Needs an exponential number of steps in a non-increasing f-sequence. x_0 = (-1, 1, ..., 1). See Jarre (2011), "On Nesterov's Smooth Chebyshev-Rosenbrock Function".
15,697
def load(self, modules):
    # Attribute names and messages reconstructed; literals were elided.
    self.modules_assoc = []
    for module in modules:
        if not module.enabled:
            logger.info("Module %s is declared but not enabled", module.name)
            self.modules[module.uuid] = module
            continue
        logger.info("Importing Python module '%s' for %s...",
                    module.python_name, module.name)
        try:
            python_module = importlib.import_module(module.python_name)
            if not hasattr(python_module, 'properties'):
                self.configuration_errors.append("Module %s is missing a 'properties' "
                                                 "dictionary" % module.python_name)
                raise AttributeError
            logger.info("Module properties: %s",
                        getattr(python_module, 'properties'))
            if not hasattr(python_module, 'get_instance') or \
                    not isinstance(getattr(python_module, 'get_instance'),
                                   collections.Callable):
                self.configuration_errors.append("Module %s is missing a 'get_instance' "
                                                 "function" % module.python_name)
                raise AttributeError
            self.modules_assoc.append((module, python_module))
            logger.info("Imported '%s' for %s", module.python_name, module.name)
        except ImportError as exp:
            self.configuration_errors.append("Module %s (%s) can't be loaded, check the "
                                             "module configuration" % (module.python_name,
                                                                       module.name))
        else:
            logger.info("Loaded Python module '%s' (%s)",
                        module.python_name, module.name)
Load Python modules and check their usability.

:param modules: list of the modules that must be loaded
15,698
def interact(self, **local):
    import code
    code.interact(local=dict(sess=self, **local))
Drops the user into an interactive Python session with the ``sess`` variable set to the current session instance. If keyword arguments are supplied, these names will also be available within the session.
15,699
def find_distinct(self, collection, key):
    obj = getattr(self.db, collection)
    result = obj.distinct(key)
    return result
Search a collection for the distinct key values provided.

Args:
    collection: The db collection. See main class documentation.
    key: The name of the key to find distinct values. For example with the indicators collection, the key could be "type".

Returns:
    List of distinct values.