Dataset preview header: columns are "Unnamed: 0" (int64, row ids 0–389k), "code" (string, lengths 26–79.6k), and "docstring" (string, lengths 1–46.9k).
27,300
def buildTreeFromAlignment(filename, WorkingDir=None, SuppressStderr=None):
    # The stripped dict keys and parameter name below are restored from the
    # PyCogent Clustalw wrapper; treat them as a best-effort reconstruction.
    app = Clustalw({'-tree': None, '-infile': filename},
                   SuppressStderr=SuppressStderr, WorkingDir=WorkingDir)
    app.Parameters['-align'].off()
    return app()
Builds a new tree from an existing alignment. filename: string, name of file containing the seqs or alignment
27,301
def simxGetIntegerParameter(clientID, paramIdentifier, operationMode): paramValue = ct.c_int() return c_GetIntegerParameter(clientID, paramIdentifier, ct.byref(paramValue), operationMode), paramValue.value
Please have a look at the function description/documentation in the V-REP user manual
27,302
def partial(__fn, *a, **kw): return (PARTIAL, (__fn, a, tuple(kw.items())))
Wrap a note for injection of a partially applied function. This allows for annotated functions to be injected for composition:: from jeni import annotate @annotate('foo', bar=annotate.maybe('bar')) def foobar(foo, bar=None): return @annotate('foo', annotate.partial(foobar)) def bazquux(foo, fn): # fn: injector.partial(foobar) return Keyword arguments are treated as `maybe` when using partial, in order to allow partial application of only the notes which can be provided, where the caller could then apply arguments known to be unavailable in the injector. Note that with Python 3 function annotations, all annotations are injected as keyword arguments. Injections on the partial function are lazy and not applied until the injected partial function is called. See `eager_partial` to inject eagerly.
27,303
def read(self, size):
    # The stripped comparison target is assumed to be an empty read (b''),
    # which signals that the device has disconnected.
    try:
        return_val = self.handle.read(size)
        if return_val == b'':
            print()
            print("Piksi disconnected")
            print()
            raise IOError
        return return_val
    except OSError:
        print()
        print("Piksi disconnected")
        print()
        raise IOError
Read wrapper. Parameters ---------- size : int Number of bytes to read.
27,304
def _start_app_and_connect(self):
    # Stripped literals (log messages, protocol regexes, 'tcp:%d' forward
    # specs) are restored from the Mobly snippet client conventions.
    self._check_app_installed()
    self.disable_hidden_api_blacklist()
    persists_shell_cmd = self._get_persist_command()
    self.log.info('Launching snippet apk %s with protocol %d.%d',
                  self.package, _PROTOCOL_MAJOR_VERSION, _PROTOCOL_MINOR_VERSION)
    cmd = _LAUNCH_CMD % (persists_shell_cmd, self.package)
    start_time = time.time()
    self._proc = self._do_start_app(cmd)
    # Check protocol version and get the device port.
    line = self._read_protocol_line()
    match = re.match('^SNIPPET START, PROTOCOL ([0-9]+) ([0-9]+)$', line)
    if not match or match.group(1) != '1':
        raise ProtocolVersionError(self._ad, line)
    line = self._read_protocol_line()
    match = re.match('^SNIPPET SERVING, PORT ([0-9]+)$', line)
    if not match:
        raise ProtocolVersionError(self._ad, line)
    self.device_port = int(match.group(1))
    # Forward the device port to a new host port and connect to it.
    self.host_port = utils.get_available_host_port()
    self._adb.forward(
        ['tcp:%d' % self.host_port, 'tcp:%d' % self.device_port])
    self.connect()
    self.log.debug('Snippet %s started after %.1fs on host port %s',
                   self.package, time.time() - start_time, self.host_port)
Starts snippet apk on the device and connects to it. After prechecks, this launches the snippet apk with an adb cmd in a standing subprocess, checks the cmd response from the apk for protocol version, then sets up the socket connection over adb port-forwarding. Raises: ProtocolVersionError, if protocol info or port info cannot be retrieved from the snippet apk.
27,305
def unregister(self, gadgets): gadgets = maintenance.ensure_list(gadgets) for gadget in gadgets: while gadget in self._registry: self._registry.remove(gadget)
Unregisters the specified gadget(s) if it/they has/have already been registered. "gadgets" can be a single class or a tuple/list of classes to unregister.
27,306
def output(self): if (self.__ccore_legion_dynamic_pointer is not None): return wrapper.legion_dynamic_get_output(self.__ccore_legion_dynamic_pointer); return self.__output;
! @brief Returns output dynamic of the network.
27,307
def cors_wrapper(func):
    def _setdefault(obj, key, value):
        # Skip absent values instead of setting a header to None.
        if value is None:
            return
        obj.setdefault(key, value)

    def output(*args, **kwargs):
        response = func(*args, **kwargs)
        headers = response.headers
        _setdefault(headers, "Access-Control-Allow-Origin", "*")
        _setdefault(headers, "Access-Control-Allow-Headers",
                    flask.request.headers.get("Access-Control-Request-Headers"))
        _setdefault(headers, "Access-Control-Allow-Methods",
                    flask.request.headers.get("Access-Control-Request-Methods"))
        _setdefault(headers, "Content-Type", "application/json")
        _setdefault(headers, "Strict-Transport-Security",
                    "max-age=31536000; includeSubDomains; preload")
        return response

    output.provide_automatic_options = False
    output.__name__ = func.__name__
    return output
Decorator for CORS :param func: Flask method that handles requests and returns a response :return: Same, but with permissive CORS headers set
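A minimal usage sketch, assuming a Flask `app` object and the route/handler names being illustrative; the decorator must be applied before route registration:

    @app.route("/status")
    @cors_wrapper
    def status():
        return flask.jsonify({"ok": True})  # response gets permissive CORS headers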
27,308
def rename(name, new_name): * if six.PY2: name = _to_unicode(name) new_name = _to_unicode(new_name) current_info = info(name) if not current_info: raise CommandExecutionError({0}\.format(name)) new_info = info(new_name) if new_info: raise CommandExecutionError( {0}\.format(new_name) ) with salt.utils.winapi.Com(): c = wmi.WMI(find_classes=0) try: user = c.Win32_UserAccount(Name=name)[0] except IndexError: raise CommandExecutionError({0}\.format(name)) result = user.Rename(new_name)[0] if not result == 0: error_dict = {0: , 1: , 2: , 3: , 4: , 5: , 6: , 7: , 8: , 9: , 10: } raise CommandExecutionError( {0}\{1}\ .format(name, new_name, error_dict[result]) ) return info(new_name).get() == new_name
Change the username for a named user Args: name (str): The user name to change new_name (str): The new name for the current user Returns: bool: True if successful, otherwise False CLI Example: .. code-block:: bash salt '*' user.rename jsnuffy jshmoe
27,309
def conditions(self): for idx in six.moves.range(1, len(self.children), 2): yield (self.children[idx - 1], self.children[idx])
The if-else pairs.
27,310
def matrix_mult_opt_order(M):
    n = len(M)
    r = [len(Mi) for Mi in M]       # row counts
    c = [len(Mi[0]) for Mi in M]    # column counts
    opt = [[0 for j in range(n)] for i in range(n)]
    arg = [[None for j in range(n)] for i in range(n)]
    for j_i in range(1, n):         # loop over chain lengths
        for i in range(n - j_i):
            j = i + j_i
            opt[i][j] = float('inf')
            for k in range(i, j):
                alt = opt[i][k] + opt[k + 1][j] + r[i] * c[k] * c[j]
                if opt[i][j] > alt:
                    opt[i][j] = alt
                    arg[i][j] = k
    return opt, arg
Matrix chain multiplication optimal order :param M: list of matrices :returns: matrices opt, arg, such that opt[i][j] is the optimal number of operations to compute M[i] * ... * M[j] when done in the order (M[i] * ... * M[k]) * (M[k + 1] * ... * M[j]) for k = arg[i][j] :complexity: :math:`O(n^3)` (three nested loops over chain lengths, start indices and split points)
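A short usage sketch, with shapes chosen (2x3, 3x4, 4x2) so the optimum is easy to verify by hand:

    M = [[[0] * 3 for _ in range(2)],   # 2x3
         [[0] * 4 for _ in range(3)],   # 3x4
         [[0] * 2 for _ in range(4)]]   # 4x2
    opt, arg = matrix_mult_opt_order(M)
    print(opt[0][2])  # 36 scalar multiplications: (M0) * (M1 * M2)
    print(arg[0][2])  # 0, i.e. split after the first matrix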
27,311
def push_zipkin_attrs(zipkin_attr):
    from py_zipkin.storage import ThreadLocalStack
    # The original deprecation message was stripped; this wording is assumed
    # from the docstring's deprecation note.
    log.warning('push_zipkin_attrs is deprecated. Use the Tracer interface; '
                'this function will be removed in version 1.0.')
    return ThreadLocalStack().push(zipkin_attr)
Stores the zipkin attributes to thread local. .. deprecated:: Use the Tracer interface which offers better multi-threading support. push_zipkin_attrs will be removed in version 1.0. :param zipkin_attr: tuple containing zipkin related attrs :type zipkin_attr: :class:`zipkin.ZipkinAttrs`
27,312
def __Login(host, port, user, pwd, service, adapter, version, path, keyFile, certFile, thumbprint, sslContext, connectionPoolTimeout=CONNECTION_POOL_IDLE_TIMEOUT_SEC): content, si, stub = __RetrieveContent(host, port, adapter, version, path, keyFile, certFile, thumbprint, sslContext, connectionPoolTimeout) try: (user, pwd) = GetLocalTicket(si, user) except: pass try: x = content.sessionManager.Login(user, pwd, None) except vim.fault.InvalidLogin: raise except Exception as e: raise return si, stub
Private method that performs the actual Connect and returns a connected service instance object. @param host: Which host to connect to. @type host: string @param port: Port @type port: int @param user: User @type user: string @param pwd: Password @type pwd: string @param service: Service @type service: string @param adapter: Adapter @type adapter: string @param version: Version @type version: string @param path: Path @type path: string @param keyFile: ssl key file path @type keyFile: string @param certFile: ssl cert file path @type certFile: string @param thumbprint: host cert thumbprint @type thumbprint: string @param sslContext: SSL Context describing the various SSL options. It is only supported in Python 2.7.9 or higher. @type sslContext: SSL.Context @param connectionPoolTimeout: Timeout in secs for idle connections to close, specify negative numbers for never closing the connections @type connectionPoolTimeout: int
27,313
def read_parquet(cls, path, engine, columns, **kwargs): from pyarrow.parquet import ParquetFile if cls.read_parquet_remote_task is None: return super(RayIO, cls).read_parquet(path, engine, columns, **kwargs) if not columns: pf = ParquetFile(path) columns = [ name for name in pf.metadata.schema.names if not PQ_INDEX_REGEX.match(name) ] num_partitions = cls.frame_mgr_cls._compute_num_partitions() num_splits = min(len(columns), num_partitions) column_splits = ( len(columns) // num_partitions if len(columns) % num_partitions == 0 else len(columns) // num_partitions + 1 ) col_partitions = [ columns[i : i + column_splits] for i in range(0, len(columns), column_splits) ] blk_partitions = np.array( [ cls.read_parquet_remote_task._remote( args=(path, cols, num_splits, kwargs), num_return_vals=num_splits + 1, ) for cols in col_partitions ] ).T remote_partitions = np.array( [ [cls.frame_partition_cls(obj) for obj in row] for row in blk_partitions[:-1] ] ) index_len = ray.get(blk_partitions[-1][0]) index = pandas.RangeIndex(index_len) new_query_compiler = cls.query_compiler_cls( cls.frame_mgr_cls(remote_partitions), index, columns ) return new_query_compiler
Load a parquet object from the file path, returning a DataFrame. Ray DataFrame only supports the pyarrow engine for now. Args: path: The filepath of the parquet file. We only support local files for now. engine: Ray only supports the pyarrow reader. This argument doesn't do anything for now. kwargs: Pass into parquet's read_pandas function. Notes: ParquetFile API is used. Please refer to the documentation here https://arrow.apache.org/docs/python/parquet.html
27,314
def search_text(self, text_cursor, search_txt, search_flags): def compare_cursors(cursor_a, cursor_b): return (cursor_b.selectionStart() >= cursor_a.selectionStart() and cursor_b.selectionEnd() <= cursor_a.selectionEnd()) text_document = self._editor.document() occurrences = [] index = -1 cursor = text_document.find(search_txt, 0, search_flags) original_cursor = text_cursor while not cursor.isNull(): if compare_cursors(cursor, original_cursor): index = len(occurrences) occurrences.append((cursor.selectionStart(), cursor.selectionEnd())) cursor.setPosition(cursor.position() + 1) cursor = text_document.find(search_txt, cursor, search_flags) return occurrences, index
Searches a text in a text document. :param text_cursor: Current text cursor :param search_txt: Text to search :param search_flags: QTextDocument.FindFlags :returns: the list of occurrences, the current occurrence index :rtype: tuple([], int)
27,315
def disable_ipv6():
    # The stripped sed invocation and log strings are reconstructed from the
    # docstring (edits /etc/default/ufw); exact wording may differ.
    exit_code = subprocess.call(['sed', '-i', 's/IPV6=.*/IPV6=no/g',
                                 '/etc/default/ufw'])
    if exit_code == 0:
        hookenv.log('IPv6 support in ufw disabled', level='INFO')
    else:
        hookenv.log("Couldn't disable IPv6 support in ufw")
Disable ufw IPv6 support in /etc/default/ufw
27,316
def result(self, state, row): "Place the next queen at the given row." col = state.index(None) new = state[:] new[col] = row return new
Place the next queen at the given row.
27,317
def get_attrs(self, *names): attrs = [getattr(self, name) for name in names] return attrs
Get multiple attributes of this object by name, returned as a list.
27,318
def is_special_orthogonal( matrix: np.ndarray, *, rtol: float = 1e-5, atol: float = 1e-8) -> bool: return (is_orthogonal(matrix, rtol=rtol, atol=atol) and (matrix.shape[0] == 0 or np.allclose(np.linalg.det(matrix), 1, rtol=rtol, atol=atol)))
Determines if a matrix is approximately special orthogonal. A matrix is special orthogonal if it is square and real and its transpose is its inverse and its determinant is one. Args: matrix: The matrix to check. rtol: The per-matrix-entry relative tolerance on equality. atol: The per-matrix-entry absolute tolerance on equality. Returns: Whether the matrix is special orthogonal within the given tolerance.
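A quick usage sketch: a 2-D rotation passes, while a reflection is orthogonal but has determinant -1 and fails:

    import numpy as np

    theta = 0.3
    rotation = np.array([[np.cos(theta), -np.sin(theta)],
                         [np.sin(theta),  np.cos(theta)]])
    reflection = np.array([[1.0, 0.0], [0.0, -1.0]])
    print(is_special_orthogonal(rotation))    # True
    print(is_special_orthogonal(reflection))  # False (det == -1)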
27,319
def add_patches(self, patches, after=None): if after is None: self.insert_patches(patches) else: self._check_patch(after) patchlines = self._patchlines_before(after) patchlines.append(self.patch2line[after]) for patch in patches: patchline = PatchLine(patch) patchlines.append(patchline) self.patch2line[patchline.get_patch()] = patchline patchlines.extend(self._patchlines_after(after)) self.patchlines = patchlines
Add a list of patches to the patches list
27,320
def quadrature_weights(Ntheta): import numpy as np weights = np.empty(2*(Ntheta-1), dtype=np.complex128) _quadrature_weights(Ntheta, weights) return weights
Fourier-space weights needed to evaluate I_{mm'} This is mostly an internal function, included here for backwards compatibility. See map2salm and salm2map for more useful functions.
27,321
def prepare_service(data): if not data: return None, {} if isinstance(data, str): return data, {} if all(field in data for field in ("Node", "Service", "Checks")): return data["Service"]["ID"], data["Service"] if all(field in data for field in ("ServiceName", "ServiceID")): return data["ServiceID"], { "ID": data["ServiceID"], "Service": data["ServiceName"], "Tags": data.get("ServiceTags"), "Address": data.get("ServiceAddress"), "Port": data.get("ServicePort"), } if list(data) == ["ID"]: return data["ID"], {} result = {} if "Name" in data: result["Service"] = data["Name"] for k in ("Service", "ID", "Tags", "Address", "Port"): if k in data: result[k] = data[k] return result.get("ID"), result
Prepare service for catalog endpoint Parameters: data (Union[str, dict]): Service ID or service definition Returns: Tuple[str, dict]: str is ID and dict is service Transform ``/v1/health/state/<state>``:: { "Node": "foobar", "CheckID": "service:redis", "Name": "Service 'redis' check", "Status": "passing", "Notes": "", "Output": "", "ServiceID": "redis1", "ServiceName": "redis" } to:: { "ID": "redis1", "Service": "redis" } Extract from /v1/health/service/<service>:: { "Node": {...}, "Service": { "ID": "redis1", "Service": "redis", "Tags": None, "Address": "10.1.10.12", "Port": 8000 }, "Checks": [...] }
27,322
def extract_library_properties_from_selected_row(self): (model, row) = self.view.get_selection().get_selected() tree_item_key = model[row][self.ID_STORAGE_ID] library_item = model[row][self.ITEM_STORAGE_ID] library_path = model[row][self.LIB_PATH_STORAGE_ID] if isinstance(library_item, dict): os_path = model[row][self.OS_PATH_STORAGE_ID] return os_path, None, None, tree_item_key assert isinstance(library_item, string_types) library_os_path = library_item library_name = library_os_path.split(os.path.sep)[-1] return library_os_path, library_path, library_name, tree_item_key
Extracts properties library_os_path, library_path, library_name and tree_item_key from tree store row
27,323
def custom_layouts_menu(self, value):
    if value is not None:
        # The assertion message placeholders were stripped; restored to the
        # "'{0}' attribute: '{1}' type is not 'QMenu'!" form used elsewhere
        # in this codebase.
        assert type(value) is QMenu, \
            "'{0}' attribute: '{1}' type is not 'QMenu'!".format(
                "custom_layouts_menu", value)
    self.__custom_layouts_menu = value
Setter for **self.__custom_layouts_menu** attribute. :param value: Attribute value. :type value: QMenu
27,324
def _get_common_block_structure(lhs_bs, rhs_bs):
    if isinstance(lhs_bs, Circuit):
        lhs_bs = lhs_bs.block_structure
    if isinstance(rhs_bs, Circuit):
        rhs_bs = rhs_bs.block_structure
    if sum(lhs_bs) != sum(rhs_bs):
        # The original error message was stripped; this wording is assumed.
        raise IncompatibleBlockStructures(
            'Block structures have different total channel numbers.')
    if len(lhs_bs) == len(rhs_bs) == 0:
        return ()
    i = j = 1
    lsum = 0
    while True:
        lsum = sum(lhs_bs[:i])
        rsum = sum(rhs_bs[:j])
        if lsum < rsum:
            i += 1
        elif rsum < lsum:
            j += 1
        else:
            break
    return (lsum, ) + _get_common_block_structure(lhs_bs[i:], rhs_bs[j:])
For two block structures ``aa = (a1, a2, ..., an)``, ``bb = (b1, b2, ..., bm)`` generate the maximal common block structure so that every block from aa and bb is contained in exactly one block of the resulting structure. This is useful for determining how to apply the distributive law when feeding two concatenated Circuit objects into each other. Examples: ``(1, 1, 1), (2, 1) -> (2, 1)`` ``(1, 1, 2, 1), (2, 1, 2) -> (2, 3)`` Args: lhs_bs (tuple): first block structure rhs_bs (tuple): second block structure
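For intuition, the docstring's second example worked as a direct call; plain tuples are accepted without wrapping in Circuit objects (a sketch, not from the source):

    common = _get_common_block_structure((1, 1, 2, 1), (2, 1, 2))
    print(common)  # (2, 3): 1+1 aligns with 2, then 2+1 aligns with 1+2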
27,325
def sanitize_http_request_cookies(client, event): try: cookies = event["context"]["request"]["cookies"] event["context"]["request"]["cookies"] = varmap(_sanitize, cookies) except (KeyError, TypeError): pass try: cookie_string = event["context"]["request"]["headers"]["cookie"] event["context"]["request"]["headers"]["cookie"] = _sanitize_string(cookie_string, "; ", "=") except (KeyError, TypeError): pass return event
Sanitizes http request cookies :param client: an ElasticAPM client :param event: a transaction or error event :return: The modified event
27,326
def find_expected_error(self, delta_params='calc'):
    # The stripped string literals are restored from the docstring: the
    # default is 'calc', and 'perfect' means zero damping (raw JTJ).
    grad = self.calc_grad()
    if list(delta_params) in [list('calc'), list('perfect')]:
        jtj = (self.JTJ if delta_params == 'perfect'
               else self._calc_damped_jtj(self.JTJ))
        delta_params = self._calc_lm_step(jtj, self.calc_grad())
    expected_error = (self.error + np.dot(grad, delta_params) +
                      np.dot(np.dot(self.JTJ, delta_params), delta_params))
    return expected_error
Returns the error expected after an update if the model were linear. Parameters ---------- delta_params : {numpy.ndarray, 'calc', or 'perfect'}, optional The relative change in parameters. If 'calc', uses update calculated from the current damping, J, etc; if 'perfect', uses the update calculated with zero damping. Returns ------- numpy.float64 The expected error after the update with `delta_params`
27,327
def slugify(text, delim=u'-'):
    result = []
    for word in _punct_re.split((text or '').lower()):
        # The stripped codec name is assumed to be translitcodec's
        # 'translit/long', the usual choice for ASCII-only slugs.
        result.extend(codecs.encode(word, 'translit/long', 'strict').split())
    return delim.join([str(r) for r in result])
Generate an ASCII-only slug.
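A usage sketch, assuming `_punct_re` splits on punctuation and whitespace (e.g. re.compile(r'[\W_]+')):

    print(slugify('Hello, World!'))  # 'hello-world'
    print(slugify('Ünïcode text'))   # transliterated to ASCII, e.g. 'unicode-text'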
27,328
def split_log(logf): flashpatt = re.compile( r, flags=re.DOTALL) return flashpatt.findall(logf)
split concat log into individual samples
27,329
def wavfile_to_examples(wav_file):
    from scipy.io import wavfile
    sr, wav_data = wavfile.read(wav_file)
    # The stripped assertion message is restored per the VGGish input code.
    assert wav_data.dtype == np.int16, 'Bad sample type: %r' % wav_data.dtype
    samples = wav_data / 32768.0  # Convert int16 to [-1.0, +1.0]
    return waveform_to_examples(samples, sr)
Convenience wrapper around waveform_to_examples() for a common WAV format. Args: wav_file: String path to a file, or a file-like object. The file is assumed to contain WAV audio data with signed 16-bit PCM samples. Returns: See waveform_to_examples.
27,330
def get_files_by_layer(self, layer_name, file_pattern='*'):
    # The stripped default pattern is assumed to match all files.
    layer_path = os.path.join(self.path, layer_name)
    return list(dg.find_files(layer_path, file_pattern))
returns a list of all files with the given filename pattern in the given PCC annotation layer
27,331
def delete_network_resource_property_entry(resource, prop):
    def delete_func(cmd, resource_group_name, resource_name, item_name,
                    no_wait=False):
        client = getattr(network_client_factory(cmd.cli_ctx), resource)
        item = client.get(resource_group_name, resource_name)
        keep_items = [x for x in item.__getattribute__(prop)
                      if x.name.lower() != item_name.lower()]
        _set_param(item, prop, keep_items)
        if no_wait:
            sdk_no_wait(no_wait, client.create_or_update,
                        resource_group_name, resource_name, item)
        else:
            result = sdk_no_wait(no_wait, client.create_or_update,
                                 resource_group_name, resource_name,
                                 item).result()
            if next((x for x in getattr(result, prop)
                     if x.name.lower() == item_name.lower()), None):
                # Placeholders were stripped from the message format string.
                raise CLIError("Failed to delete '{}' on '{}'".format(
                    item_name, resource_name))

    # The stripped name template is assumed to follow a 'delete_{}_{}' form.
    func_name = 'delete_{}_{}'.format(resource, prop)
    setattr(sys.modules[__name__], func_name, delete_func)
    return func_name
Factory method for creating delete functions.
27,332
def relations(): rels = {} for reltype in relation_types(): relids = {} for relid in relation_ids(reltype): units = {local_unit(): relation_get(unit=local_unit(), rid=relid)} for unit in related_units(relid): reldata = relation_get(unit=unit, rid=relid) units[unit] = reldata relids[relid] = units rels[reltype] = relids return rels
Get a nested dictionary of relation data for all related units
27,333
def iqi(ql, qs, ns=None, rc=None, ot=None, coe=None, moc=DEFAULT_ITER_MAXOBJECTCOUNT,): return CONN.IterQueryInstances(FilterQueryLanguage=ql, FilterQuery=qs, namespace=ns, ReturnQueryResultClass=rc, OperationTimeout=ot, ContinueOnError=coe, MaxObjectCount=moc)
*New in pywbem 0.10 as experimental and finalized in 0.12.* This function is a wrapper for :meth:`~pywbem.WBEMConnection.IterQueryInstances`. Execute a query in a namespace, using the corresponding pull operations if supported by the WBEM server or otherwise the corresponding traditional operation, and using the Python :term:`py:generator` idiom to return the result. This method is an alternative to using the pull operations directly, that frees the user of having to know whether the WBEM server supports pull operations. Other than the other i...() functions, this function does not return a generator object directly, but as a property of the returned object. Parameters: ql (:term:`string`): Name of the query language used in the `qs` parameter, e.g. "DMTF:CQL" for CIM Query Language, and "WQL" for WBEM Query Language. Because this is not a filter query, "DMTF:FQL" is not a valid query language for this request. qs (:term:`string`): Query string in the query language specified in the `ql` parameter. ns (:term:`string`): Name of the CIM namespace to be used (case independent). If `None`, defaults to the default namespace of the connection. rc (:class:`py:bool`): Controls whether a class definition describing the properties of the returned instances will be returned. `None` will cause the server to use its default of `False`. ot (:class:`~pywbem.Uint32`): Operation timeout in seconds. This is the minimum time the WBEM server must keep the enumeration session open between requests on that session. A value of 0 indicates that the server should never time out. The server may reject the proposed value. `None` will cause the server to use its default timeout. coe (:class:`py:bool`): Continue on error flag. `None` will cause the server to use its default of `False`. moc (:class:`~pywbem.Uint32`): Maximum number of instances the WBEM server may return for each of the open and pull requests issued during the iterations over the returned generator object. Zero and `None` are not allowed. Returns: :class:`~pywbem.IterQueryInstancesReturn`: An object with the following properties: * **query_result_class** (:class:`~pywbem.CIMClass`): The query result class, if requested via the `rc` parameter. `None`, if a query result class was not requested. * **generator** (:term:`py:generator` iterating :class:`~pywbem.CIMInstance`): A generator object that iterates the CIM instances representing the query result. These instances do not have an instance path set.
27,334
def username_role(self, **kwargs):
    # The stripped kwargs keys mirror the XML element names being populated.
    config = ET.Element("config")
    username = ET.SubElement(config, "username",
                             xmlns="urn:brocade.com:mgmt:brocade-aaa")
    name_key = ET.SubElement(username, "name")
    name_key.text = kwargs.pop('name')
    role = ET.SubElement(username, "role")
    role.text = kwargs.pop('role')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
27,335
def update_sg(self, context, sg, rule_id, action):
    # The stripped dict keys ('tenant_id', 'id') are reconstructed from the
    # surrounding job-creation pattern.
    db_sg = db_api.security_group_find(context, id=sg, scope=db_api.ONE)
    if not db_sg:
        return None
    with context.session.begin():
        job_body = dict(action="%s sg rule %s" % (action, rule_id),
                        resource_id=rule_id,
                        tenant_id=db_sg['tenant_id'])
        job_body = dict(job=job_body)
        job = job_api.create_job(context.elevated(), job_body)
        rpc_client = QuarkSGAsyncProducerClient()
        try:
            rpc_client.populate_subtasks(context, sg, job['id'])
        except om_exc.MessagingTimeout:
            LOG.error("Failed to create subtasks. Rabbit running?")
            return None
    return {"job_id": job['id']}
Begins the async update process.
27,336
def make_request(self, action, body='', object_hook=None):
    # Stripped header names and request constants are restored per boto's
    # DynamoDB layer1 conventions; treat them as a reconstruction.
    headers = {'X-Amz-Target': '%s_%s.%s' % (self.ServiceName,
                                             self.Version, action),
               'Content-Type': 'application/x-amz-json-1.0',
               'Content-Length': str(len(body))}
    http_request = self.build_base_http_request('POST', '/', '/',
                                                {}, headers, body, None)
    response = self._mexe(http_request, sender=None,
                          override_num_retries=10,
                          retry_handler=self._retry_handler)
    response_body = response.read()
    boto.log.debug(response_body)
    return json.loads(response_body, object_hook=object_hook)
:raises: ``DynamoDBExpiredTokenError`` if the security token expires.
27,337
def copy(self, target=None, name=None):
    if target is None and name is None:
        raise ValueError('Must provide a target, a name, or both')
    if isinstance(target, Folder):
        target_id = target.object_id
        drive_id = target.drive_id
    elif isinstance(target, Drive):
        root_folder = target.get_root_folder()
        if not root_folder:
            return None
        target_id = root_folder.object_id
        drive_id = root_folder.drive_id
    elif target is None:
        target_id = None
        drive_id = None
    else:
        raise ValueError('Target must be a Folder or a Drive')
    if not self.object_id:
        return None
    if target_id == 'root':
        raise ValueError("When copying, target id can't be 'root'")
    # The middle of this function was garbled in extraction; the request body
    # and Location-header handling below are reconstructed from the fused
    # literals ('copy', 'parentReference', 'id', 'driveId', 'name',
    # 'Location', 'monitor/') and may not match the source exactly.
    url = self.build_url(self._endpoints.get('copy').format(id=self.object_id))
    data = {'parentReference': {'id': target_id, 'driveId': drive_id}}
    if name:
        data['name'] = name
    response = self.con.post(url, data=data)
    if not response:
        return None
    location = response.headers.get('Location')
    item_id = location.split('monitor/')[-1]
    return CopyOperation(parent=self.drive, item_id=item_id)
Asynchronously creates a copy of this DriveItem and all its child elements. :param target: target location to move to. If it's a drive the item will be moved to the root folder. :type target: drive.Folder or Drive :param name: a new name for the copy. :rtype: CopyOperation
27,338
def _clear_zones(self, zone): cleared_zones = [] found_last_faulted = found_current = at_end = False it = iter(self._zones_faulted) try: while not found_last_faulted: z = next(it) if z == self._last_zone_fault: found_last_faulted = True break except StopIteration: at_end = True try: while not at_end and not found_current: z = next(it) if z == zone: found_current = True break else: cleared_zones += [z] except StopIteration: pass if not found_current: it = iter(self._zones_faulted) try: while not found_current: z = next(it) if z == zone: found_current = True break else: cleared_zones += [z] except StopIteration: pass for z in cleared_zones: self._update_zone(z, Zone.CLEAR)
Clear all expired zones from our status list. :param zone: current zone being processed :type zone: int
27,339
def get_box_office_films(self): today = datetime.date.today() next_wednesday = (today + datetime.timedelta((2 - today.weekday()) % 7)).strftime() films = self.get_films(cinema=79, date = next_wednesday) films = filter(lambda x: not in x[], films) for film in films: if in film[]: film[]=film[][5:] return films
Uses a certain cinema (O2) and a certain day when non-specialist films show (Wednesday) to get a list of the latest box office films
27,340
def generate_encodeable_characters(characters: Iterable[str],
                                   encodings: Iterable[str]) -> Iterable[str]:
    for c in characters:
        for encoding in encodings:
            try:
                c.encode(encoding)
                # Stop after the first encoding that works, so each character
                # is yielded at most once (the docstring asks for a subset).
                yield c
                break
            except UnicodeEncodeError:
                pass
Generates the subset of 'characters' that can be encoded by 'encodings'. Args: characters: The characters to check for encodeability e.g. 'abcd'. encodings: The encodings to check against e.g. ['cp1252', 'iso-8859-5']. Returns: The subset of 'characters' that can be encoded using one of the provided encodings.
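A usage sketch: 'Я' is encodable in iso-8859-5 but not cp1252, '€' is the reverse, and the snowman fits neither:

    chars = list(generate_encodeable_characters('aЯ€\u2603',
                                                ['cp1252', 'iso-8859-5']))
    print(chars)  # ['a', 'Я', '€'] - U+2603 is dropped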
27,341
def getTextualNode(self, textId, subreference=None, prevnext=False, metadata=False): key = _cache_key("Nautilus", self.name, "Passage", textId, subreference) o = self.cache.get(key) if o is not None: return o text, text_metadata = self.__getText__(textId) if subreference is not None: subreference = Reference(subreference) passage = text.getTextualNode(subreference) passage.set_metadata_from_collection(text_metadata) self.cache.set(key, passage) return passage
Retrieve a text node from the API :param textId: PrototypeText Identifier :type textId: str :param subreference: Passage Reference :type subreference: str :param prevnext: Retrieve graph representing previous and next passage :type prevnext: boolean :param metadata: Retrieve metadata about the passage and the text :type metadata: boolean :return: Passage :rtype: Passage
27,342
def _get_user_info(self, cmd, section, required=True, accept_just_who=False):
    line = self.next_line()
    # The stripped bytes literal is assumed to be the b' ' separator that
    # follows the section name.
    if line.startswith(section + b' '):
        return self._who_when(line[len(section + b' '):], cmd, section,
                              accept_just_who=accept_just_who)
    elif required:
        self.abort(errors.MissingSection, cmd, section)
    else:
        self.push_line(line)
        return None
Parse a user section.
27,343
def milestones(self):
    # The stripped cache key is assumed to be 'milestones'.
    if self.cache['milestones']:
        return self.cache['milestones']
    milestone_xml = self.bc.list_milestones(self.id)
    milestones = []
    for node in ET.fromstring(milestone_xml).findall("milestone"):
        milestones.append(Milestone(node))
    milestones.sort()
    milestones.reverse()
    self.cache['milestones'] = milestones
    return self.cache['milestones']
Array of all milestones
27,344
def sub_description(self): gd = self.geo_description td = self.time_description if gd and td: return .format(gd, td, self._p.count) elif gd: return .format(gd, self._p.count) elif td: return .format(td, self._p.count) else: return .format(self._p.count)
Time and space description
27,345
def refs_to(cls, sha1, repo): matching = [] for refname in repo.listall_references(): symref = repo.lookup_reference(refname) dref = symref.resolve() oid = dref.target commit = repo.get(oid) if commit.hex == sha1: matching.append(symref.shorthand) return matching
Returns all refs pointing to the given SHA1.
27,346
def _symbol_bars(self, symbols, size, _from=None, to=None, limit=None):
    # The stripped literals ('day', 'minute', '1min') are restored from the
    # docstring's stated size values; the stray "dayminute" token was
    # docstring residue and has been removed.
    assert size in ('day', 'minute')

    # Query twice the requested number of bars to be safe.
    query_limit = limit
    if query_limit is not None:
        query_limit *= 2

    @skip_http_error((404, 504))
    def fetch(symbol):
        df = self._api.polygon.historic_agg(
            size, symbol, _from, to, query_limit).df
        if size == 'minute':
            df.index += pd.Timedelta('1min')
            mask = self._cal.minutes_in_range(
                df.index[0], df.index[-1],
            ).tz_convert(NY)
            df = df.reindex(mask)
        if limit is not None:
            df = df.iloc[-limit:]
        return df

    return parallelize(fetch)(symbols)
Query historic_agg either minute or day in parallel for multiple symbols, and return in dict. symbols: list[str] size: str ('day', 'minute') _from: str or pd.Timestamp to: str or pd.Timestamp limit: str or int return: dict[str -> pd.DataFrame]
27,347
def to(location, code=falcon.HTTP_302):
    raise falcon.http_status.HTTPStatus(code, {'location': location})
Redirects to the specified location using the provided http_code (defaults to HTTP_302 FOUND)
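A brief usage sketch: calling it inside a request handler short-circuits the response with a redirect (handler name and path are illustrative):

    def on_get(self, req, resp):
        to('/new-location')  # raises HTTPStatus 302 with a location header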
27,348
def determine_elected_candidates_in_order(self, candidate_votes): eligible_by_vote = defaultdict(list) for candidate_id, votes in candidate_votes.candidate_votes_iter(): if candidate_id in self.candidates_elected: continue if votes < self.quota: continue eligible_by_vote[votes].append(candidate_id) elected = [] for votes in reversed(sorted(eligible_by_vote)): candidate_ids = eligible_by_vote[votes] candidate_ids.sort(key=self.candidate_order_fn) if len(candidate_ids) == 1: elected.append(candidate_ids[0]) else: tie_breaker_round = self.find_tie_breaker(candidate_ids) if tie_breaker_round is not None: self.results.provision_used( ActProvision("Multiple candidates elected with %d votes. Tie broken from previous totals." % (votes))) for candidate_id in reversed(sorted(candidate_ids, key=tie_breaker_round.get_vote_count)): elected.append(candidate_id) else: self.results.provision_used( ActProvision("Multiple candidates elected with %d votes. Input required from Australian Electoral Officer." % (votes))) permutations = list(itertools.permutations(candidate_ids)) permutations.sort() choice = self.resolve_election_order(permutations) for candidate_id in permutations[choice]: elected.append(candidate_id) return elected
determine all candidates with at least a quota of votes in `candidate_votes'. returns results in order of decreasing vote count. Any ties are resolved within this method.
27,349
def _copy(query_dict):
    memo = {}
    # The stripped first argument is assumed to be the empty query string.
    result = query_dict.__class__('', encoding=query_dict.encoding,
                                  mutable=True)
    memo[id(query_dict)] = result
    for key, value in dict.items(query_dict):
        dict.__setitem__(result, copy.deepcopy(key, memo),
                         copy.deepcopy(value, memo))
    return result
Return a mutable copy of `query_dict`. This is a workaround to Django bug #13572, which prevents QueryDict.copy from working.
27,350
def print_result(result):
    # Python 2 code; the stripped error handler and fallback encoding are
    # assumed to be 'replace' and 'utf-8'.
    try:
        print result
    except UnicodeEncodeError:
        if sys.stdout.encoding:
            print result.encode(sys.stdout.encoding, 'replace')
        else:
            print result.encode('utf-8')
    except:
        print "Unexpected error attempting to print result"
Print the result, ascii encode if necessary
27,351
def open_file(filename, file_mode='w'):
    # Default mode 'w' per the docstring; the stripped encoding is assumed
    # to be 'utf-8' for text-mode output.
    if six.PY2:
        return codecs.open(filename, mode=file_mode, encoding='utf-8')
    return open(filename, file_mode, encoding='utf-8')
A static convenience function that performs the open of the recorder file correctly for different versions of Python. *New in pywbem 0.10.* This covers the issue where the file should be opened in text mode but that is done differently in Python 2 and Python 3. The returned file-like object must be closed by the caller. Parameters: filename(:term:`string`): Name of the file where the recorder output will be written file_mode(:term:`string`): Optional file mode. The default is 'w' which overwrites any existing file. if 'a' is used, the data is appended to any existing file. Returns: File-like object. Example:: recorder = TestClientRecorder(...) recorder_file = recorder.open_file('recorder.log') . . . # Perform WBEM operations using the recorder recorder_file.close()
27,352
def named_side_effect(original, name): def wrapper(callable, *args, **kwargs): return _side_effect_wrapper(callable, args, kwargs, name) return wrapper
Decorator for function or method that do not modify the recorder state but have some side effects that can't be replayed. What it does in recording mode is keep the function name, arguments, keyword and result as a side effect that will be recorded in the journal. In replay mode, it will only pop the next expected side-effect, verify the function name, arguments and keywords and return the expected result without executing the real function code. If the function name, arguments or keywords were to be different than the expected ones, it would raise L{ReplayError}. Should work for any function or method.
27,353
def vals(cls):
    if cls._vals:
        return cls._vals
    _vals = []
    for mro in cls.__bases__:
        if issubclass(mro, KeyCollection):
            _vals.extend(mro.vals())
    # Per the docstring, skip attributes starting with '_' and callables.
    _vals.extend([
        vv for kk, vv in vars(cls).items()
        if not kk.startswith('_') and not callable(getattr(cls, kk))
    ])
    cls._vals = _vals
    return cls._vals
Return this class's attribute values (those not starting with '_'). Returns ------- _vals : list of objects List of values of internal attributes. Order is effectively random.
27,354
def find_all_valid_decks(provider: Provider, deck_version: int, prod: bool=True) -> Generator: pa_params = param_query(provider.network) if prod: p2th = pa_params.P2TH_addr else: p2th = pa_params.test_P2TH_addr if isinstance(provider, RpcNode): deck_spawns = (provider.getrawtransaction(i, 1) for i in find_deck_spawns(provider)) else: try: deck_spawns = (provider.getrawtransaction(i, 1) for i in provider.listtransactions(p2th)) except TypeError as err: raise EmptyP2THDirectory(err) with concurrent.futures.ThreadPoolExecutor(max_workers=2) as th: for result in th.map(deck_parser, ((provider, rawtx, deck_version, p2th) for rawtx in deck_spawns)): if result: yield result
Scan the blockchain for PeerAssets decks, returns list of deck objects. : provider - provider instance : version - deck protocol version (0, 1, 2, ...) : test True/False - test or production P2TH
27,355
def user_parser(user): if __is_deleted(user): return deleted_parser(user) if user[] in item_types: raise Exception() if type(user[]) == int: raise Exception() return User( user[], user[], user[], user[], user[], user[], )
Parses a user object
27,356
def wait_with_ioloop(self, ioloop, timeout=None): f = Future() def cb(): return gen.chain_future(self.until_set(), f) ioloop.add_callback(cb) try: f.result(timeout) return True except TimeoutError: return self._flag
Do a blocking wait until the event is set. Parameters ---------- ioloop : tornado.ioloop.IOLoop instance MUST be the same ioloop that set() / clear() is called from timeout : float, int or None If not None, only wait up to `timeout` seconds for event to be set. Return Value ------------ flag : True if event was set within timeout, otherwise False. Notes ----- This will deadlock if called in the ioloop!
27,357
def capabilities(self, width, height, rotate, mode="1"): assert mode in ("1", "RGB", "RGBA") assert rotate in (0, 1, 2, 3) self._w = width self._h = height self.width = width if rotate % 2 == 0 else height self.height = height if rotate % 2 == 0 else width self.size = (self.width, self.height) self.bounding_box = (0, 0, self.width - 1, self.height - 1) self.rotate = rotate self.mode = mode self.persist = False
Assigns attributes such as ``width``, ``height``, ``size`` and ``bounding_box`` correctly oriented from the supplied parameters. :param width: The device width. :type width: int :param height: The device height. :type height: int :param rotate: An integer value of 0 (default), 1, 2 or 3 only, where 0 is no rotation, 1 is rotate 90° clockwise, 2 is 180° rotation and 3 represents 270° rotation. :type rotate: int :param mode: The supported color model, one of ``"1"``, ``"RGB"`` or ``"RGBA"`` only. :type mode: str
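A small sketch of the orientation logic, assuming `device` is an instance of the class defining this method: with rotate=1 (90° clockwise), logical width and height swap:

    device.capabilities(128, 64, rotate=1)
    print(device.width, device.height)  # 64 128
    print(device.bounding_box)          # (0, 0, 63, 127)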
27,358
def __generate_method(name): try: func = getattr(DataFrame, name) except AttributeError as e: def func(self, *args, **kwargs): raise e return func wraps = getattr(functools, "wraps", lambda _: lambda f: f) @wraps(func) def _wrapper(self, *args, **kwargs): dataframe = func(self, *args, **kwargs) if self.__class__ != SourcedDataFrame \ and isinstance(self, SourcedDataFrame) \ and isinstance(dataframe, DataFrame): return self.__class__(dataframe._jdf, self._session, self._implicits) return dataframe return _wrapper
Wraps the DataFrame's original method by name to return the derived class instance.
27,359
def serial_udb_extra_f8_encode(self, sue_HEIGHT_TARGET_MAX, sue_HEIGHT_TARGET_MIN, sue_ALT_HOLD_THROTTLE_MIN, sue_ALT_HOLD_THROTTLE_MAX, sue_ALT_HOLD_PITCH_MIN, sue_ALT_HOLD_PITCH_MAX, sue_ALT_HOLD_PITCH_HIGH): return MAVLink_serial_udb_extra_f8_message(sue_HEIGHT_TARGET_MAX, sue_HEIGHT_TARGET_MIN, sue_ALT_HOLD_THROTTLE_MIN, sue_ALT_HOLD_THROTTLE_MAX, sue_ALT_HOLD_PITCH_MIN, sue_ALT_HOLD_PITCH_MAX, sue_ALT_HOLD_PITCH_HIGH)
Backwards compatible version of SERIAL_UDB_EXTRA F8: format sue_HEIGHT_TARGET_MAX : Serial UDB Extra HEIGHT_TARGET_MAX (float) sue_HEIGHT_TARGET_MIN : Serial UDB Extra HEIGHT_TARGET_MIN (float) sue_ALT_HOLD_THROTTLE_MIN : Serial UDB Extra ALT_HOLD_THROTTLE_MIN (float) sue_ALT_HOLD_THROTTLE_MAX : Serial UDB Extra ALT_HOLD_THROTTLE_MAX (float) sue_ALT_HOLD_PITCH_MIN : Serial UDB Extra ALT_HOLD_PITCH_MIN (float) sue_ALT_HOLD_PITCH_MAX : Serial UDB Extra ALT_HOLD_PITCH_MAX (float) sue_ALT_HOLD_PITCH_HIGH : Serial UDB Extra ALT_HOLD_PITCH_HIGH (float)
27,360
def assemble_binary_rules(self, main, jar, custom_rules=None):
    rules = list(custom_rules or [])
    # A line defining main_package was lost in extraction; deriving it from
    # `main` is an assumption consistent with how it is used below.
    main_package = main.rsplit('.', 1)[0] if '.' in main else None
    rules.extend(self.shade_package(pkg)
                 for pkg in sorted(self._iter_jar_packages(jar))
                 if pkg != main_package)
    return rules
Creates an ordered list of rules suitable for fully shading the given binary. The default rules will ensure the `main` class name is un-changed along with a minimal set of support classes but that everything else will be shaded. Any `custom_rules` are given highest precedence and so they can interfere with this automatic binary shading. In general it's safe to add exclusion rules to open up classes that need to be shared between the binary and the code it runs over. An example would be excluding the `org.junit.Test` annotation class from shading since a tool running junit needs to be able to scan for this annotation inside the user code it tests. :param unicode main: The main class to preserve as the entry point. :param unicode jar: The path of the binary jar the `main` class lives in. :param list custom_rules: An optional list of custom `Shader.Rule`s. :returns: a precedence-ordered list of `Shader.Rule`s
27,361
def intermediate_cpfs(self) -> List[CPF]: _, cpfs = self.cpfs interm_cpfs = [cpf for cpf in cpfs if cpf.name in self.intermediate_fluents] interm_cpfs = sorted(interm_cpfs, key=lambda cpf: (self.intermediate_fluents[cpf.name].level, cpf.name)) return interm_cpfs
Returns list of intermediate-fluent CPFs in level order.
27,362
def getsuffix(subject):
    index = subject.rfind('.')
    # Normalize path separators so a dot inside a directory name doesn't
    # count as a suffix separator.
    if index > subject.replace('\\', '/').rfind('/'):
        return subject[index + 1:]
    return None
Returns the suffix of a filename. If the file has no suffix, returns None. Can return an empty string if the filename ends with a period.
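Usage sketch:

    print(getsuffix('archive.tar.gz'))  # 'gz'
    print(getsuffix('README'))          # None
    print(getsuffix('notes.'))          # '' (trailing period)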
27,363
def _parse_type(self, element, types): name = element.attrib["name"] type = element.attrib["type"] if not type.startswith("tns:"): raise RuntimeError("Unexpected element type %s" % type) type = type[4:] [children] = types[type][0] types[type][1] = True self._remove_namespace_from_tag(children) if children.tag not in ("sequence", "choice"): raise RuntimeError("Unexpected children type %s" % children.tag) if children[0].attrib["name"] == "item": schema = SequenceSchema(name) else: schema = NodeSchema(name) for child in children: self._remove_namespace_from_tag(child) if child.tag == "element": name, type, min_occurs, max_occurs = self._parse_child(child) if type in self.leaf_types: if max_occurs != 1: raise RuntimeError("Unexpected max value for leaf") if not isinstance(schema, NodeSchema): raise RuntimeError("Attempt to add leaf to a non-node") schema.add(LeafSchema(name), min_occurs=min_occurs) else: if name == "item": if not isinstance(schema, SequenceSchema): raise RuntimeError("Attempt to set child for " "non-sequence") schema.set(self._parse_type(child, types), min_occurs=min_occurs, max_occurs=max_occurs) else: if max_occurs != 1: raise RuntimeError("Unexpected max for node") if not isinstance(schema, NodeSchema): raise RuntimeError("Unexpected schema type") schema.add(self._parse_type(child, types), min_occurs=min_occurs) elif child.tag == "choice": pass else: raise RuntimeError("Unexpected child type") return schema
Parse a 'complexType' element. @param element: The top-level complexType element @param types: A map of the elements of all available complexType's. @return: The schema for the complexType.
27,364
def partial_distance_covariance(x, y, z): a = _u_distance_matrix(x) b = _u_distance_matrix(y) c = _u_distance_matrix(z) proj = u_complementary_projection(c) return u_product(proj(a), proj(b))
Partial distance covariance estimator. Compute the estimator for the partial distance covariance of the random vectors corresponding to :math:`x` and :math:`y` with respect to the random variable corresponding to :math:`z`. Parameters ---------- x: array_like First random vector. The columns correspond with the individual random variables while the rows are individual instances of the random vector. y: array_like Second random vector. The columns correspond with the individual random variables while the rows are individual instances of the random vector. z: array_like Random vector with respect to which the partial distance covariance is computed. The columns correspond with the individual random variables while the rows are individual instances of the random vector. Returns ------- numpy scalar Value of the estimator of the partial distance covariance. See Also -------- partial_distance_correlation Examples -------- >>> import numpy as np >>> import dcor >>> a = np.array([[1, 2, 3, 4], ... [5, 6, 7, 8], ... [9, 10, 11, 12], ... [13, 14, 15, 16]]) >>> b = np.array([[1], [0], [0], [1]]) >>> c = np.array([[1, 3, 4], ... [5, 7, 8], ... [9, 11, 15], ... [13, 15, 16]]) >>> dcor.partial_distance_covariance(a, a, c) # doctest: +ELLIPSIS 0.0024298... >>> dcor.partial_distance_covariance(a, b, c) 0.0347030... >>> dcor.partial_distance_covariance(b, b, c) 0.4956241...
27,365
def hash_file(path, block_size=65536):
    sha256 = hashlib.sha256()
    with open(path, 'rb') as f:
        # Read in fixed-size blocks until an empty read signals EOF.
        for block in iter(lambda: f.read(block_size), b''):
            sha256.update(block)
    return sha256.hexdigest()
Returns SHA256 checksum of a file Args: path (string): Absolute file path of file to hash block_size (int, optional): Number of bytes to read per block
27,366
def get_delete_api(self, resource): parameters = self.delete_item_parameters(resource) get_item_api = { : % resource.get_api_name(), : % resource.model.__name__, "responseClass": "void", : [ { : , : % resource.model.__name__, : % resource.model.__name__, : parameters, } ] } return get_item_api
Generates the meta descriptor for the resource item api.
27,367
def exec_command_on_nodes(nodes, cmd, label, conn_params=None): if isinstance(nodes, BASESTRING): nodes = [nodes] if conn_params is None: conn_params = DEFAULT_CONN_PARAMS logger.debug("Running %s on %s ", label, nodes) remote = ex.get_remote(cmd, nodes, conn_params) remote.run() if not remote.finished_ok: raise Exception() return remote
Execute a command on a node (id or hostname) or on a set of nodes. :param nodes: list of targets of the command cmd. Each must be an execo.Host. :param cmd: string representing the command to run on the remote nodes. :param label: string for debugging purpose. :param conn_params: connection parameters passed to the execo.Remote function
27,368
def register_types(name, *types): type_names.setdefault(name, set()) for t in types: if t in media_types: type_names[media_types[t]].discard(t) media_types[t] = name type_names[name].add(t)
Register a short name for one or more content types.
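A usage sketch (the short name and content types are illustrative):

    register_types('json', 'application/json', 'text/json')
    print(media_types['application/json'])  # 'json'
    print(type_names['json'])               # {'application/json', 'text/json'}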
27,369
def _set_anycast_rp_ip(self, v, load=False): if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=YANGListType("anycast_rp_ip_addr",anycast_rp_ip.anycast_rp_ip, yang_name="anycast-rp-ip", rest_name="anycast-rp-ip", parent=self, is_container=, user_ordered=False, path_helper=self._path_helper, yang_keys=, extensions={u: {u: u, u: None, u: u, u: u, u: None}}), is_container=, yang_name="anycast-rp-ip", rest_name="anycast-rp-ip", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u: {u: u, u: None, u: u, u: u, u: None}}, namespace=, defining_module=, yang_type=, is_config=True) except (TypeError, ValueError): raise ValueError({ : , : "list", : , }) self.__anycast_rp_ip = t if hasattr(self, ): self._set()
Setter method for anycast_rp_ip, mapped from YANG variable /routing_system/router/hide_pim_holder/pim/anycast_rp_ip (list) If this variable is read-only (config: false) in the source YANG file, then _set_anycast_rp_ip is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_anycast_rp_ip() directly.
27,370
def register_instances(name, instances, region=None, key=None, keyid=None, profile=None): ret = {: name, : True, : , : {}} lb = __salt__[](name, region, key, keyid, profile) if not lb: msg = .format(name) log.error(msg) ret.update({: msg, : False}) return ret health = __salt__[]( name, region, key, keyid, profile) nodes = [value[] for value in health if value[] != ] new = [value for value in instances if value not in nodes] if not new: msg = .format(six.text_type(instances).strip()) log.debug(msg) ret.update({: msg}) return ret if __opts__[]: ret[] = .format(name, new) ret[] = None return ret state = __salt__[]( name, instances, region, key, keyid, profile) if state: msg = .format(name) log.info(msg) new = set().union(nodes, instances) ret.update({: msg, : {: .join(nodes), : .join(list(new))}}) else: msg = .format(name) log.error(msg) ret.update({: msg, : False}) return ret
Add EC2 instance(s) to an Elastic Load Balancer. Removing an instance from the ``instances`` list does not remove it from the ELB. name The name of the Elastic Load Balancer to add EC2 instances to. instances A list of EC2 instance IDs that this Elastic Load Balancer should distribute traffic to. This state will only ever append new instances to the ELB. EC2 instances already associated with this ELB will not be removed if they are not in the ``instances`` list. .. versionadded:: 2015.8.0 .. code-block:: yaml add-instances: boto_elb.register_instances: - name: myloadbalancer - instances: - instance-id1 - instance-id2
27,371
def await_any_transforms_exist(cli, transform_paths, does_exist=DEFAULT_TRANSFORM_EXISTS, timeout_seconds=DEFAULT_TIMEOUT_SECONDS): message_payload = { "transform_paths": transform_paths, "do_exist": does_exist, "match_mode": "Any", "timeout": timeout_seconds } msg = message.Message("await.unity.transform.exists", message_payload) cli.send_message(msg) response = cli.read_message() verify_response(response) return bool(response[][])
Waits for a transform to exist based on does_exist. :param cli: :param transform_paths: An array of transform paths [...] :param does_exist: Whether or not to await for exist state (True | False) :param timeout_seconds: How long until this returns with failure :return: bool
27,372
def set_cte(self, cte_id, sql):
    # The stripped dict keys are assumed to be 'id' and 'sql'.
    for cte in self.extra_ctes:
        if cte['id'] == cte_id:
            cte['sql'] = sql
            break
    else:
        self.extra_ctes.append({'id': cte_id, 'sql': sql})
This is the equivalent of what self.extra_ctes[cte_id] = sql would do if extra_ctes were an OrderedDict
27,373
def _search(self, searchterm, pred, **args): searchterm = searchterm.replace(,) namedGraph = get_named_graph(self.handle) query = .format(pred=pred, s=searchterm, g=namedGraph) bindings = run_sparql(query) return [r[][] for r in bindings]
Search for things using labels
27,374
def _proxy(self):
    if self._context is None:
        self._context = FieldTypeContext(
            self._version,
            assistant_sid=self._solution['assistant_sid'],
            sid=self._solution['sid'],
        )
    return self._context
Generate an instance context for the instance, the context is capable of performing various actions. All instance actions are proxied to the context :returns: FieldTypeContext for this FieldTypeInstance :rtype: twilio.rest.autopilot.v1.assistant.field_type.FieldTypeContext
27,375
def run(self, module, options):
    logger.debug("Running halstead harvester")
    results = {}
    for filename, details in dict(self.harvester.results).items():
        results[filename] = {}
        for instance in details:
            if isinstance(instance, list):
                for item in instance:
                    function, report = item
                    results[filename][function] = self._report_to_dict(report)
            else:
                if isinstance(instance, str) and instance == "error":
                    # The stripped key is assumed to be 'error'.
                    logger.warning(
                        f"Failed to run Halstead harvester on {filename} : "
                        f"{details['error']}"
                    )
                    continue
                results[filename] = self._report_to_dict(instance)
    return results
Run the operator. :param module: The target module path. :type module: ``str`` :param options: Any runtime options. :type options: ``dict`` :return: The operator results. :rtype: ``dict``
27,376
def create_osd(conn, cluster, data, journal, zap, fs_type, dmcrypt,
               dmcrypt_dir, storetype, block_wal, block_db, **kw):
    # Stripped literals (executable name, CLI flags, log messages) are
    # reconstructed from ceph-volume command-line conventions and may not
    # match the original source exactly.
    ceph_volume_executable = system.executable_path(conn, 'ceph-volume')
    args = [
        ceph_volume_executable,
        '--cluster', cluster,
        'lvm', 'create',
        '--%s' % storetype,
        '--data', data,
    ]
    if zap:
        LOG.warning('zap flag is no longer supported, it will be ignored')
    if dmcrypt:
        args.append('--dmcrypt')
        LOG.warning('dmcrypt_dir is ignored; keys are handled by ceph-volume')
    if storetype == 'bluestore':
        if block_wal:
            args.append('--block.wal')
            args.append(block_wal)
        if block_db:
            args.append('--block.db')
            args.append(block_db)
    elif storetype == 'filestore':
        if not journal:
            raise RuntimeError('A journal must be specified when using filestore')
        args.append('--journal')
        args.append(journal)
    if kw.get('debug'):
        remoto.process.run(conn, args, extend_env={'CEPH_VOLUME_DEBUG': '1'})
    else:
        remoto.process.run(conn, args)
Run on osd node, creates an OSD from a data disk.
27,377
def drop_namespaces(self): self.session.query(NamespaceEntry).delete() self.session.query(Namespace).delete() self.session.commit()
Drop all namespaces.
27,378
def likelihood(self, x, cl): logger.debug("likel " + str(x.shape)) if self.modelparams["type"] == "gmmsame": px = self.mdl[cl].score_samples(x) elif self.modelparams["type"] == "kernel": px = self.mdl[cl].score_samples(x) elif self.modelparams["type"] == "gaussian_kde": px = np.log(self.mdl[cl](x)) elif self.modelparams["type"] == "dpgmm": logger.warning(".score() replaced with .score_samples() . Check it.") px = self.mdl[cl].score_samples(x * 0.01) elif self.modelparams["type"] == "stored": px = self.mdl[cl].score(x) return px
X = numpy.random.random([2,3,4]) # we have 2x3 data with a feature vector of 4 features Use likelihoodFromImage() function for 3d image input m.likelihood(X,0)
27,379
def get_context_data(self, **kwargs): context = super(ForumView, self).get_context_data(**kwargs) context[] = self.get_forum() context[] = ForumVisibilityContentTree.from_forums( self.request.forum_permission_handler.forum_list_filter( context[].get_descendants(), self.request.user, ), ) context[] = list( self.get_forum() .topics.select_related(, , ) .filter(type=Topic.TOPIC_ANNOUNCE) ) context[] = TrackingHandler(self.request).get_unread_topics( list(context[self.context_object_name]) + context[], self.request.user, ) return context
Returns the context data to provide to the template.
27,380
def start_plasma_store(stdout_file=None, stderr_file=None, object_store_memory=None, plasma_directory=None, huge_pages=False, plasma_store_socket_name=None): object_store_memory, plasma_directory = determine_plasma_store_config( object_store_memory, plasma_directory, huge_pages) if object_store_memory < ray_constants.OBJECT_STORE_MINIMUM_MEMORY_BYTES: raise ValueError("Attempting to cap object store memory usage at {} " "bytes, but the minimum allowed is {} bytes.".format( object_store_memory, ray_constants.OBJECT_STORE_MINIMUM_MEMORY_BYTES)) object_store_memory_str = (object_store_memory / 10**7) / 10**2 logger.info("Starting the Plasma object store with {} GB memory " "using {}.".format( round(object_store_memory_str, 2), plasma_directory)) process_info = _start_plasma_store( object_store_memory, use_profiler=RUN_PLASMA_STORE_PROFILER, stdout_file=stdout_file, stderr_file=stderr_file, plasma_directory=plasma_directory, huge_pages=huge_pages, socket_name=plasma_store_socket_name) return process_info
This method starts an object store process. Args: stdout_file: A file handle opened for writing to redirect stdout to. If no redirection should happen, then this should be None. stderr_file: A file handle opened for writing to redirect stderr to. If no redirection should happen, then this should be None. object_store_memory: The amount of memory (in bytes) to start the object store with. plasma_directory: A directory where the Plasma memory mapped files will be created. huge_pages: Boolean flag indicating whether to start the Object Store with hugetlbfs support. Requires plasma_directory. Returns: ProcessInfo for the process that was started.
27,381
def d2logpdf_dlink2(self, link_f, y, Y_metadata=None):
    c = np.zeros_like(y)
    # The stripped metadata key is assumed to be 'censored'.
    if Y_metadata is not None and 'censored' in Y_metadata.keys():
        c = Y_metadata['censored']
    uncensored = (1 - c) * (1 / link_f**2 - 2 * y**self.r / link_f**3)
    censored = -c * 2 * y**self.r / link_f**3
    hess = uncensored + censored
    return hess
Hessian at y, given link(f), w.r.t link(f) i.e. second derivative logpdf at y given link(f_i) and link(f_j) w.r.t link(f_i) and link(f_j) The hessian will be 0 unless i == j .. math:: \\frac{d^{2} \\ln p(y_{i}|\lambda(f_{i}))}{d^{2}\\lambda(f)} = -\\beta^{2}\\frac{d\\Psi(\\alpha_{i})}{d\\alpha_{i}}\\\\ \\alpha_{i} = \\beta y_{i} :param link_f: latent variables link(f) :type link_f: Nx1 array :param y: data :type y: Nx1 array :param Y_metadata: Y_metadata which is not used in gamma distribution :returns: Diagonal of hessian matrix (second derivative of likelihood evaluated at points f) :rtype: Nx1 array .. Note:: Will return diagonal of hessian, since every where else it is 0, as the likelihood factorizes over cases (the distribution for y_i depends only on link(f_i) not on link(f_(j!=i))
27,382
def _box_click(self, event): x, y, widget = event.x, event.y, event.widget elem = widget.identify("element", x, y) if "image" in elem: item = self.identify_row(y) if self.tag_has("unchecked", item) or self.tag_has("tristate", item): self._check_ancestor(item) self._check_descendant(item) else: self._uncheck_descendant(item) self._uncheck_ancestor(item)
Check or uncheck box when clicked.
27,383
def calculate_size(name, permits, timeout): data_size = 0 data_size += calculate_size_str(name) data_size += INT_SIZE_IN_BYTES data_size += LONG_SIZE_IN_BYTES return data_size
Calculates the request payload size
27,384
def new_context(self, vars=None, shared=False, locals=None): return new_context(self.environment, self.name, self.blocks, vars, shared, self.globals, locals)
Create a new :class:`Context` for this template. The vars provided will be passed to the template. Per default the globals are added to the context. If shared is set to `True` the data is passed as it to the context without adding the globals. `locals` can be a dict of local variables for internal usage.
27,385
def read_sample(filename):
    file = open(filename, 'r')
    sample = [[float(val) for val in line.split()]
              for line in file if len(line.strip()) > 0]
    file.close()
    return sample
! @brief Returns data sample from simple text file. @details This function should be used for text file with following format: @code point_1_coord_1 point_1_coord_2 ... point_1_coord_n point_2_coord_1 point_2_coord_2 ... point_2_coord_n ... ... @endcode @param[in] filename (string): Path to file with data. @return (list) Points where each point represented by list of coordinates.
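A usage sketch with a small whitespace-separated file (path and contents are illustrative):

    # sample.txt contains:
    # 1.0 2.0
    # 3.0 4.0
    points = read_sample('sample.txt')
    print(points)  # [[1.0, 2.0], [3.0, 4.0]]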
27,386
def has_scheduled_methods(cls):
    # The stripped attribute name is assumed to be '__wrapped__', matching
    # the assignment on the next line.
    for member in cls.__dict__.values():
        if hasattr(member, '__wrapped__'):
            member.__wrapped__.__member_of__ = cls
    return cls
Decorator; use this on a class for which some methods have been decorated with :func:`schedule` or :func:`schedule_hint`. Those methods are then tagged with the attribute `__member_of__`, so that we may serialise and retrieve the correct method. This should be considered a patch to a flaw in the Python object model.
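A self-contained usage sketch with a stand-in schedule decorator (the real one differs, but anything that sets __wrapped__, as functools.wraps does, triggers the tagging):

    import functools

    def schedule(fn):                      # stand-in for the real decorator
        @functools.wraps(fn)               # sets __wrapped__ on the wrapper
        def wrapper(*args, **kwargs):
            return fn(*args, **kwargs)
        return wrapper

    @has_scheduled_methods
    class Pipeline:
        @schedule
        def step(self, x):
            return x + 1

    assert Pipeline.step.__wrapped__.__member_of__ is Pipeline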
27,387
def tune(device, **kwargs):
    kwarg_map = {'read-ahead': 'setra',
                 'filesystem-read-ahead': 'setfra',
                 'read-only': 'setro',
                 'read-write': 'setrw'}
    opts = ''
    args = []
    for key in kwargs:
        if key in kwarg_map:
            switch = kwarg_map[key]
            if key != 'read-write':
                args.append(switch.replace('set', 'get'))
            else:
                args.append('getro')
            if kwargs[key] == 'True' or kwargs[key] is True:
                opts += '--{0} '.format(key)
            else:
                opts += '--{0} {1} '.format(switch, kwargs[key])
    cmd = 'blockdev {0}{1}'.format(opts, device)
    out = __salt__['cmd.run'](cmd, python_shell=False).splitlines()
    return dump(device, args)
Set attributes for the specified device CLI Example: .. code-block:: bash salt '*' disk.tune /dev/sda1 read-ahead=1024 read-write=True Valid options are: ``read-ahead``, ``filesystem-read-ahead``, ``read-only``, ``read-write``. See the ``blockdev(8)`` manpage for a more complete description of these options.
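Called from Python inside a salt execution context rather than from the CLI, the same invocation looks like this (a sketch only; the __salt__ loader must be available):

    # equivalent of: salt '*' disk.tune /dev/sda1 read-ahead=1024
    result = tune('/dev/sda1', **{'read-ahead': 1024})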
27,388
def to_aws_format(tags):
    if TAG_RAY_NODE_NAME in tags:
        tags["Name"] = tags[TAG_RAY_NODE_NAME]
        del tags[TAG_RAY_NODE_NAME]
    return tags
Convert the Ray node name tag to the AWS-specific 'Name' tag.
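For illustration (the value of the tag constant is an assumption):

    TAG_RAY_NODE_NAME = 'ray-node-name'    # assumed constant value
    tags = {TAG_RAY_NODE_NAME: 'ray-head', 'team': 'infra'}
    print(to_aws_format(tags))             # {'Name': 'ray-head', 'team': 'infra'}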
27,389
def connect(self):
    if self.token:
        self.phab_session = {'token': self.token}
        return
    req = self.req_session.post('%s/api/conduit.connect' % self.host, data={
        'params': json.dumps(self.connect_params),
        'output': 'json',
        '__conduit__': True,
    })
    # conduit.connect returns a session key and connection id
    result = req.json()['result']
    self.phab_session = {
        'sessionKey': result['sessionKey'],
        'connectionID': result['connectionID'],
    }
Sets up your Phabricator session; it's not necessary to call this directly.
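A usage sketch with the python-phabricator client (host and token are placeholders; exact constructor arguments may differ by version):

    from phabricator import Phabricator

    phab = Phabricator(host='https://phab.example.com/api/',
                       token='api-PLACEHOLDER')
    phab.user.whoami()   # connect() is invoked lazily on the first call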
27,390
def get_token(request):
    if (not request.META.get(header_name_to_django(auth_token_settings.HEADER_NAME))
            and config.CHAMBER_MULTIDOMAINS_OVERTAKER_AUTH_COOKIE_NAME):
        overtaker_auth_token = request.COOKIES.get(
            config.CHAMBER_MULTIDOMAINS_OVERTAKER_AUTH_COOKIE_NAME)
        token = get_object_or_none(Token, key=overtaker_auth_token, is_active=True)
        if utils.get_user_from_token(token).is_authenticated():
            return token
    return utils.get_token(request)
Returns the token model instance associated with the given request token key. If no user is retrieved, AnonymousToken is returned.
27,391
def size(self):
    return sum(q.qsize() for q in self._connections.values()) + len(self._fairies)
Returns the number of connections cached by the pool.
27,392
def _parse_broadcast(self, msg):
    return self._unescape(self._get_type(msg[self.broadcast_prefix_len:]))
Given a broadcast message, returns the message that was broadcast.
27,393
def get_random_submission(self, subreddit='all'):
    url = self.config['subreddit_random'].format(
        subreddit=six.text_type(subreddit))
    try:
        item = self.request_json(url, params={'unique': self._unique_count})
        self._unique_count += 1   # avoid cached responses
        return objects.Submission.from_json(item)
    except errors.RedirectException as exc:
        self._unique_count += 1
        return self.get_submission(exc.response_url)
    raise errors.ClientException('Expected exception not raised.')
Return a random Submission object. :param subreddit: Limit the submission to the specified subreddit(s). Default: all
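A usage sketch with the PRAW 3.x-style client this method comes from (the user agent is a placeholder):

    import praw

    r = praw.Reddit(user_agent='random-demo/0.1')
    submission = r.get_random_submission(subreddit='python')
    print(submission.title)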
27,394
@inlineCallbacks
def send_produce_request(self, payloads=None, acks=1,
                         timeout=DEFAULT_REPLICAS_ACK_MSECS,
                         fail_on_error=True, callback=None):
    encoder = partial(
        KafkaCodec.encode_produce_request, acks=acks, timeout=timeout)
    if acks == 0:
        decoder = None   # acks=0: fire and forget, no response to decode
    else:
        decoder = KafkaCodec.decode_produce_response
    resps = yield self._send_broker_aware_request(
        payloads, encoder, decoder)
    returnValue(self._handle_responses(resps, fail_on_error, callback))
Encode and send some ProduceRequests ProduceRequests will be grouped by (topic, partition) and then sent to a specific broker. Output is a list of responses in the same order as the list of payloads specified Parameters ---------- payloads: list of ProduceRequest acks: How many Kafka broker replicas need to write before the leader replies with a response timeout: How long the server has to receive the acks from the replicas before returning an error. fail_on_error: boolean, should we raise an Exception if we encounter an API error? callback: function, instead of returning the ProduceResponse, first pass it through this function Return ------ a deferred which callbacks with a list of ProduceResponse Raises ------ FailedPayloadsError, LeaderUnavailableError, PartitionUnavailableError
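A hedged usage sketch in a Twisted inlineCallbacks coroutine (afkak-style names assumed; broker address and topic are placeholders):

    from afkak import KafkaClient, create_message
    from afkak.common import ProduceRequest
    from twisted.internet.defer import inlineCallbacks

    @inlineCallbacks
    def produce_one():
        client = KafkaClient('localhost:9092')
        payload = ProduceRequest('my-topic', 0, [create_message(b'hello')])
        responses = yield client.send_produce_request([payload], acks=1)
        print(responses)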
27,395
def str_repl(self, inputstring, **kwargs):
    out = []
    comment = None
    string = None
    for i, c in enumerate(append_it(inputstring, None)):
        try:
            if comment is not None:
                if c is not None and c in nums:
                    comment += c
                elif c == unwrapper and comment:
                    ref = self.get_ref("comment", comment)
                    if out and not out[-1].endswith("\n"):
                        out[-1] = out[-1].rstrip(" ")
                        if not self.minify:
                            out[-1] += " "
                    out.append("#" + ref)
                    comment = None
                else:
                    raise CoconutInternalException("invalid comment marker in", getline(i, inputstring))
            elif string is not None:
                if c is not None and c in nums:
                    string += c
                elif c == unwrapper and string:
                    text, strchar = self.get_ref("str", string)
                    out.append(strchar + text + strchar)
                    string = None
                else:
                    raise CoconutInternalException("invalid string marker in", getline(i, inputstring))
            elif c is not None:
                if c == "#":
                    comment = ""
                elif c == strwrapper:
                    string = ""
                else:
                    out.append(c)
        except CoconutInternalException as err:
            complain(err)
            if comment is not None:
                out.append(comment)
                comment = None
            if string is not None:
                out.append(string)
                string = None
            out.append(c)
    return "".join(out)
Add back strings.
27,396
def mklink():
    from optparse import OptionParser
    parser = OptionParser(usage="usage: %prog [options] link target")
    parser.add_option('-d', '--directory',
                      help="Target is a directory (only necessary if not present)",
                      action="store_true")
    options, args = parser.parse_args()
    try:
        link, target = args
    except ValueError:
        parser.error("incorrect number of arguments")
    symlink(target, link, options.directory)
    sys.stdout.write("Symbolic link created: %(link)s --> %(target)s\n" % vars())
Like cmd.exe's mklink except it will infer directory status of the target.
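A usage sketch simulating the command line in Python (link and target names are placeholders):

    import sys
    sys.argv = ['mklink', '-d', 'my-link', 'some-target-dir']  # simulate CLI args
    mklink()   # prints: Symbolic link created: my-link --> some-target-dir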
27,397
def _query_entities(self, table_name, filter=None, select=None, max_results=None,
                    marker=None, accept=TablePayloadFormat.JSON_MINIMAL_METADATA,
                    property_resolver=None, timeout=None):
    _validate_not_none('table_name', table_name)
    _validate_not_none('accept', accept)
    next_partition_key = None if marker is None else marker.get('nextpartitionkey')
    next_row_key = None if marker is None else marker.get('nextrowkey')

    request = HTTPRequest()
    request.method = 'GET'
    request.host = self._get_host()
    request.path = '/' + _to_str(table_name) + '()'
    request.headers = [('Accept', _to_str(accept))]
    request.query = [
        ('$filter', _to_str(filter)),
        ('$select', _to_str(select)),
        ('$top', _int_to_str(max_results)),
        ('NextPartitionKey', _to_str(next_partition_key)),
        ('NextRowKey', _to_str(next_row_key)),
        ('timeout', _int_to_str(timeout)),
    ]

    response = self._perform_request(request)
    return _convert_json_response_to_entities(response, property_resolver)
Returns a list of entities under the specified table. Makes a single list request to the service. Used internally by the query_entities method.

:param str table_name: The name of the table to query.
:param str filter: Returns only entities that satisfy the specified filter. Note that no more than 15 discrete comparisons are permitted within a $filter string. See http://msdn.microsoft.com/en-us/library/windowsazure/dd894031.aspx for more information on constructing filters.
:param str select: Returns only the desired properties of an entity from the set.
:param int max_results: The maximum number of entities to return.
:param marker: A dictionary which identifies the portion of the query to be returned with the next query operation. The operation returns a next_marker element within the response body if the list returned was not complete. This value may then be used as a query parameter in a subsequent call to request the next portion of the list of entities. The marker value is opaque to the client.
:type marker: obj
:param str accept: Specifies the accepted content type of the response payload. See :class:`~azure.storage.table.models.TablePayloadFormat` for possible values.
:param property_resolver: A function which given the partition key, row key, property name, property value, and the property EdmType if returned by the service, returns the EdmType of the property. Generally used if accept is set to JSON_NO_METADATA.
:type property_resolver: callback function in format of func(pk, rk, prop_name, prop_value, service_edm_type)
:param int timeout: The server timeout, expressed in seconds.
:return: A list of entities, potentially with a next_marker property.
:rtype: list of :class:`~azure.storage.table.models.Entity`
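This private helper backs the public query_entities call; a usage sketch through that public surface (legacy azure-storage Table SDK assumed; account credentials are placeholders):

    from azure.storage.table import TableService

    ts = TableService(account_name='myaccount', account_key='PLACEHOLDER')
    entities = ts.query_entities('mytable',
                                 filter="PartitionKey eq 'p1'",
                                 select='RowKey',
                                 num_results=100)
    for entity in entities:
        print(entity.RowKey)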
27,398
def get_src_address_from_data(self, decoded=True):
    src_address_label = next((lbl for lbl in self.message_type
                              if lbl.field_type
                              and lbl.field_type.function == FieldType.Function.SRC_ADDRESS),
                             None)
    if src_address_label:
        start, end = self.get_label_range(src_address_label, view=1, decode=decoded)
        if decoded:
            src_address = self.decoded_hex_str[start:end]
        else:
            src_address = self.plain_hex_str[start:end]
    else:
        src_address = None
    return src_address
Return the SRC address of a message if a SRC_ADDRESS label is present in the message type of the message. Return None otherwise. :param decoded: :return:
27,399
async def get_suggested_entities(self, get_suggested_entities_request):
    response = hangouts_pb2.GetSuggestedEntitiesResponse()
    await self._pb_request('contacts/getsuggestedentities',
                           get_suggested_entities_request, response)
    return response
Return suggested contacts.
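A hedged async usage sketch (hangups-style client assumed; request-header plumbing simplified):

    import hangups

    async def show_suggestions(client):
        request = hangups.hangouts_pb2.GetSuggestedEntitiesRequest(
            request_header=client.get_request_header())
        response = await client.get_suggested_entities(request)
        print(response)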